Diffstat (limited to 'drivers'): 751 files changed, 84105 insertions, 12410 deletions
diff --git a/drivers/Makefile b/drivers/Makefile index f42a03029b7c..91874e048552 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_PARISC) += parisc/ obj-$(CONFIG_RAPIDIO) += rapidio/ obj-y += video/ +obj-y += idle/ obj-$(CONFIG_ACPI) += acpi/ obj-$(CONFIG_SFI) += sfi/ # PnP must come after ACPI since it will eventually need to check if acpi @@ -91,7 +92,6 @@ obj-$(CONFIG_EISA) += eisa/ obj-y += lguest/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_CPU_IDLE) += cpuidle/ -obj-y += idle/ obj-$(CONFIG_MMC) += mmc/ obj-$(CONFIG_MEMSTICK) += memstick/ obj-$(CONFIG_NEW_LEDS) += leds/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 93d2c7971df6..746411518802 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -360,4 +360,13 @@ config ACPI_SBS To compile this driver as a module, choose M here: the modules will be called sbs and sbshc. +config ACPI_HED + tristate "Hardware Error Device" + help + This driver supports the Hardware Error Device (PNP0C33), + which is used to report some hardware errors notified via + SCI, mainly the corrected errors. + +source "drivers/acpi/apei/Kconfig" + endif # ACPI diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index a8d8998dd5c5..6ee33169e1dc 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -19,7 +19,7 @@ obj-y += acpi.o \ # All the builtin files are in the "acpi." module_param namespace. acpi-y += osl.o utils.o reboot.o -acpi-y += hest.o +acpi-y += atomicio.o # sleep related files acpi-y += wakeup.o @@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o obj-$(CONFIG_ACPI_SBS) += sbshc.o obj-$(CONFIG_ACPI_SBS) += sbs.o obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o +obj-$(CONFIG_ACPI_HED) += hed.o # processor has its own "processor." 
module_param namespace processor-y := processor_driver.o processor_throttling.o @@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o + +obj-$(CONFIG_ACPI_APEI) += apei/ diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 62122134693b..d269a8f3329c 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock); #define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) #define CPUID5_ECX_INTERRUPT_BREAK (0x2) static unsigned long power_saving_mwait_eax; + +static unsigned char tsc_detected_unstable; +static unsigned char tsc_marked_unstable; + static void power_saving_mwait_init(void) { unsigned int eax, ebx, ecx, edx; @@ -87,8 +91,8 @@ static void power_saving_mwait_init(void) /*FALL THROUGH*/ default: - /* TSC could halt in idle, so notify users */ - mark_tsc_unstable("TSC halts in idle"); + /* TSC could halt in idle */ + tsc_detected_unstable = 1; } #endif } @@ -168,16 +172,14 @@ static int power_saving_thread(void *data) do_sleep = 0; - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we test - * NEED_RESCHED: - */ - smp_mb(); - expire_time = jiffies + HZ * (100 - idle_pct) / 100; while (!need_resched()) { + if (tsc_detected_unstable && !tsc_marked_unstable) { + /* TSC could halt in idle, so notify users */ + mark_tsc_unstable("TSC halts in idle"); + tsc_marked_unstable = 1; + } local_irq_disable(); cpu = smp_processor_id(); clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, @@ -200,8 +202,6 @@ static int power_saving_thread(void *data) } } - current_thread_info()->status |= TS_POLLING; - /* * current sched_rt has threshold for rt task running time. 
* When a rt task uses 95% CPU time, the rt thread will be diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 7c7bbb4d402c..d5a5efc043bf 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, acpi_status acpi_enable(void) { - acpi_status status = AE_OK; + acpi_status status; ACPI_FUNCTION_TRACE(acpi_enable); @@ -84,21 +84,30 @@ acpi_status acpi_enable(void) if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { ACPI_DEBUG_PRINT((ACPI_DB_INIT, "System is already in ACPI mode\n")); - } else { - /* Transition to ACPI mode */ + return_ACPI_STATUS(AE_OK); + } - status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); - if (ACPI_FAILURE(status)) { - ACPI_ERROR((AE_INFO, - "Could not transition to ACPI mode")); - return_ACPI_STATUS(status); - } + /* Transition to ACPI mode */ - ACPI_DEBUG_PRINT((ACPI_DB_INIT, - "Transition to ACPI mode successful\n")); + status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); + if (ACPI_FAILURE(status)) { + ACPI_ERROR((AE_INFO, + "Could not transition to ACPI mode")); + return_ACPI_STATUS(status); } - return_ACPI_STATUS(status); + /* Sanity check that transition succeeded */ + + if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) { + ACPI_ERROR((AE_INFO, + "Hardware did not enter ACPI mode")); + return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); + } + + ACPI_DEBUG_PRINT((ACPI_DB_INIT, + "Transition to ACPI mode successful\n")); + + return_ACPI_STATUS(AE_OK); } ACPI_EXPORT_SYMBOL(acpi_enable) diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index 679a112a7d26..b44274a0b62c 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode) { acpi_status status; - u32 retry; ACPI_FUNCTION_TRACE(hw_set_mode); @@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode) return_ACPI_STATUS(status); } - /* - * Some hardware takes a LONG time to switch modes. Give them 3 sec to - * do so, but allow faster systems to proceed more quickly. - */ - retry = 3000; - while (retry) { - if (acpi_hw_get_mode() == mode) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Mode %X successfully enabled\n", - mode)); - return_ACPI_STATUS(AE_OK); - } - acpi_os_stall(1000); - retry--; - } - - ACPI_ERROR((AE_INFO, "Hardware did not change modes")); - return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); + return_ACPI_STATUS(AE_OK); } /******************************************************************************* diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig new file mode 100644 index 000000000000..f8c668f27b5a --- /dev/null +++ b/drivers/acpi/apei/Kconfig @@ -0,0 +1,30 @@ +config ACPI_APEI + bool "ACPI Platform Error Interface (APEI)" + depends on X86 + help + APEI allows errors (for example from the chipset) to be + reported to the operating system. This especially improves + NMI handling. In addition, it supports error serialization + and error injection. + +config ACPI_APEI_GHES + tristate "APEI Generic Hardware Error Source" + depends on ACPI_APEI && X86 + select ACPI_HED + help + Generic Hardware Error Source provides a way to report + platform hardware errors (such as those from the chipset). + It works in the so-called "Firmware First" mode, that is, + hardware errors are first reported to the firmware, which + then reports them to Linux.
This way, non-standard hardware + error registers or non-standard hardware links can be checked + by the firmware to produce more valuable hardware error + information for Linux. + +config ACPI_APEI_EINJ + tristate "APEI Error INJection (EINJ)" + depends on ACPI_APEI && DEBUG_FS + help + EINJ provides a hardware error injection mechanism; it is + mainly used for debugging and testing the other parts of + APEI and some other RAS features. diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile new file mode 100644 index 000000000000..b13b03a17789 --- /dev/null +++ b/drivers/acpi/apei/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_ACPI_APEI) += apei.o +obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o +obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o + +apei-y := apei-base.o hest.o cper.o erst.o diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c new file mode 100644 index 000000000000..db3946e9c66b --- /dev/null +++ b/drivers/acpi/apei/apei-base.c @@ -0,0 +1,593 @@ +/* + * apei-base.c - ACPI Platform Error Interface (APEI) supporting + * infrastructure + * + * APEI allows errors (for example from the chipset) to be reported to + * the operating system. This especially improves NMI handling. In + * addition, it supports error serialization and error injection. + * + * For more information about APEI, please refer to ACPI Specification + * version 4.0, chapter 17. + * + * This file contains common functions used by more than one APEI + * table, including the interpreter framework for ERST and EINJ and + * the resource management for APEI registers. + * + * Copyright (C) 2009, Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/io.h> +#include <linux/kref.h> +#include <linux/rculist.h> +#include <linux/interrupt.h> +#include <linux/debugfs.h> +#include <acpi/atomicio.h> + +#include "apei-internal.h" + +#define APEI_PFX "APEI: " + +/* + * APEI ERST (Error Record Serialization Table) and EINJ (Error + * INJection) interpreter framework.
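+ * + * A caller binds an instruction table and an action table into a + * context, seeds the input value, runs one action, and reads the + * result back. A minimal usage sketch built from the helpers below + * (illustration only, not code from this patch): + * + * apei_exec_ctx_init(&ctx, ins_table, n_ins, action_table, n_entries); + * apei_exec_ctx_set_input(&ctx, 0); + * rc = apei_exec_run(&ctx, action); + * val = apei_exec_ctx_get_output(&ctx);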
+ */ + +#define APEI_EXEC_PRESERVE_REGISTER 0x1 + +void apei_exec_ctx_init(struct apei_exec_context *ctx, + struct apei_exec_ins_type *ins_table, + u32 instructions, + struct acpi_whea_header *action_table, + u32 entries) +{ + ctx->ins_table = ins_table; + ctx->instructions = instructions; + ctx->action_table = action_table; + ctx->entries = entries; +} +EXPORT_SYMBOL_GPL(apei_exec_ctx_init); + +int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val) +{ + int rc; + + rc = acpi_atomic_read(val, &entry->register_region); + if (rc) + return rc; + *val >>= entry->register_region.bit_offset; + *val &= entry->mask; + + return 0; +} + +int apei_exec_read_register(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val = 0; + + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; + ctx->value = val; + + return 0; +} +EXPORT_SYMBOL_GPL(apei_exec_read_register); + +int apei_exec_read_register_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + + rc = apei_exec_read_register(ctx, entry); + if (rc) + return rc; + ctx->value = (ctx->value == entry->value); + + return 0; +} +EXPORT_SYMBOL_GPL(apei_exec_read_register_value); + +int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val) +{ + int rc; + + val &= entry->mask; + val <<= entry->register_region.bit_offset; + if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) { + u64 valr = 0; + rc = acpi_atomic_read(&valr, &entry->register_region); + if (rc) + return rc; + valr &= ~(entry->mask << entry->register_region.bit_offset); + val |= valr; + } + rc = acpi_atomic_write(val, &entry->register_region); + + return rc; +} + +int apei_exec_write_register(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_write_register(entry, ctx->value); +} +EXPORT_SYMBOL_GPL(apei_exec_write_register); + +int apei_exec_write_register_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + + ctx->value = entry->value; + rc = apei_exec_write_register(ctx, entry); + + return rc; +} +EXPORT_SYMBOL_GPL(apei_exec_write_register_value); + +int apei_exec_noop(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return 0; +} +EXPORT_SYMBOL_GPL(apei_exec_noop); + +/* + * Interpret the specified action. Go through the whole action table + * and execute all instructions belonging to the action. + */ +int apei_exec_run(struct apei_exec_context *ctx, u8 action) +{ + int rc; + u32 i, ip; + struct acpi_whea_header *entry; + apei_exec_ins_func_t run; + + ctx->ip = 0; + + /* + * "ip" is the instruction pointer of the current instruction, + * "ctx->ip" specifies the next instruction to be executed, and + * an instruction's "run" function may change "ctx->ip" to + * implement "goto" semantics.
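+ * + * For example, a jump-style instruction such as erst_exec_goto() in + * erst.c sets ctx->ip to the target instruction index and returns + * APEI_EXEC_SET_IP; the loop below then rewinds and re-scans the + * action's entries until the running index catches up with ctx->ip.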
+ */ +rewind: + ip = 0; + for (i = 0; i < ctx->entries; i++) { + entry = &ctx->action_table[i]; + if (entry->action != action) + continue; + if (ip == ctx->ip) { + if (entry->instruction >= ctx->instructions || + !ctx->ins_table[entry->instruction].run) { + pr_warning(FW_WARN APEI_PFX + "Invalid action table, unknown instruction type: %d\n", + entry->instruction); + return -EINVAL; + } + run = ctx->ins_table[entry->instruction].run; + rc = run(ctx, entry); + if (rc < 0) + return rc; + else if (rc != APEI_EXEC_SET_IP) + ctx->ip++; + } + ip++; + if (ctx->ip < ip) + goto rewind; + } + + return 0; +} +EXPORT_SYMBOL_GPL(apei_exec_run); + +typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx, + struct acpi_whea_header *entry, + void *data); + +static int apei_exec_for_each_entry(struct apei_exec_context *ctx, + apei_exec_entry_func_t func, + void *data, + int *end) +{ + u8 ins; + int i, rc; + struct acpi_whea_header *entry; + struct apei_exec_ins_type *ins_table = ctx->ins_table; + + for (i = 0; i < ctx->entries; i++) { + entry = ctx->action_table + i; + ins = entry->instruction; + if (end) + *end = i; + if (ins >= ctx->instructions || !ins_table[ins].run) { + pr_warning(FW_WARN APEI_PFX + "Invalid action table, unknown instruction type: %d\n", + ins); + return -EINVAL; + } + rc = func(ctx, entry, data); + if (rc) + return rc; + } + + return 0; +} + +static int pre_map_gar_callback(struct apei_exec_context *ctx, + struct acpi_whea_header *entry, + void *data) +{ + u8 ins = entry->instruction; + + if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) + return acpi_pre_map_gar(&entry->register_region); + + return 0; +} + +/* + * Pre-map all GARs in action table to make it possible to access them + * in NMI handler. + */ +int apei_exec_pre_map_gars(struct apei_exec_context *ctx) +{ + int rc, end; + + rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback, + NULL, &end); + if (rc) { + struct apei_exec_context ctx_unmap; + memcpy(&ctx_unmap, ctx, sizeof(*ctx)); + ctx_unmap.entries = end; + apei_exec_post_unmap_gars(&ctx_unmap); + } + + return rc; +} +EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars); + +static int post_unmap_gar_callback(struct apei_exec_context *ctx, + struct acpi_whea_header *entry, + void *data) +{ + u8 ins = entry->instruction; + + if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) + acpi_post_unmap_gar(&entry->register_region); + + return 0; +} + +/* Post-unmap all GAR in action table. 
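Balances a successful call to apei_exec_pre_map_gars().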
*/ +int apei_exec_post_unmap_gars(struct apei_exec_context *ctx) +{ + return apei_exec_for_each_entry(ctx, post_unmap_gar_callback, + NULL, NULL); +} +EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars); + +/* + * Resource management for GARs in APEI + */ +struct apei_res { + struct list_head list; + unsigned long start; + unsigned long end; +}; + +/* Collect all resources requested, to avoid conflict */ +struct apei_resources apei_resources_all = { + .iomem = LIST_HEAD_INIT(apei_resources_all.iomem), + .ioport = LIST_HEAD_INIT(apei_resources_all.ioport), +}; + +static int apei_res_add(struct list_head *res_list, + unsigned long start, unsigned long size) +{ + struct apei_res *res, *resn, *res_ins = NULL; + unsigned long end = start + size; + + if (end <= start) + return 0; +repeat: + list_for_each_entry_safe(res, resn, res_list, list) { + if (res->start > end || res->end < start) + continue; + else if (end <= res->end && start >= res->start) { + kfree(res_ins); + return 0; + } + list_del(&res->list); + res->start = start = min(res->start, start); + res->end = end = max(res->end, end); + kfree(res_ins); + res_ins = res; + goto repeat; + } + + if (res_ins) + list_add(&res_ins->list, res_list); + else { + res_ins = kmalloc(sizeof(*res), GFP_KERNEL); + if (!res_ins) + return -ENOMEM; + res_ins->start = start; + res_ins->end = end; + list_add(&res_ins->list, res_list); + } + + return 0; +} + +static int apei_res_sub(struct list_head *res_list1, + struct list_head *res_list2) +{ + struct apei_res *res1, *resn1, *res2, *res; + res1 = list_entry(res_list1->next, struct apei_res, list); + resn1 = list_entry(res1->list.next, struct apei_res, list); + while (&res1->list != res_list1) { + list_for_each_entry(res2, res_list2, list) { + if (res1->start >= res2->end || + res1->end <= res2->start) + continue; + else if (res1->end <= res2->end && + res1->start >= res2->start) { + list_del(&res1->list); + kfree(res1); + break; + } else if (res1->end > res2->end && + res1->start < res2->start) { + res = kmalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + res->start = res2->end; + res->end = res1->end; + res1->end = res2->start; + list_add(&res->list, &res1->list); + resn1 = res; + } else { + if (res1->start < res2->start) + res1->end = res2->start; + else + res1->start = res2->end; + } + } + res1 = resn1; + resn1 = list_entry(resn1->list.next, struct apei_res, list); + } + + return 0; +} + +static void apei_res_clean(struct list_head *res_list) +{ + struct apei_res *res, *resn; + + list_for_each_entry_safe(res, resn, res_list, list) { + list_del(&res->list); + kfree(res); + } +} + +void apei_resources_fini(struct apei_resources *resources) +{ + apei_res_clean(&resources->iomem); + apei_res_clean(&resources->ioport); +} +EXPORT_SYMBOL_GPL(apei_resources_fini); + +static int apei_resources_merge(struct apei_resources *resources1, + struct apei_resources *resources2) +{ + int rc; + struct apei_res *res; + + list_for_each_entry(res, &resources2->iomem, list) { + rc = apei_res_add(&resources1->iomem, res->start, + res->end - res->start); + if (rc) + return rc; + } + list_for_each_entry(res, &resources2->ioport, list) { + rc = apei_res_add(&resources1->ioport, res->start, + res->end - res->start); + if (rc) + return rc; + } + + return 0; +} + +/* + * EINJ has two groups of GARs (EINJ table entries and trigger table + * entries), so common resources are subtracted from the trigger table + * resources before they are requested a second time.
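+ * + * For example, if the EINJ table itself has already requested the + * iomem range [0x1000, 0x1010) and the trigger table also touches + * [0x1008, 0x1018), only [0x1010, 0x1018) remains to be requested + * for the trigger table (made-up addresses, for illustration).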
+ */ +int apei_resources_sub(struct apei_resources *resources1, + struct apei_resources *resources2) +{ + int rc; + + rc = apei_res_sub(&resources1->iomem, &resources2->iomem); + if (rc) + return rc; + return apei_res_sub(&resources1->ioport, &resources2->ioport); +} +EXPORT_SYMBOL_GPL(apei_resources_sub); + +/* + * The IO memory/port resource management mechanism is used to check + * whether the memory/port areas used by GARs conflict with normal + * memory or with the IO memory/ports of devices. + */ +int apei_resources_request(struct apei_resources *resources, + const char *desc) +{ + struct apei_res *res, *res_bak; + struct resource *r; + + apei_resources_sub(resources, &apei_resources_all); + + list_for_each_entry(res, &resources->iomem, list) { + r = request_mem_region(res->start, res->end - res->start, + desc); + if (!r) { + pr_err(APEI_PFX + "Can not request iomem region <%016llx-%016llx> for GARs.\n", + (unsigned long long)res->start, + (unsigned long long)res->end); + res_bak = res; + goto err_unmap_iomem; + } + } + + list_for_each_entry(res, &resources->ioport, list) { + r = request_region(res->start, res->end - res->start, desc); + if (!r) { + pr_err(APEI_PFX + "Can not request ioport region <%016llx-%016llx> for GARs.\n", + (unsigned long long)res->start, + (unsigned long long)res->end); + res_bak = res; + goto err_unmap_ioport; + } + } + + apei_resources_merge(&apei_resources_all, resources); + + return 0; +err_unmap_ioport: + list_for_each_entry(res, &resources->ioport, list) { + if (res == res_bak) + break; + release_region(res->start, res->end - res->start); + } + res_bak = NULL; +err_unmap_iomem: + list_for_each_entry(res, &resources->iomem, list) { + if (res == res_bak) + break; + release_mem_region(res->start, res->end - res->start); + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(apei_resources_request); + +void apei_resources_release(struct apei_resources *resources) +{ + struct apei_res *res; + + list_for_each_entry(res, &resources->iomem, list) + release_mem_region(res->start, res->end - res->start); + list_for_each_entry(res, &resources->ioport, list) + release_region(res->start, res->end - res->start); + + apei_resources_sub(&apei_resources_all, resources); +} +EXPORT_SYMBOL_GPL(apei_resources_release); + +static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr) +{ + u32 width, space_id; + + width = reg->bit_width; + space_id = reg->space_id; + /* Handle possible alignment issues */ + memcpy(paddr, &reg->address, sizeof(*paddr)); + if (!*paddr) { + pr_warning(FW_BUG APEI_PFX + "Invalid physical address in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { + pr_warning(FW_BUG APEI_PFX + "Invalid bit width in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && + space_id != ACPI_ADR_SPACE_SYSTEM_IO) { + pr_warning(FW_BUG APEI_PFX + "Invalid address space type in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + return 0; +} + +static int collect_res_callback(struct apei_exec_context *ctx, + struct acpi_whea_header *entry, + void *data) +{ + struct apei_resources *resources = data; + struct acpi_generic_address *reg = &entry->register_region; + u8 ins = entry->instruction; + u64 paddr; + int rc; + + if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)) + return 0; + + rc = apei_check_gar(reg, &paddr); + if (rc) + return rc; + + switch (reg->space_id) { + case
ACPI_ADR_SPACE_SYSTEM_MEMORY: + return apei_res_add(&resources->iomem, paddr, + reg->bit_width / 8); + case ACPI_ADR_SPACE_SYSTEM_IO: + return apei_res_add(&resources->ioport, paddr, + reg->bit_width / 8); + default: + return -EINVAL; + } +} + +/* + * The same register may be used by multiple instructions in GARs, so + * resources are collected once before being requested. + */ +int apei_exec_collect_resources(struct apei_exec_context *ctx, + struct apei_resources *resources) +{ + return apei_exec_for_each_entry(ctx, collect_res_callback, + resources, NULL); +} +EXPORT_SYMBOL_GPL(apei_exec_collect_resources); + +struct dentry *apei_get_debugfs_dir(void) +{ + static struct dentry *dapei; + + if (!dapei) + dapei = debugfs_create_dir("apei", NULL); + + return dapei; +} +EXPORT_SYMBOL_GPL(apei_get_debugfs_dir); diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h new file mode 100644 index 000000000000..18df1e940276 --- /dev/null +++ b/drivers/acpi/apei/apei-internal.h @@ -0,0 +1,114 @@ +/* + * apei-internal.h - ACPI Platform Error Interface internal + * definitions. + */ + +#ifndef APEI_INTERNAL_H +#define APEI_INTERNAL_H + +#include <linux/cper.h> + +struct apei_exec_context; + +typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); + +#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001 + +struct apei_exec_ins_type { + u32 flags; + apei_exec_ins_func_t run; +}; + +struct apei_exec_context { + u32 ip; + u64 value; + u64 var1; + u64 var2; + u64 src_base; + u64 dst_base; + struct apei_exec_ins_type *ins_table; + u32 instructions; + struct acpi_whea_header *action_table; + u32 entries; +}; + +void apei_exec_ctx_init(struct apei_exec_context *ctx, + struct apei_exec_ins_type *ins_table, + u32 instructions, + struct acpi_whea_header *action_table, + u32 entries); + +static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx, + u64 input) +{ + ctx->value = input; +} + +static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx) +{ + return ctx->value; +} + +int apei_exec_run(struct apei_exec_context *ctx, u8 action); + +/* Common instruction implementation */ + +/* IP has been set in instruction function */ +#define APEI_EXEC_SET_IP 1 + +int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val); +int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val); +int apei_exec_read_register(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_read_register_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_write_register(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_write_register_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_noop(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_pre_map_gars(struct apei_exec_context *ctx); +int apei_exec_post_unmap_gars(struct apei_exec_context *ctx); + +struct apei_resources { + struct list_head iomem; + struct list_head ioport; +}; + +static inline void apei_resources_init(struct apei_resources *resources) +{ + INIT_LIST_HEAD(&resources->iomem); + INIT_LIST_HEAD(&resources->ioport); +} + +void apei_resources_fini(struct apei_resources *resources); +int apei_resources_sub(struct apei_resources *resources1, + struct apei_resources *resources2); +int apei_resources_request(struct apei_resources *resources, + const char *desc); +void apei_resources_release(struct apei_resources *resources); +int
apei_exec_collect_resources(struct apei_exec_context *ctx, + struct apei_resources *resources); + +struct dentry; +struct dentry *apei_get_debugfs_dir(void); + +#define apei_estatus_for_each_section(estatus, section) \ + for (section = (struct acpi_hest_generic_data *)(estatus + 1); \ + (void *)section - (void *)estatus < estatus->data_length; \ + section = (void *)(section+1) + section->error_data_length) + +static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus) +{ + if (estatus->raw_data_length) + return estatus->raw_data_offset + \ + estatus->raw_data_length; + else + return sizeof(*estatus) + estatus->data_length; +} + +int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); +int apei_estatus_check(const struct acpi_hest_generic_status *estatus); +#endif diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c new file mode 100644 index 000000000000..f4cf2fc4c8c1 --- /dev/null +++ b/drivers/acpi/apei/cper.c @@ -0,0 +1,84 @@ +/* + * UEFI Common Platform Error Record (CPER) support + * + * Copyright (C) 2010, Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * CPER is the format used to describe platform hardware errors in + * various APEI tables, such as ERST, BERT and HEST. + * + * For more information about CPER, please refer to Appendix N of the + * UEFI Specification, version 2.3. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/time.h> +#include <linux/cper.h> +#include <linux/acpi.h> + +/* + * CPER record IDs need to be unique even across reboots, because a + * record ID is used as the index into ERST storage, and CPER records + * from multiple boots may co-exist in ERST.
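+ * + * For example, the implementation below seeds a 64-bit sequence + * counter with get_seconds() << 32 on first use, so IDs generated in + * different boots fall into disjoint ranges as long as the wall clock + * moves forward between boots.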
+ */ +u64 cper_next_record_id(void) +{ + static atomic64_t seq; + + if (!atomic64_read(&seq)) + atomic64_set(&seq, ((u64)get_seconds()) << 32); + + return atomic64_inc_return(&seq); +} +EXPORT_SYMBOL_GPL(cper_next_record_id); + +int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus) +{ + if (estatus->data_length && + estatus->data_length < sizeof(struct acpi_hest_generic_data)) + return -EINVAL; + if (estatus->raw_data_length && + estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(apei_estatus_check_header); + +int apei_estatus_check(const struct acpi_hest_generic_status *estatus) +{ + struct acpi_hest_generic_data *gdata; + unsigned int data_len, gedata_len; + int rc; + + rc = apei_estatus_check_header(estatus); + if (rc) + return rc; + data_len = estatus->data_length; + gdata = (struct acpi_hest_generic_data *)(estatus + 1); + while (data_len >= sizeof(*gdata)) { + gedata_len = gdata->error_data_length; + if (gedata_len > data_len - sizeof(*gdata)) + return -EINVAL; + data_len -= gedata_len + sizeof(*gdata); + /* advance to the next generic data section */ + gdata = (void *)(gdata + 1) + gedata_len; + } + if (data_len) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(apei_estatus_check); diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c new file mode 100644 index 000000000000..465c885938ee --- /dev/null +++ b/drivers/acpi/apei/einj.c @@ -0,0 +1,548 @@ +/* + * APEI Error INJection support + * + * EINJ provides a hardware error injection mechanism; it is useful + * for debugging and testing other APEI and RAS features. + * + * For more information about EINJ, please refer to ACPI Specification + * version 4.0, section 17.5. + * + * Copyright 2009-2010 Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/nmi.h> +#include <linux/delay.h> +#include <acpi/acpi.h> + +#include "apei-internal.h" + +#define EINJ_PFX "EINJ: " + +#define SPIN_UNIT 100 /* 100ns */ +/* Firmware should respond within 1 millisecond */ +#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) + +/* + * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the + * EINJ table through an unpublished extension. Use with caution, as + * most will ignore the parameter and make their own choice of address + * for error injection.
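+ * + * When such a parameter block is present, its param1/param2 fields + * are fed from the einj/param1 and einj/param2 debugfs files set up + * in einj_init() below. A typical (hypothetical) injection sequence + * from user space would be: echo 0x8 > error_type; echo 1 > + * error_inject.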
+ */ +struct einj_parameter { + u64 type; + u64 reserved1; + u64 reserved2; + u64 param1; + u64 param2; +}; + +#define EINJ_OP_BUSY 0x1 +#define EINJ_STATUS_SUCCESS 0x0 +#define EINJ_STATUS_FAIL 0x1 +#define EINJ_STATUS_INVAL 0x2 + +#define EINJ_TAB_ENTRY(tab) \ + ((struct acpi_whea_header *)((char *)(tab) + \ + sizeof(struct acpi_table_einj))) + +static struct acpi_table_einj *einj_tab; + +static struct apei_resources einj_resources; + +static struct apei_exec_ins_type einj_ins_type[] = { + [ACPI_EINJ_READ_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register, + }, + [ACPI_EINJ_READ_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register_value, + }, + [ACPI_EINJ_WRITE_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register, + }, + [ACPI_EINJ_WRITE_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register_value, + }, + [ACPI_EINJ_NOOP] = { + .flags = 0, + .run = apei_exec_noop, + }, +}; + +/* + * Prevent the EINJ interpreter from running simultaneously, because + * the corresponding firmware implementation may not work properly if + * invoked concurrently. + */ +static DEFINE_MUTEX(einj_mutex); + +static struct einj_parameter *einj_param; + +static void einj_exec_ctx_init(struct apei_exec_context *ctx) +{ + apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), + EINJ_TAB_ENTRY(einj_tab), einj_tab->entries); +} + +static int __einj_get_available_error_type(u32 *type) +{ + struct apei_exec_context ctx; + int rc; + + einj_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE); + if (rc) + return rc; + *type = apei_exec_ctx_get_output(&ctx); + + return 0; +} + +/* Get the error injection capabilities of the platform */ +static int einj_get_available_error_type(u32 *type) +{ + int rc; + + mutex_lock(&einj_mutex); + rc = __einj_get_available_error_type(type); + mutex_unlock(&einj_mutex); + + return rc; +} + +static int einj_timedout(u64 *t) +{ + if ((s64)*t < SPIN_UNIT) { + pr_warning(FW_WARN EINJ_PFX + "Firmware does not respond in time\n"); + return 1; + } + *t -= SPIN_UNIT; + ndelay(SPIN_UNIT); + touch_nmi_watchdog(); + return 0; +} + +static u64 einj_get_parameter_address(void) +{ + int i; + u64 paddr = 0; + struct acpi_whea_header *entry; + + entry = EINJ_TAB_ENTRY(einj_tab); + for (i = 0; i < einj_tab->entries; i++) { + if (entry->action == ACPI_EINJ_SET_ERROR_TYPE && + entry->instruction == ACPI_EINJ_WRITE_REGISTER && + entry->register_region.space_id == + ACPI_ADR_SPACE_SYSTEM_MEMORY) + memcpy(&paddr, &entry->register_region.address, + sizeof(paddr)); + entry++; + } + + return paddr; +} + +/* Sanity-check the trigger table */ +static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) +{ + if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) + return -EINVAL; + if (trigger_tab->table_size > PAGE_SIZE || + trigger_tab->table_size <= trigger_tab->header_size) + return -EINVAL; + if (trigger_tab->entry_count != + (trigger_tab->table_size - trigger_tab->header_size) / + sizeof(struct acpi_einj_entry)) + return -EINVAL; + + return 0; +} + +/* Execute the instructions in the trigger error action table */ +static int __einj_error_trigger(u64 trigger_paddr) +{ + struct acpi_einj_trigger *trigger_tab = NULL; + struct apei_exec_context trigger_ctx; + struct apei_resources trigger_resources; + struct acpi_whea_header *trigger_entry; + struct resource *r; + u32 table_size; + int rc = -EIO; + + r =
request_mem_region(trigger_paddr, sizeof(*trigger_tab), + "APEI EINJ Trigger Table"); + if (!r) { + pr_err(EINJ_PFX + "Can not request iomem region <%016llx-%016llx> for Trigger table.\n", + (unsigned long long)trigger_paddr, + (unsigned long long)trigger_paddr+sizeof(*trigger_tab)); + goto out; + } + trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); + if (!trigger_tab) { + pr_err(EINJ_PFX "Failed to map trigger table!\n"); + goto out_rel_header; + } + rc = einj_check_trigger_header(trigger_tab); + if (rc) { + pr_warning(FW_BUG EINJ_PFX + "The trigger error action table is invalid\n"); + goto out_rel_header; + } + rc = -EIO; + table_size = trigger_tab->table_size; + r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), + table_size - sizeof(*trigger_tab), + "APEI EINJ Trigger Table"); + if (!r) { + pr_err(EINJ_PFX +"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n", + (unsigned long long)trigger_paddr+sizeof(*trigger_tab), + (unsigned long long)trigger_paddr + table_size); + goto out_rel_header; + } + iounmap(trigger_tab); + trigger_tab = ioremap_cache(trigger_paddr, table_size); + if (!trigger_tab) { + pr_err(EINJ_PFX "Failed to map trigger table!\n"); + goto out_rel_entry; + } + trigger_entry = (struct acpi_whea_header *) + ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); + apei_resources_init(&trigger_resources); + apei_exec_ctx_init(&trigger_ctx, einj_ins_type, + ARRAY_SIZE(einj_ins_type), + trigger_entry, trigger_tab->entry_count); + rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources); + if (rc) + goto out_fini; + rc = apei_resources_sub(&trigger_resources, &einj_resources); + if (rc) + goto out_fini; + rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); + if (rc) + goto out_fini; + rc = apei_exec_pre_map_gars(&trigger_ctx); + if (rc) + goto out_release; + + rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR); + + apei_exec_post_unmap_gars(&trigger_ctx); +out_release: + apei_resources_release(&trigger_resources); +out_fini: + apei_resources_fini(&trigger_resources); +out_rel_entry: + release_mem_region(trigger_paddr + sizeof(*trigger_tab), + table_size - sizeof(*trigger_tab)); +out_rel_header: + release_mem_region(trigger_paddr, sizeof(*trigger_tab)); +out: + if (trigger_tab) + iounmap(trigger_tab); + + return rc; +} + +static int __einj_error_inject(u32 type, u64 param1, u64 param2) +{ + struct apei_exec_context ctx; + u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT; + int rc; + + einj_exec_ctx_init(&ctx); + + rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, type); + rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); + if (rc) + return rc; + if (einj_param) { + writeq(param1, &einj_param->param1); + writeq(param2, &einj_param->param2); + } + rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); + if (rc) + return rc; + for (;;) { + rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (!(val & EINJ_OP_BUSY)) + break; + if (einj_timedout(&timeout)) + return -EIO; + } + rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (val != EINJ_STATUS_SUCCESS) + return -EBUSY; + + rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE); + if (rc) + return rc; + trigger_paddr = apei_exec_ctx_get_output(&ctx); + rc = __einj_error_trigger(trigger_paddr); + if (rc) + return rc; + rc = apei_exec_run(&ctx, 
ACPI_EINJ_END_OPERATION); + + return rc; +} + +/* Inject the specified hardware error */ +static int einj_error_inject(u32 type, u64 param1, u64 param2) +{ + int rc; + + mutex_lock(&einj_mutex); + rc = __einj_error_inject(type, param1, param2); + mutex_unlock(&einj_mutex); + + return rc; +} + +static u32 error_type; +static u64 error_param1; +static u64 error_param2; +static struct dentry *einj_debug_dir; + +static int available_error_type_show(struct seq_file *m, void *v) +{ + int rc; + u32 available_error_type = 0; + + rc = einj_get_available_error_type(&available_error_type); + if (rc) + return rc; + if (available_error_type & 0x0001) + seq_printf(m, "0x00000001\tProcessor Correctable\n"); + if (available_error_type & 0x0002) + seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n"); + if (available_error_type & 0x0004) + seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n"); + if (available_error_type & 0x0008) + seq_printf(m, "0x00000008\tMemory Correctable\n"); + if (available_error_type & 0x0010) + seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n"); + if (available_error_type & 0x0020) + seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n"); + if (available_error_type & 0x0040) + seq_printf(m, "0x00000040\tPCI Express Correctable\n"); + if (available_error_type & 0x0080) + seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n"); + if (available_error_type & 0x0100) + seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n"); + if (available_error_type & 0x0200) + seq_printf(m, "0x00000200\tPlatform Correctable\n"); + if (available_error_type & 0x0400) + seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n"); + if (available_error_type & 0x0800) + seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n"); + + return 0; +} + +static int available_error_type_open(struct inode *inode, struct file *file) +{ + return single_open(file, available_error_type_show, NULL); +} + +static const struct file_operations available_error_type_fops = { + .open = available_error_type_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int error_type_get(void *data, u64 *val) +{ + *val = error_type; + + return 0; +} + +static int error_type_set(void *data, u64 val) +{ + int rc; + u32 available_error_type = 0; + + /* Only one error type can be specified */ + if (val & (val - 1)) + return -EINVAL; + rc = einj_get_available_error_type(&available_error_type); + if (rc) + return rc; + if (!(val & available_error_type)) + return -EINVAL; + error_type = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get, + error_type_set, "0x%llx\n"); + +static int error_inject_set(void *data, u64 val) +{ + if (!error_type) + return -EINVAL; + + return einj_error_inject(error_type, error_param1, error_param2); +} + +DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, + error_inject_set, "%llu\n"); + +static int einj_check_table(struct acpi_table_einj *einj_tab) +{ + if (einj_tab->header_length != sizeof(struct acpi_table_einj)) + return -EINVAL; + if (einj_tab->header.length < sizeof(struct acpi_table_einj)) + return -EINVAL; + if (einj_tab->entries != + (einj_tab->header.length - sizeof(struct acpi_table_einj)) / + sizeof(struct acpi_einj_entry)) + return -EINVAL; + + return 0; +} + +static int __init einj_init(void) +{ + int rc; + u64 param_paddr; + acpi_status status; + struct dentry *fentry; + struct apei_exec_context ctx; + + if (acpi_disabled) + return -ENODEV; + + status = 
acpi_get_table(ACPI_SIG_EINJ, 0, + (struct acpi_table_header **)&einj_tab); + if (status == AE_NOT_FOUND) { + pr_info(EINJ_PFX "Table is not found!\n"); + return -ENODEV; + } else if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + pr_err(EINJ_PFX "Failed to get table, %s\n", msg); + return -EINVAL; + } + + rc = einj_check_table(einj_tab); + if (rc) { + pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n"); + return -EINVAL; + } + + rc = -ENOMEM; + einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir()); + if (!einj_debug_dir) + goto err_cleanup; + fentry = debugfs_create_file("available_error_type", S_IRUSR, + einj_debug_dir, NULL, + &available_error_type_fops); + if (!fentry) + goto err_cleanup; + fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR, + einj_debug_dir, NULL, &error_type_fops); + if (!fentry) + goto err_cleanup; + fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param1); + if (!fentry) + goto err_cleanup; + fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param2); + if (!fentry) + goto err_cleanup; + fentry = debugfs_create_file("error_inject", S_IWUSR, + einj_debug_dir, NULL, &error_inject_fops); + if (!fentry) + goto err_cleanup; + + apei_resources_init(&einj_resources); + einj_exec_ctx_init(&ctx); + rc = apei_exec_collect_resources(&ctx, &einj_resources); + if (rc) + goto err_fini; + rc = apei_resources_request(&einj_resources, "APEI EINJ"); + if (rc) + goto err_fini; + rc = apei_exec_pre_map_gars(&ctx); + if (rc) + goto err_release; + param_paddr = einj_get_parameter_address(); + if (param_paddr) { + einj_param = ioremap(param_paddr, sizeof(*einj_param)); + rc = -ENOMEM; + if (!einj_param) + goto err_unmap; + } + + pr_info(EINJ_PFX "Error INJection is initialized.\n"); + + return 0; + +err_unmap: + apei_exec_post_unmap_gars(&ctx); +err_release: + apei_resources_release(&einj_resources); +err_fini: + apei_resources_fini(&einj_resources); +err_cleanup: + debugfs_remove_recursive(einj_debug_dir); + + return rc; +} + +static void __exit einj_exit(void) +{ + struct apei_exec_context ctx; + + if (einj_param) + iounmap(einj_param); + einj_exec_ctx_init(&ctx); + apei_exec_post_unmap_gars(&ctx); + apei_resources_release(&einj_resources); + apei_resources_fini(&einj_resources); + debugfs_remove_recursive(einj_debug_dir); +} + +module_init(einj_init); +module_exit(einj_exit); + +MODULE_AUTHOR("Huang Ying"); +MODULE_DESCRIPTION("APEI Error INJection support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c new file mode 100644 index 000000000000..2ebc39115507 --- /dev/null +++ b/drivers/acpi/apei/erst.c @@ -0,0 +1,855 @@ +/* + * APEI Error Record Serialization Table support + * + * ERST is a way provided by APEI to save and retrieve hardware error + * information to and from a persistent store. + * + * For more information about ERST, please refer to ACPI Specification + * version 4.0, section 17.4. + * + * Copyright 2010 Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/acpi.h> +#include <linux/uaccess.h> +#include <linux/cper.h> +#include <linux/nmi.h> +#include <acpi/apei.h> + +#include "apei-internal.h" + +#define ERST_PFX "ERST: " + +/* ERST command status */ +#define ERST_STATUS_SUCCESS 0x0 +#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1 +#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2 +#define ERST_STATUS_FAILED 0x3 +#define ERST_STATUS_RECORD_STORE_EMPTY 0x4 +#define ERST_STATUS_RECORD_NOT_FOUND 0x5 + +#define ERST_TAB_ENTRY(tab) \ + ((struct acpi_whea_header *)((char *)(tab) + \ + sizeof(struct acpi_table_erst))) + +#define SPIN_UNIT 100 /* 100ns */ +/* Firmware should respond within 1 millisecond */ +#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) +#define FIRMWARE_MAX_STALL 50 /* 50us */ + +int erst_disable; +EXPORT_SYMBOL_GPL(erst_disable); + +static struct acpi_table_erst *erst_tab; + +/* ERST Error Log Address Range attributes */ +#define ERST_RANGE_RESERVED 0x0001 +#define ERST_RANGE_NVRAM 0x0002 +#define ERST_RANGE_SLOW 0x0004 + +/* + * ERST Error Log Address Range, used as a buffer for reading/writing + * error records. + */ +static struct erst_erange { + u64 base; + u64 size; + void __iomem *vaddr; + u32 attr; +} erst_erange; + +/* + * Prevent the ERST interpreter from running simultaneously, because + * the corresponding firmware implementation may not work properly if + * invoked concurrently. + * + * The lock also provides exclusive access to the ERST Error Log + * Address Range.
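+ * + * Note that erst_write() below takes this lock with + * spin_trylock_irqsave() rather than blocking, so a caller that races + * with another ERST operation gets -EBUSY instead of spinning; this + * matters for error records written from atomic contexts.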
+ */ +static DEFINE_SPINLOCK(erst_lock); + +static inline int erst_errno(int command_status) +{ + switch (command_status) { + case ERST_STATUS_SUCCESS: + return 0; + case ERST_STATUS_HARDWARE_NOT_AVAILABLE: + return -ENODEV; + case ERST_STATUS_NOT_ENOUGH_SPACE: + return -ENOSPC; + case ERST_STATUS_RECORD_STORE_EMPTY: + case ERST_STATUS_RECORD_NOT_FOUND: + return -ENOENT; + default: + return -EINVAL; + } +} + +static int erst_timedout(u64 *t, u64 spin_unit) +{ + if ((s64)*t < spin_unit) { + pr_warning(FW_WARN ERST_PFX + "Firmware does not respond in time\n"); + return 1; + } + *t -= spin_unit; + ndelay(spin_unit); + touch_nmi_watchdog(); + return 0; +} + +static int erst_exec_load_var1(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_read_register(entry, &ctx->var1); +} + +static int erst_exec_load_var2(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_read_register(entry, &ctx->var2); +} + +static int erst_exec_store_var1(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_write_register(entry, ctx->var1); +} + +static int erst_exec_add(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + ctx->var1 += ctx->var2; + return 0; +} + +static int erst_exec_subtract(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + ctx->var1 -= ctx->var2; + return 0; +} + +static int erst_exec_add_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; + val += ctx->value; + rc = __apei_exec_write_register(entry, val); + return rc; +} + +static int erst_exec_subtract_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; + val -= ctx->value; + rc = __apei_exec_write_register(entry, val); + return rc; +} + +static int erst_exec_stall(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + u64 stall_time; + + if (ctx->value > FIRMWARE_MAX_STALL) { + if (!in_nmi()) + pr_warning(FW_WARN ERST_PFX + "Too long stall time for stall instruction: %llx.\n", + ctx->value); + stall_time = FIRMWARE_MAX_STALL; + } else + stall_time = ctx->value; + udelay(stall_time); + return 0; +} + +static int erst_exec_stall_while_true(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + u64 timeout = FIRMWARE_TIMEOUT; + u64 stall_time; + + if (ctx->var1 > FIRMWARE_MAX_STALL) { + if (!in_nmi()) + pr_warning(FW_WARN ERST_PFX + "Too long stall time for stall while true instruction: %llx.\n", + ctx->var1); + stall_time = FIRMWARE_MAX_STALL; + } else + stall_time = ctx->var1; + + for (;;) { + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; + if (val != ctx->value) + break; + if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC)) + return -EIO; + } + return 0; +} + +static int erst_exec_skip_next_instruction_if_true( + struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; + if (val == ctx->value) { + ctx->ip += 2; + return APEI_EXEC_SET_IP; + } + + return 0; +} + +static int erst_exec_goto(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + ctx->ip = ctx->value; + return APEI_EXEC_SET_IP; +} + +static int erst_exec_set_src_address_base(struct apei_exec_context *ctx, + struct 
acpi_whea_header *entry) +{ + return __apei_exec_read_register(entry, &ctx->src_base); +} + +static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_read_register(entry, &ctx->dst_base); +} + +static int erst_exec_move_data(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 offset; + + rc = __apei_exec_read_register(entry, &offset); + if (rc) + return rc; + memmove((void *)ctx->dst_base + offset, + (void *)ctx->src_base + offset, + ctx->var2); + + return 0; +} + +static struct apei_exec_ins_type erst_ins_type[] = { + [ACPI_ERST_READ_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register, + }, + [ACPI_ERST_READ_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register_value, + }, + [ACPI_ERST_WRITE_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register, + }, + [ACPI_ERST_WRITE_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register_value, + }, + [ACPI_ERST_NOOP] = { + .flags = 0, + .run = apei_exec_noop, + }, + [ACPI_ERST_LOAD_VAR1] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_load_var1, + }, + [ACPI_ERST_LOAD_VAR2] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_load_var2, + }, + [ACPI_ERST_STORE_VAR1] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_store_var1, + }, + [ACPI_ERST_ADD] = { + .flags = 0, + .run = erst_exec_add, + }, + [ACPI_ERST_SUBTRACT] = { + .flags = 0, + .run = erst_exec_subtract, + }, + [ACPI_ERST_ADD_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_add_value, + }, + [ACPI_ERST_SUBTRACT_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_subtract_value, + }, + [ACPI_ERST_STALL] = { + .flags = 0, + .run = erst_exec_stall, + }, + [ACPI_ERST_STALL_WHILE_TRUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_stall_while_true, + }, + [ACPI_ERST_SKIP_NEXT_IF_TRUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_skip_next_instruction_if_true, + }, + [ACPI_ERST_GOTO] = { + .flags = 0, + .run = erst_exec_goto, + }, + [ACPI_ERST_SET_SRC_ADDRESS_BASE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_set_src_address_base, + }, + [ACPI_ERST_SET_DST_ADDRESS_BASE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_set_dst_address_base, + }, + [ACPI_ERST_MOVE_DATA] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_move_data, + }, +}; + +static inline void erst_exec_ctx_init(struct apei_exec_context *ctx) +{ + apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type), + ERST_TAB_ENTRY(erst_tab), erst_tab->entries); +} + +static int erst_get_erange(struct erst_erange *range) +{ + struct apei_exec_context ctx; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE); + if (rc) + return rc; + range->base = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH); + if (rc) + return rc; + range->size = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES); + if (rc) + return rc; + range->attr = apei_exec_ctx_get_output(&ctx); + + return 0; +} + +static ssize_t __erst_get_record_count(void) +{ + struct apei_exec_context ctx; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT); + if (rc) + return rc; + return 
apei_exec_ctx_get_output(&ctx); +} + +ssize_t erst_get_record_count(void) +{ + ssize_t count; + unsigned long flags; + + if (erst_disable) + return -ENODEV; + + spin_lock_irqsave(&erst_lock, flags); + count = __erst_get_record_count(); + spin_unlock_irqrestore(&erst_lock, flags); + + return count; +} +EXPORT_SYMBOL_GPL(erst_get_record_count); + +static int __erst_get_next_record_id(u64 *record_id) +{ + struct apei_exec_context ctx; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID); + if (rc) + return rc; + *record_id = apei_exec_ctx_get_output(&ctx); + + return 0; +} + +/* + * Get the record ID of an existing error record on the persistent + * storage. If there is no error record on the persistent storage, the + * returned record_id is APEI_ERST_INVALID_RECORD_ID. + */ +int erst_get_next_record_id(u64 *record_id) +{ + int rc; + unsigned long flags; + + if (erst_disable) + return -ENODEV; + + spin_lock_irqsave(&erst_lock, flags); + rc = __erst_get_next_record_id(record_id); + spin_unlock_irqrestore(&erst_lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(erst_get_next_record_id); + +static int __erst_write_to_storage(u64 offset) +{ + struct apei_exec_context ctx; + u64 timeout = FIRMWARE_TIMEOUT; + u64 val; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, offset); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); + if (rc) + return rc; + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); + if (rc) + return rc; + for (;;) { + rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (!val) + break; + if (erst_timedout(&timeout, SPIN_UNIT)) + return -EIO; + } + rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_END); + if (rc) + return rc; + + return erst_errno(val); +} + +static int __erst_read_from_storage(u64 record_id, u64 offset) +{ + struct apei_exec_context ctx; + u64 timeout = FIRMWARE_TIMEOUT; + u64 val; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, offset); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, record_id); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); + if (rc) + return rc; + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); + if (rc) + return rc; + for (;;) { + rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (!val) + break; + if (erst_timedout(&timeout, SPIN_UNIT)) + return -EIO; + }; + rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_END); + if (rc) + return rc; + + return erst_errno(val); +} + +static int __erst_clear_from_storage(u64 record_id) +{ + struct apei_exec_context ctx; + u64 timeout = FIRMWARE_TIMEOUT; + u64 val; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, record_id); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); + if (rc) + return rc; + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); + if (rc) + return rc; + for (;;) { + rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); + if 
(rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (!val) + break; + if (erst_timedout(&timeout, SPIN_UNIT)) + return -EIO; + } + rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_END); + if (rc) + return rc; + + return erst_errno(val); +} + +/* NVRAM ERST Error Log Address Range is not supported yet */ +static void pr_unimpl_nvram(void) +{ + if (printk_ratelimit()) + pr_warning(ERST_PFX + "NVRAM ERST Log Address Range is not implemented yet\n"); +} + +static int __erst_write_to_nvram(const struct cper_record_header *record) +{ + /* do not print message, because printk is not safe for NMI */ + return -ENOSYS; +} + +static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset) +{ + pr_unimpl_nvram(); + return -ENOSYS; +} + +static int __erst_clear_from_nvram(u64 record_id) +{ + pr_unimpl_nvram(); + return -ENOSYS; +} + +int erst_write(const struct cper_record_header *record) +{ + int rc; + unsigned long flags; + struct cper_record_header *rcd_erange; + + if (erst_disable) + return -ENODEV; + + if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE)) + return -EINVAL; + + if (erst_erange.attr & ERST_RANGE_NVRAM) { + if (!spin_trylock_irqsave(&erst_lock, flags)) + return -EBUSY; + rc = __erst_write_to_nvram(record); + spin_unlock_irqrestore(&erst_lock, flags); + return rc; + } + + if (record->record_length > erst_erange.size) + return -EINVAL; + + if (!spin_trylock_irqsave(&erst_lock, flags)) + return -EBUSY; + memcpy(erst_erange.vaddr, record, record->record_length); + rcd_erange = erst_erange.vaddr; + /* signature for serialization system */ + memcpy(&rcd_erange->persistence_information, "ER", 2); + + rc = __erst_write_to_storage(0); + spin_unlock_irqrestore(&erst_lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(erst_write); + +static int __erst_read_to_erange(u64 record_id, u64 *offset) +{ + int rc; + + if (erst_erange.attr & ERST_RANGE_NVRAM) + return __erst_read_to_erange_from_nvram( + record_id, offset); + + rc = __erst_read_from_storage(record_id, 0); + if (rc) + return rc; + *offset = 0; + + return 0; +} + +static ssize_t __erst_read(u64 record_id, struct cper_record_header *record, + size_t buflen) +{ + int rc; + u64 offset, len = 0; + struct cper_record_header *rcd_tmp; + + rc = __erst_read_to_erange(record_id, &offset); + if (rc) + return rc; + rcd_tmp = erst_erange.vaddr + offset; + len = rcd_tmp->record_length; + if (len <= buflen) + memcpy(record, rcd_tmp, len); + + return len; +} + +/* + * If return value > buflen, the buffer size is not big enough, + * else if return value < 0, something goes wrong, + * else everything is OK, and return value is record length + */ +ssize_t erst_read(u64 record_id, struct cper_record_header *record, + size_t buflen) +{ + ssize_t len; + unsigned long flags; + + if (erst_disable) + return -ENODEV; + + spin_lock_irqsave(&erst_lock, flags); + len = __erst_read(record_id, record, buflen); + spin_unlock_irqrestore(&erst_lock, flags); + return len; +} +EXPORT_SYMBOL_GPL(erst_read); + +/* + * If return value > buflen, the buffer size is not big enough, + * else if return value = 0, there is no more record to read, + * else if return value < 0, something goes wrong, + * else everything is OK, and return value is record length + */ +ssize_t erst_read_next(struct cper_record_header *record, size_t buflen) +{ + int rc; + ssize_t len; + unsigned long flags; + u64 record_id; + + if (erst_disable) + return -ENODEV; + + 
spin_lock_irqsave(&erst_lock, flags); + rc = __erst_get_next_record_id(&record_id); + if (rc) { + spin_unlock_irqrestore(&erst_lock, flags); + return rc; + } + /* no more record */ + if (record_id == APEI_ERST_INVALID_RECORD_ID) { + spin_unlock_irqrestore(&erst_lock, flags); + return 0; + } + + len = __erst_read(record_id, record, buflen); + spin_unlock_irqrestore(&erst_lock, flags); + + return len; +} +EXPORT_SYMBOL_GPL(erst_read_next); + +int erst_clear(u64 record_id) +{ + int rc; + unsigned long flags; + + if (erst_disable) + return -ENODEV; + + spin_lock_irqsave(&erst_lock, flags); + if (erst_erange.attr & ERST_RANGE_NVRAM) + rc = __erst_clear_from_nvram(record_id); + else + rc = __erst_clear_from_storage(record_id); + spin_unlock_irqrestore(&erst_lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(erst_clear); + +static int __init setup_erst_disable(char *str) +{ + erst_disable = 1; + return 0; +} + +__setup("erst_disable", setup_erst_disable); + +static int erst_check_table(struct acpi_table_erst *erst_tab) +{ + if (erst_tab->header_length != sizeof(struct acpi_table_erst)) + return -EINVAL; + if (erst_tab->header.length < sizeof(struct acpi_table_erst)) + return -EINVAL; + if (erst_tab->entries != + (erst_tab->header.length - sizeof(struct acpi_table_erst)) / + sizeof(struct acpi_erst_entry)) + return -EINVAL; + + return 0; +} + +static int __init erst_init(void) +{ + int rc = 0; + acpi_status status; + struct apei_exec_context ctx; + struct apei_resources erst_resources; + struct resource *r; + + if (acpi_disabled) + goto err; + + if (erst_disable) { + pr_info(ERST_PFX + "Error Record Serialization Table (ERST) support is disabled.\n"); + goto err; + } + + status = acpi_get_table(ACPI_SIG_ERST, 0, + (struct acpi_table_header **)&erst_tab); + if (status == AE_NOT_FOUND) { + pr_err(ERST_PFX "Table is not found!\n"); + goto err; + } else if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + pr_err(ERST_PFX "Failed to get table, %s\n", msg); + rc = -EINVAL; + goto err; + } + + rc = erst_check_table(erst_tab); + if (rc) { + pr_err(FW_BUG ERST_PFX "ERST table is invalid\n"); + goto err; + } + + apei_resources_init(&erst_resources); + erst_exec_ctx_init(&ctx); + rc = apei_exec_collect_resources(&ctx, &erst_resources); + if (rc) + goto err_fini; + rc = apei_resources_request(&erst_resources, "APEI ERST"); + if (rc) + goto err_fini; + rc = apei_exec_pre_map_gars(&ctx); + if (rc) + goto err_release; + rc = erst_get_erange(&erst_erange); + if (rc) { + if (rc == -ENODEV) + pr_info(ERST_PFX + "The corresponding hardware device or firmware implementation " + "is not available.\n"); + else + pr_err(ERST_PFX + "Failed to get Error Log Address Range.\n"); + goto err_unmap_reg; + } + + r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST"); + if (!r) { + pr_err(ERST_PFX + "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n", + (unsigned long long)erst_erange.base, + (unsigned long long)erst_erange.base + erst_erange.size); + rc = -EIO; + goto err_unmap_reg; + } + rc = -ENOMEM; + erst_erange.vaddr = ioremap_cache(erst_erange.base, + erst_erange.size); + if (!erst_erange.vaddr) + goto err_release_erange; + + pr_info(ERST_PFX + "Error Record Serialization Table (ERST) support is initialized.\n"); + + return 0; + +err_release_erange: + release_mem_region(erst_erange.base, erst_erange.size); +err_unmap_reg: + apei_exec_post_unmap_gars(&ctx); +err_release: + apei_resources_release(&erst_resources); +err_fini: + apei_resources_fini(&erst_resources); 
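For illustration, here is a minimal sketch of how a kernel-side client might drain the error log, following the erst_read_next() return-value convention documented above: zero means no more records, a negative value is an error, and a value larger than buflen means the record did not fit. The function name and the fixed buffer size below are hypothetical, not part of this patch.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cper.h>
#include <acpi/apei.h>

#define MY_RCD_BUF_SIZE	4096	/* hypothetical; a real client would retry with a larger buffer */

static void my_drain_erst(void)
{
	struct cper_record_header *rcd;
	ssize_t len;

	rcd = kmalloc(MY_RCD_BUF_SIZE, GFP_KERNEL);
	if (!rcd)
		return;

	for (;;) {
		len = erst_read_next(rcd, MY_RCD_BUF_SIZE);
		if (len == 0)
			break;	/* no more records */
		if (len < 0)
			break;	/* e.g. -ENODEV when ERST is disabled */
		if (len > MY_RCD_BUF_SIZE)
			break;	/* record did not fit; retry with a larger buffer */
		pr_info("ERST record of %zd bytes\n", len);
	}
	kfree(rcd);
}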
+err: + erst_disable = 1; + return rc; +} + +device_initcall(erst_init); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c new file mode 100644 index 000000000000..fd0cc016a099 --- /dev/null +++ b/drivers/acpi/apei/ghes.c @@ -0,0 +1,427 @@ +/* + * APEI Generic Hardware Error Source support + * + * Generic Hardware Error Source provides a way to report platform + * hardware errors (such as those from the chipset). It works in the + * so-called "Firmware First" mode, that is, hardware errors are + * reported to firmware first, then reported to Linux by firmware. + * This way, non-standard hardware error registers or non-standard + * hardware links can be checked by firmware to produce more hardware + * error information for Linux. + * + * For more information about Generic Hardware Error Source, please + * refer to ACPI Specification version 4.0, section 17.3.2.6. + * + * For now, only the SCI notification type and memory errors are + * supported. More notification types and hardware error types will be + * added later. + * + * Copyright 2010 Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/cper.h> +#include <linux/kdebug.h> +#include <acpi/apei.h> +#include <acpi/atomicio.h> +#include <acpi/hed.h> +#include <asm/mce.h> + +#include "apei-internal.h" + +#define GHES_PFX "GHES: " + +#define GHES_ESTATUS_MAX_SIZE 65536 + +/* + * One struct ghes is created for each generic hardware error + * source. + * + * It provides the context for the APEI hardware error + * timer/IRQ/SCI/NMI handler. The handler for one generic hardware + * error source is only triggered after the previous one is done, so + * the handler can use struct ghes without locking. + * + * estatus: memory buffer for error status block, allocated during + * HEST parsing. + */ +#define GHES_TO_CLEAR 0x0001 + +struct ghes { + struct acpi_hest_generic *generic; + struct acpi_hest_generic_status *estatus; + struct list_head list; + u64 buffer_paddr; + unsigned long flags; +}; + +/* + * Error source lists, one list for each notification method. The + * members in lists are struct ghes. + * + * The list members are only added during HEST parsing and deleted on + * module_exit, that is, single-threaded, so no lock is needed for + * that. + * + * But mutual exclusion is needed between member adding/deleting and + * the timer/IRQ/SCI/NMI handlers, which may traverse the list. RCU is + * used for that.
+ */ +static LIST_HEAD(ghes_sci); + +static struct ghes *ghes_new(struct acpi_hest_generic *generic) +{ + struct ghes *ghes; + unsigned int error_block_length; + int rc; + + ghes = kzalloc(sizeof(*ghes), GFP_KERNEL); + if (!ghes) + return ERR_PTR(-ENOMEM); + ghes->generic = generic; + INIT_LIST_HEAD(&ghes->list); + rc = acpi_pre_map_gar(&generic->error_status_address); + if (rc) + goto err_free; + error_block_length = generic->error_block_length; + if (error_block_length > GHES_ESTATUS_MAX_SIZE) { + pr_warning(FW_WARN GHES_PFX + "Error status block length is too long: %u for " + "generic hardware error source: %d.\n", + error_block_length, generic->header.source_id); + error_block_length = GHES_ESTATUS_MAX_SIZE; + } + ghes->estatus = kmalloc(error_block_length, GFP_KERNEL); + if (!ghes->estatus) { + rc = -ENOMEM; + goto err_unmap; + } + + return ghes; + +err_unmap: + acpi_post_unmap_gar(&generic->error_status_address); +err_free: + kfree(ghes); + return ERR_PTR(rc); +} + +static void ghes_fini(struct ghes *ghes) +{ + kfree(ghes->estatus); + acpi_post_unmap_gar(&ghes->generic->error_status_address); +} + +enum { + GHES_SER_NO = 0x0, + GHES_SER_CORRECTED = 0x1, + GHES_SER_RECOVERABLE = 0x2, + GHES_SER_PANIC = 0x3, +}; + +static inline int ghes_severity(int severity) +{ + switch (severity) { + case CPER_SER_INFORMATIONAL: + return GHES_SER_NO; + case CPER_SER_CORRECTED: + return GHES_SER_CORRECTED; + case CPER_SER_RECOVERABLE: + return GHES_SER_RECOVERABLE; + case CPER_SER_FATAL: + return GHES_SER_PANIC; + default: + /* Unknown, go panic */ + return GHES_SER_PANIC; + } +} + +/* The SCI handler runs in a work queue, so ioremap can be used here */ +static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, + int from_phys) +{ + void *vaddr; + + vaddr = ioremap_cache(paddr, len); + if (!vaddr) + return -ENOMEM; + if (from_phys) + memcpy(buffer, vaddr, len); + else + memcpy(vaddr, buffer, len); + iounmap(vaddr); + + return 0; +} + +static int ghes_read_estatus(struct ghes *ghes, int silent) +{ + struct acpi_hest_generic *g = ghes->generic; + u64 buf_paddr; + u32 len; + int rc; + + rc = acpi_atomic_read(&buf_paddr, &g->error_status_address); + if (rc) { + if (!silent && printk_ratelimit()) + pr_warning(FW_WARN GHES_PFX +"Failed to read error status block address for hardware error source: %d.\n", + g->header.source_id); + return -EIO; + } + if (!buf_paddr) + return -ENOENT; + + rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, + sizeof(*ghes->estatus), 1); + if (rc) + return rc; + if (!ghes->estatus->block_status) + return -ENOENT; + + ghes->buffer_paddr = buf_paddr; + ghes->flags |= GHES_TO_CLEAR; + + rc = -EIO; + len = apei_estatus_len(ghes->estatus); + if (len < sizeof(*ghes->estatus)) + goto err_read_block; + if (len > ghes->generic->error_block_length) + goto err_read_block; + if (apei_estatus_check_header(ghes->estatus)) + goto err_read_block; + rc = ghes_copy_tofrom_phys(ghes->estatus + 1, + buf_paddr + sizeof(*ghes->estatus), + len - sizeof(*ghes->estatus), 1); + if (rc) + return rc; + if (apei_estatus_check(ghes->estatus)) + goto err_read_block; + rc = 0; + +err_read_block: + if (rc && !silent) + pr_warning(FW_WARN GHES_PFX + "Failed to read error status block!\n"); + return rc; +} + +static void ghes_clear_estatus(struct ghes *ghes) +{ + ghes->estatus->block_status = 0; + if (!(ghes->flags & GHES_TO_CLEAR)) + return; + ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr, + sizeof(ghes->estatus->block_status), 0); + ghes->flags &= ~GHES_TO_CLEAR; +} + +static void
ghes_do_proc(struct ghes *ghes) +{ + int ser, processed = 0; + struct acpi_hest_generic_data *gdata; + + ser = ghes_severity(ghes->estatus->error_severity); + apei_estatus_for_each_section(ghes->estatus, gdata) { +#ifdef CONFIG_X86_MCE + if (!uuid_le_cmp(*(uuid_le *)gdata->section_type, + CPER_SEC_PLATFORM_MEM)) { + apei_mce_report_mem_error( + ser == GHES_SER_CORRECTED, + (struct cper_sec_mem_err *)(gdata+1)); + processed = 1; + } +#endif + } + + if (!processed && printk_ratelimit()) + pr_warning(GHES_PFX + "Unknown error record from generic hardware error source: %d\n", + ghes->generic->header.source_id); +} + +static int ghes_proc(struct ghes *ghes) +{ + int rc; + + rc = ghes_read_estatus(ghes, 0); + if (rc) + goto out; + ghes_do_proc(ghes); + +out: + ghes_clear_estatus(ghes); + return 0; +} + +static int ghes_notify_sci(struct notifier_block *this, + unsigned long event, void *data) +{ + struct ghes *ghes; + int ret = NOTIFY_DONE; + + rcu_read_lock(); + list_for_each_entry_rcu(ghes, &ghes_sci, list) { + if (!ghes_proc(ghes)) + ret = NOTIFY_OK; + } + rcu_read_unlock(); + + return ret; +} + +static struct notifier_block ghes_notifier_sci = { + .notifier_call = ghes_notify_sci, +}; + +static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data) +{ + struct acpi_hest_generic *generic; + struct ghes *ghes = NULL; + int rc = 0; + + if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) + return 0; + + generic = (struct acpi_hest_generic *)hest_hdr; + if (!generic->enabled) + return 0; + + if (generic->error_block_length < + sizeof(struct acpi_hest_generic_status)) { + pr_warning(FW_BUG GHES_PFX +"Invalid error block length: %u for generic hardware error source: %d\n", + generic->error_block_length, + generic->header.source_id); + goto err; + } + if (generic->records_to_preallocate == 0) { + pr_warning(FW_BUG GHES_PFX +"Invalid records to preallocate: %u for generic hardware error source: %d\n", + generic->records_to_preallocate, + generic->header.source_id); + goto err; + } + ghes = ghes_new(generic); + if (IS_ERR(ghes)) { + rc = PTR_ERR(ghes); + ghes = NULL; + goto err; + } + switch (generic->notify.type) { + case ACPI_HEST_NOTIFY_POLLED: + pr_warning(GHES_PFX +"Generic hardware error source: %d notified via POLL is not supported!\n", + generic->header.source_id); + break; + case ACPI_HEST_NOTIFY_EXTERNAL: + case ACPI_HEST_NOTIFY_LOCAL: + pr_warning(GHES_PFX +"Generic hardware error source: %d notified via IRQ is not supported!\n", + generic->header.source_id); + break; + case ACPI_HEST_NOTIFY_SCI: + if (list_empty(&ghes_sci)) + register_acpi_hed_notifier(&ghes_notifier_sci); + list_add_rcu(&ghes->list, &ghes_sci); + break; + case ACPI_HEST_NOTIFY_NMI: + pr_warning(GHES_PFX +"Generic hardware error source: %d notified via NMI is not supported!\n", + generic->header.source_id); + break; + default: + pr_warning(FW_WARN GHES_PFX + "Unknown notification type: %u for generic hardware error source: %d\n", + generic->notify.type, generic->header.source_id); + break; + } + + return 0; +err: + if (ghes) + ghes_fini(ghes); + return rc; +} + +static void ghes_cleanup(void) +{ + struct ghes *ghes, *nghes; + + if (!list_empty(&ghes_sci)) + unregister_acpi_hed_notifier(&ghes_notifier_sci); + + synchronize_rcu(); + + list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) { + list_del(&ghes->list); + ghes_fini(ghes); + kfree(ghes); + } +} + +static int __init ghes_init(void) +{ + int rc; + + if (acpi_disabled) + return -ENODEV; + + if (hest_disable) { + pr_info(GHES_PFX "HEST is not enabled!\n"); 
+ return -EINVAL; + } + + rc = apei_hest_parse(hest_ghes_parse, NULL); + if (rc) { + pr_err(GHES_PFX + "Error parsing HEST generic hardware error sources.\n"); + goto err_cleanup; + } + + if (list_empty(&ghes_sci)) { + pr_info(GHES_PFX + "No functional generic hardware error sources.\n"); + rc = -ENODEV; + goto err_cleanup; + } + + pr_info(GHES_PFX + "Generic Hardware Error Source support is initialized.\n"); + + return 0; +err_cleanup: + ghes_cleanup(); + return rc; +} + +static void __exit ghes_exit(void) +{ + ghes_cleanup(); +} + +module_init(ghes_init); +module_exit(ghes_exit); + +MODULE_AUTHOR("Huang Ying"); +MODULE_DESCRIPTION("APEI Generic Hardware Error Source support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c new file mode 100644 index 000000000000..e7f40d362cb3 --- /dev/null +++ b/drivers/acpi/apei/hest.c @@ -0,0 +1,173 @@ +/* + * APEI Hardware Error Source Table support + * + * HEST describes error sources in detail and communicates operational + * parameters (e.g. severity levels, masking bits, and threshold + * values) to Linux as necessary. It also allows the BIOS to report + * non-standard error sources to Linux (for example, chipset-specific + * error registers). + * + * For more information about HEST, please refer to ACPI Specification + * version 4.0, section 17.3.2. + * + * Copyright 2009 Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/kdebug.h> +#include <linux/highmem.h> +#include <linux/io.h> +#include <acpi/apei.h> + +#include "apei-internal.h" + +#define HEST_PFX "HEST: " + +int hest_disable; +EXPORT_SYMBOL_GPL(hest_disable); + +/* HEST table parsing */ + +static struct acpi_table_hest *hest_tab; + +static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data) +{ + return 0; +} + +static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { + [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ + [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, + [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), + [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root), + [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer), + [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge), + [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic), +}; + +static int hest_esrc_len(struct acpi_hest_header *hest_hdr) +{ + u16 hest_type = hest_hdr->type; + int len; + + if (hest_type >= ACPI_HEST_TYPE_RESERVED) + return 0; + + len = hest_esrc_len_tab[hest_type]; + + if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) { + struct acpi_hest_ia_corrected *cmc; + cmc = (struct acpi_hest_ia_corrected *)hest_hdr; + len = sizeof(*cmc) + cmc->num_hardware_banks * + sizeof(struct acpi_hest_ia_error_bank); + } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) { + struct acpi_hest_ia_machine_check *mc; + mc = (struct acpi_hest_ia_machine_check *)hest_hdr; + len = sizeof(*mc) + mc->num_hardware_banks * + sizeof(struct acpi_hest_ia_error_bank); + } + BUG_ON(len == -1); + + return len; +} + +int apei_hest_parse(apei_hest_func_t func, void *data) +{ + struct acpi_hest_header *hest_hdr; + int i, rc, len; + + if (hest_disable) + return -EINVAL; + + hest_hdr = (struct acpi_hest_header *)(hest_tab + 1); + for (i = 0; i < hest_tab->error_source_count; i++) { + len = hest_esrc_len(hest_hdr); + if (!len) { + pr_warning(FW_WARN HEST_PFX + "Unknown or unused hardware error source " + "type: %d for hardware error source: %d.\n", + hest_hdr->type, hest_hdr->source_id); + return -EINVAL; + } + if ((void *)hest_hdr + len > + (void *)hest_tab + hest_tab->header.length) { + pr_warning(FW_BUG HEST_PFX + "Table contents overflow for hardware error source: %d.\n", + hest_hdr->source_id); + return -EINVAL; + } + + rc = func(hest_hdr, data); + if (rc) + return rc; + + hest_hdr = (void *)hest_hdr + len; + } + + return 0; +} +EXPORT_SYMBOL_GPL(apei_hest_parse); + +static int __init setup_hest_disable(char *str) +{ + hest_disable = 1; + return 0; +} + +__setup("hest_disable", setup_hest_disable); + +static int __init hest_init(void) +{ + acpi_status status; + int rc = -ENODEV; + + if (acpi_disabled) + goto err; + + if (hest_disable) { + pr_info(HEST_PFX "HEST table parsing is disabled.\n"); + goto err; + } + + status = acpi_get_table(ACPI_SIG_HEST, 0, + (struct acpi_table_header **)&hest_tab); + if (status == AE_NOT_FOUND) { + pr_info(HEST_PFX "Table is not found!\n"); + goto err; + } else if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + pr_err(HEST_PFX "Failed to get table, %s\n", msg); + rc = -EINVAL; + goto err; + } + + rc =
apei_hest_parse(hest_void_parse, NULL); + if (rc) + goto err; + + pr_info(HEST_PFX "HEST table parsing is initialized.\n"); + + return 0; +err: + hest_disable = 1; + return rc; +} + +subsys_initcall(hest_init); diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c new file mode 100644 index 000000000000..814b19249616 --- /dev/null +++ b/drivers/acpi/atomicio.c @@ -0,0 +1,360 @@ +/* + * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then + * accessing in atomic context. + * + * This is used by the NMI handler to access IO memory areas, because + * ioremap/iounmap cannot be used in an NMI handler. The IO memory + * area is pre-mapped in process context and accessed in the NMI + * handler. + * + * Copyright (C) 2009-2010, Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/io.h> +#include <linux/kref.h> +#include <linux/rculist.h> +#include <linux/interrupt.h> +#include <acpi/atomicio.h> + +#define ACPI_PFX "ACPI: " + +static LIST_HEAD(acpi_iomaps); +/* + * Used for mutual exclusion between writers of the acpi_iomaps list; + * for synchronization between readers and writers, RCU is used. + */ +static DEFINE_SPINLOCK(acpi_iomaps_lock); + +struct acpi_iomap { + struct list_head list; + void __iomem *vaddr; + unsigned long size; + phys_addr_t paddr; + struct kref ref; +}; + +/* acpi_iomaps_lock or RCU read lock must be held before calling */ +static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr, + unsigned long size) +{ + struct acpi_iomap *map; + + list_for_each_entry_rcu(map, &acpi_iomaps, list) { + if (map->paddr + map->size >= paddr + size && + map->paddr <= paddr) + return map; + } + return NULL; +} + +/* + * Atomic "ioremap" used by the NMI handler; if the specified IO memory + * area is not pre-mapped, NULL will be returned. + * + * acpi_iomaps_lock or RCU read lock must be held before calling + */ +static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, + unsigned long size) +{ + struct acpi_iomap *map; + + map = __acpi_find_iomap(paddr, size); + if (map) + return map->vaddr + (paddr - map->paddr); + else + return NULL; +} + +/* acpi_iomaps_lock must be held before calling */ +static void __iomem *__acpi_try_ioremap(phys_addr_t paddr, + unsigned long size) +{ + struct acpi_iomap *map; + + map = __acpi_find_iomap(paddr, size); + if (map) { + kref_get(&map->ref); + return map->vaddr + (paddr - map->paddr); + } else + return NULL; +} + +/* + * Used to pre-map the specified IO memory area. First try to find + * whether the area is already pre-mapped; if it is, increase the + * reference count (in __acpi_try_ioremap) and return; otherwise, do + * the real ioremap and add the mapping to the acpi_iomaps list.
+ */ +static void __iomem *acpi_pre_map(phys_addr_t paddr, + unsigned long size) +{ + void __iomem *vaddr; + struct acpi_iomap *map; + unsigned long pg_sz, flags; + phys_addr_t pg_off; + + spin_lock_irqsave(&acpi_iomaps_lock, flags); + vaddr = __acpi_try_ioremap(paddr, size); + spin_unlock_irqrestore(&acpi_iomaps_lock, flags); + if (vaddr) + return vaddr; + + pg_off = paddr & PAGE_MASK; + pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off; + vaddr = ioremap(pg_off, pg_sz); + if (!vaddr) + return NULL; + map = kmalloc(sizeof(*map), GFP_KERNEL); + if (!map) + goto err_unmap; + INIT_LIST_HEAD(&map->list); + map->paddr = pg_off; + map->size = pg_sz; + map->vaddr = vaddr; + kref_init(&map->ref); + + spin_lock_irqsave(&acpi_iomaps_lock, flags); + vaddr = __acpi_try_ioremap(paddr, size); + if (vaddr) { + spin_unlock_irqrestore(&acpi_iomaps_lock, flags); + iounmap(map->vaddr); + kfree(map); + return vaddr; + } + list_add_tail_rcu(&map->list, &acpi_iomaps); + spin_unlock_irqrestore(&acpi_iomaps_lock, flags); + + return vaddr + (paddr - pg_off); +err_unmap: + iounmap(vaddr); + return NULL; +} + +/* acpi_iomaps_lock must be held before calling */ +static void __acpi_kref_del_iomap(struct kref *ref) +{ + struct acpi_iomap *map; + + map = container_of(ref, struct acpi_iomap, ref); + list_del_rcu(&map->list); +} + +/* + * Used to post-unmap the specified IO memory area. The iounmap is + * done only if the reference count goes to zero. + */ +static void acpi_post_unmap(phys_addr_t paddr, unsigned long size) +{ + struct acpi_iomap *map; + unsigned long flags; + int del; + + spin_lock_irqsave(&acpi_iomaps_lock, flags); + map = __acpi_find_iomap(paddr, size); + BUG_ON(!map); + del = kref_put(&map->ref, __acpi_kref_del_iomap); + spin_unlock_irqrestore(&acpi_iomaps_lock, flags); + + if (!del) + return; + + synchronize_rcu(); + iounmap(map->vaddr); + kfree(map); +} + +/* In an NMI handler, silent should be set to 1 */ +static int acpi_check_gar(struct acpi_generic_address *reg, + u64 *paddr, int silent) +{ + u32 width, space_id; + + width = reg->bit_width; + space_id = reg->space_id; + /* Handle possible alignment issues */ + memcpy(paddr, &reg->address, sizeof(*paddr)); + if (!*paddr) { + if (!silent) + pr_warning(FW_BUG ACPI_PFX + "Invalid physical address in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { + if (!silent) + pr_warning(FW_BUG ACPI_PFX + "Invalid bit width in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && + space_id != ACPI_ADR_SPACE_SYSTEM_IO) { + if (!silent) + pr_warning(FW_BUG ACPI_PFX + "Invalid address space type in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + return 0; +} + +/* Pre-map, working on GAR */ +int acpi_pre_map_gar(struct acpi_generic_address *reg) +{ + u64 paddr; + void __iomem *vaddr; + int rc; + + if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) + return 0; + + rc = acpi_check_gar(reg, &paddr, 0); + if (rc) + return rc; + + vaddr = acpi_pre_map(paddr, reg->bit_width / 8); + if (!vaddr) + return -EIO; + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_pre_map_gar); + +/* Post-unmap, working on GAR */ +int acpi_post_unmap_gar(struct acpi_generic_address *reg) +{ + u64 paddr; + int rc; + + if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) + return 0; + + rc = acpi_check_gar(reg, &paddr, 0); + if (rc) + return rc; + + acpi_post_unmap(paddr, reg->bit_width / 8); + + return 0; +}
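For illustration, a minimal sketch of the usage pattern this file is built around: pre-map a GAR once in process context, then access it from atomic (even NMI) context via acpi_atomic_read(), defined below. The err_status_reg variable and the function names are hypothetical, not part of this patch.

#include <linux/acpi.h>
#include <acpi/atomicio.h>

static struct acpi_generic_address err_status_reg;	/* hypothetical; filled in from a firmware table */
static u64 last_err_status;

static int __init my_src_init(void)
{
	/* process context: the real ioremap happens once, up front */
	return acpi_pre_map_gar(&err_status_reg);
}

/* may run in NMI context: touches only the pre-mapped area */
static void my_src_check(void)
{
	u64 val;

	if (!acpi_atomic_read(&val, &err_status_reg))
		last_err_status = val;
}

static void __exit my_src_exit(void)
{
	acpi_post_unmap_gar(&err_status_reg);
}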
+EXPORT_SYMBOL_GPL(acpi_post_unmap_gar); + +/* + * Can be used in atomic (including NMI) or process context. The RCU + * read lock can only be released after the IO memory area access is + * complete. + */ +static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width) +{ + void __iomem *addr; + + rcu_read_lock(); + addr = __acpi_ioremap_fast(paddr, width); + switch (width) { + case 8: + *val = readb(addr); + break; + case 16: + *val = readw(addr); + break; + case 32: + *val = readl(addr); + break; + case 64: + *val = readq(addr); + break; + default: + return -EINVAL; + } + rcu_read_unlock(); + + return 0; +} + +static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width) +{ + void __iomem *addr; + + rcu_read_lock(); + addr = __acpi_ioremap_fast(paddr, width); + switch (width) { + case 8: + writeb(val, addr); + break; + case 16: + writew(val, addr); + break; + case 32: + writel(val, addr); + break; + case 64: + writeq(val, addr); + break; + default: + return -EINVAL; + } + rcu_read_unlock(); + + return 0; +} + +/* GAR accessing in atomic (including NMI) or process context */ +int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg) +{ + u64 paddr; + int rc; + + rc = acpi_check_gar(reg, &paddr, 1); + if (rc) + return rc; + + *val = 0; + switch (reg->space_id) { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + return acpi_atomic_read_mem(paddr, val, reg->bit_width); + case ACPI_ADR_SPACE_SYSTEM_IO: + return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(acpi_atomic_read); + +int acpi_atomic_write(u64 val, struct acpi_generic_address *reg) +{ + u64 paddr; + int rc; + + rc = acpi_check_gar(reg, &paddr, 1); + if (rc) + return rc; + + switch (reg->space_id) { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + return acpi_atomic_write_mem(paddr, val, reg->bit_width); + case ACPI_ADR_SPACE_SYSTEM_IO: + return acpi_os_write_port(paddr, val, reg->bit_width); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(acpi_atomic_write); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 9042a8579668..c1d23cd71652 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -401,11 +401,6 @@ static void acpi_print_osc_error(acpi_handle handle, printk("\n"); } -static u8 hex_val(unsigned char c) -{ - return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10; -} - static acpi_status acpi_str_to_uuid(char *str, u8 *uuid) { int i; @@ -422,8 +417,8 @@ static acpi_status acpi_str_to_uuid(char *str, u8 *uuid) return AE_BAD_PARAMETER; } for (i = 0; i < 16; i++) { - uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4; - uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]); + uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4; + uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]); } return AE_OK; } diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index f2234db85da0..e61d4f8e62a5 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1027,10 +1027,9 @@ int __init acpi_ec_ecdt_probe(void) /* Don't trust ECDT, which comes from ASUSTek */ if (!EC_FLAGS_VALIDATE_ECDT) goto install; - saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); + saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL); if (!saved_ec) return -ENOMEM; - memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); /* fall through */ } diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c new file mode 100644 index 000000000000..d0c1967f7597 --- /dev/null +++ b/drivers/acpi/hed.c @@ -0,0 +1,112 @@ +/* + * ACPI Hardware Error Device (PNP0C33) Driver + * + * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com> + * + * ACPI Hardware Error Device is used to report some hardware errors + * notified via SCI, mainly the corrected errors. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> +#include <acpi/hed.h> + +static struct acpi_device_id acpi_hed_ids[] = { + {"PNP0C33", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, acpi_hed_ids); + +static acpi_handle hed_handle; + +static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list); + +int register_acpi_hed_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&acpi_hed_notify_list, nb); +} +EXPORT_SYMBOL_GPL(register_acpi_hed_notifier); + +void unregister_acpi_hed_notifier(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb); +} +EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier); + +/* + * The SCI used to report hardware errors is forwarded to the + * listeners of HED. It is used by HEST Generic Hardware Error + * Sources with notify type SCI. + */ +static void acpi_hed_notify(struct acpi_device *device, u32 event) +{ + blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL); +} + +static int __devinit acpi_hed_add(struct acpi_device *device) +{ + /* Only one hardware error device */ + if (hed_handle) + return -EINVAL; + hed_handle = device->handle; + return 0; +} + +static int __devexit acpi_hed_remove(struct acpi_device *device, int type) +{ + hed_handle = NULL; + return 0; +} + +static struct acpi_driver acpi_hed_driver = { + .name = "hardware_error_device", + .class = "hardware_error", + .ids = acpi_hed_ids, + .ops = { + .add = acpi_hed_add, + .remove = acpi_hed_remove, + .notify = acpi_hed_notify, + }, +}; + +static int __init acpi_hed_init(void) +{ + if (acpi_disabled) + return -ENODEV; + + if (acpi_bus_register_driver(&acpi_hed_driver) < 0) + return -ENODEV; + + return 0; +} + +static void __exit acpi_hed_exit(void) +{ + acpi_bus_unregister_driver(&acpi_hed_driver); +} + +module_init(acpi_hed_init); +module_exit(acpi_hed_exit); + +ACPI_MODULE_NAME("hed"); +MODULE_AUTHOR("Huang Ying"); +MODULE_DESCRIPTION("ACPI Hardware Error Device Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c deleted file mode 100644 index 1c527a192872..000000000000 --- a/drivers/acpi/hest.c +++ /dev/null @@ -1,139 +0,0 @@ -#include <linux/acpi.h> -#include <linux/pci.h> - -#define PREFIX "ACPI: " - -static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p) -{ - return sizeof(*p) + - (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks); -} - -static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p) -{ - return sizeof(*p) + - (sizeof(struct acpi_hest_ia_error_bank) *
p->num_hardware_banks); -} - -static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p) -{ - return sizeof(*p); -} - -static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p) -{ - return sizeof(*p); -} - -static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci) -{ - return (0 == pci_domain_nr(pci->bus) && - p->bus == pci->bus->number && - p->device == PCI_SLOT(pci->devfn) && - p->function == PCI_FUNC(pci->devfn)); -} - -static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first) -{ - struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header); - unsigned long rc=0; - u8 pcie_type = 0; - u8 bridge = 0; - switch (type) { - case ACPI_HEST_TYPE_AER_ROOT_PORT: - rc = sizeof(struct acpi_hest_aer_root); - pcie_type = PCI_EXP_TYPE_ROOT_PORT; - break; - case ACPI_HEST_TYPE_AER_ENDPOINT: - rc = sizeof(struct acpi_hest_aer); - pcie_type = PCI_EXP_TYPE_ENDPOINT; - break; - case ACPI_HEST_TYPE_AER_BRIDGE: - rc = sizeof(struct acpi_hest_aer_bridge); - if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE) - bridge = 1; - break; - } - - if (p->flags & ACPI_HEST_GLOBAL) { - if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge) - *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); - } - else - if (hest_match_pci(p, pci)) - *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); - return rc; -} - -static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci) -{ - struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader; - void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */ - struct acpi_hest_header *hdr = p; - - int i; - int firmware_first = 0; - static unsigned char printed_unused = 0; - static unsigned char printed_reserved = 0; - - for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) { - switch (hdr->type) { - case ACPI_HEST_TYPE_IA32_CHECK: - p += parse_acpi_hest_ia_machine_check(p); - break; - case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK: - p += parse_acpi_hest_ia_corrected(p); - break; - case ACPI_HEST_TYPE_IA32_NMI: - p += parse_acpi_hest_ia_nmi(p); - break; - /* These three should never appear */ - case ACPI_HEST_TYPE_NOT_USED3: - case ACPI_HEST_TYPE_NOT_USED4: - case ACPI_HEST_TYPE_NOT_USED5: - if (!printed_unused) { - printk(KERN_DEBUG PREFIX - "HEST Error Source list contains an obsolete type (%d).\n", hdr->type); - printed_unused = 1; - } - break; - case ACPI_HEST_TYPE_AER_ROOT_PORT: - case ACPI_HEST_TYPE_AER_ENDPOINT: - case ACPI_HEST_TYPE_AER_BRIDGE: - p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first); - break; - case ACPI_HEST_TYPE_GENERIC_ERROR: - p += parse_acpi_hest_generic(p); - break; - /* These should never appear either */ - case ACPI_HEST_TYPE_RESERVED: - default: - if (!printed_reserved) { - printk(KERN_DEBUG PREFIX - "HEST Error Source list contains a reserved type (%d).\n", hdr->type); - printed_reserved = 1; - } - break; - } - } - return firmware_first; -} - -int acpi_hest_firmware_first_pci(struct pci_dev *pci) -{ - acpi_status status = AE_NOT_FOUND; - struct acpi_table_header *hest = NULL; - - if (acpi_disabled) - return 0; - - status = acpi_get_table(ACPI_SIG_HEST, 1, &hest); - - if (ACPI_SUCCESS(status)) { - if (acpi_hest_firmware_first(hest, pci)) { - return 1; - } - } - return 0; -} -EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 
4bc1c4178f50..78418ce4fc78 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -1207,6 +1207,15 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n, EXPORT_SYMBOL(acpi_check_mem_region); /* + * Let drivers know whether the resource checks are effective + */ +int acpi_resources_are_enforced(void) +{ + return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT; +} +EXPORT_SYMBOL(acpi_resources_are_enforced); + +/* * Acquire a spinlock. * * handle is a pointer to the spinlock_t. diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index aefce33f2a09..4eac59393edc 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus) struct acpi_pci_root *root; list_for_each_entry(root, &acpi_pci_roots, node) - if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus)) + if ((root->segment == (u16) seg) && + (root->secondary.start == (u16) bus)) return root->device->handle; return NULL; } @@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge); static acpi_status get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) { - int *busnr = data; + struct resource *res = data; struct acpi_resource_address64 address; if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 && @@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) acpi_resource_to_address64(resource, &address); if ((address.address_length > 0) && - (address.resource_type == ACPI_BUS_NUMBER_RANGE)) - *busnr = address.minimum; + (address.resource_type == ACPI_BUS_NUMBER_RANGE)) { + res->start = address.minimum; + res->end = address.minimum + address.address_length - 1; + } return AE_OK; } static acpi_status try_get_root_bridge_busnr(acpi_handle handle, - unsigned long long *bus) + struct resource *res) { acpi_status status; - int busnum; - busnum = -1; + res->start = -1; status = acpi_walk_resources(handle, METHOD_NAME__CRS, - get_root_bridge_busnr_callback, &busnum); + get_root_bridge_busnr_callback, res); if (ACPI_FAILURE(status)) return status; - /* Check if we really get a bus number from _CRS */ - if (busnum == -1) + if (res->start == -1) return AE_ERROR; - *bus = busnum; return AE_OK; } @@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) struct acpi_device *child; u32 flags, base_flags; + root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); + if (!root) + return -ENOMEM; + segment = 0; status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, &segment); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { printk(KERN_ERR PREFIX "can't evaluate _SEG\n"); - return -ENODEV; + result = -ENODEV; + goto end; } /* Check _CRS first, then _BBN. If no _BBN, default to zero. */ - bus = 0; - status = try_get_root_bridge_busnr(device->handle, &bus); + root->secondary.flags = IORESOURCE_BUS; + status = try_get_root_bridge_busnr(device->handle, &root->secondary); if (ACPI_FAILURE(status)) { + /* + * We need both the start and end of the downstream bus range + * to interpret _CBA (MMCONFIG base address), so it really is + * supposed to be in _CRS. If we don't find it there, all we + * can do is assume [_BBN-0xFF] or [0-0xFF]. 
+ */ + root->secondary.end = 0xFF; + printk(KERN_WARNING FW_BUG PREFIX + "no secondary bus range in _CRS\n"); status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - printk(KERN_ERR PREFIX - "no bus number in _CRS and can't evaluate _BBN\n"); - return -ENODEV; + if (ACPI_SUCCESS(status)) + root->secondary.start = bus; + else if (status == AE_NOT_FOUND) + root->secondary.start = 0; + else { + printk(KERN_ERR PREFIX "can't evaluate _BBN\n"); + result = -ENODEV; + goto end; } } - root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); - if (!root) - return -ENOMEM; - INIT_LIST_HEAD(&root->node); root->device = device; root->segment = segment & 0xFFFF; - root->bus_nr = bus & 0xFF; strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); device->driver_data = root; @@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) /* TBD: Locking */ list_add_tail(&root->node, &acpi_pci_roots); - printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n", + printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n", acpi_device_name(device), acpi_device_bid(device), - root->segment, root->bus_nr); + root->segment, &root->secondary); /* * Scan the Root Bridge @@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) * PCI namespace does not get created until this call is made (and * thus the root bridge's pci_dev does not exist). */ - root->bus = pci_acpi_scan_root(device, segment, bus); + root->bus = pci_acpi_scan_root(root); if (!root->bus) { printk(KERN_ERR PREFIX "Bus %04x:%02x not present in PCI namespace\n", - root->segment, root->bus_nr); + root->segment, (unsigned int)root->secondary.start); result = -ENODEV; goto end; } diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 5675d9747e87..b1034a9ada4e 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -616,7 +616,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) acpi_processor_get_limit_info(pr); - acpi_processor_power_init(pr, device); + if (cpuidle_get_driver() == &acpi_idle_driver) + acpi_processor_power_init(pr, device); pr->cdev = thermal_cooling_device_register("Processor", device, &processor_cooling_ops); @@ -920,9 +921,14 @@ static int __init acpi_processor_init(void) if (!acpi_processor_dir) return -ENOMEM; #endif - result = cpuidle_register_driver(&acpi_idle_driver); - if (result < 0) - goto out_proc; + + if (!cpuidle_register_driver(&acpi_idle_driver)) { + printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", + acpi_idle_driver.name); + } else { + printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", + cpuidle_get_driver()->name); + } result = acpi_bus_register_driver(&acpi_processor_driver); if (result < 0) @@ -941,7 +947,6 @@ static int __init acpi_processor_init(void) out_cpuidle: cpuidle_unregister_driver(&acpi_idle_driver); -out_proc: #ifdef CONFIG_ACPI_PROCFS remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); #endif diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index c3817e1f32c7..2e8c27d48f2b 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -727,19 +727,9 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) break; } - if (pr->power.states[i].promotion.state) - seq_printf(seq, "promotion[C%zd] ", - (pr->power.states[i].promotion.state - - pr->power.states)); - else - 
seq_puts(seq, "promotion[--] "); - - if (pr->power.states[i].demotion.state) - seq_printf(seq, "demotion[C%zd] ", - (pr->power.states[i].demotion.state - - pr->power.states)); - else - seq_puts(seq, "demotion[--] "); + seq_puts(seq, "promotion[--] "); + + seq_puts(seq, "demotion[--] "); seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", pr->power.states[i].latency, @@ -869,6 +859,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, struct acpi_processor *pr; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); ktime_t kt1, kt2; + s64 idle_time_ns; s64 idle_time; s64 sleep_ticks = 0; @@ -881,6 +872,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, return(acpi_idle_enter_c1(dev, state)); local_irq_disable(); + if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -888,12 +880,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, * NEED_RESCHED: */ smp_mb(); - } - if (unlikely(need_resched())) { - current_thread_info()->status |= TS_POLLING; - local_irq_enable(); - return 0; + if (unlikely(need_resched())) { + current_thread_info()->status |= TS_POLLING; + local_irq_enable(); + return 0; + } } /* @@ -910,15 +902,18 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, sched_clock_idle_sleep_event(); acpi_idle_do_entry(cx); kt2 = ktime_get_real(); - idle_time = ktime_to_us(ktime_sub(kt2, kt1)); + idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); + idle_time = idle_time_ns; + do_div(idle_time, NSEC_PER_USEC); sleep_ticks = us_to_pm_timer_ticks(idle_time); /* Tell the scheduler how much we idled: */ - sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); + sched_clock_idle_wakeup_event(idle_time_ns); local_irq_enable(); - current_thread_info()->status |= TS_POLLING; + if (cx->entry_method != ACPI_CSTATE_FFH) + current_thread_info()->status |= TS_POLLING; cx->usage++; @@ -943,6 +938,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, struct acpi_processor *pr; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); ktime_t kt1, kt2; + s64 idle_time_ns; s64 idle_time; s64 sleep_ticks = 0; @@ -968,6 +964,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, } local_irq_disable(); + if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -975,12 +972,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, * NEED_RESCHED: */ smp_mb(); - } - if (unlikely(need_resched())) { - current_thread_info()->status |= TS_POLLING; - local_irq_enable(); - return 0; + if (unlikely(need_resched())) { + current_thread_info()->status |= TS_POLLING; + local_irq_enable(); + return 0; + } } acpi_unlazy_tlb(smp_processor_id()); @@ -1025,14 +1022,17 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, spin_unlock(&c3_lock); } kt2 = ktime_get_real(); - idle_time = ktime_to_us(ktime_sub(kt2, kt1)); + idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); + idle_time = idle_time_ns; + do_div(idle_time, NSEC_PER_USEC); sleep_ticks = us_to_pm_timer_ticks(idle_time); /* Tell the scheduler how much we idled: */ - sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); + sched_clock_idle_wakeup_event(idle_time_ns); local_irq_enable(); - current_thread_info()->status |= TS_POLLING; + if (cx->entry_method != ACPI_CSTATE_FFH) + current_thread_info()->status |= TS_POLLING; cx->usage++; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index baa76bbf244a..4ab2275b4461 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c
@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state) #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; -/* - * According to the ACPI specification the BIOS should make sure that ACPI is - * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still, - * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI - * on such systems during resume. Unfortunately that doesn't help in - * particularly pathological cases in which SCI_EN has to be set directly on - * resume, although the specification states very clearly that this flag is - * owned by the hardware. The set_sci_en_on_resume variable will be set in such - * cases. - */ -static bool set_sci_en_on_resume; - -void __init acpi_set_sci_en_on_resume(void) -{ - set_sci_en_on_resume = true; -} /* * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the @@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state) break; } - /* If ACPI is not enabled by the BIOS, we need to enable it here. */ - if (set_sci_en_on_resume) - acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); - else - acpi_enable(); + /* This violates the spec but is required for bug compatibility. */ + acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); /* Reprogram control registers and execute _BFS */ acpi_leave_sleep_state_prep(acpi_state); @@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d) return 0; } -static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d) -{ - set_sci_en_on_resume = true; - return 0; -} - static struct dmi_system_id __initdata acpisleep_dmi_table[] = { { .callback = init_old_suspend_ordering, @@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, }, { - .callback = init_set_sci_en_on_resume, - .ident = "Apple MacBook 1,1", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Apple MacMini 1,1", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), - }, - }, - { .callback = init_old_suspend_ordering, .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", .matches = { @@ -394,94 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, }, { - .callback = init_set_sci_en_on_resume, - .ident = "Toshiba Satellite L300", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard HP G7000 Notebook PC", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard Pavilion dv4", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard Pavilion dv7", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = 
"Hewlett-Packard Compaq Presario C700 Notebook PC", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Lenovo ThinkPad T410", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Lenovo ThinkPad T510", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Lenovo ThinkPad W510", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Lenovo ThinkPad X201[s]", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"), - }, - }, - { .callback = init_old_suspend_ordering, .ident = "Panasonic CF51-2L", .matches = { @@ -490,30 +361,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), }, }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Dell Studio 1558", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Dell Studio 1557", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Dell Studio 1555", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"), - }, - }, {}, }; #endif /* CONFIG_SUSPEND */ diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index 8a8f3b3382a6..25b8bd149284 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -1,6 +1,6 @@ extern u8 sleep_states[]; -extern int acpi_suspend (u32 state); +extern int acpi_suspend(u32 state); extern void acpi_enable_wakeup_device_prep(u8 sleep_state); extern void acpi_enable_wakeup_device(u8 sleep_state); diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 8a0ed2800e63..f336bca7c450 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id, unsigned long table_end; acpi_size tbl_size; - if (acpi_disabled && !acpi_ht) + if (acpi_disabled) return -ENODEV; if (!handler) @@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler) struct acpi_table_header *table = NULL; acpi_size tbl_size; - if (acpi_disabled && !acpi_ht) + if (acpi_disabled) return -ENODEV; if (!handler) diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index a0c93b321482..9865d46f49a8 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -45,6 +45,7 @@ #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <linux/suspend.h> +#include <acpi/video.h> #define PREFIX "ACPI: " @@ -65,11 +66,6 @@ #define MAX_NAME_LEN 20 -#define ACPI_VIDEO_DISPLAY_CRT 1 -#define ACPI_VIDEO_DISPLAY_TV 2 -#define ACPI_VIDEO_DISPLAY_DVI 3 -#define ACPI_VIDEO_DISPLAY_LCD 4 - #define _COMPONENT ACPI_VIDEO_COMPONENT ACPI_MODULE_NAME("video"); @@ -1007,11 
+1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) result = acpi_video_init_brightness(device); if (result) return; - name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); + name = kasprintf(GFP_KERNEL, "acpi_video%d", count); if (!name) return; + count++; - sprintf(name, "acpi_video%d", count++); memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = device->brightness->count - 3; device->backlight = backlight_device_register(name, NULL, device, @@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) if (device->cap._DCS && device->cap._DSS) { static int count; char *name; - name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); + name = kasprintf(GFP_KERNEL, "acpi_video%d", count); if (!name) return; - sprintf(name, "acpi_video%d", count++); + count++; device->output_dev = video_output_register(name, NULL, device, &acpi_output_properties); kfree(name); @@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id } static int +acpi_video_get_device_type(struct acpi_video_bus *video, + unsigned long device_id) +{ + struct acpi_video_enumerated_device *ids; + int i; + + for (i = 0; i < video->attached_count; i++) { + ids = &video->attached_array[i]; + if ((ids->value.int_val & 0xffff) == device_id) + return ids->value.int_val; + } + + return 0; +} + +static int acpi_video_bus_get_one_device(struct acpi_device *device, struct acpi_video_bus *video) { unsigned long long device_id; - int status; + int status, device_type; struct acpi_video_device *data; struct acpi_video_device_attrib* attribute; @@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device, } if(attribute->bios_can_detect) data->flags.bios = 1; - } else - data->flags.unknown = 1; + } else { + /* Check for legacy IDs */ + device_type = acpi_video_get_device_type(video, + device_id); + /* Ignore bits 16 and 18-20 */ + switch (device_type & 0xffe2ffff) { + case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR: + data->flags.crt = 1; + break; + case ACPI_VIDEO_DISPLAY_LEGACY_PANEL: + data->flags.lcd = 1; + break; + case ACPI_VIDEO_DISPLAY_LEGACY_TV: + data->flags.tvout = 1; + break; + default: + data->flags.unknown = 1; + } + } acpi_video_device_bind(video, data); acpi_video_device_find_cap(data); @@ -2032,6 +2061,71 @@ out: return result; } +int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, + void **edid) +{ + struct acpi_video_bus *video; + struct acpi_video_device *video_device; + union acpi_object *buffer = NULL; + acpi_status status; + int i, length; + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + + video = acpi_driver_data(device); + + for (i = 0; i < video->attached_count; i++) { + video_device = video->attached_array[i].bind_info; + length = 256; + + if (!video_device) + continue; + + if (type) { + switch (type) { + case ACPI_VIDEO_DISPLAY_CRT: + if (!video_device->flags.crt) + continue; + break; + case ACPI_VIDEO_DISPLAY_TV: + if (!video_device->flags.tvout) + continue; + break; + case ACPI_VIDEO_DISPLAY_DVI: + if (!video_device->flags.dvi) + continue; + break; + case ACPI_VIDEO_DISPLAY_LCD: + if (!video_device->flags.lcd) + continue; + break; + } + } else if (video_device->device_id != device_id) { + continue; + } + + status = acpi_video_device_EDID(video_device, &buffer, length); + + if (ACPI_FAILURE(status) || !buffer || + buffer->type != ACPI_TYPE_BUFFER) { + length = 128; + status = acpi_video_device_EDID(video_device, &buffer, + length); + if 
(ACPI_FAILURE(status) || !buffer || + buffer->type != ACPI_TYPE_BUFFER) { + continue; + } + } + + *edid = buffer->buffer.pointer; + return length; + } + + return -ENODEV; +} +EXPORT_SYMBOL(acpi_video_get_edid); + static int acpi_video_bus_get_devices(struct acpi_video_bus *video, struct acpi_device *device) diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index fc2f26b9b407..c5fef01b3c95 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str) ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; if (!strcmp("video", str)) acpi_video_support |= - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; + ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO; } return 1; } diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index e68541f662b9..73f883333a0d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -57,6 +57,8 @@ config SATA_PMP This option adds support for SATA Port Multipliers (the SATA version of an ethernet hub, or SAS expander). +comment "Controllers with non-SFF native interface" + config SATA_AHCI tristate "AHCI SATA support" depends on PCI @@ -73,11 +75,12 @@ config SATA_AHCI_PLATFORM If unsure, say N. -config SATA_SIL24 - tristate "Silicon Image 3124/3132 SATA support" - depends on PCI +config SATA_FSL + tristate "Freescale 3.0Gbps SATA support" + depends on FSL_SOC help - This option enables support for Silicon Image 3124/3132 Serial ATA. + This option enables support for Freescale 3.0Gbps SATA controller. + It can be found on MPC837x and MPC8315. If unsure, say N. @@ -87,12 +90,11 @@ config SATA_INIC162X help This option enables support for Initio 162x Serial ATA. -config SATA_FSL - tristate "Freescale 3.0Gbps SATA support" - depends on FSL_SOC +config SATA_SIL24 + tristate "Silicon Image 3124/3132 SATA support" + depends on PCI help - This option enables support for Freescale 3.0Gbps SATA controller. - It can be found on MPC837x and MPC8315. + This option enables support for Silicon Image 3124/3132 Serial ATA. If unsure, say N. @@ -116,15 +118,65 @@ config ATA_SFF if ATA_SFF -config SATA_SVW - tristate "ServerWorks Frodo / Apple K2 SATA support" +comment "SFF controllers with custom DMA interface" + +config PDC_ADMA + tristate "Pacific Digital ADMA support" depends on PCI help - This option enables support for Broadcom/Serverworks/Apple K2 - SATA support. + This option enables support for Pacific Digital ADMA controllers + + If unsure, say N. + +config PATA_MPC52xx + tristate "Freescale MPC52xx SoC internal IDE" + depends on PPC_MPC52xx && PPC_BESTCOMM + select PPC_BESTCOMM_ATA + help + This option enables support for integrated IDE controller + of the Freescale MPC52xx SoC. + + If unsure, say N. + +config PATA_OCTEON_CF + tristate "OCTEON Boot Bus Compact Flash support" + depends on CPU_CAVIUM_OCTEON + help + This option enables a polled compact flash driver for use with + compact flash cards attached to the OCTEON boot bus. + + If unsure, say N. + +config SATA_QSTOR + tristate "Pacific Digital SATA QStor support" + depends on PCI + help + This option enables support for Pacific Digital Serial ATA QStor. + + If unsure, say N. + +config SATA_SX4 + tristate "Promise SATA SX4 support (Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables support for Promise Serial ATA SX4. If unsure, say N. +config ATA_BMDMA + bool "ATA BMDMA support" + default y + help + This option adds support for SFF ATA controllers with BMDMA + capability. 
BMDMA stands for bus-master DMA and is the + de-facto DMA interface for SFF controllers. + + If unsure, say Y. + +if ATA_BMDMA + +comment "SATA SFF controllers with BMDMA" + config ATA_PIIX tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support" depends on PCI @@ -152,22 +204,6 @@ config SATA_NV If unsure, say N. -config PDC_ADMA - tristate "Pacific Digital ADMA support" - depends on PCI - help - This option enables support for Pacific Digital ADMA controllers - - If unsure, say N. - -config SATA_QSTOR - tristate "Pacific Digital SATA QStor support" - depends on PCI - help - This option enables support for Pacific Digital Serial ATA QStor. - - If unsure, say N. - config SATA_PROMISE tristate "Promise SATA TX2/TX4 support" depends on PCI @@ -176,14 +212,6 @@ config SATA_PROMISE If unsure, say N. -config SATA_SX4 - tristate "Promise SATA SX4 support (Experimental)" - depends on PCI && EXPERIMENTAL - help - This option enables support for Promise Serial ATA SX4. - - If unsure, say N. - config SATA_SIL tristate "Silicon Image SATA support" depends on PCI @@ -203,6 +231,15 @@ config SATA_SIS enable the PATA_SIS driver in the config. If unsure, say N. +config SATA_SVW + tristate "ServerWorks Frodo / Apple K2 SATA support" + depends on PCI + help + This option enables support for Broadcom/Serverworks/Apple K2 + SATA controllers. + + If unsure, say N. + config SATA_ULI tristate "ULi Electronics SATA support" depends on PCI @@ -227,14 +264,7 @@ config SATA_VITESSE If unsure, say N. -config PATA_ACPI - tristate "ACPI firmware driver for PATA" - depends on ATA_ACPI - help - This option enables an ACPI method driver which drives - motherboard PATA controller interfaces through the ACPI - firmware in the BIOS. This driver can sometimes handle - otherwise unsupported hardware. +comment "PATA SFF controllers with BMDMA" config PATA_ALI tristate "ALi PATA support" @@ -262,40 +292,30 @@ config PATA_ARTOP If unsure, say N. -config PATA_ATP867X - tristate "ARTOP/Acard ATP867X PATA support" +config PATA_ATIIXP + tristate "ATI PATA support" depends on PCI help - This option enables support for ARTOP/Acard ATP867X PATA - controllers. - - If unsure, say N. - -config PATA_AT32 - tristate "Atmel AVR32 PATA support (Experimental)" - depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL - help - This option enables support for the IDE devices on the - Atmel AT32AP platform. + This option enables support for the ATI ATA interfaces + found on the many ATI chipsets. If unsure, say N. -config PATA_ATIIXP - tristate "ATI PATA support" +config PATA_ATP867X + tristate "ARTOP/Acard ATP867X PATA support" depends on PCI help - This option enables support for the ATI ATA interfaces - found on the many ATI chipsets. + This option enables support for ARTOP/Acard ATP867X PATA + controllers. If unsure, say N. -config PATA_CMD640_PCI - tristate "CMD640 PCI PATA support (Experimental)" - depends on PCI && EXPERIMENTAL +config PATA_BF54X + tristate "Blackfin 54x ATAPI support" + depends on BF542 || BF548 || BF549 help - This option enables support for the CMD640 PCI IDE - interface chip. Only the primary channel is currently - supported. + This option enables support for the built-in ATAPI controller on + Blackfin 54x family chips. If unsure, say N. @@ -362,15 +382,6 @@ config PATA_EFAR If unsure, say N. -config ATA_GENERIC - tristate "Generic ATA support" - depends on PCI - help - This option enables support for generic BIOS configured - ATA controllers via the new ATA layer - - If unsure, say N.
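The new ATA_BMDMA option above is also a compile-time symbol, so PIO-only configurations can drop the bus-master paths entirely; the atapi_send_cdb() hunk later in this patch wraps its ATAPI_PROT_DMA case in exactly this way. A rough illustration of the pattern (the function below is a hypothetical sketch, not part of this patch):

#include <linux/libata.h>

/* Hypothetical qc_issue hook: the DMA branch compiles away when
 * CONFIG_ATA_BMDMA is unset, leaving only the PIO path. */
static unsigned int example_qc_issue(struct ata_queued_cmd *qc)
{
#ifdef CONFIG_ATA_BMDMA
        if (ata_is_dma(qc->tf.protocol))
                return ata_bmdma_qc_issue(qc);  /* bus-master DMA engine */
#endif
        return ata_sff_qc_issue(qc);            /* plain SFF PIO */
}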
- config PATA_HPT366 tristate "HPT 366/368 PATA support" depends on PCI @@ -415,12 +426,20 @@ config PATA_HPT3X3_DMA controllers. Enable with care as there are still some problems with DMA on this chipset. -config PATA_ISAPNP - tristate "ISA Plug and Play PATA support" - depends on ISAPNP +config PATA_ICSIDE + tristate "Acorn ICS PATA support" + depends on ARM && ARCH_ACORN help - This option enables support for ISA plug & play ATA - controllers such as those found on old soundcards. + On Acorn systems, say Y here if you wish to use the ICS PATA + interface card. This is not required for ICS partition support. + If you are unsure, say N to this. + +config PATA_IT8213 + tristate "IT8213 PATA support (Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables support for the ITE 821 PATA + controllers via the new ATA layer. If unsure, say N. @@ -434,15 +453,6 @@ config PATA_IT821X If unsure, say N. -config PATA_IT8213 - tristate "IT8213 PATA support (Experimental)" - depends on PCI && EXPERIMENTAL - help - This option enables support for the ITE 821 PATA - controllers via the new ATA layer. - - If unsure, say N. - config PATA_JMICRON tristate "JMicron PATA support" depends on PCI @@ -452,23 +462,14 @@ config PATA_JMICRON If unsure, say N. -config PATA_LEGACY - tristate "Legacy ISA PATA support (Experimental)" - depends on (ISA || PCI) && EXPERIMENTAL - help - This option enables support for ISA/VLB/PCI bus legacy PATA - ports and allows them to be accessed via the new ATA layer. - - If unsure, say N. - -config PATA_TRIFLEX - tristate "Compaq Triflex PATA support" - depends on PCI +config PATA_MACIO + tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" + depends on PPC_PMAC help - Enable support for the Compaq 'Triflex' IDE controller as found - on many Compaq Pentium-Pro systems, via the new ATA layer. - - If unsure, say N. + Most IDE capable PowerMacs have IDE busses driven by a variant + of this controller which is part of the Apple chipset used on + most PowerMac models. Some models have multiple busses using + different chipsets, though generally, MacIO is one of them. config PATA_MARVELL tristate "Marvell PATA support via legacy mode" @@ -481,32 +482,6 @@ config PATA_MARVELL If unsure, say N. -config PATA_MPC52xx - tristate "Freescale MPC52xx SoC internal IDE" - depends on PPC_MPC52xx && PPC_BESTCOMM - select PPC_BESTCOMM_ATA - help - This option enables support for integrated IDE controller - of the Freescale MPC52xx SoC. - - If unsure, say N. - -config PATA_MPIIX - tristate "Intel PATA MPIIX support" - depends on PCI - help - This option enables support for MPIIX PATA support. - - If unsure, say N. - -config PATA_OLDPIIX - tristate "Intel PATA old PIIX support" - depends on PCI - help - This option enables support for early PIIX PATA support. - - If unsure, say N. - config PATA_NETCELL tristate "NETCELL Revolution RAID support" depends on PCI @@ -525,15 +500,6 @@ config PATA_NINJA32 If unsure, say N. -config PATA_NS87410 - tristate "Nat Semi NS87410 PATA support" - depends on PCI - help - This option enables support for the National Semiconductor - NS87410 PCI-IDE controller. - - If unsure, say N. - config PATA_NS87415 tristate "Nat Semi NS87415 PATA support" depends on PCI @@ -543,12 +509,11 @@ config PATA_NS87415 If unsure, say N. 
-config PATA_OPTI - tristate "OPTI621/6215 PATA support (Very Experimental)" - depends on PCI && EXPERIMENTAL +config PATA_OLDPIIX + tristate "Intel PATA old PIIX support" + depends on PCI help - This option enables full PIO support for the early Opti ATA - controllers found on some old motherboards. + This option enables support for early PIIX PATA support. If unsure, say N. @@ -562,24 +527,6 @@ config PATA_OPTIDMA If unsure, say N. -config PATA_PALMLD - tristate "Palm LifeDrive PATA support" - depends on MACH_PALMLD - help - This option enables support for Palm LifeDrive's internal ATA - port via the new ATA layer. - - If unsure, say N. - -config PATA_PCMCIA - tristate "PCMCIA PATA support" - depends on PCMCIA - help - This option enables support for PCMCIA ATA interfaces, including - compact flash card adapters via the new ATA layer. - - If unsure, say N. - config PATA_PDC2027X tristate "Promise PATA 2027x support" depends on PCI @@ -597,12 +544,6 @@ config PATA_PDC_OLD If unsure, say N. -config PATA_QDI - tristate "QDI VLB PATA support" - depends on ISA - help - Support for QDI 6500 and 6580 PATA controllers on VESA local bus. - config PATA_RADISYS tristate "RADISYS 82600 PATA support (Experimental)" depends on PCI && EXPERIMENTAL @@ -612,15 +553,6 @@ config PATA_RADISYS If unsure, say N. -config PATA_RB532 - tristate "RouterBoard 532 PATA CompactFlash support" - depends on MIKROTIK_RB532 - help - This option enables support for the RouterBoard 532 - PATA CompactFlash controller. - - If unsure, say N. - config PATA_RDC tristate "RDC PATA support" depends on PCI @@ -631,21 +563,30 @@ config PATA_RDC If unsure, say N. -config PATA_RZ1000 - tristate "PC Tech RZ1000 PATA support" +config PATA_SC1200 + tristate "SC1200 PATA support" depends on PCI help - This option enables basic support for the PC Tech RZ1000/1 - PATA controllers via the new ATA layer + This option enables support for the NatSemi/AMD SC1200 SoC + companion chip used with the Geode processor family. If unsure, say N. -config PATA_SC1200 - tristate "SC1200 PATA support" +config PATA_SCC + tristate "Toshiba's Cell Reference Set IDE support" + depends on PCI && PPC_CELLEB + help + This option enables support for the built-in IDE controller on + Toshiba Cell Reference Board. + + If unsure, say N. + +config PATA_SCH + tristate "Intel SCH PATA support" depends on PCI help - This option enables support for the NatSemi/AMD SC1200 SoC - companion chip used with the Geode processor family. + This option enables support for Intel SCH PATA on the Intel + SCH (US15W, US15L, UL11L) series host controllers. If unsure, say N. @@ -683,6 +624,15 @@ config PATA_TOSHIBA If unsure, say N. +config PATA_TRIFLEX + tristate "Compaq Triflex PATA support" + depends on PCI + help + Enable support for the Compaq 'Triflex' IDE controller as found + on many Compaq Pentium-Pro systems, via the new ATA layer. + + If unsure, say N. + config PATA_VIA tristate "VIA PATA support" depends on PCI @@ -701,12 +651,99 @@ config PATA_WINBOND If unsure, say N. -config PATA_WINBOND_VLB - tristate "Winbond W83759A VLB PATA support (Experimental)" - depends on ISA && EXPERIMENTAL +endif # ATA_BMDMA + +comment "PIO-only SFF controllers" + +config PATA_AT32 + tristate "Atmel AVR32 PATA support (Experimental)" + depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL help - Support for the Winbond W83759A controller on Vesa Local Bus - systems. + This option enables support for the IDE devices on the + Atmel AT32AP platform. + + If unsure, say N. 
+ +config PATA_AT91 + tristate "PATA support for AT91SAM9260" + depends on ARM && ARCH_AT91 + help + This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. + + If unsure, say N. + +config PATA_CMD640_PCI + tristate "CMD640 PCI PATA support (Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables support for the CMD640 PCI IDE + interface chip. Only the primary channel is currently + supported. + + If unsure, say N. + +config PATA_ISAPNP + tristate "ISA Plug and Play PATA support" + depends on ISAPNP + help + This option enables support for ISA plug & play ATA + controllers such as those found on old soundcards. + + If unsure, say N. + +config PATA_IXP4XX_CF + tristate "IXP4XX Compact Flash support" + depends on ARCH_IXP4XX + help + This option enables support for a Compact Flash connected on + the ixp4xx expansion bus. This driver had been written for + Loft/Avila boards in mind but can work with others. + + If unsure, say N. + +config PATA_MPIIX + tristate "Intel PATA MPIIX support" + depends on PCI + help + This option enables support for MPIIX PATA support. + + If unsure, say N. + +config PATA_NS87410 + tristate "Nat Semi NS87410 PATA support" + depends on PCI + help + This option enables support for the National Semiconductor + NS87410 PCI-IDE controller. + + If unsure, say N. + +config PATA_OPTI + tristate "OPTI621/6215 PATA support (Very Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables full PIO support for the early Opti ATA + controllers found on some old motherboards. + + If unsure, say N. + +config PATA_PALMLD + tristate "Palm LifeDrive PATA support" + depends on MACH_PALMLD + help + This option enables support for Palm LifeDrive's internal ATA + port via the new ATA layer. + + If unsure, say N. + +config PATA_PCMCIA + tristate "PCMCIA PATA support" + depends on PCMCIA + help + This option enables support for PCMCIA ATA interfaces, including + compact flash card adapters via the new ATA layer. + + If unsure, say N. config HAVE_PATA_PLATFORM bool @@ -725,14 +762,6 @@ config PATA_PLATFORM If unsure, say N. -config PATA_AT91 - tristate "PATA support for AT91SAM9260" - depends on ARM && ARCH_AT91 - help - This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. - - If unsure, say N. - config PATA_OF_PLATFORM tristate "OpenFirmware platform device PATA support" depends on PATA_PLATFORM && PPC_OF @@ -743,69 +772,65 @@ config PATA_OF_PLATFORM If unsure, say N. -config PATA_ICSIDE - tristate "Acorn ICS PATA support" - depends on ARM && ARCH_ACORN +config PATA_QDI + tristate "QDI VLB PATA support" + depends on ISA help - On Acorn systems, say Y here if you wish to use the ICS PATA - interface card. This is not required for ICS partition support. - If you are unsure, say N to this. + Support for QDI 6500 and 6580 PATA controllers on VESA local bus. -config PATA_IXP4XX_CF - tristate "IXP4XX Compact Flash support" - depends on ARCH_IXP4XX +config PATA_RB532 + tristate "RouterBoard 532 PATA CompactFlash support" + depends on MIKROTIK_RB532 help - This option enables support for a Compact Flash connected on - the ixp4xx expansion bus. This driver had been written for - Loft/Avila boards in mind but can work with others. + This option enables support for the RouterBoard 532 + PATA CompactFlash controller. If unsure, say N. 
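With this regrouping, everything under the "PIO-only SFF controllers" comment is expected to use the plain SFF helpers, which this patch strips of all BMDMA setup. A minimal sketch of such a driver's probe path (driver name and mode masks are hypothetical, not taken from this patch):

#include <linux/pci.h>
#include <linux/libata.h>

static struct scsi_host_template pio_example_sht = {
        ATA_BASE_SHT("pata_pio_example"),       /* hypothetical driver name */
};

static struct ata_port_operations pio_example_ops = {
        .inherits       = &ata_sff_port_ops,    /* no bmdma_* methods at all */
        .cable_detect   = ata_cable_40wire,
};

static int pio_example_init_one(struct pci_dev *pdev,
                                const struct pci_device_id *id)
{
        static const struct ata_port_info info = {
                .flags          = ATA_FLAG_SLAVE_POSS,
                .pio_mask       = ATA_PIO4,     /* PIO modes only, no DMA masks */
                .port_ops       = &pio_example_ops,
        };
        const struct ata_port_info *ppi[] = { &info, NULL };

        /* ata_pci_sff_init_one() is now documented as PIO-only and no
         * longer performs any BMDMA initialization (see the libata-sff.c
         * hunks below). */
        return ata_pci_sff_init_one(pdev, ppi, &pio_example_sht, NULL, 0);
}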
-config PATA_OCTEON_CF - tristate "OCTEON Boot Bus Compact Flash support" - depends on CPU_CAVIUM_OCTEON +config PATA_RZ1000 + tristate "PC Tech RZ1000 PATA support" + depends on PCI help - This option enables a polled compact flash driver for use with - compact flash cards attached to the OCTEON boot bus. + This option enables basic support for the PC Tech RZ1000/1 + PATA controllers via the new ATA layer If unsure, say N. -config PATA_SCC - tristate "Toshiba's Cell Reference Set IDE support" - depends on PCI && PPC_CELLEB +config PATA_WINBOND_VLB + tristate "Winbond W83759A VLB PATA support (Experimental)" + depends on ISA && EXPERIMENTAL help - This option enables support for the built-in IDE controller on - Toshiba Cell Reference Board. + Support for the Winbond W83759A controller on Vesa Local Bus + systems. - If unsure, say N. +comment "Generic fallback / legacy drivers" -config PATA_SCH - tristate "Intel SCH PATA support" - depends on PCI +config PATA_ACPI + tristate "ACPI firmware driver for PATA" + depends on ATA_ACPI && ATA_BMDMA help - This option enables support for Intel SCH PATA on the Intel - SCH (US15W, US15L, UL11L) series host controllers. - - If unsure, say N. + This option enables an ACPI method driver which drives + motherboard PATA controller interfaces through the ACPI + firmware in the BIOS. This driver can sometimes handle + otherwise unsupported hardware. -config PATA_BF54X - tristate "Blackfin 54x ATAPI support" - depends on BF542 || BF548 || BF549 +config ATA_GENERIC + tristate "Generic ATA support" + depends on PCI && ATA_BMDMA help - This option enables support for the built-in ATAPI controller on - Blackfin 54x family chips. + This option enables support for generic BIOS configured + ATA controllers via the new ATA layer If unsure, say N. -config PATA_MACIO - tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" - depends on PPC_PMAC +config PATA_LEGACY + tristate "Legacy ISA PATA support (Experimental)" + depends on (ISA || PCI) && EXPERIMENTAL help - Most IDE capable PowerMacs have IDE busses driven by a variant - of this controller which is part of the Apple chipset used on - most PowerMac models. Some models have multiple busses using - different chipsets, though generally, MacIO is one of them. + This option enables support for ISA/VLB/PCI bus legacy PATA + ports and allows them to be accessed via the new ATA layer. + If unsure, say N. 
endif # ATA_SFF endif # ATA diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index d0a93c4ad3ec..7ef89d73df63 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -1,33 +1,39 @@ obj-$(CONFIG_ATA) += libata.o +# non-SFF interface obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o -obj-$(CONFIG_SATA_SVW) += sata_svw.o +obj-$(CONFIG_SATA_FSL) += sata_fsl.o +obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o +obj-$(CONFIG_SATA_SIL24) += sata_sil24.o + +# SFF w/ custom DMA +obj-$(CONFIG_PDC_ADMA) += pdc_adma.o +obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o +obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o +obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o +obj-$(CONFIG_SATA_SX4) += sata_sx4.o + +# SFF SATA w/ BMDMA obj-$(CONFIG_ATA_PIIX) += ata_piix.o +obj-$(CONFIG_SATA_MV) += sata_mv.o +obj-$(CONFIG_SATA_NV) += sata_nv.o obj-$(CONFIG_SATA_PROMISE) += sata_promise.o -obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o obj-$(CONFIG_SATA_SIL) += sata_sil.o -obj-$(CONFIG_SATA_SIL24) += sata_sil24.o -obj-$(CONFIG_SATA_VIA) += sata_via.o -obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o obj-$(CONFIG_SATA_SIS) += sata_sis.o -obj-$(CONFIG_SATA_SX4) += sata_sx4.o -obj-$(CONFIG_SATA_NV) += sata_nv.o +obj-$(CONFIG_SATA_SVW) += sata_svw.o obj-$(CONFIG_SATA_ULI) += sata_uli.o -obj-$(CONFIG_SATA_MV) += sata_mv.o -obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o -obj-$(CONFIG_PDC_ADMA) += pdc_adma.o -obj-$(CONFIG_SATA_FSL) += sata_fsl.o -obj-$(CONFIG_PATA_MACIO) += pata_macio.o +obj-$(CONFIG_SATA_VIA) += sata_via.o +obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o +# SFF PATA w/ BMDMA obj-$(CONFIG_PATA_ALI) += pata_ali.o obj-$(CONFIG_PATA_AMD) += pata_amd.o obj-$(CONFIG_PATA_ARTOP) += pata_artop.o -obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o -obj-$(CONFIG_PATA_AT32) += pata_at32.o obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o -obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o +obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o +obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o @@ -39,47 +45,50 @@ obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o -obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o -obj-$(CONFIG_PATA_IT821X) += pata_it821x.o +obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o obj-$(CONFIG_PATA_IT8213) += pata_it8213.o +obj-$(CONFIG_PATA_IT821X) += pata_it821x.o obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o +obj-$(CONFIG_PATA_MACIO) += pata_macio.o +obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o -obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o -obj-$(CONFIG_PATA_OPTI) += pata_opti.o -obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o -obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o -obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o -obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o -obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o -obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o +obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o -obj-$(CONFIG_PATA_QDI) += pata_qdi.o obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o -obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o obj-$(CONFIG_PATA_RDC) += pata_rdc.o -obj-$(CONFIG_PATA_RZ1000) += 
pata_rz1000.o obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o +obj-$(CONFIG_PATA_SCC) += pata_scc.o +obj-$(CONFIG_PATA_SCH) += pata_sch.o obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o obj-$(CONFIG_PATA_SIL680) += pata_sil680.o +obj-$(CONFIG_PATA_SIS) += pata_sis.o obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o +obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o obj-$(CONFIG_PATA_VIA) += pata_via.o obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o -obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o -obj-$(CONFIG_PATA_SIS) += pata_sis.o -obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o + +# SFF PIO only +obj-$(CONFIG_PATA_AT32) += pata_at32.o +obj-$(CONFIG_PATA_AT91) += pata_at91.o +obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o +obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o -obj-$(CONFIG_PATA_SCC) += pata_scc.o -obj-$(CONFIG_PATA_SCH) += pata_sch.o -obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o -obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o +obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o +obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o +obj-$(CONFIG_PATA_OPTI) += pata_opti.o +obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o +obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o -obj-$(CONFIG_PATA_AT91) += pata_at91.o obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o -obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o +obj-$(CONFIG_PATA_QDI) += pata_qdi.o +obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o +obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o +obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o + # Should be last but two libata driver obj-$(CONFIG_PATA_ACPI) += pata_acpi.o # Should be last but one libata driver diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index 33fb614f9784..573158a9668d 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c @@ -155,7 +155,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id return rc; pcim_pin_device(dev); } - return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0); } static struct pci_device_id ata_generic[] = { diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index ec52fc618763..7409f98d2ae6 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -1589,7 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, hpriv->map = piix_init_sata_map(pdev, port_info, piix_map_db_table[ent->driver_data]); - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; host->private_data = hpriv; @@ -1626,7 +1626,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, host->flags |= ATA_HOST_PARALLEL_SCAN; pci_set_master(pdev); - return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht); + return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht); } static void piix_remove_one(struct pci_dev *pdev) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c47373f01f89..06b7e49e039c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -160,6 +160,10 @@ int libata_allow_tpm = 0; module_param_named(allow_tpm, libata_allow_tpm, int, 0444); MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); +static int atapi_an; +module_param(atapi_an, int, 0444); +MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)"); + MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices"); MODULE_LICENSE("GPL"); @@ -2122,6 +2126,14 @@ retry: goto err_out; } + if (dev->horkage & ATA_HORKAGE_DUMP_ID) { + ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, " + "class=%d may_fallback=%d tried_spinup=%d\n", + class, may_fallback, tried_spinup); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, + 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); + } + /* Falling back doesn't make sense if ID data was read * successfully at least once. */ @@ -2510,7 +2522,8 @@ int ata_dev_configure(struct ata_device *dev) * to enable ATAPI AN to discern between PHY status * changed notifications and ATAPI ANs. */ - if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && + if (atapi_an && + (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && (!sata_pmp_attached(ap) || sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { unsigned int err_mask; @@ -6372,6 +6385,7 @@ static int __init ata_parse_force_one(char **cur, { "3.0Gbps", .spd_limit = 2 }, { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, + { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 19ddf924944f..efa4a18cfb9d 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -63,7 +63,6 @@ const struct ata_port_operations ata_sff_port_ops = { .sff_tf_read = ata_sff_tf_read, .sff_exec_command = ata_sff_exec_command, .sff_data_xfer = ata_sff_data_xfer, - .sff_irq_clear = ata_sff_irq_clear, .sff_drain_fifo = ata_sff_drain_fifo, .lost_interrupt = ata_sff_lost_interrupt, @@ -395,33 +394,12 @@ void ata_sff_irq_on(struct ata_port *ap) ata_sff_set_devctl(ap, ap->ctl); ata_wait_idle(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } EXPORT_SYMBOL_GPL(ata_sff_irq_on); /** - * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt. - * @ap: Port associated with this ATA transaction. - * - * Clear interrupt and error flags in DMA status register. - * - * May be used as the irq_clear() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_sff_irq_clear(struct ata_port *ap) -{ - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - if (!mmio) - return; - - iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); -} -EXPORT_SYMBOL_GPL(ata_sff_irq_clear); - -/** * ata_sff_tf_load - send taskfile registers to host controller * @ap: Port to which output is sent * @tf: ATA taskfile register set @@ -820,11 +798,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) case ATAPI_PROT_NODATA: ap->hsm_task_state = HSM_ST_LAST; break; +#ifdef CONFIG_ATA_BMDMA case ATAPI_PROT_DMA: ap->hsm_task_state = HSM_ST_LAST; /* initiate bmdma */ ap->ops->bmdma_start(qc); break; +#endif /* CONFIG_ATA_BMDMA */ + default: + BUG(); } } @@ -1491,27 +1473,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) } EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); -/** - * ata_sff_host_intr - Handle host interrupt for given (port, task) - * @ap: Port on which interrupt arrived (possibly...) - * @qc: Taskfile currently active in engine - * - * Handle host interrupt for given queued command. Currently, - * only DMA interrupts are handled. All other commands are - * handled via polling with interrupts disabled (nIEN bit). 
- * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * One if interrupt was handled, zero if not (shared irq). - */ -unsigned int ata_sff_host_intr(struct ata_port *ap, - struct ata_queued_cmd *qc) +static unsigned int ata_sff_idle_irq(struct ata_port *ap) { - struct ata_eh_info *ehi = &ap->link.eh_info; - u8 status, host_stat = 0; - bool bmdma_stopped = false; + ap->stats.idle_irq++; + +#ifdef ATA_IRQ_TRAP + if ((ap->stats.idle_irq % 1000) == 0) { + ap->ops->sff_check_status(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + ata_port_printk(ap, KERN_WARNING, "irq trap\n"); + return 1; + } +#endif + return 0; /* irq not handled */ +} + +static unsigned int __ata_sff_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc, + bool hsmv_on_idle) +{ + u8 status; VPRINTK("ata%u: protocol %d task_state %d\n", ap->print_id, qc->tf.protocol, ap->hsm_task_state); @@ -1528,90 +1510,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap, * need to check ata_is_atapi(qc->tf.protocol) again. */ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - goto idle_irq; - break; - case HSM_ST_LAST: - if (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA) { - /* check status of DMA engine */ - host_stat = ap->ops->bmdma_status(ap); - VPRINTK("ata%u: host_stat 0x%X\n", - ap->print_id, host_stat); - - /* if it's not our irq... */ - if (!(host_stat & ATA_DMA_INTR)) - goto idle_irq; - - /* before we do anything else, clear DMA-Start bit */ - ap->ops->bmdma_stop(qc); - bmdma_stopped = true; - - if (unlikely(host_stat & ATA_DMA_ERR)) { - /* error when transfering data to/from memory */ - qc->err_mask |= AC_ERR_HOST_BUS; - ap->hsm_task_state = HSM_ST_ERR; - } - } + return ata_sff_idle_irq(ap); break; case HSM_ST: + case HSM_ST_LAST: break; default: - goto idle_irq; + return ata_sff_idle_irq(ap); } - /* check main status, clearing INTRQ if needed */ status = ata_sff_irq_status(ap); if (status & ATA_BUSY) { - if (bmdma_stopped) { + if (hsmv_on_idle) { /* BMDMA engine is already stopped, we're screwed */ qc->err_mask |= AC_ERR_HSM; ap->hsm_task_state = HSM_ST_ERR; } else - goto idle_irq; + return ata_sff_idle_irq(ap); } /* clear irq events */ - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); ata_sff_hsm_move(ap, qc, status, 0); - if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA)) - ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); - return 1; /* irq handled */ - -idle_irq: - ap->stats.idle_irq++; - -#ifdef ATA_IRQ_TRAP - if ((ap->stats.idle_irq % 1000) == 0) { - ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); - ata_port_printk(ap, KERN_WARNING, "irq trap\n"); - return 1; - } -#endif - return 0; /* irq not handled */ } -EXPORT_SYMBOL_GPL(ata_sff_host_intr); /** - * ata_sff_interrupt - Default ATA host interrupt handler - * @irq: irq line (unused) - * @dev_instance: pointer to our ata_host information structure + * ata_sff_port_intr - Handle SFF port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine * - * Default interrupt handler for PCI IDE devices. Calls - * ata_sff_host_intr() for each port that is not disabled. + * Handle port interrupt for given queued command. * * LOCKING: - * Obtains host lock during operation. + * spin_lock_irqsave(host lock) * * RETURNS: - * IRQ_NONE or IRQ_HANDLED. + * One if interrupt was handled, zero if not (shared irq). 
*/ -irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) +unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + return __ata_sff_port_intr(ap, qc, false); +} +EXPORT_SYMBOL_GPL(ata_sff_port_intr); + +static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance, + unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *)) { struct ata_host *host = dev_instance; bool retried = false; @@ -1631,7 +1579,7 @@ retry: qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc) { if (!(qc->tf.flags & ATA_TFLAG_POLLING)) - handled |= ata_sff_host_intr(ap, qc); + handled |= port_intr(ap, qc); else polling |= 1 << i; } else @@ -1658,7 +1606,8 @@ retry: if (idle & (1 << i)) { ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } else { /* clear INTRQ and check if BUSY cleared */ if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) @@ -1680,6 +1629,25 @@ retry: return IRQ_RETVAL(handled); } + +/** + * ata_sff_interrupt - Default SFF ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure + * + * Default interrupt handler for PCI IDE devices. Calls + * ata_sff_port_intr() for each port that is not disabled. + * + * LOCKING: + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. + */ +irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) +{ + return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr); +} EXPORT_SYMBOL_GPL(ata_sff_interrupt); /** @@ -1717,7 +1685,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap) status); /* Run the host interrupt logic as if the interrupt had not been lost */ - ata_sff_host_intr(ap, qc); + ata_sff_port_intr(ap, qc); } EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); @@ -1744,7 +1712,8 @@ void ata_sff_freeze(struct ata_port *ap) */ ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } EXPORT_SYMBOL_GPL(ata_sff_freeze); @@ -1761,7 +1730,8 @@ void ata_sff_thaw(struct ata_port *ap) { /* clear & re-enable interrupts */ ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); ata_sff_irq_on(ap); } EXPORT_SYMBOL_GPL(ata_sff_thaw); @@ -2349,13 +2319,13 @@ int ata_pci_sff_init_host(struct ata_host *host) EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); /** - * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host + * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host * @pdev: target PCI device * @ppi: array of port_info, must be enough for two ports * @r_host: out argument for the initialized ATA host * - * Helper to allocate ATA host for @pdev, acquire all native PCI - * resources and initialize it accordingly in one go. + * Helper to allocate PIO-only SFF ATA host for @pdev, acquire + * all PCI resources and initialize it accordingly in one go. * * LOCKING: * Inherited from calling layer (may sleep). 
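With the SFF core split into a PIO-only layer and a BMDMA layer, converted drivers switch to the BMDMA variants of the init helpers and interrupt handler introduced further below (ata_pci_bmdma_prepare_host(), ata_pci_bmdma_init_one(), ata_bmdma_interrupt()), as the ata_generic, ata_piix and pata_* hunks elsewhere in this patch do. A minimal sketch of a converted PCI BMDMA driver's probe path (driver name and mode masks are hypothetical, not from this patch):

#include <linux/pci.h>
#include <linux/libata.h>

static struct scsi_host_template bmdma_example_sht = {
        ATA_BMDMA_SHT("pata_bmdma_example"),    /* hypothetical driver name */
};

static struct ata_port_operations bmdma_example_ops = {
        .inherits       = &ata_bmdma_port_ops,  /* supplies the bmdma_* methods */
        .cable_detect   = ata_cable_80wire,
};

static int bmdma_example_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
{
        static const struct ata_port_info info = {
                .flags          = ATA_FLAG_SLAVE_POSS,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &bmdma_example_ops,
        };
        const struct ata_port_info *ppi[] = { &info, NULL };

        /* Enables the device, maps BARs, sets up the BMDMA engine and
         * activates the host with ata_bmdma_interrupt() in one call. */
        return ata_pci_bmdma_init_one(pdev, ppi, &bmdma_example_sht, NULL, 0);
}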
@@ -2385,9 +2355,6 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev, if (rc) goto err_out; - /* init DMA related stuff */ - ata_pci_bmdma_init(host); - devres_remove_group(&pdev->dev, NULL); *r_host = host; return 0; @@ -2492,8 +2459,21 @@ out: } EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); +static const struct ata_port_info *ata_sff_find_valid_pi( + const struct ata_port_info * const *ppi) +{ + int i; + + /* look up the first valid port_info */ + for (i = 0; i < 2 && ppi[i]; i++) + if (ppi[i]->port_ops != &ata_dummy_port_ops) + return ppi[i]; + + return NULL; +} + /** - * ata_pci_sff_init_one - Initialize/register PCI IDE host controller + * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller * @pdev: Controller to be initialized * @ppi: array of port_info, must be enough for two ports * @sht: scsi_host_template to use when registering the host @@ -2502,11 +2482,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); * * This is a helper function which can be called from a driver's * xxx_init_one() probe function if the hardware uses traditional - * IDE taskfile registers. - * - * This function calls pci_enable_device(), reserves its register - * regions, sets the dma mask, enables bus master mode, and calls - * ata_device_add() + * IDE taskfile registers and is PIO only. * * ASSUMPTION: * Nobody makes a single channel controller that appears solely as @@ -2523,20 +2499,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, struct scsi_host_template *sht, void *host_priv, int hflag) { struct device *dev = &pdev->dev; - const struct ata_port_info *pi = NULL; + const struct ata_port_info *pi; struct ata_host *host = NULL; - int i, rc; + int rc; DPRINTK("ENTER\n"); - /* look up the first valid port_info */ - for (i = 0; i < 2 && ppi[i]; i++) { - if (ppi[i]->port_ops != &ata_dummy_port_ops) { - pi = ppi[i]; - break; - } - } - + pi = ata_sff_find_valid_pi(ppi); if (!pi) { dev_printk(KERN_ERR, &pdev->dev, "no valid port_info specified\n"); @@ -2557,7 +2526,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, host->private_data = host_priv; host->flags |= hflag; - pci_set_master(pdev); rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); out: if (rc == 0) @@ -2571,6 +2539,12 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); #endif /* CONFIG_PCI */ +/* + * BMDMA support + */ + +#ifdef CONFIG_ATA_BMDMA + const struct ata_port_operations ata_bmdma_port_ops = { .inherits = &ata_sff_port_ops, @@ -2580,6 +2554,7 @@ const struct ata_port_operations ata_bmdma_port_ops = { .qc_prep = ata_bmdma_qc_prep, .qc_issue = ata_bmdma_qc_issue, + .sff_irq_clear = ata_bmdma_irq_clear, .bmdma_setup = ata_bmdma_setup, .bmdma_start = ata_bmdma_start, .bmdma_stop = ata_bmdma_stop, @@ -2804,6 +2779,75 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); /** + * ata_bmdma_port_intr - Handle BMDMA port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine + * + * Handle port interrupt for given queued command. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * One if interrupt was handled, zero if not (shared irq). 
+ */ +unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + u8 host_stat = 0; + bool bmdma_stopped = false; + unsigned int handled; + + if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { + /* check status of DMA engine */ + host_stat = ap->ops->bmdma_status(ap); + VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); + + /* if it's not our irq... */ + if (!(host_stat & ATA_DMA_INTR)) + return ata_sff_idle_irq(ap); + + /* before we do anything else, clear DMA-Start bit */ + ap->ops->bmdma_stop(qc); + bmdma_stopped = true; + + if (unlikely(host_stat & ATA_DMA_ERR)) { + /* error when transferring data to/from memory */ + qc->err_mask |= AC_ERR_HOST_BUS; + ap->hsm_task_state = HSM_ST_ERR; + } + } + + handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); + + if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) + ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); + + return handled; +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); + +/** + * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure + * + * Default interrupt handler for PCI IDE devices. Calls + * ata_bmdma_port_intr() for each port that is not disabled. + * + * LOCKING: + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. + */ +irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) +{ + return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); +} +EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); + +/** * ata_bmdma_error_handler - Stock error handler for BMDMA controller * @ap: port to handle error for * @@ -2848,7 +2892,8 @@ void ata_bmdma_error_handler(struct ata_port *ap) /* if we're gonna thaw, make sure IRQ is clear */ if (thaw) { ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } } @@ -2882,6 +2927,28 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); /** + * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. + * @ap: Port associated with this ATA transaction. + * + * Clear interrupt and error flags in DMA status register. + * + * May be used as the irq_clear() entry in ata_port_operations. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_irq_clear(struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + if (!mmio) + return; + + iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); +} +EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); + +/** * ata_bmdma_setup - Set up PCI IDE BMDMA transaction * @qc: Info associated with this ATA transaction. * @@ -3137,7 +3204,100 @@ void ata_pci_bmdma_init(struct ata_host *host) } EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); +/** + * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host + * @pdev: target PCI device + * @ppi: array of port_info, must be enough for two ports + * @r_host: out argument for the initialized ATA host + * + * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI + * resources and initialize it accordingly in one go. + * + * LOCKING: + * Inherited from calling layer (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise.
+ */ +int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct ata_host **r_host) +{ + int rc; + + rc = ata_pci_sff_prepare_host(pdev, ppi, r_host); + if (rc) + return rc; + + ata_pci_bmdma_init(*r_host); + return 0; +} +EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host); + +/** + * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller + * @pdev: Controller to be initialized + * @ppi: array of port_info, must be enough for two ports + * @sht: scsi_host_template to use when registering the host + * @host_priv: host private_data + * @hflags: host flags + * + * This function is similar to ata_pci_sff_init_one() but also + * takes care of BMDMA initialization. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, negative errno value on error. + */ +int ata_pci_bmdma_init_one(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct scsi_host_template *sht, void *host_priv, + int hflags) +{ + struct device *dev = &pdev->dev; + const struct ata_port_info *pi; + struct ata_host *host = NULL; + int rc; + + DPRINTK("ENTER\n"); + + pi = ata_sff_find_valid_pi(ppi); + if (!pi) { + dev_printk(KERN_ERR, &pdev->dev, + "no valid port_info specified\n"); + return -EINVAL; + } + + if (!devres_open_group(dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + rc = pcim_enable_device(pdev); + if (rc) + goto out; + + /* prepare and activate BMDMA host */ + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); + if (rc) + goto out; + host->private_data = host_priv; + host->flags |= hflags; + + pci_set_master(pdev); + rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); + out: + if (rc == 0) + devres_remove_group(&pdev->dev, NULL); + else + devres_release_group(&pdev->dev, NULL); + + return rc; +} +EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); + #endif /* CONFIG_PCI */ +#endif /* CONFIG_ATA_BMDMA */ /** * ata_sff_port_init - Initialize SFF/BMDMA ATA port diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index 066b9f301ed5..c8d47034d5e9 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c @@ -260,7 +260,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) return rc; pcim_pin_device(pdev); } - return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0); } static const struct pci_device_id pacpi_pci_tbl[] = { diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index f306e10c748d..794ec6e3275d 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -583,7 +583,10 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ppi[0] = &info_20_udma; } - return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); + if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask) + return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); + else + return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index d95eca9c547e..620a07cabe31 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } /* And fire it up */ - return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index 4d066d6c30fa..ba43f0f8c880 100644 ---
a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c @@ -421,7 +421,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) BUG_ON(ppi[0] == NULL); - return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0); } static const struct pci_device_id artop_pci_tbl[] = { diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 44d88b380ddd..43755616dc5a 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -246,8 +246,8 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i])) ppi[i] = &ata_dummy_port_info; - return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL, - ATA_HOST_PARALLEL_SCAN); + return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL, + ATA_HOST_PARALLEL_SCAN); } static const struct pci_device_id atiixp[] = { diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c index bb6e0746e07d..95295935dd95 100644 --- a/drivers/ata/pata_atp867x.c +++ b/drivers/ata/pata_atp867x.c @@ -525,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev, pci_set_master(pdev); - rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt, + rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &atp867x_sht); if (rc) dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n"); diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 6422cfd13d0d..9cae65de750e 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c @@ -1214,7 +1214,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf, * bfin_irq_clear - Clear ATAPI interrupt. * @ap: Port associated with this ATA transaction. * - * Note: Original code is ata_sff_irq_clear(). + * Note: Original code is ata_bmdma_irq_clear(). 
*/ static void bfin_irq_clear(struct ata_port *ap) diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 4c81a71b8877..9f5da1c7454b 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c @@ -367,7 +367,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_write_config_byte(pdev, UDIDETCR0, 0xF0); #endif - return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 17c5f346ff01..030952f1f97c 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -221,7 +221,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi continue; rc = devm_request_irq(&pdev->dev, irq[ap->port_no], - ata_sff_interrupt, 0, DRV_NAME, host); + ata_bmdma_interrupt, 0, DRV_NAME, host); if (rc) return rc; diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index e809a4233a81..f792330f0d8e 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c @@ -324,7 +324,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ppi[1] = &info_palmax_secondary; /* Now kick off ATA set up */ - return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index a02e6459fdcc..03a93186aa19 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c @@ -198,7 +198,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id) rdmsr(ATAC_CH0D1_PIO, timings, dummy); if (CS5535_BAD_PIO(timings)) wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); - return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0); } static const struct pci_device_id cs5535[] = { diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c index 914ae3506ff5..21ee23f89e88 100644 --- a/drivers/ata/pata_cs5536.c +++ b/drivers/ata/pata_cs5536.c @@ -260,7 +260,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; } - return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0); } static const struct pci_device_id cs5536[] = { diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c index 0fcc096b8dac..6d915b063d93 100644 --- a/drivers/ata/pata_cypress.c +++ b/drivers/ata/pata_cypress.c @@ -138,7 +138,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i if (PCI_FUNC(pdev->devfn) != 1) return -ENODEV; - return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); } static const struct pci_device_id cy82c693[] = { diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index 3bac0e079691..a08834758ea2 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c @@ -277,8 +277,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL, - ATA_HOST_PARALLEL_SCAN); + return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL, + ATA_HOST_PARALLEL_SCAN); } static const struct pci_device_id efar_pci_tbl[] = { diff --git 
a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 8580eb3cd54d..7688868557b9 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -361,7 +361,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id) break; } /* Now kick off ATA set up */ - return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); + return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 98b498b6907c..9ae4c0830577 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -987,7 +987,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) } /* Now kick off ATA set up */ - return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data, 0); + return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0); } static const struct pci_device_id hpt37x[] = { diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 8b95aeba0e74..32f3463216b8 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -548,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); /* Now kick off ATA set up */ - return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); + return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); } static const struct pci_device_id hpt3x2n[] = { diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index 727a81ce4c9f..b63d5e2d4628 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c @@ -248,7 +248,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); } pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &hpt3x3_sht); } diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index b56e8f722d20..9f2889fe43b2 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -470,7 +470,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info) pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); } - return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0, + return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0, &pata_icside_sht); } diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c index f971f0de88e6..4d142a2ab8fd 100644 --- a/drivers/ata/pata_it8213.c +++ b/drivers/ata/pata_it8213.c @@ -273,7 +273,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0); } static const struct pci_device_id it8213_pci_tbl[] = { diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 2bd2b002d14a..bf88f71a21f4 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -933,7 +933,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) else ppi[0] = &info_smart; } - return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index 565e01e6ac7c..cb3babbb7035 100644 --- 
a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c @@ -144,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i }; const struct ata_port_info *ppi[] = { &info, NULL }; - return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0); } static const struct pci_device_id jmicron_pci_tbl[] = { diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 25df50f51c04..76640ac76888 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -1110,7 +1110,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv, /* Start it up */ priv->irq = irq; - return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0, + return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0, &pata_macio_sht); } @@ -1140,7 +1140,7 @@ static int __devinit pata_macio_attach(struct macio_dev *mdev, "Failed to allocate private memory\n"); return -ENOMEM; } - priv->node = of_node_get(mdev->ofdev.node); + priv->node = of_node_get(mdev->ofdev.dev.of_node); priv->mdev = mdev; priv->dev = &mdev->ofdev.dev; diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index e8ca02e5a71d..dd38083dcbeb 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -153,7 +153,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i return -ENODEV; } #endif - return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0); } static const struct pci_device_id marvell_pci_tbl[] = { diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index 96b11b604ae0..f087ab55b1df 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -659,7 +659,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv, ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); /* activate host */ - return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0, + return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0, &mpc52xx_ata_sht); } @@ -694,7 +694,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match) struct bcom_task *dmatsk = NULL; /* Get ipb frequency */ - ipb_freq = mpc5xxx_get_bus_frequency(op->node); + ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); if (!ipb_freq) { dev_err(&op->dev, "could not determine IPB bus frequency\n"); return -ENODEV; @@ -702,7 +702,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match) /* Get device base address from device tree, request the region * and ioremap it. */ - rv = of_address_to_resource(op->node, 0, &res_mem); + rv = of_address_to_resource(op->dev.of_node, 0, &res_mem); if (rv) { dev_err(&op->dev, "could not determine device base address\n"); return rv; @@ -735,14 +735,14 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match) * The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and * UDMA modes 0, 1 and 2. 
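The hunk just below also keeps the driver's mode-mask arithmetic: a device-tree "mwdma-mode" (or "udma-mode") property holding N is folded into a libata capability mask with ATA_MWDMA2 & ((1 << (N + 1)) - 1), i.e. "every mode up to N, clamped to what the controller supports". A minimal sketch of that computation; foo_mwdma_mask is a hypothetical helper, not part of the patch:

	#include <linux/ata.h>
	#include <linux/types.h>

	/*
	 * Fold a device-tree mode number n into a capability mask:
	 * (1 << (n + 1)) - 1 sets bits 0..n ("all modes up to n"),
	 * and ATA_MWDMA2 (modes 0-2) clamps it to controller limits.
	 */
	static u8 foo_mwdma_mask(u32 n)
	{
		return ATA_MWDMA2 & ((1 << (n + 1)) - 1);
	}

	/* e.g. n == 1: 0x07 & 0x03 == 0x03, advertising MWDMA modes 0 and 1 */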
*/ - prop = of_get_property(op->node, "mwdma-mode", &proplen); + prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen); if ((prop) && (proplen >= 4)) mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1); - prop = of_get_property(op->node, "udma-mode", &proplen); + prop = of_get_property(op->dev.of_node, "udma-mode", &proplen); if ((prop) && (proplen >= 4)) udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1); - ata_irq = irq_of_parse_and_map(op->node, 0); + ata_irq = irq_of_parse_and_map(op->dev.of_node, 0); if (ata_irq == NO_IRQ) { dev_err(&op->dev, "error mapping irq\n"); return -EINVAL; @@ -884,9 +884,6 @@ static struct of_device_id mpc52xx_ata_of_match[] = { static struct of_platform_driver mpc52xx_ata_of_platform_driver = { - .owner = THIS_MODULE, - .name = DRV_NAME, - .match_table = mpc52xx_ata_of_match, .probe = mpc52xx_ata_probe, .remove = mpc52xx_ata_remove, #ifdef CONFIG_PM @@ -896,6 +893,7 @@ static struct of_platform_driver mpc52xx_ata_of_platform_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, + .of_match_table = mpc52xx_ata_of_match, }, }; diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index 94f979a7f4f7..3eb921c746a1 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c @@ -82,7 +82,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e ata_pci_bmdma_clear_simplex(pdev); /* And let the library code do the work */ - return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0); } static const struct pci_device_id netcell_pci_tbl[] = { diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index dd53a66b19e3..cc50bd09aa26 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c @@ -149,7 +149,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) ninja32_program(base); /* FIXME: Should we disable them at remove ? */ - return ata_host_activate(host, dev->irq, ata_sff_interrupt, + return ata_host_activate(host, dev->irq, ata_bmdma_interrupt, IRQF_SHARED, &ninja32_sht); } diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index fdbba2d76d3e..605f198f958c 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c @@ -380,7 +380,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e ns87415_fixup(pdev); - return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0); } static const struct pci_device_id ns87415_pci_tbl[] = { diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 3001109352ea..06ddd91ffeda 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -750,20 +750,6 @@ static void octeon_cf_dev_config(struct ata_device *dev) } /* - * Trap if driver tries to do standard bmdma commands. They are not - * supported. - */ -static void unreachable_qc(struct ata_queued_cmd *qc) -{ - BUG(); -} - -static u8 unreachable_port(struct ata_port *ap) -{ - BUG(); -} - -/* * We don't do ATAPI DMA so return 0. 
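Two device-tree conversions repeat throughout this merge and account for most of the non-libata churn here: struct of_device no longer carries its own node pointer, so drivers read op->dev.of_node where they used to read op->node; and struct of_platform_driver loses its top-level name/owner/match_table fields in favour of the generic struct device_driver embedded as .driver, with the match table hung off .of_match_table. A minimal new-style driver, with all names hypothetical:

	#include <linux/module.h>
	#include <linux/of_platform.h>

	static int __devinit foo_probe(struct of_device *op,
				       const struct of_device_id *match)
	{
		/* the node pointer now lives in the embedded struct device */
		struct device_node *np = op->dev.of_node;
		const u32 *prop = of_get_property(np, "acme,mode", NULL);

		return prop ? 0 : -ENODEV;
	}

	static const struct of_device_id foo_match[] = {
		{ .compatible = "acme,foo" },
		{},
	};
	MODULE_DEVICE_TABLE(of, foo_match);

	static struct of_platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= {
			.name		= "foo",
			.owner		= THIS_MODULE,
			/* was a top-level .match_table */
			.of_match_table	= foo_match,
		},
	};

	static int __init foo_init(void)
	{
		return of_register_platform_driver(&foo_driver);
	}
	module_init(foo_init);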
*/ static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc) @@ -804,10 +790,6 @@ static struct ata_port_operations octeon_cf_ops = { .sff_dev_select = octeon_cf_dev_select, .sff_irq_on = octeon_cf_irq_on, .sff_irq_clear = octeon_cf_irq_clear, - .bmdma_setup = unreachable_qc, - .bmdma_start = unreachable_qc, - .bmdma_stop = unreachable_qc, - .bmdma_status = unreachable_port, .cable_detect = ata_cable_40wire, .set_piomode = octeon_cf_set_piomode, .set_dmamode = octeon_cf_set_dmamode, diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c index 1f18ad9e4fe1..5a1b82c08be9 100644 --- a/drivers/ata/pata_of_platform.c +++ b/drivers/ata/pata_of_platform.c @@ -18,7 +18,7 @@ static int __devinit pata_of_platform_probe(struct of_device *ofdev, const struct of_device_id *match) { int ret; - struct device_node *dn = ofdev->node; + struct device_node *dn = ofdev->dev.of_node; struct resource io_res; struct resource ctl_res; struct resource irq_res; @@ -91,8 +91,11 @@ static struct of_device_id pata_of_platform_match[] = { MODULE_DEVICE_TABLE(of, pata_of_platform_match); static struct of_platform_driver pata_of_platform_driver = { - .name = "pata_of_platform", - .match_table = pata_of_platform_match, + .driver = { + .name = "pata_of_platform", + .owner = THIS_MODULE, + .of_match_table = pata_of_platform_match, + }, .probe = pata_of_platform_probe, .remove = __devexit_p(pata_of_platform_remove), }; diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index 988ef2627be3..b811c1636204 100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c @@ -248,7 +248,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); } static const struct pci_device_id oldpiix_pci_tbl[] = { diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index 76b7d12b1e8d..0852cd07de08 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c @@ -429,7 +429,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) if (optiplus_with_udma(dev)) ppi[0] = &info_82c700_udma; - return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0); } static const struct pci_device_id optidma[] = { diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index 09f1f22c0307..b18351122525 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c @@ -754,7 +754,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de return -EIO; pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &pdc2027x_sht); } diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index fa1e2f3bc0fd..c39f213e1bbc 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c @@ -337,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id return -ENODEV; } } - return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); } static const struct pci_device_id pdc202xx[] = { diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c index 981615414849..cb01bf9496fe 
100644 --- a/drivers/ata/pata_piccolo.c +++ b/drivers/ata/pata_piccolo.c @@ -95,7 +95,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id }; const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; /* Just one port for the moment */ - return ata_pci_sff_init_one(dev, ppi, &tosh_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0); } static struct pci_device_id ata_tosh[] = { diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c index a5fa388e5398..8574b31f1773 100644 --- a/drivers/ata/pata_radisys.c +++ b/drivers/ata/pata_radisys.c @@ -227,7 +227,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0); } static const struct pci_device_id radisys_pci_tbl[] = { diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c index 37092cfd7bc6..5fbe9b166c69 100644 --- a/drivers/ata/pata_rdc.c +++ b/drivers/ata/pata_rdc.c @@ -344,7 +344,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev, */ pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg); - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; host->private_data = hpriv; @@ -354,7 +354,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev, host->flags |= ATA_HOST_PARALLEL_SCAN; pci_set_master(pdev); - return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht); + return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht); } static void rdc_remove_one(struct pci_dev *pdev) diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c index 6b5b63a2fd8e..e2c18257adff 100644 --- a/drivers/ata/pata_sc1200.c +++ b/drivers/ata/pata_sc1200.c @@ -237,7 +237,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) }; const struct ata_port_info *ppi[] = { &info, NULL }; - return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0); } static const struct pci_device_id sc1200[] = { diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 6f6193b707cb..d9db3f8d60ef 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c @@ -875,7 +875,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes) * scc_irq_clear - Clear PCI IDE BMDMA interrupt. * @ap: Port associated with this ATA transaction. * - * Note: Original code is ata_sff_irq_clear(). + * Note: Original code is ata_bmdma_irq_clear(). 
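That note is a reminder of the theme running through every pata_*/sata_* hunk above: BMDMA support was split out of libata's generic SFF layer, so drivers that actually own a bus-master DMA engine now call ata_pci_bmdma_init_one()/ata_pci_bmdma_prepare_host() and register ata_bmdma_interrupt in place of the ata_pci_sff_* and ata_sff_interrupt entry points, while ata_sff_host_intr() callers become ata_bmdma_port_intr() (or ata_sff_port_intr() for ports without BMDMA state, as in sata_qstor). What a converted PCI init routine looks like, for a hypothetical pata_foo:

	#include <linux/pci.h>
	#include <linux/libata.h>

	static struct scsi_host_template foo_sht = {
		ATA_BMDMA_SHT("pata_foo"),	/* hypothetical driver */
	};

	static int foo_init_one(struct pci_dev *pdev,
				const struct pci_device_id *id)
	{
		static const struct ata_port_info info = {
			.flags		= ATA_FLAG_SLAVE_POSS,
			.pio_mask	= ATA_PIO4,
			.mwdma_mask	= ATA_MWDMA2,
			.udma_mask	= ATA_UDMA5,
			.port_ops	= &ata_bmdma_port_ops,
		};
		const struct ata_port_info *ppi[] = { &info, NULL };

		/*
		 * Was ata_pci_sff_init_one(); the SFF variant no longer
		 * sets up BMDMA resources or the BMDMA interrupt handler.
		 */
		return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
	}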
*/ static void scc_irq_clear (struct ata_port *ap) @@ -1105,7 +1105,7 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &scc_sht); } diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c index 86b3d0133c7c..e97b32f03a6e 100644 --- a/drivers/ata/pata_sch.c +++ b/drivers/ata/pata_sch.c @@ -179,7 +179,7 @@ static int __devinit sch_init_one(struct pci_dev *pdev, dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &sch_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0); } static int __init sch_init(void) diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 43ea389df2b3..86dd714e3e1d 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -460,7 +460,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ata_pci_bmdma_clear_simplex(pdev); - return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 43faf106f647..d3190d7ec304 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -374,11 +374,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, ata_sff_std_ports(&host->ports[1]->ioaddr); /* Register & activate */ - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &sil680_sht); use_ioports: - return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index b6708032f321..60cea13cccce 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -826,7 +826,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) sis_fixup(pdev, chipset); - return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index 733b042a7469..98548f640c8e 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c @@ -316,7 +316,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; pci_write_config_dword(dev, 0x40, val); - return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0); } static const struct pci_device_id sl82c105[] = { diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index 48f50600ed2a..0d1f89e571dd 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c @@ -201,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) if (!printed_version++) dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0); } static const struct pci_device_id triflex[] = { diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c 
index 7e3e0a5598b7..5e659885de16 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -627,7 +627,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } /* We have established the device type, now fire it up */ - return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index a69192b38b43..61c89b54ea23 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1313,7 +1313,7 @@ static int sata_fsl_probe(struct of_device *ofdev, dev_printk(KERN_INFO, &ofdev->dev, "Sata FSL Platform/CSB Driver init\n"); - hcr_base = of_iomap(ofdev->node, 0); + hcr_base = of_iomap(ofdev->dev.of_node, 0); if (!hcr_base) goto error_exit_with_cleanup; @@ -1332,7 +1332,7 @@ static int sata_fsl_probe(struct of_device *ofdev, host_priv->ssr_base = ssr_base; host_priv->csr_base = csr_base; - irq = irq_of_parse_and_map(ofdev->node, 0); + irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); if (irq < 0) { dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n"); goto error_exit_with_cleanup; @@ -1427,8 +1427,11 @@ static struct of_device_id fsl_sata_match[] = { MODULE_DEVICE_TABLE(of, fsl_sata_match); static struct of_platform_driver fsl_sata_driver = { - .name = "fsl-sata", - .match_table = fsl_sata_match, + .driver = { + .name = "fsl-sata", + .owner = THIS_MODULE, + .of_match_table = fsl_sata_match, + }, .probe = sata_fsl_probe, .remove = sata_fsl_remove, #ifdef CONFIG_PM diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index f3471bc949d3..a476cd99b95d 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -675,8 +675,6 @@ static struct ata_port_operations mv5_ops = { .freeze = mv_eh_freeze, .thaw = mv_eh_thaw, .hardreset = mv_hardreset, - .error_handler = ata_std_error_handler, /* avoid SFF EH */ - .post_internal_cmd = ATA_OP_NULL, .scr_read = mv5_scr_read, .scr_write = mv5_scr_write, @@ -2813,7 +2811,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause) } else if (!edma_was_enabled) { struct ata_queued_cmd *qc = mv_get_active_qc(ap); if (qc) - ata_sff_host_intr(ap, qc); + ata_bmdma_port_intr(ap, qc); else mv_unexpected_intr(ap, edma_was_enabled); } diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index baa8f0d2c86f..6fd114784116 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -920,7 +920,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat) } /* handle interrupt */ - return ata_sff_host_intr(ap, qc); + return ata_bmdma_port_intr(ap, qc); } static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) @@ -1100,7 +1100,7 @@ static void nv_adma_irq_clear(struct ata_port *ap) u32 notifier_clears[2]; if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); return; } @@ -1505,7 +1505,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { - handled += ata_sff_host_intr(ap, qc); + handled += ata_bmdma_port_intr(ap, qc); } else { /* * No request pending? 
Clear interrupt status @@ -2430,7 +2430,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ppi[0] = &nv_port_info[type]; ipriv = ppi[0]->private_data; - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index d533b3d20ca1..daeebf19a6a9 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -120,8 +120,6 @@ static void qs_host_stop(struct ata_host *host); static void qs_qc_prep(struct ata_queued_cmd *qc); static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); static int qs_check_atapi_dma(struct ata_queued_cmd *qc); -static void qs_bmdma_stop(struct ata_queued_cmd *qc); -static u8 qs_bmdma_status(struct ata_port *ap); static void qs_freeze(struct ata_port *ap); static void qs_thaw(struct ata_port *ap); static int qs_prereset(struct ata_link *link, unsigned long deadline); @@ -137,8 +135,6 @@ static struct ata_port_operations qs_ata_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = qs_check_atapi_dma, - .bmdma_stop = qs_bmdma_stop, - .bmdma_status = qs_bmdma_status, .qc_prep = qs_qc_prep, .qc_issue = qs_qc_issue, @@ -190,16 +186,6 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc) return 1; /* ATAPI DMA not supported */ } -static void qs_bmdma_stop(struct ata_queued_cmd *qc) -{ - /* nothing */ -} - -static u8 qs_bmdma_status(struct ata_port *ap) -{ - return 0; -} - static inline void qs_enter_reg_mode(struct ata_port *ap) { u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); @@ -454,7 +440,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host) if (!pp || pp->state != qs_state_mmio) continue; if (!(qc->tf.flags & ATA_TFLAG_POLLING)) - handled |= ata_sff_host_intr(ap, qc); + handled |= ata_sff_port_intr(ap, qc); } return handled; } diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 2dda312b6b9a..3a4f84219719 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -503,7 +503,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) goto err_hsm; /* ack bmdma irq events */ - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); /* kick HSM in the ass */ ata_sff_hsm_move(ap, qc, status, 0); @@ -584,7 +584,7 @@ static void sil_thaw(struct ata_port *ap) /* clear IRQ */ ap->ops->sff_check_status(ap); - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); /* turn on SATA IRQ if supported */ if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index f8a91bfd66a8..2bfe3ae03976 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c @@ -279,7 +279,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) break; } - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; @@ -308,7 +308,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); pci_intx(pdev, 1); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &sis_sht); } diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 101fd6a19829..7d9db4aaf07e 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -502,7 +502,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); pci_set_master(pdev); - return 
ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &k2_sata_sht); } diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index d8dac17dc2c8..b8578c32d344 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c @@ -242,7 +242,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); pci_intx(pdev, 1); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &uli_sht); } diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 08f65492cc81..101d8c219caf 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -308,7 +308,7 @@ static void svia_noop_freeze(struct ata_port *ap) * certain way. Leave it alone and just clear pending IRQ. */ ap->ops->sff_check_status(ap); - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); } /** @@ -463,7 +463,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) struct ata_host *host; int rc; - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; *r_host = host; @@ -520,7 +520,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) struct ata_host *host; int i, rc; - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; *r_host = host; @@ -628,7 +628,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) svia_configure(pdev); pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &svia_sht); } diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 2107952ebff1..b777176ff494 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c @@ -245,7 +245,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap) qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) - handled = ata_sff_host_intr(ap, qc); + handled = ata_bmdma_port_intr(ap, qc); /* We received an interrupt during a polled command, * or some other spurious condition. 
Interrupt reporting diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index f7d6ebaa0418..da8f176c051e 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -789,7 +789,7 @@ static int __init fore200e_sba_map(struct fore200e *fore200e) fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */ /* get the supported DVMA burst sizes */ - bursts = of_getintprop_default(op->node->parent, "burst-sizes", 0x00); + bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00); if (sbus_can_dma_64bit()) sbus_set_sbus64(&op->dev, bursts); @@ -820,18 +820,20 @@ static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_ const u8 *prop; int len; - prop = of_get_property(op->node, "madaddrlo2", &len); + prop = of_get_property(op->dev.of_node, "madaddrlo2", &len); if (!prop) return -ENODEV; memcpy(&prom->mac_addr[4], prop, 4); - prop = of_get_property(op->node, "madaddrhi4", &len); + prop = of_get_property(op->dev.of_node, "madaddrhi4", &len); if (!prop) return -ENODEV; memcpy(&prom->mac_addr[2], prop, 4); - prom->serial_number = of_getintprop_default(op->node, "serialnumber", 0); - prom->hw_revision = of_getintprop_default(op->node, "promversion", 0); + prom->serial_number = of_getintprop_default(op->dev.of_node, + "serialnumber", 0); + prom->hw_revision = of_getintprop_default(op->dev.of_node, + "promversion", 0); return 0; } @@ -841,10 +843,10 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page) struct of_device *op = fore200e->bus_dev; const struct linux_prom_registers *regs; - regs = of_get_property(op->node, "reg", NULL); + regs = of_get_property(op->dev.of_node, "reg", NULL); return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", - (regs ? regs->which_io : 0), op->node->name); + (regs ? 
regs->which_io : 0), op->dev.of_node->name); } #endif /* CONFIG_SBUS */ @@ -2693,8 +2695,11 @@ static const struct of_device_id fore200e_sba_match[] = { MODULE_DEVICE_TABLE(of, fore200e_sba_match); static struct of_platform_driver fore200e_sba_driver = { - .name = "fore_200e", - .match_table = fore200e_sba_match, + .driver = { + .name = "fore_200e", + .owner = THIS_MODULE, + .of_match_table = fore200e_sba_match, + }, .probe = fore200e_sba_probe, .remove = __devexit_p(fore200e_sba_remove), }; diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c index 3fecfb446d90..5ad3bad2b0a5 100644 --- a/drivers/auxdisplay/cfag12864bfb.c +++ b/drivers/auxdisplay/cfag12864bfb.c @@ -37,7 +37,7 @@ #define CFAG12864BFB_NAME "cfag12864bfb" -static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = { +static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = { .id = "cfag12864b", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO10, @@ -48,7 +48,7 @@ static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = { .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo cfag12864bfb_var __initdata = { +static struct fb_var_screeninfo cfag12864bfb_var __devinitdata = { .xres = CFAG12864B_WIDTH, .yres = CFAG12864B_HEIGHT, .xres_virtual = CFAG12864B_WIDTH, @@ -114,7 +114,7 @@ none: return ret; } -static int cfag12864bfb_remove(struct platform_device *device) +static int __devexit cfag12864bfb_remove(struct platform_device *device) { struct fb_info *info = platform_get_drvdata(device); @@ -128,7 +128,7 @@ static int cfag12864bfb_remove(struct platform_device *device) static struct platform_driver cfag12864bfb_driver = { .probe = cfag12864bfb_probe, - .remove = cfag12864bfb_remove, + .remove = __devexit_p(cfag12864bfb_remove), .driver = { .name = CFAG12864BFB_NAME, }, diff --git a/drivers/base/node.c b/drivers/base/node.c index 057979a19eea..2bdd8a94ec94 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -9,6 +9,7 @@ #include <linux/memory.h> #include <linux/node.h> #include <linux/hugetlb.h> +#include <linux/compaction.h> #include <linux/cpumask.h> #include <linux/topology.h> #include <linux/nodemask.h> @@ -246,6 +247,8 @@ int register_node(struct node *node, int num, struct node *parent) scan_unevictable_register_node(node); hugetlb_register_node(node); + + compaction_register_node(node); } return error; } diff --git a/drivers/base/topology.c b/drivers/base/topology.c index bf6b13206d00..9fc630ce1ddb 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -162,7 +162,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, topology_remove_dev(cpu); break; } - return rc ? 
NOTIFY_BAD : NOTIFY_OK; + return notifier_from_errno(rc); } static int __cpuinit topology_sysfs_init(void) diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 59ca2b77b574..52f2d11bc7b9 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1004,7 +1004,7 @@ static const struct block_device_operations floppy_fops = { static int swim3_add_device(struct macio_dev *mdev, int index) { - struct device_node *swim = mdev->ofdev.node; + struct device_node *swim = mdev->ofdev.dev.of_node; struct floppy_state *fs = &floppy_states[index]; int rc = -EBUSY; diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index e1c95e208a66..a7b83c0a7eb5 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -1198,10 +1198,10 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match) dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match); /* device id */ - id = of_get_property(op->node, "port-number", NULL); + id = of_get_property(op->dev.of_node, "port-number", NULL); /* physaddr */ - rc = of_address_to_resource(op->node, 0, &res); + rc = of_address_to_resource(op->dev.of_node, 0, &res); if (rc) { dev_err(&op->dev, "invalid address\n"); return rc; @@ -1209,11 +1209,11 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match) physaddr = res.start; /* irq */ - irq = irq_of_parse_and_map(op->node, 0); + irq = irq_of_parse_and_map(op->dev.of_node, 0); /* bus width */ bus_width = ACE_BUS_WIDTH_16; - if (of_find_property(op->node, "8-bit", NULL)) + if (of_find_property(op->dev.of_node, "8-bit", NULL)) bus_width = ACE_BUS_WIDTH_8; /* Call the bus-independant setup code */ @@ -1237,13 +1237,12 @@ static const struct of_device_id ace_of_match[] __devinitconst = { MODULE_DEVICE_TABLE(of, ace_of_match); static struct of_platform_driver ace_of_driver = { - .owner = THIS_MODULE, - .name = "xsysace", - .match_table = ace_of_match, .probe = ace_of_probe, .remove = __devexit_p(ace_of_remove), .driver = { .name = "xsysace", + .owner = THIS_MODULE, + .of_match_table = ace_of_match, }, }; diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index cc435be0bc13..451cd7071b1d 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -567,7 +567,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) struct disk_info *d; struct cdrom_device_info *c; struct request_queue *q; - struct device_node *node = vdev->dev.archdata.of_node; + struct device_node *node = vdev->dev.of_node; deviceno = vdev->unit_address; if (deviceno >= VIOCD_MAX_CD) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index e21175be25d0..f09fc0e2062d 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -1121,5 +1121,12 @@ config DEVPORT source "drivers/s390/char/Kconfig" +config RAMOOPS + tristate "Log panic/oops to a RAM buffer" + default n + help + This enables panic and oops messages to be logged to a circular + buffer in RAM where it can be read back at some later point. 
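For context on the new option: ramoops keeps the last panic/oops output in a statically reserved block of RAM so it can be read back after a warm reboot. The real driver builds on the kmsg_dump infrastructure and takes its buffer address and size as module parameters; the fragment below is only a much-simplified illustration of the idea, capturing the panic string via the panic notifier chain into an assumed reserved region at a made-up address — it is not the ramoops implementation.

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/string.h>
	#include <linux/io.h>

	#define REC_SIZE	4096
	static void __iomem *ram_buf;

	static int ramlog_panic(struct notifier_block *nb,
				unsigned long event, void *ptr)
	{
		const char *msg = ptr;	/* the string passed to panic() */

		memcpy_toio(ram_buf, msg, strnlen(msg, REC_SIZE - 1) + 1);
		return NOTIFY_DONE;
	}

	static struct notifier_block ramlog_nb = {
		.notifier_call = ramlog_panic,
	};

	static int __init ramlog_init(void)
	{
		ram_buf = ioremap(0x8000000, REC_SIZE);	/* made-up address */
		if (!ram_buf)
			return -EIO;
		atomic_notifier_chain_register(&panic_notifier_list,
					       &ramlog_nb);
		return 0;
	}
	module_init(ramlog_init);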
+ endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index d39be4cf1f5d..88d6eac69754 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -108,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o obj-$(CONFIG_TCG_TPM) += tpm/ obj-$(CONFIG_PS3_FLASH) += ps3flash.o +obj-$(CONFIG_RAMOOPS) += ramoops.o obj-$(CONFIG_JS_RTC) += js-rtc.o js-rtc-y = rtc.o diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 67ea3a60de74..70312da4c968 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -384,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) { u32 httfea,baseaddr,enuscr; struct pci_dev *dev1; - int i; + int i, ret; unsigned size = amd64_fetch_size(); dev_info(&pdev->dev, "setting up ULi AGP\n"); @@ -400,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) if (i == ARRAY_SIZE(uli_sizes)) { dev_info(&pdev->dev, "no ULi size found for %d\n", size); - return -ENODEV; + ret = -ENODEV; + goto put; } /* shadow x86-64 registers into ULi registers */ pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); /* if x86-64 aperture base is beyond 4G, exit here */ - if ((httfea & 0x7fff) >> (32 - 25)) - return -ENODEV; + if ((httfea & 0x7fff) >> (32 - 25)) { + ret = -ENODEV; + goto put; + } httfea = (httfea& 0x7fff) << 25; @@ -420,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) enuscr= httfea+ (size * 1024 * 1024) - 1; pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); - + ret = 0; +put: pci_dev_put(dev1); - return 0; + return ret; } @@ -441,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev) { u32 tmp, apbase, apbar, aplimit; struct pci_dev *dev1; - int i; + int i, ret; unsigned size = amd64_fetch_size(); dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); @@ -458,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) if (i == ARRAY_SIZE(nforce3_sizes)) { dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); - return -ENODEV; + ret = -ENODEV; + goto put; } pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); @@ -472,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) /* if x86-64 aperture base is beyond 4G, exit here */ if ( (apbase & 0x7fff) >> (32 - 25) ) { dev_info(&pdev->dev, "aperture base > 4G\n"); - return -ENODEV; + ret = -ENODEV; + goto put; } apbase = (apbase & 0x7fff) << 25; @@ -488,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev) pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); + ret = 0; +put: pci_dev_put(dev1); - return 0; + return ret; } static int __devinit agp_amd64_probe(struct pci_dev *pdev, diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index 56b27671adc4..4f8d60c25a98 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c @@ -84,6 +84,7 @@ static char *serial_version = "4.30"; #include <linux/smp_lock.h> #include <linux/init.h> #include <linux/bitops.h> +#include <linux/platform_device.h> #include <asm/setup.h> @@ -1954,29 +1955,16 @@ static const struct tty_operations serial_ops = { /* * The serial driver boot-time initialization code! 
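Backing up to the amd64-agp hunks above: both uli_agp_init() and nforce3_agp_init() used to return early from their size and aperture checks and leak the reference taken on the companion device; the fix routes every exit through a single pci_dev_put(). The unwind idiom, reduced to its skeleton — the device lookup and register offset here are illustrative only:

	#include <linux/pci.h>

	static int foo_bridge_setup(struct pci_dev *pdev)
	{
		struct pci_dev *dev1;
		u32 aper;
		int ret;

		dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(0, 0));
		if (!dev1)
			return -ENODEV;

		pci_read_config_dword(dev1, 0x90, &aper);	/* made-up reg */
		if (!aper) {
			ret = -ENODEV;
			goto put;	/* an early return here leaked dev1 */
		}

		/* ... program the bridge ... */
		ret = 0;
	put:
		pci_dev_put(dev1);	/* drop the pci_get_slot() reference */
		return ret;
	}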
*/ -static int __init rs_init(void) +static int __init amiga_serial_probe(struct platform_device *pdev) { unsigned long flags; struct serial_state * state; int error; - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL)) - return -ENODEV; - serial_driver = alloc_tty_driver(1); if (!serial_driver) return -ENOMEM; - /* - * We request SERDAT and SERPER only, because the serial registers are - * too spreaded over the custom register space - */ - if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4, - "amiserial [Paula]")) { - error = -EBUSY; - goto fail_put_tty_driver; - } - IRQ_ports = NULL; show_serial_version(); @@ -1998,7 +1986,7 @@ static int __init rs_init(void) error = tty_register_driver(serial_driver); if (error) - goto fail_release_mem_region; + goto fail_put_tty_driver; state = rs_table; state->magic = SSTATE_MAGIC; @@ -2050,23 +2038,24 @@ static int __init rs_init(void) ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */ ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */ + platform_set_drvdata(pdev, state); + return 0; fail_free_irq: free_irq(IRQ_AMIGA_TBE, state); fail_unregister: tty_unregister_driver(serial_driver); -fail_release_mem_region: - release_mem_region(CUSTOM_PHYSADDR+0x30, 4); fail_put_tty_driver: put_tty_driver(serial_driver); return error; } -static __exit void rs_exit(void) +static int __exit amiga_serial_remove(struct platform_device *pdev) { int error; - struct async_struct *info = rs_table[0].info; + struct serial_state *state = platform_get_drvdata(pdev); + struct async_struct *info = state->info; /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ tasklet_kill(&info->tlet); @@ -2075,19 +2064,38 @@ static __exit void rs_exit(void) error); put_tty_driver(serial_driver); - if (info) { - rs_table[0].info = NULL; - kfree(info); - } + rs_table[0].info = NULL; + kfree(info); free_irq(IRQ_AMIGA_TBE, rs_table); free_irq(IRQ_AMIGA_RBF, rs_table); - release_mem_region(CUSTOM_PHYSADDR+0x30, 4); + platform_set_drvdata(pdev, NULL); + + return error; +} + +static struct platform_driver amiga_serial_driver = { + .remove = __exit_p(amiga_serial_remove), + .driver = { + .name = "amiga-serial", + .owner = THIS_MODULE, + }, +}; + +static int __init amiga_serial_init(void) +{ + return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe); +} + +module_init(amiga_serial_init); + +static void __exit amiga_serial_exit(void) +{ + platform_driver_unregister(&amiga_serial_driver); } -module_init(rs_init) -module_exit(rs_exit) +module_exit(amiga_serial_exit); #if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE) @@ -2154,3 +2162,4 @@ console_initcall(amiserial_console_init); #endif /* CONFIG_SERIAL_CONSOLE && !MODULE */ MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-serial"); diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index 4f568cb9af3f..033e1505fca9 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -265,8 +265,8 @@ static unsigned int apm_poll(struct file *fp, poll_table * wait) * Only when everyone who has opened /dev/apm_bios with write permission * has acknowledge does the actual suspend happen. 
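The amiserial rework above is the standard conversion for probe-once legacy hardware: the MACH_IS_AMIGA check and the request_mem_region() call drop out of the driver (the platform device, named "amiga-serial" per the MODULE_ALIAS added at the end, is presumably created by the Amiga platform code elsewhere in this series), and because the probe routine is __init it is bound with platform_driver_probe() rather than being left in the ops table. The shape of that idiom:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int __init foo_probe(struct platform_device *pdev)
	{
		return 0;	/* runs once, against the single matching device */
	}

	static int __exit foo_remove(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver foo_driver = {
		/*
		 * No .probe here: a pointer to an __init function must not
		 * survive past the point where init memory is discarded.
		 */
		.remove	= __exit_p(foo_remove),
		.driver	= {
			.name	= "foo",	/* hypothetical */
			.owner	= THIS_MODULE,
		},
	};

	static int __init foo_init(void)
	{
		return platform_driver_probe(&foo_driver, foo_probe);
	}
	module_init(foo_init);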
*/ -static int -apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) +static long +apm_ioctl(struct file *filp, u_int cmd, u_long arg) { struct apm_user *as = filp->private_data; int err = -EINVAL; @@ -274,6 +274,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) if (!as->suser || !as->writer) return -EPERM; + lock_kernel(); switch (cmd) { case APM_IOC_SUSPEND: mutex_lock(&state_lock); @@ -334,6 +335,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) mutex_unlock(&state_lock); break; } + unlock_kernel(); return err; } @@ -397,7 +399,7 @@ static const struct file_operations apm_bios_fops = { .owner = THIS_MODULE, .read = apm_read, .poll = apm_poll, - .ioctl = apm_ioctl, + .unlocked_ioctl = apm_ioctl, .open = apm_open, .release = apm_release, }; diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index a7424bf7eacf..f4ae0e0fb631 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -26,6 +26,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> +#include <linux/smp_lock.h> #include <linux/miscdevice.h> #include <linux/pci.h> #include <linux/wait.h> @@ -106,8 +107,7 @@ static unsigned int DeviceErrorCount; /* number of device error */ static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *); static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *); -static int ac_ioctl(struct inode *, struct file *, unsigned int, - unsigned long); +static long ac_ioctl(struct file *, unsigned int, unsigned long); static irqreturn_t ac_interrupt(int, void *); static const struct file_operations ac_fops = { @@ -115,7 +115,7 @@ static const struct file_operations ac_fops = { .llseek = no_llseek, .read = ac_read, .write = ac_write, - .ioctl = ac_ioctl, + .unlocked_ioctl = ac_ioctl, }; static struct miscdevice ac_miscdev = { @@ -689,7 +689,7 @@ static irqreturn_t ac_interrupt(int vec, void *dev_instance) -static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { /* @ ADG ou ATO selon le cas */ int i; @@ -703,15 +703,11 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un /* In general, the device is only openable by root anyway, so we're not particularly concerned that bogus ioctls can flood the console. 
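From the apm emulation down through the ipmi driver, the recurring change is the Big Kernel Lock pushdown: the VFS used to take the BKL around file_operations.ioctl, so switching to .unlocked_ioctl means each not-yet-audited driver now takes lock_kernel() itself to preserve its old serialization. (applicom, just below, additionally swaps its kmalloc()/copy_from_user() pair for memdup_user(), which returns an ERR_PTR on failure.) The mechanical wrapper used by ds1620, generic_nvram, genrtc and hpet in the following hunks, with foo_* standing in for the driver prefix:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/smp_lock.h>

	static int foo_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
	{
		/* the original handler, minus the now-unused inode argument */
		return 0;
	}

	static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
				       unsigned long arg)
	{
		long ret;

		lock_kernel();	/* keep the serialization .ioctl implied */
		ret = foo_ioctl(file, cmd, arg);
		unlock_kernel();

		return ret;
	}

	static const struct file_operations foo_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= foo_unlocked_ioctl,
	};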
*/ - adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL); - if (!adgl) - return -ENOMEM; + adgl = memdup_user(argp, sizeof(struct st_ram_io)); + if (IS_ERR(adgl)) + return PTR_ERR(adgl); - if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) { - kfree(adgl); - return -EFAULT; - } - + lock_kernel(); IndexCard = adgl->num_card-1; if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) { @@ -721,6 +717,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un warncount--; } kfree(adgl); + unlock_kernel(); return -EINVAL; } @@ -838,6 +835,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un } Dummy = readb(apbs[IndexCard].RamIO + VERS); kfree(adgl); + unlock_kernel(); return 0; } diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c index 61f0146e215d..dbee8688f75c 100644 --- a/drivers/char/ds1620.c +++ b/drivers/char/ds1620.c @@ -232,7 +232,7 @@ ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr) } static int -ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct therm therm; union { @@ -316,6 +316,18 @@ ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned return 0; } +static long +ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = ds1620_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + #ifdef THERM_USE_PROC static int proc_therm_ds1620_read(char *buf, char **start, off_t offset, @@ -344,7 +356,7 @@ static const struct file_operations ds1620_fops = { .owner = THIS_MODULE, .open = ds1620_open, .read = ds1620_read, - .ioctl = ds1620_ioctl, + .unlocked_ioctl = ds1620_unlocked_ioctl, }; static struct miscdevice ds1620_miscdev = { diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 045c930e6320..e3859d4eaead 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c @@ -93,8 +93,8 @@ static ssize_t dtlk_write(struct file *, const char __user *, static unsigned int dtlk_poll(struct file *, poll_table *); static int dtlk_open(struct inode *, struct file *); static int dtlk_release(struct inode *, struct file *); -static int dtlk_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg); +static long dtlk_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); static const struct file_operations dtlk_fops = { @@ -102,7 +102,7 @@ static const struct file_operations dtlk_fops = .read = dtlk_read, .write = dtlk_write, .poll = dtlk_poll, - .ioctl = dtlk_ioctl, + .unlocked_ioctl = dtlk_ioctl, .open = dtlk_open, .release = dtlk_release, }; @@ -263,10 +263,9 @@ static void dtlk_timer_tick(unsigned long data) wake_up_interruptible(&dtlk_process_list); } -static int dtlk_ioctl(struct inode *inode, - struct file *file, - unsigned int cmd, - unsigned long arg) +static long dtlk_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) { char __user *argp = (char __user *)arg; struct dtlk_settings *sp; @@ -276,7 +275,9 @@ static int dtlk_ioctl(struct inode *inode, switch (cmd) { case DTLK_INTERROGATE: + lock_kernel(); sp = dtlk_interrogate(); + unlock_kernel(); if (copy_to_user(argp, sp, sizeof(struct dtlk_settings))) return -EINVAL; return 0; diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c index fda4181b5e67..82b5a88a82d7 100644 --- a/drivers/char/generic_nvram.c +++ b/drivers/char/generic_nvram.c @@ -19,6 
+19,7 @@ #include <linux/miscdevice.h> #include <linux/fcntl.h> #include <linux/init.h> +#include <linux/smp_lock.h> #include <asm/uaccess.h> #include <asm/nvram.h> #ifdef CONFIG_PPC_PMAC @@ -84,8 +85,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf, return p - buf; } -static int nvram_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch(cmd) { #ifdef CONFIG_PPC_PMAC @@ -116,12 +116,23 @@ static int nvram_ioctl(struct inode *inode, struct file *file, return 0; } +static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = nvram_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + const struct file_operations nvram_fops = { .owner = THIS_MODULE, .llseek = nvram_llseek, .read = read_nvram, .write = write_nvram, - .ioctl = nvram_ioctl, + .unlocked_ioctl = nvram_unlocked_ioctl, }; static struct miscdevice nvram_dev = { diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c index 31e7c91c2d9d..b6c2cc167c11 100644 --- a/drivers/char/genrtc.c +++ b/drivers/char/genrtc.c @@ -262,7 +262,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit) #endif } -static int gen_rtc_ioctl(struct inode *inode, struct file *file, +static int gen_rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct rtc_time wtime; @@ -332,6 +332,18 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file, return -EINVAL; } +static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = gen_rtc_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + /* * We enforce only one user at a time here with the open/close. 
* Also clear the previous interrupt data on an open, and clean @@ -482,7 +494,7 @@ static const struct file_operations gen_rtc_fops = { .read = gen_rtc_read, .poll = gen_rtc_poll, #endif - .ioctl = gen_rtc_ioctl, + .unlocked_ioctl = gen_rtc_unlocked_ioctl, .open = gen_rtc_open, .release = gen_rtc_release, }; diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c index 712d9f271aa6..e0249722d25f 100644 --- a/drivers/char/hangcheck-timer.c +++ b/drivers/char/hangcheck-timer.c @@ -49,8 +49,9 @@ #include <asm/uaccess.h> #include <linux/sysrq.h> #include <linux/timer.h> +#include <linux/time.h> -#define VERSION_STR "0.9.0" +#define VERSION_STR "0.9.1" #define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */ #define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */ @@ -119,10 +120,8 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); #if defined(CONFIG_S390) # define HAVE_MONOTONIC # define TIMER_FREQ 1000000000ULL -#elif defined(CONFIG_IA64) -# define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq) #else -# define TIMER_FREQ (HZ*loops_per_jiffy) +# define TIMER_FREQ 1000000000ULL #endif #ifdef HAVE_MONOTONIC @@ -130,7 +129,9 @@ extern unsigned long long monotonic_clock(void); #else static inline unsigned long long monotonic_clock(void) { - return get_cycles(); + struct timespec ts; + getrawmonotonic(&ts); + return timespec_to_ns(&ts); } #endif /* HAVE_MONOTONIC */ @@ -168,6 +169,13 @@ static void hangcheck_fire(unsigned long data) printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n"); } } +#if 0 + /* + * Enable to investigate delays in detail + */ + printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n", + tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ); +#endif mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); hangcheck_tsc = monotonic_clock(); } @@ -180,7 +188,7 @@ static int __init hangcheck_init(void) #if defined (HAVE_MONOTONIC) printk("Hangcheck: Using monotonic_clock().\n"); #else - printk("Hangcheck: Using get_cycles().\n"); + printk("Hangcheck: Using getrawmonotonic().\n"); #endif /* HAVE_MONOTONIC */ hangcheck_tsc_margin = (unsigned long long)(hangcheck_margin + hangcheck_tick); diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 9ded667625ac..a0a1829d3198 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -431,14 +431,18 @@ static int hpet_release(struct inode *inode, struct file *file) static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int); -static int -hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd, - unsigned long arg) +static long hpet_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { struct hpet_dev *devp; + int ret; devp = file->private_data; - return hpet_ioctl_common(devp, cmd, arg, 0); + lock_kernel(); + ret = hpet_ioctl_common(devp, cmd, arg, 0); + unlock_kernel(); + + return ret; } static int hpet_ioctl_ieon(struct hpet_dev *devp) @@ -654,7 +658,7 @@ static const struct file_operations hpet_fops = { .llseek = no_llseek, .read = hpet_read, .poll = hpet_poll, - .ioctl = hpet_ioctl, + .unlocked_ioctl = hpet_ioctl, .open = hpet_open, .release = hpet_release, .fasync = hpet_fasync, diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index 793b236c9266..d4b14ff1c4c1 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c @@ -194,10 +194,8 @@ static inline void print_state(struct hvsi_struct *hp) "HVSI_WAIT_FOR_MCTRL_RESPONSE", "HVSI_FSP_DIED", }; - const char *name = state_names[hp->state]; - 
- if (hp->state > ARRAY_SIZE(state_names)) - name = "UNKNOWN"; + const char *name = (hp->state < ARRAY_SIZE(state_names)) + ? state_names[hp->state] : "UNKNOWN"; pr_debug("hvsi%i: state = %s\n", hp->index, name); #endif /* DEBUG */ diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 10f868eefaa6..0f9cbf1aaf15 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -660,7 +660,7 @@ static int __devinit n2rng_probe(struct of_device *op, np->hvapi_major); goto out_hvapi_unregister; } - np->num_units = of_getintprop_default(op->node, + np->num_units = of_getintprop_default(op->dev.of_node, "rng-#units", 0); if (!np->num_units) { dev_err(&op->dev, "VF RNG lacks rng-#units property\n"); @@ -751,8 +751,11 @@ static const struct of_device_id n2rng_match[] = { MODULE_DEVICE_TABLE(of, n2rng_match); static struct of_platform_driver n2rng_driver = { - .name = "n2rng", - .match_table = n2rng_match, + .driver = { + .name = "n2rng", + .owner = THIS_MODULE, + .of_match_table = n2rng_match, + }, .probe = n2rng_probe, .remove = __devexit_p(n2rng_remove), }; diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index a8b4c4010144..a348c7e9aa0b 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -15,6 +15,10 @@ #include <linux/amba/bus.h> #include <linux/hw_random.h> #include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> + +static struct clk *rng_clk; static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { @@ -40,6 +44,15 @@ static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id) void __iomem *base; int ret; + rng_clk = clk_get(&dev->dev, NULL); + if (IS_ERR(rng_clk)) { + dev_err(&dev->dev, "could not get rng clock\n"); + ret = PTR_ERR(rng_clk); + return ret; + } + + clk_enable(rng_clk); + ret = amba_request_regions(dev, dev->dev.init_name); if (ret) return ret; @@ -57,6 +70,8 @@ out_unmap: iounmap(base); out_release: amba_release_regions(dev); + clk_disable(rng_clk); + clk_put(rng_clk); return ret; } @@ -66,6 +81,8 @@ static int nmk_rng_remove(struct amba_device *dev) hwrng_unregister(&nmk_rng); iounmap(base); amba_release_regions(dev); + clk_disable(rng_clk); + clk_put(rng_clk); return 0; } diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index 7fa61dd1d9d9..261ba8f22b8b 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c @@ -98,7 +98,7 @@ static int __devinit rng_probe(struct of_device *ofdev, const struct of_device_id *match) { void __iomem *rng_regs; - struct device_node *rng_np = ofdev->node; + struct device_node *rng_np = ofdev->dev.of_node; struct resource res; int err = 0; @@ -140,8 +140,11 @@ static struct of_device_id rng_match[] = { }; static struct of_platform_driver rng_driver = { - .name = "pasemi-rng", - .match_table = rng_match, + .driver = { + .name = "pasemi-rng", + .owner = THIS_MODULE, + .of_match_table = rng_match, + }, .probe = rng_probe, .remove = rng_remove, }; diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 65545de3dbf4..d8ec92a38980 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -228,8 +228,7 @@ static int handle_send_req(ipmi_user_t user, return rv; } -static int ipmi_ioctl(struct inode *inode, - struct file *file, +static int ipmi_ioctl(struct file *file, unsigned int cmd, unsigned long data) { @@ -630,6 +629,23 @@ static int 
ipmi_ioctl(struct inode *inode, return rv; } +/* + * Note: it doesn't make sense to take the BKL here but + * not in compat_ipmi_ioctl. -arnd + */ +static long ipmi_unlocked_ioctl(struct file *file, + unsigned int cmd, + unsigned long data) +{ + int ret; + + lock_kernel(); + ret = ipmi_ioctl(file, cmd, data); + unlock_kernel(); + + return ret; +} + #ifdef CONFIG_COMPAT /* @@ -802,7 +818,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, if (copy_to_user(precv64, &recv64, sizeof(recv64))) return -EFAULT; - rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep, + rc = ipmi_ioctl(filep, ((cmd == COMPAT_IPMICTL_RECEIVE_MSG) ? IPMICTL_RECEIVE_MSG : IPMICTL_RECEIVE_MSG_TRUNC), @@ -819,14 +835,14 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, return rc; } default: - return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg); + return ipmi_ioctl(filep, cmd, arg); } } #endif static const struct file_operations ipmi_fops = { .owner = THIS_MODULE, - .ioctl = ipmi_ioctl, + .unlocked_ioctl = ipmi_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_ipmi_ioctl, #endif diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index c6ad4234378d..4f3f8c9ec262 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, return rv; } - printk(KERN_INFO - "ipmi: Found new BMC (man_id: 0x%6.6x, " - " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", - bmc->id.manufacturer_id, - bmc->id.product_id, - bmc->id.device_id); + dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, " + "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", + bmc->id.manufacturer_id, + bmc->id.product_id, + bmc->id.device_id); } /* @@ -4037,8 +4036,8 @@ static void ipmi_request_event(void) static struct timer_list ipmi_timer; -/* Call every ~100 ms. */ -#define IPMI_TIMEOUT_TIME 100 +/* Call every ~1000 ms. */ +#define IPMI_TIMEOUT_TIME 1000 /* How many jiffies does it take to get to the timeout time. */ #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 4462b113ba3f..35603dd4e6c5 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -107,6 +107,14 @@ enum si_type { }; static char *si_to_str[] = { "kcs", "smic", "bt" }; +enum ipmi_addr_src { + SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, + SI_PCI, SI_DEVICETREE, SI_DEFAULT +}; +static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", + "ACPI", "SMBIOS", "PCI", + "device-tree", "default" }; + #define DEVICE_NAME "ipmi_si" static struct platform_driver ipmi_driver = { @@ -188,7 +196,7 @@ struct smi_info { int (*irq_setup)(struct smi_info *info); void (*irq_cleanup)(struct smi_info *info); unsigned int io_size; - char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */ + enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ void (*addr_source_cleanup)(struct smi_info *info); void *addr_source_data; @@ -300,6 +308,7 @@ static int num_max_busy_us; static int unload_when_empty = 1; +static int add_smi(struct smi_info *smi); static int try_smi_init(struct smi_info *smi); static void cleanup_one_si(struct smi_info *to_clean); @@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info, { /* Deliver the message to the upper layer with the lock released. 
*/ - spin_unlock(&(smi_info->si_lock)); - ipmi_smi_msg_received(smi_info->intf, msg); - spin_lock(&(smi_info->si_lock)); + + if (smi_info->run_to_completion) { + ipmi_smi_msg_received(smi_info->intf, msg); + } else { + spin_unlock(&(smi_info->si_lock)); + ipmi_smi_msg_received(smi_info->intf, msg); + spin_lock(&(smi_info->si_lock)); + } } static void return_hosed_msg(struct smi_info *smi_info, int cCode) @@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info) if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { start_disable_irq(smi_info); smi_info->interrupt_disabled = 1; + if (!atomic_read(&smi_info->stop_operation)) + mod_timer(&smi_info->si_timer, + jiffies + SI_TIMEOUT_JIFFIES); } } @@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info) smi_info->handlers->get_result(smi_info->si_sm, msg, 3); if (msg[2] != 0) { /* Error clearing flags */ - printk(KERN_WARNING - "ipmi_si: Error clearing flags: %2.2x\n", - msg[2]); + dev_warn(smi_info->dev, + "Error clearing flags: %2.2x\n", msg[2]); } if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) start_enable_irq(smi_info); @@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Could not enable interrupts" - ", failed get, using polled mode.\n"); + dev_warn(smi_info->dev, "Could not enable interrupts" + ", failed get, using polled mode.\n"); smi_info->si_state = SI_NORMAL; } else { msg[0] = (IPMI_NETFN_APP_REQUEST << 2); @@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Could not enable interrupts" - ", failed set, using polled mode.\n"); - } + if (msg[2] != 0) + dev_warn(smi_info->dev, "Could not enable interrupts" + ", failed set, using polled mode.\n"); + else + smi_info->interrupt_disabled = 0; smi_info->si_state = SI_NORMAL; break; } @@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Could not disable interrupts" - ", failed get.\n"); + dev_warn(smi_info->dev, "Could not disable interrupts" + ", failed get.\n"); smi_info->si_state = SI_NORMAL; } else { msg[0] = (IPMI_NETFN_APP_REQUEST << 2); @@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. 
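Note what disable_si_irq() gains above: it now arms the driver's timer whenever it turns interrupts off, so the state machine keeps being driven by polling instead of stalling until the next external event. mod_timer() is the right primitive for that, since it behaves correctly whether or not the timer is already pending (add_timer() on a pending timer is a bug). The fallback-to-polling idiom, sketched with hypothetical names:

#include <linux/interrupt.h>
#include <linux/timer.h>

struct foo_dev {
	int irq;
	int interrupt_disabled;
	struct timer_list poll_timer;
};

#define FOO_POLL_JIFFIES	(HZ / 10)	/* illustrative period */

static void foo_disable_irq(struct foo_dev *dev)
{
	if (dev->irq && !dev->interrupt_disabled) {
		disable_irq_nosync(dev->irq);
		dev->interrupt_disabled = 1;
		/* safe on a pending or an idle timer alike */
		mod_timer(&dev->poll_timer, jiffies + FOO_POLL_JIFFIES);
	}
}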
*/ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Could not disable interrupts" - ", failed set.\n"); + dev_warn(smi_info->dev, "Could not disable interrupts" + ", failed set.\n"); } smi_info->si_state = SI_NORMAL; break; @@ -877,6 +890,11 @@ static void sender(void *send_info, printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); #endif + mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); + + if (smi_info->thread) + wake_up_process(smi_info->thread); + if (smi_info->run_to_completion) { /* * If we are running to completion, then throw it in @@ -997,6 +1015,8 @@ static int ipmi_thread(void *data) ; /* do nothing */ else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) schedule(); + else if (smi_result == SI_SM_IDLE) + schedule_timeout_interruptible(100); else schedule_timeout_interruptible(0); } @@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data) unsigned long flags; unsigned long jiffies_now; long time_diff; + long timeout; #ifdef DEBUG_TIMING struct timeval t; #endif @@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data) if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { /* Running with interrupts, only do long timeouts. */ - smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; + timeout = jiffies + SI_TIMEOUT_JIFFIES; smi_inc_stat(smi_info, long_timeouts); - goto do_add_timer; + goto do_mod_timer; } /* @@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data) */ if (smi_result == SI_SM_CALL_WITH_DELAY) { smi_inc_stat(smi_info, short_timeouts); - smi_info->si_timer.expires = jiffies + 1; + timeout = jiffies + 1; } else { smi_inc_stat(smi_info, long_timeouts); - smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; + timeout = jiffies + SI_TIMEOUT_JIFFIES; } - do_add_timer: - add_timer(&(smi_info->si_timer)); + do_mod_timer: + if (smi_result != SI_SM_IDLE) + mod_timer(&(smi_info->si_timer), timeout); } static irqreturn_t si_irq_handler(int irq, void *data) @@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info, new_smi->thread = kthread_run(ipmi_thread, new_smi, "kipmi%d", new_smi->intf_num); if (IS_ERR(new_smi->thread)) { - printk(KERN_NOTICE "ipmi_si_intf: Could not start" - " kernel thread due to error %ld, only using" - " timers to drive the interface\n", - PTR_ERR(new_smi->thread)); + dev_notice(new_smi->dev, "Could not start" + " kernel thread due to error %ld, only using" + " timers to drive the interface\n", + PTR_ERR(new_smi->thread)); new_smi->thread = NULL; } } @@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info) DEVICE_NAME, info); if (rv) { - printk(KERN_WARNING - "ipmi_si: %s unable to claim interrupt %d," - " running polled\n", - DEVICE_NAME, info->irq); + dev_warn(info->dev, "%s unable to claim interrupt %d," + " running polled\n", + DEVICE_NAME, info->irq); info->irq = 0; } else { info->irq_cleanup = std_irq_cleanup; - printk(" Using irq %d\n", info->irq); + dev_info(info->dev, "Using irq %d\n", info->irq); } return rv; @@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info) info->io.outputb = port_outl; break; default: - printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", - info->io.regsize); + dev_warn(info->dev, "Invalid register size: %d\n", + info->io.regsize); return -EINVAL; } @@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info) break; #endif default: - printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", - info->io.regsize); + dev_warn(info->dev, "Invalid 
register size: %d\n", + info->io.regsize); return -EINVAL; } @@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp) goto out; } - info->addr_source = "hotmod"; + info->addr_source = SI_HOTMOD; info->si_type = si_type; info->io.addr_data = addr; info->io.addr_type = addr_space; @@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp) info->irq_setup = std_irq_setup; info->slave_addr = ipmb; - try_smi_init(info); + if (!add_smi(info)) + if (try_smi_init(info)) + cleanup_one_si(info); } else { /* remove */ struct smi_info *e, *tmp_e; @@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void) if (!info) return; - info->addr_source = "hardcoded"; + info->addr_source = SI_HARDCODED; + printk(KERN_INFO PFX "probing via hardcoded address\n"); if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { info->si_type = SI_KCS; @@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void) } else if (strcmp(si_type[i], "bt") == 0) { info->si_type = SI_BT; } else { - printk(KERN_WARNING - "ipmi_si: Interface type specified " + printk(KERN_WARNING PFX "Interface type specified " "for interface %d, was invalid: %s\n", i, si_type[i]); kfree(info); @@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void) info->io.addr_data = addrs[i]; info->io.addr_type = IPMI_MEM_ADDR_SPACE; } else { - printk(KERN_WARNING - "ipmi_si: Interface type specified " - "for interface %d, " - "but port and address were not set or " - "set to zero.\n", i); + printk(KERN_WARNING PFX "Interface type specified " + "for interface %d, but port and address were " + "not set or set to zero.\n", i); kfree(info); continue; } @@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void) info->irq_setup = std_irq_setup; info->slave_addr = slave_addrs[i]; - try_smi_init(info); + if (!add_smi(info)) + if (try_smi_init(info)) + cleanup_one_si(info); } } @@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info) &ipmi_acpi_gpe, info); if (status != AE_OK) { - printk(KERN_WARNING - "ipmi_si: %s unable to claim ACPI GPE %d," - " running polled\n", - DEVICE_NAME, info->irq); + dev_warn(info->dev, "%s unable to claim ACPI GPE %d," + " running polled\n", DEVICE_NAME, info->irq); info->irq = 0; return -EINVAL; } else { info->irq_cleanup = acpi_gpe_irq_cleanup; - printk(" Using ACPI GPE %d\n", info->irq); + dev_info(info->dev, "Using ACPI GPE %d\n", info->irq); return 0; } } @@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) u8 addr_space; if (spmi->IPMIlegacy != 1) { - printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); - return -ENODEV; + printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy); + return -ENODEV; } if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) @@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { - printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); + printk(KERN_ERR PFX "Could not allocate SI data (3)\n"); return -ENOMEM; } - info->addr_source = "SPMI"; + info->addr_source = SI_SPMI; + printk(KERN_INFO PFX "probing via SPMI\n"); /* Figure out the interface type. 
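Both the hotmod and the hardcoded probe paths above now follow the same three-step idiom: queue the interface description with add_smi(), attempt the full bring-up with try_smi_init(), and undo the registration with cleanup_one_si() if the bring-up fails. Distilled into a sketch (assuming all three functions keep the 0-on-success convention visible in these hunks; this is not a literal call site):

static void foo_try_interface(struct smi_info *info)
{
	if (add_smi(info))
		return;			/* duplicate interface, nothing queued */
	if (try_smi_init(info))
		cleanup_one_si(info);	/* roll back the partial registration */
}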
*/ switch (spmi->InterfaceType) { @@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) info->si_type = SI_BT; break; default: - printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", - spmi->InterfaceType); + printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n", + spmi->InterfaceType); kfree(info); return -EIO; } @@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) info->io.addr_type = IPMI_IO_ADDR_SPACE; } else { kfree(info); - printk(KERN_WARNING - "ipmi_si: Unknown ACPI I/O Address type\n"); + printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n"); return -EIO; } info->io.addr_data = spmi->addr.address; - try_smi_init(info); + add_smi(info); return 0; } @@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, { struct acpi_device *acpi_dev; struct smi_info *info; + struct resource *res; acpi_handle handle; acpi_status status; unsigned long long tmp; @@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, if (!info) return -ENOMEM; - info->addr_source = "ACPI"; + info->addr_source = SI_ACPI; + printk(KERN_INFO PFX "probing via ACPI\n"); handle = acpi_dev->handle; @@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, info->si_type = SI_BT; break; default: - dev_info(&dev->dev, "unknown interface type %lld\n", tmp); + dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); goto err_free; } - if (pnp_port_valid(dev, 0)) { + res = pnp_get_resource(dev, IORESOURCE_IO, 0); + if (res) { info->io_setup = port_setup; info->io.addr_type = IPMI_IO_ADDR_SPACE; - info->io.addr_data = pnp_port_start(dev, 0); - } else if (pnp_mem_valid(dev, 0)) { - info->io_setup = mem_setup; - info->io.addr_type = IPMI_MEM_ADDR_SPACE; - info->io.addr_data = pnp_mem_start(dev, 0); } else { + res = pnp_get_resource(dev, IORESOURCE_MEM, 0); + if (res) { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } + } + if (!res) { dev_err(&dev->dev, "no I/O or memory address\n"); goto err_free; } + info->io.addr_data = res->start; info->io.regspacing = DEFAULT_REGSPACING; info->io.regsize = DEFAULT_REGSPACING; @@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, info->irq_setup = std_irq_setup; } - info->dev = &acpi_dev->dev; + info->dev = &dev->dev; pnp_set_drvdata(dev, info); - return try_smi_init(info); + dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", + res, info->io.regsize, info->io.regspacing, + info->irq); + + return add_smi(info); err_free: kfree(info); @@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { - printk(KERN_ERR - "ipmi_si: Could not allocate SI data\n"); + printk(KERN_ERR PFX "Could not allocate SI data\n"); return; } - info->addr_source = "SMBIOS"; + info->addr_source = SI_SMBIOS; + printk(KERN_INFO PFX "probing via SMBIOS\n"); switch (ipmi_data->type) { case 0x01: /* KCS */ @@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) default: kfree(info); - printk(KERN_WARNING - "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n", + printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n", ipmi_data->addr_space); return; } @@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) if (info->irq) info->irq_setup = std_irq_setup; - try_smi_init(info); + add_smi(info); } static void __devinit dmi_find_bmc(void) @@ -2368,7 +2398,8 @@ static int __devinit 
ipmi_pci_probe(struct pci_dev *pdev, if (!info) return -ENOMEM; - info->addr_source = "PCI"; + info->addr_source = SI_PCI; + dev_info(&pdev->dev, "probing via PCI"); switch (class_type) { case PCI_ERMC_CLASSCODE_TYPE_SMIC: @@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev, default: kfree(info); - printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n", - pci_name(pdev), class_type); + dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type); return -ENOMEM; } rv = pci_enable_device(pdev); if (rv) { - printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n", - pci_name(pdev)); + dev_err(&pdev->dev, "couldn't enable PCI device\n"); kfree(info); return rv; } @@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev, info->dev = &pdev->dev; pci_set_drvdata(pdev, info); - return try_smi_init(info); + dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", + &pdev->resource[0], info->io.regsize, info->io.regspacing, + info->irq); + + return add_smi(info); } static void __devexit ipmi_pci_remove(struct pci_dev *pdev) @@ -2469,11 +2502,11 @@ static int __devinit ipmi_of_probe(struct of_device *dev, struct smi_info *info; struct resource resource; const int *regsize, *regspacing, *regshift; - struct device_node *np = dev->node; + struct device_node *np = dev->dev.of_node; int ret; int proplen; - dev_info(&dev->dev, PFX "probing via device tree\n"); + dev_info(&dev->dev, "probing via device tree\n"); ret = of_address_to_resource(np, 0, &resource); if (ret) { @@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev, if (!info) { dev_err(&dev->dev, - PFX "could not allocate memory for OF probe\n"); + "could not allocate memory for OF probe\n"); return -ENOMEM; } info->si_type = (enum si_type) match->data; - info->addr_source = "device-tree"; + info->addr_source = SI_DEVICETREE; info->irq_setup = std_irq_setup; if (resource.flags & IORESOURCE_IO) { @@ -2525,16 +2558,16 @@ static int __devinit ipmi_of_probe(struct of_device *dev, info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING; info->io.regshift = regshift ? 
*regshift : 0; - info->irq = irq_of_parse_and_map(dev->node, 0); + info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); info->dev = &dev->dev; - dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n", + dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n", info->io.addr_data, info->io.regsize, info->io.regspacing, info->irq); dev_set_drvdata(&dev->dev, info); - return try_smi_init(info); + return add_smi(info); } static int __devexit ipmi_of_remove(struct of_device *dev) @@ -2555,8 +2588,11 @@ static struct of_device_id ipmi_match[] = }; static struct of_platform_driver ipmi_of_platform_driver = { - .name = "ipmi", - .match_table = ipmi_match, + .driver = { + .name = "ipmi", + .owner = THIS_MODULE, + .of_match_table = ipmi_match, + }, .probe = ipmi_of_probe, .remove = __devexit_p(ipmi_of_remove), }; @@ -2640,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) rv = wait_for_msg_done(smi_info); if (rv) { - printk(KERN_WARNING - "ipmi_si: Error getting response from get global," - " enables command, the event buffer is not" + printk(KERN_WARNING PFX "Error getting response from get" + " global enables command, the event buffer is not" " enabled.\n"); goto out; } @@ -2654,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || resp[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Invalid return from get global" - " enables command, cannot enable the event" - " buffer.\n"); + printk(KERN_WARNING PFX "Invalid return from get global" + " enables command, cannot enable the event buffer.\n"); rv = -EINVAL; goto out; } @@ -2673,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) rv = wait_for_msg_done(smi_info); if (rv) { - printk(KERN_WARNING - "ipmi_si: Error getting response from set global," - " enables command, the event buffer is not" + printk(KERN_WARNING PFX "Error getting response from set" + " global, enables command, the event buffer is not" " enabled.\n"); goto out; } @@ -2686,10 +2718,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) if (resp_len < 3 || resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { - printk(KERN_WARNING - "ipmi_si: Invalid return from get global," - "enables command, not enable the event" - " buffer.\n"); + printk(KERN_WARNING PFX "Invalid return from get global," + "enables command, not enable the event buffer.\n"); rv = -EINVAL; goto out; } @@ -2948,7 +2978,7 @@ static __devinit void default_find_bmc(void) if (!info) return; - info->addr_source = NULL; + info->addr_source = SI_DEFAULT; info->si_type = ipmi_defaults[i].type; info->io_setup = port_setup; @@ -2960,14 +2990,16 @@ static __devinit void default_find_bmc(void) info->io.regsize = DEFAULT_REGSPACING; info->io.regshift = 0; - if (try_smi_init(info) == 0) { - /* Found one... */ - printk(KERN_INFO "ipmi_si: Found default %s state" - " machine at %s address 0x%lx\n", - si_to_str[info->si_type], - addr_space_to_str[info->io.addr_type], - info->io.addr_data); - return; + if (add_smi(info) == 0) { + if ((try_smi_init(info)) == 0) { + /* Found one... 
*/ + printk(KERN_INFO PFX "Found default %s" + " state machine at %s address 0x%lx\n", + si_to_str[info->si_type], + addr_space_to_str[info->io.addr_type], + info->io.addr_data); + } else + cleanup_one_si(info); } } } @@ -2986,34 +3018,48 @@ static int is_new_interface(struct smi_info *info) return 1; } -static int try_smi_init(struct smi_info *new_smi) +static int add_smi(struct smi_info *new_smi) { - int rv; - int i; - - if (new_smi->addr_source) { - printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" - " machine at %s address 0x%lx, slave address 0x%x," - " irq %d\n", - new_smi->addr_source, - si_to_str[new_smi->si_type], - addr_space_to_str[new_smi->io.addr_type], - new_smi->io.addr_data, - new_smi->slave_addr, new_smi->irq); - } + int rv = 0; + printk(KERN_INFO PFX "Adding %s-specified %s state machine", + ipmi_addr_src_to_str[new_smi->addr_source], + si_to_str[new_smi->si_type]); mutex_lock(&smi_infos_lock); if (!is_new_interface(new_smi)) { - printk(KERN_WARNING "ipmi_si: duplicate interface\n"); + printk(KERN_CONT PFX "duplicate interface\n"); rv = -EBUSY; goto out_err; } + printk(KERN_CONT "\n"); + /* So we know not to free it unless we have allocated one. */ new_smi->intf = NULL; new_smi->si_sm = NULL; new_smi->handlers = NULL; + list_add_tail(&new_smi->link, &smi_infos); + +out_err: + mutex_unlock(&smi_infos_lock); + return rv; +} + +static int try_smi_init(struct smi_info *new_smi) +{ + int rv = 0; + int i; + + printk(KERN_INFO PFX "Trying %s-specified %s state" + " machine at %s address 0x%lx, slave address 0x%x," + " irq %d\n", + ipmi_addr_src_to_str[new_smi->addr_source], + si_to_str[new_smi->si_type], + addr_space_to_str[new_smi->io.addr_type], + new_smi->io.addr_data, + new_smi->slave_addr, new_smi->irq); + switch (new_smi->si_type) { case SI_KCS: new_smi->handlers = &kcs_smi_handlers; @@ -3036,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi) /* Allocate the state machine's data and initialize it. */ new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); if (!new_smi->si_sm) { - printk(KERN_ERR "Could not allocate state machine memory\n"); + printk(KERN_ERR PFX + "Could not allocate state machine memory\n"); rv = -ENOMEM; goto out_err; } @@ -3046,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi) /* Now that we know the I/O size, we can set up the I/O. */ rv = new_smi->io_setup(new_smi); if (rv) { - printk(KERN_ERR "Could not set up I/O space\n"); + printk(KERN_ERR PFX "Could not set up I/O space\n"); goto out_err; } @@ -3056,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi) /* Do low-level detection first. 
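add_smi() above prints its banner in two pieces so the line can be finished differently depending on the duplicate check; KERN_CONT is what glues the follow-up printk onto the same output line. The idiom in isolation (a sketch; as an aside, a prefix such as PFX inside a KERN_CONT continuation, as in the hunk above, is printed mid-line rather than at the start):

#include <linux/kernel.h>

static int foo_announce(int duplicate)
{
	printk(KERN_INFO "foo: adding interface");
	if (duplicate) {
		printk(KERN_CONT " (duplicate, ignored)\n");
		return -EBUSY;
	}
	printk(KERN_CONT "\n");
	return 0;
}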
*/ if (new_smi->handlers->detect(new_smi->si_sm)) { if (new_smi->addr_source) - printk(KERN_INFO "ipmi_si: Interface detection" - " failed\n"); + printk(KERN_INFO PFX "Interface detection failed\n"); rv = -ENODEV; goto out_err; } @@ -3069,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi) rv = try_get_dev_id(new_smi); if (rv) { if (new_smi->addr_source) - printk(KERN_INFO "ipmi_si: There appears to be no BMC" + printk(KERN_INFO PFX "There appears to be no BMC" " at this location\n"); goto out_err; } @@ -3085,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi) for (i = 0; i < SI_NUM_STATS; i++) atomic_set(&new_smi->stats[i], 0); - new_smi->interrupt_disabled = 0; + new_smi->interrupt_disabled = 1; atomic_set(&new_smi->stop_operation, 0); new_smi->intf_num = smi_num; smi_num++; @@ -3111,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi) new_smi->pdev = platform_device_alloc("ipmi_si", new_smi->intf_num); if (!new_smi->pdev) { - printk(KERN_ERR - "ipmi_si_intf:" - " Unable to allocate platform device\n"); + printk(KERN_ERR PFX + "Unable to allocate platform device\n"); goto out_err; } new_smi->dev = &new_smi->pdev->dev; @@ -3121,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi) rv = platform_device_add(new_smi->pdev); if (rv) { - printk(KERN_ERR - "ipmi_si_intf:" - " Unable to register system interface device:" + printk(KERN_ERR PFX + "Unable to register system interface device:" " %d\n", rv); goto out_err; @@ -3138,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi) "bmc", new_smi->slave_addr); if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to register device: error %d\n", - rv); + dev_err(new_smi->dev, "Unable to register device: error %d\n", + rv); goto out_err_stop_timer; } @@ -3148,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi) type_file_read_proc, new_smi); if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to create proc entry: %d\n", - rv); + dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); goto out_err_stop_timer; } @@ -3158,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi) stat_file_read_proc, new_smi); if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to create proc entry: %d\n", - rv); + dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); goto out_err_stop_timer; } @@ -3168,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi) param_read_proc, new_smi); if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to create proc entry: %d\n", - rv); + dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); goto out_err_stop_timer; } - list_add_tail(&new_smi->link, &smi_infos); - - mutex_unlock(&smi_infos_lock); - - printk(KERN_INFO "IPMI %s interface initialized\n", - si_to_str[new_smi->si_type]); + dev_info(new_smi->dev, "IPMI %s interface initialized\n", + si_to_str[new_smi->si_type]); return 0; @@ -3188,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi) wait_for_timer_and_thread(new_smi); out_err: - if (new_smi->intf) + new_smi->interrupt_disabled = 1; + + if (new_smi->intf) { ipmi_unregister_smi(new_smi->intf); + new_smi->intf = NULL; + } - if (new_smi->irq_cleanup) + if (new_smi->irq_cleanup) { new_smi->irq_cleanup(new_smi); + new_smi->irq_cleanup = NULL; + } /* * Wait until we know that we are out of any interrupt @@ -3205,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi) if (new_smi->handlers) new_smi->handlers->cleanup(new_smi->si_sm); kfree(new_smi->si_sm); + new_smi->si_sm = NULL; } - if (new_smi->addr_source_cleanup) + if 
(new_smi->addr_source_cleanup) { new_smi->addr_source_cleanup(new_smi); - if (new_smi->io_cleanup) + new_smi->addr_source_cleanup = NULL; + } + if (new_smi->io_cleanup) { new_smi->io_cleanup(new_smi); + new_smi->io_cleanup = NULL; + } - if (new_smi->dev_registered) + if (new_smi->dev_registered) { platform_device_unregister(new_smi->pdev); - - kfree(new_smi); - - mutex_unlock(&smi_infos_lock); + new_smi->dev_registered = 0; + } return rv; } @@ -3226,6 +3268,8 @@ static __devinit int init_ipmi_si(void) int i; char *str; int rv; + struct smi_info *e; + enum ipmi_addr_src type = SI_INVALID; if (initialized) return 0; @@ -3234,9 +3278,7 @@ static __devinit int init_ipmi_si(void) /* Register the device drivers. */ rv = driver_register(&ipmi_driver.driver); if (rv) { - printk(KERN_ERR - "init_ipmi_si: Unable to register driver: %d\n", - rv); + printk(KERN_ERR PFX "Unable to register driver: %d\n", rv); return rv; } @@ -3260,38 +3302,81 @@ static __devinit int init_ipmi_si(void) hardcode_find_bmc(); -#ifdef CONFIG_DMI - dmi_find_bmc(); -#endif + /* If the user gave us a device, they presumably want us to use it */ + mutex_lock(&smi_infos_lock); + if (!list_empty(&smi_infos)) { + mutex_unlock(&smi_infos_lock); + return 0; + } + mutex_unlock(&smi_infos_lock); -#ifdef CONFIG_ACPI - spmi_find_bmc(); +#ifdef CONFIG_PCI + rv = pci_register_driver(&ipmi_pci_driver); + if (rv) + printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); #endif + #ifdef CONFIG_ACPI pnp_register_driver(&ipmi_pnp_driver); #endif -#ifdef CONFIG_PCI - rv = pci_register_driver(&ipmi_pci_driver); - if (rv) - printk(KERN_ERR - "init_ipmi_si: Unable to register PCI driver: %d\n", - rv); +#ifdef CONFIG_DMI + dmi_find_bmc(); +#endif + +#ifdef CONFIG_ACPI + spmi_find_bmc(); #endif #ifdef CONFIG_PPC_OF of_register_platform_driver(&ipmi_of_platform_driver); #endif + /* We prefer devices with interrupts, but in the case of a machine + with multiple BMCs we assume that there will be several instances + of a given type so if we succeed in registering a type then also + try to register everything else of the same type */ + + mutex_lock(&smi_infos_lock); + list_for_each_entry(e, &smi_infos, link) { + /* Try to register a device if it has an IRQ and we either + haven't successfully registered a device yet or this + device has the same type as one we successfully registered */ + if (e->irq && (!type || e->addr_source == type)) { + if (!try_smi_init(e)) { + type = e->addr_source; + } + } + } + + /* type will only have been set if we successfully registered an si */ + if (type) { + mutex_unlock(&smi_infos_lock); + return 0; + } + + /* Fall back to the preferred device */ + + list_for_each_entry(e, &smi_infos, link) { + if (!e->irq && (!type || e->addr_source == type)) { + if (!try_smi_init(e)) { + type = e->addr_source; + } + } + } + mutex_unlock(&smi_infos_lock); + + if (type) + return 0; + if (si_trydefaults) { mutex_lock(&smi_infos_lock); if (list_empty(&smi_infos)) { /* No BMC was found, try defaults. 
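The reworked error path above clears each handle as it releases it (intf, irq_cleanup, si_sm, addr_source_cleanup, io_cleanup, dev_registered) instead of freeing the smi_info outright. That is what makes it safe to leave a failed interface on the smi_infos list for cleanup_one_si() to revisit later: every teardown step has become idempotent. The release-and-clear idiom in miniature (hypothetical structure):

#include <linux/slab.h>

struct foo_dev {
	void *buf;
	void (*undo)(struct foo_dev *dev);
};

static void foo_teardown(struct foo_dev *dev)
{
	kfree(dev->buf);
	dev->buf = NULL;		/* kfree(NULL) is a no-op on re-entry */

	if (dev->undo) {
		dev->undo(dev);
		dev->undo = NULL;	/* a second pass skips this step */
	}
}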
*/ mutex_unlock(&smi_infos_lock); default_find_bmc(); - } else { + } else mutex_unlock(&smi_infos_lock); - } } mutex_lock(&smi_infos_lock); @@ -3305,8 +3390,8 @@ static __devinit int init_ipmi_si(void) of_unregister_platform_driver(&ipmi_of_platform_driver); #endif driver_unregister(&ipmi_driver.driver); - printk(KERN_WARNING - "ipmi_si: Unable to find any System Interface(s)\n"); + printk(KERN_WARNING PFX + "Unable to find any System Interface(s)\n"); return -ENODEV; } else { mutex_unlock(&smi_infos_lock); @@ -3317,7 +3402,7 @@ module_init(init_ipmi_si); static void cleanup_one_si(struct smi_info *to_clean) { - int rv; + int rv = 0; unsigned long flags; if (!to_clean) @@ -3361,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean) schedule_timeout_uninterruptible(1); } - rv = ipmi_unregister_smi(to_clean->intf); + if (to_clean->intf) + rv = ipmi_unregister_smi(to_clean->intf); + if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to unregister device: errno=%d\n", + printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n", rv); } - to_clean->handlers->cleanup(to_clean->si_sm); + if (to_clean->handlers) + to_clean->handlers->cleanup(to_clean->si_sm); kfree(to_clean->si_sm); diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index a4d57e31f713..82bcdb262a3a 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -659,7 +659,7 @@ static struct watchdog_info ident = { .identity = "IPMI" }; -static int ipmi_ioctl(struct inode *inode, struct file *file, +static int ipmi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -730,6 +730,19 @@ static int ipmi_ioctl(struct inode *inode, struct file *file, } } +static long ipmi_unlocked_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = ipmi_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + static ssize_t ipmi_write(struct file *file, const char __user *buf, size_t len, @@ -880,7 +893,7 @@ static const struct file_operations ipmi_wdog_fops = { .read = ipmi_read, .poll = ipmi_poll, .write = ipmi_write, - .ioctl = ipmi_ioctl, + .unlocked_ioctl = ipmi_unlocked_ioctl, .open = ipmi_open, .release = ipmi_close, .fasync = ipmi_fasync, diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 92ab03d28294..cd650ca8c679 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -144,6 +144,7 @@ static int misc_open(struct inode * inode, struct file * file) old_fops = file->f_op; file->f_op = new_fops; if (file->f_op->open) { + file->private_data = c; err=file->f_op->open(inode,file); if (err) { fops_put(file->f_op); diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 47e8f7b0e4c1..66d2917b003f 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -296,8 +296,8 @@ checksum_err: return -EIO; } -static int nvram_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long nvram_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { int i; @@ -308,6 +308,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file, if (!capable(CAP_SYS_ADMIN)) return -EACCES; + lock_kernel(); spin_lock_irq(&rtc_lock); for (i = 0; i < NVRAM_BYTES; ++i) @@ -315,6 +316,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file, __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); + unlock_kernel(); return 0; case NVRAM_SETCKS: @@ -323,9 +325,11 @@ static int nvram_ioctl(struct inode *inode, struct 
file *file, if (!capable(CAP_SYS_ADMIN)) return -EACCES; + lock_kernel(); spin_lock_irq(&rtc_lock); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); + unlock_kernel(); return 0; default: @@ -422,7 +426,7 @@ static const struct file_operations nvram_fops = { .llseek = nvram_llseek, .read = nvram_read, .write = nvram_write, - .ioctl = nvram_ioctl, + .unlocked_ioctl = nvram_ioctl, .open = nvram_open, .release = nvram_release, }; diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c index f80810901db6..043a1c7b86be 100644 --- a/drivers/char/nwflash.c +++ b/drivers/char/nwflash.c @@ -94,8 +94,9 @@ static int get_flash_id(void) return c2; } -static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cmd, unsigned long arg) +static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { + lock_kernel(); switch (cmd) { case CMD_WRITE_DISABLE: gbWriteBase64Enable = 0; @@ -113,8 +114,10 @@ static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cm default: gbWriteBase64Enable = 0; gbWriteEnable = 0; + unlock_kernel(); return -EINVAL; } + unlock_kernel(); return 0; } @@ -631,7 +634,7 @@ static const struct file_operations flash_fops = .llseek = flash_llseek, .read = flash_read, .write = flash_write, - .ioctl = flash_ioctl, + .unlocked_ioctl = flash_ioctl, }; static struct miscdevice flash_miscdev = diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index fdd37543aa79..02abfddce45a 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp) char *name; int fl; - name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL); + name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) return -ENOMEM; - sprintf (name, CHRDEV "%x", minor); - port = parport_find_number (minor); if (!port) { printk (KERN_WARNING "%s: no associated port!\n", name); diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c index 606048b72bcf..85c004a518ee 100644 --- a/drivers/char/ps3flash.c +++ b/drivers/char/ps3flash.c @@ -305,8 +305,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id) return ps3flash_writeback(ps3flash_dev); } -static int ps3flash_fsync(struct file *file, struct dentry *dentry, - int datasync) +static int ps3flash_fsync(struct file *file, int datasync) { return ps3flash_writeback(ps3flash_dev); } diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c new file mode 100644 index 000000000000..74f00b5ffa36 --- /dev/null +++ b/drivers/char/ramoops.c @@ -0,0 +1,162 @@ +/* + * RAM Oops/Panic logger + * + * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
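A few hunks back, ps3flash_fsync drops its dentry argument; this tracks the tree-wide change of the fsync prototype from (file, dentry, datasync) to (file, datasync). Implementations that still need the inode can reach it without the dentry. A sketch with hypothetical foo_* names (foo_writeback is an assumed helper):

#include <linux/fs.h>

static int foo_writeback(struct inode *inode);

static int foo_fsync(struct file *file, int datasync)
{
	/* the inode is still reachable without the dentry */
	struct inode *inode = file->f_mapping->host;

	return foo_writeback(inode);
}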
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/kmsg_dump.h> +#include <linux/time.h> +#include <linux/io.h> +#include <linux/ioport.h> + +#define RAMOOPS_KERNMSG_HDR "====" +#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval)) + +#define RECORD_SIZE 4096 + +static ulong mem_address; +module_param(mem_address, ulong, 0400); +MODULE_PARM_DESC(mem_address, + "start of reserved RAM used to store oops/panic logs"); + +static ulong mem_size; +module_param(mem_size, ulong, 0400); +MODULE_PARM_DESC(mem_size, + "size of reserved RAM used to store oops/panic logs"); + +static int dump_oops = 1; +module_param(dump_oops, int, 0600); +MODULE_PARM_DESC(dump_oops, + "set to 1 to dump oopses, 0 to only dump panics (default 1)"); + +static struct ramoops_context { + struct kmsg_dumper dump; + void *virt_addr; + phys_addr_t phys_addr; + unsigned long size; + int count; + int max_count; +} oops_cxt; + +static void ramoops_do_dump(struct kmsg_dumper *dumper, + enum kmsg_dump_reason reason, const char *s1, unsigned long l1, + const char *s2, unsigned long l2) +{ + struct ramoops_context *cxt = container_of(dumper, + struct ramoops_context, dump); + unsigned long s1_start, s2_start; + unsigned long l1_cpy, l2_cpy; + int res; + char *buf; + struct timeval timestamp; + + /* Only dump oopses if dump_oops is set */ + if (reason == KMSG_DUMP_OOPS && !dump_oops) + return; + + buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE)); + memset(buf, '\0', RECORD_SIZE); + res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR); + buf += res; + do_gettimeofday(×tamp); + res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec); + buf += res; + + l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE)); + l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy); + + s2_start = l2 - l2_cpy; + s1_start = l1 - l1_cpy; + + memcpy(buf, s1 + s1_start, l1_cpy); + memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy); + + cxt->count = (cxt->count + 1) % cxt->max_count; +} + +static int __init ramoops_init(void) +{ + struct ramoops_context *cxt = &oops_cxt; + int err = -EINVAL; + + if (!mem_size) { + printk(KERN_ERR "ramoops: invalid size specification"); + goto fail3; + } + + rounddown_pow_of_two(mem_size); + + if (mem_size < RECORD_SIZE) { + printk(KERN_ERR "ramoops: size too small"); + goto fail3; + } + + cxt->max_count = mem_size / RECORD_SIZE; + cxt->count = 0; + cxt->size = mem_size; + cxt->phys_addr = mem_address; + + if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) { + printk(KERN_ERR "ramoops: request mem region failed"); + err = -EINVAL; + goto fail3; + } + + cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size); + if (!cxt->virt_addr) { + printk(KERN_ERR "ramoops: ioremap failed"); + goto fail2; + } + + cxt->dump.dump = ramoops_do_dump; + err = kmsg_dump_register(&cxt->dump); + if (err) { + printk(KERN_ERR "ramoops: registering kmsg dumper failed"); + goto fail1; + } + + return 0; + +fail1: + iounmap(cxt->virt_addr); +fail2: + release_mem_region(cxt->phys_addr, cxt->size); +fail3: + return err; +} + +static void __exit ramoops_exit(void) +{ + struct ramoops_context *cxt = &oops_cxt; + + if (kmsg_dump_unregister(&cxt->dump) < 0) + printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper"); + + 
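One caveat in ramoops_init() above: rounddown_pow_of_two() is a pure function, so calling it without using the result leaves mem_size exactly as the user passed it, and the rounding as written has no effect. The sketch below shows the presumably intended form plus a worked example of the resulting record ring; the assignment is an assumption about intent, not what the patch actually does.

/* presumed intent: actually apply the rounding */
mem_size = rounddown_pow_of_two(mem_size);

/* e.g. mem_size = 40960 rounds down to 32768, so with RECORD_SIZE 4096:
 *   max_count = 32768 / 4096 = 8 slots,
 * and each dump advances the cursor with
 *   cxt->count = (cxt->count + 1) % cxt->max_count;
 * overwriting the oldest record once the ring is full.
 */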
iounmap(cxt->virt_addr); + release_mem_region(cxt->phys_addr, cxt->size); +} + + +module_init(ramoops_init); +module_exit(ramoops_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>"); +MODULE_DESCRIPTION("RAM Oops/Panic logger/driver"); diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 8756ab0daa8b..b38942f6bf31 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -121,13 +121,17 @@ static int raw_release(struct inode *inode, struct file *filp) /* * Forward ioctls to the underlying block device. */ -static int -raw_ioctl(struct inode *inode, struct file *filp, - unsigned int command, unsigned long arg) +static long +raw_ioctl(struct file *filp, unsigned int command, unsigned long arg) { struct block_device *bdev = filp->private_data; + int ret; + + lock_kernel(); + ret = blkdev_ioctl(bdev, 0, command, arg); + unlock_kernel(); - return blkdev_ioctl(bdev, 0, command, arg); + return ret; } static void bind_device(struct raw_config_request *rq) @@ -141,13 +145,14 @@ static void bind_device(struct raw_config_request *rq) * Deal with ioctls against the raw-device control interface, to bind * and unbind other raw devices. */ -static int raw_ctl_ioctl(struct inode *inode, struct file *filp, - unsigned int command, unsigned long arg) +static long raw_ctl_ioctl(struct file *filp, unsigned int command, + unsigned long arg) { struct raw_config_request rq; struct raw_device_data *rawdev; int err = 0; + lock_kernel(); switch (command) { case RAW_SETBIND: case RAW_GETBIND: @@ -240,25 +245,26 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp, break; } out: + unlock_kernel(); return err; } static const struct file_operations raw_fops = { - .read = do_sync_read, - .aio_read = generic_file_aio_read, - .write = do_sync_write, - .aio_write = blkdev_aio_write, - .fsync = blkdev_fsync, - .open = raw_open, - .release= raw_release, - .ioctl = raw_ioctl, - .owner = THIS_MODULE, + .read = do_sync_read, + .aio_read = generic_file_aio_read, + .write = do_sync_write, + .aio_write = blkdev_aio_write, + .fsync = blkdev_fsync, + .open = raw_open, + .release = raw_release, + .unlocked_ioctl = raw_ioctl, + .owner = THIS_MODULE, }; static const struct file_operations raw_ctl_fops = { - .ioctl = raw_ctl_ioctl, - .open = raw_open, - .owner = THIS_MODULE, + .unlocked_ioctl = raw_ctl_ioctl, + .open = raw_open, + .owner = THIS_MODULE, }; static struct cdev raw_cdev; diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c index 1144a04cda6e..42f7fa442ff8 100644 --- a/drivers/char/viotape.c +++ b/drivers/char/viotape.c @@ -866,7 +866,7 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id) { int i = vdev->unit_address; int j; - struct device_node *node = vdev->dev.archdata.of_node; + struct device_node *node = vdev->dev.of_node; if (i >= VIOTAPE_MAX_TAPE) return -ENODEV; diff --git a/drivers/char/vt.c b/drivers/char/vt.c index bd1d1164fec5..7cdb6ee569cd 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) font.charcount = op->charcount; font.height = op->height; font.width = op->width; - font.data = kmalloc(size, GFP_KERNEL); - if (!font.data) - return -ENOMEM; - if (copy_from_user(font.data, op->data, size)) { - kfree(font.data); - return -EFAULT; - } + font.data = memdup_user(op->data, size); + if (IS_ERR(font.data)) + return PTR_ERR(font.data); acquire_console_sem(); if (vc->vc_sw->con_font_set) rc = vc->vc_sw->con_font_set(vc, 
&font, op->flags); diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 7261b8d9087c..ed8a9cec2a05 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -772,18 +772,18 @@ hwicap_of_probe(struct of_device *op, const struct of_device_id *match) dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match); - rc = of_address_to_resource(op->node, 0, &res); + rc = of_address_to_resource(op->dev.of_node, 0, &res); if (rc) { dev_err(&op->dev, "invalid address\n"); return rc; } - id = of_get_property(op->node, "port-number", NULL); + id = of_get_property(op->dev.of_node, "port-number", NULL); /* It's most likely that we're using V4, if the family is not specified */ regs = &v4_config_registers; - family = of_get_property(op->node, "xlnx,family", NULL); + family = of_get_property(op->dev.of_node, "xlnx,family", NULL); if (family) { if (!strcmp(family, "virtex2p")) { @@ -812,13 +812,12 @@ static const struct of_device_id __devinitconst hwicap_of_match[] = { MODULE_DEVICE_TABLE(of, hwicap_of_match); static struct of_platform_driver hwicap_of_driver = { - .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = hwicap_of_match, .probe = hwicap_of_probe, .remove = __devexit_p(hwicap_of_remove), .driver = { .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = hwicap_of_match, }, }; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 12fdd3987a36..199488576a05 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -156,7 +156,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev) if (dev->enabled) return 0; - if (!cpuidle_curr_driver || !cpuidle_curr_governor) + if (!cpuidle_get_driver() || !cpuidle_curr_governor) return -EIO; if (!dev->state_count) return -EINVAL; @@ -207,7 +207,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev) { if (!dev->enabled) return; - if (!cpuidle_curr_driver || !cpuidle_curr_governor) + if (!cpuidle_get_driver() || !cpuidle_curr_governor) return; dev->enabled = 0; @@ -271,10 +271,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) { int ret; struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); + struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); if (!sys_dev) return -EINVAL; - if (!try_module_get(cpuidle_curr_driver->owner)) + if (!try_module_get(cpuidle_driver->owner)) return -EINVAL; init_completion(&dev->kobj_unregister); @@ -284,7 +285,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); if ((ret = cpuidle_add_sysfs(sys_dev))) { - module_put(cpuidle_curr_driver->owner); + module_put(cpuidle_driver->owner); return ret; } @@ -325,6 +326,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device); void cpuidle_unregister_device(struct cpuidle_device *dev) { struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); + struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); if (dev->registered == 0) return; @@ -340,7 +342,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) cpuidle_resume_and_unlock(); - module_put(cpuidle_curr_driver->owner); + module_put(cpuidle_driver->owner); } EXPORT_SYMBOL_GPL(cpuidle_unregister_device); diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 9476ba33ee2c..33e50d556f17 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h @@ -9,7 +9,6 @@ /* For internal use 
only */ extern struct cpuidle_governor *cpuidle_curr_governor; -extern struct cpuidle_driver *cpuidle_curr_driver; extern struct list_head cpuidle_governors; extern struct list_head cpuidle_detected_devices; extern struct mutex cpuidle_lock; diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 2257004fe33d..fd1601e3d125 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -14,7 +14,7 @@ #include "cpuidle.h" -struct cpuidle_driver *cpuidle_curr_driver; +static struct cpuidle_driver *cpuidle_curr_driver; DEFINE_SPINLOCK(cpuidle_driver_lock); /** @@ -40,13 +40,25 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) EXPORT_SYMBOL_GPL(cpuidle_register_driver); /** + * cpuidle_get_driver - return the current driver + */ +struct cpuidle_driver *cpuidle_get_driver(void) +{ + return cpuidle_curr_driver; +} +EXPORT_SYMBOL_GPL(cpuidle_get_driver); + +/** * cpuidle_unregister_driver - unregisters a driver * @drv: the driver */ void cpuidle_unregister_driver(struct cpuidle_driver *drv) { - if (!drv) + if (drv != cpuidle_curr_driver) { + WARN(1, "invalid cpuidle_unregister_driver(%s)\n", + drv->name); return; + } spin_lock(&cpuidle_driver_lock); cpuidle_curr_driver = NULL; diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index b81ad9c731ae..52ff8aa63f84 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -21,9 +21,12 @@ #include <linux/math64.h> #define BUCKETS 12 +#define INTERVALS 8 #define RESOLUTION 1024 -#define DECAY 4 +#define DECAY 8 #define MAX_INTERESTING 50000 +#define STDDEV_THRESH 400 + /* * Concepts and ideas behind the menu governor @@ -64,6 +67,16 @@ * indexed based on the magnitude of the expected duration as well as the * "is IO outstanding" property. + * + * Repeatable-interval-detector + * ---------------------------- + * There are some cases where "next timer" is a completely unusable predictor: + * Those cases where the interval is fixed, for example due to hardware + * interrupt mitigation, but also due to fixed transfer rate devices such as + * mice. + * For this, we use a different predictor: We track the duration of the last 8 + * intervals and if the standard deviation of these 8 intervals is below a + * threshold value, we use the average of these intervals as prediction. + * * Limiting Performance Impact * --------------------------- * C states, especially those with large exit latencies, can have a real @@ -104,6 +117,8 @@ struct menu_device { unsigned int exit_us; unsigned int bucket; u64 correction_factor[BUCKETS]; + u32 intervals[INTERVALS]; + int interval_ptr; }; @@ -175,6 +190,42 @@ static u64 div_round64(u64 dividend, u32 divisor) return div_u64(dividend + (divisor / 2), divisor); } +/* + * Try detecting repeating patterns by keeping track of the last 8 + * intervals, and checking if the standard deviation of that set + * of points is below a threshold. If it is... then use the + * average of these 8 points as the estimated value.
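In detect_repeating_patterns(), which follows, the stddev variable holds the square of the standard deviation (the variance), so the comparison against STDDEV_THRESH (400) effectively demands a standard deviation under 20 us. Checking the integer arithmetic on hypothetical sample data:

/* intervals[] = { 100, 102, 98, 100, 101, 99, 100, 100 }         (us)
 *   avg    = 800 / INTERVALS               = 100
 *   stddev = (0+4+4+0+1+1+0+0) / INTERVALS = 10 / 8 = 1          (us^2)
 * 1 < STDDEV_THRESH, so predicted_us is set to 100, provided avg
 * does not exceed data->expected_us.
 */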
+ */ +static void detect_repeating_patterns(struct menu_device *data) +{ + int i; + uint64_t avg = 0; + uint64_t stddev = 0; /* contains the square of the std deviation */ + + /* first calculate average and standard deviation of the past */ + for (i = 0; i < INTERVALS; i++) + avg += data->intervals[i]; + avg = avg / INTERVALS; + + /* if the avg is beyond the known next tick, it's worthless */ + if (avg > data->expected_us) + return; + + for (i = 0; i < INTERVALS; i++) + stddev += (data->intervals[i] - avg) * + (data->intervals[i] - avg); + + stddev = stddev / INTERVALS; + + /* + * now.. if stddev is small.. then assume we have a + * repeating pattern and predict we keep doing this. + */ + + if (avg && stddev < STDDEV_THRESH) + data->predicted_us = avg; +} + /** * menu_select - selects the next idle state to enter * @dev: the CPU @@ -218,6 +269,8 @@ static int menu_select(struct cpuidle_device *dev) data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], RESOLUTION * DECAY); + detect_repeating_patterns(data); + /* * We want to default to C1 (hlt), not to busy polling * unless the timer is happening really really soon. @@ -310,6 +363,11 @@ static void menu_update(struct cpuidle_device *dev) new_factor = 1; data->correction_factor[data->bucket] = new_factor; + + /* update the repeating-pattern data */ + data->intervals[data->interval_ptr++] = last_idle_us; + if (data->interval_ptr >= INTERVALS) + data->interval_ptr = 0; } /** diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 0ba9c8b8ee74..0310ffaec9df 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -47,10 +47,11 @@ static ssize_t show_current_driver(struct sysdev_class *class, char *buf) { ssize_t ret; + struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); spin_lock(&cpuidle_driver_lock); - if (cpuidle_curr_driver) - ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name); + if (cpuidle_driver) + ret = sprintf(buf, "%s\n", cpuidle_driver->name); else ret = sprintf(buf, "none\n"); spin_unlock(&cpuidle_driver_lock); diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 6c4c8b7ce3aa..9d65b371de64 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -1281,8 +1281,11 @@ static const struct of_device_id crypto4xx_match[] = { }; static struct of_platform_driver crypto4xx_driver = { - .name = "crypto4xx", - .match_table = crypto4xx_match, + .driver = { + .name = "crypto4xx", + .owner = THIS_MODULE, + .of_match_table = crypto4xx_match, + }, .probe = crypto4xx_probe, .remove = crypto4xx_remove, }; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 6a0f59d1fc5c..637c105f53d2 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2398,7 +2398,7 @@ static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match) { struct device *dev = &ofdev->dev; - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct talitos_private *priv; const unsigned int *prop; int i, err; @@ -2573,8 +2573,11 @@ static const struct of_device_id talitos_match[] = { MODULE_DEVICE_TABLE(of, talitos_match); static struct of_platform_driver talitos_driver = { - .name = "talitos", - .match_table = talitos_match, + .driver = { + .name = "talitos", + .owner = THIS_MODULE, + .of_match_table = talitos_match, + }, .probe = talitos_probe, .remove = talitos_remove, }; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 
dab6f17fbbc7..8344375dc015 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -166,6 +166,15 @@ config TIMB_DMA config ARCH_HAS_ASYNC_TX_FIND_CHANNEL bool +config PL330_DMA + tristate "DMA API Driver for PL330" + select DMA_ENGINE + depends on PL330 + help + Select if your platform has one or more PL330 DMACs. + You need to provide platform specific settings via + platform_data for a dma-pl330 device. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 20881426c1ac..0fe5ebbfda5d 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -22,3 +22,4 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_TIMB_DMA) += timb_dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o +obj-$(CONFIG_PL330_DMA) += pl330.o diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 1fdf180cbd67..8088b14ba5f7 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1315,7 +1315,7 @@ static int __devinit fsldma_of_probe(struct of_device *op, INIT_LIST_HEAD(&fdev->common.channels); /* ioremap the registers for use */ - fdev->regs = of_iomap(op->node, 0); + fdev->regs = of_iomap(op->dev.of_node, 0); if (!fdev->regs) { dev_err(&op->dev, "unable to ioremap registers\n"); err = -ENOMEM; @@ -1323,7 +1323,7 @@ static int __devinit fsldma_of_probe(struct of_device *op, } /* map the channel IRQ if it exists, but don't hookup the handler yet */ - fdev->irq = irq_of_parse_and_map(op->node, 0); + fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0); dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); @@ -1345,7 +1345,7 @@ static int __devinit fsldma_of_probe(struct of_device *op, * of_platform_bus_remove(). Instead, we manually instantiate every DMA * channel object. */ - for_each_child_of_node(op->node, child) { + for_each_child_of_node(op->dev.of_node, child) { if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { fsl_dma_chan_probe(fdev, child, FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, @@ -1411,10 +1411,13 @@ static const struct of_device_id fsldma_of_ids[] = { }; static struct of_platform_driver fsldma_of_driver = { - .name = "fsl-elo-dma", - .match_table = fsldma_of_ids, - .probe = fsldma_of_probe, - .remove = fsldma_of_remove, + .driver = { + .name = "fsl-elo-dma", + .owner = THIS_MODULE, + .of_match_table = fsldma_of_ids, + }, + .probe = fsldma_of_probe, + .remove = fsldma_of_remove, }; /*----------------------------------------------------------------------------*/ diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c new file mode 100644 index 000000000000..7c50f6dfd3f4 --- /dev/null +++ b/drivers/dma/pl330.c @@ -0,0 +1,866 @@ +/* linux/drivers/dma/pl330.c + * + * Copyright (C) 2010 Samsung Electronics Co. Ltd. + * Jaswinder Singh <jassi.brar@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/io.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> +#include <linux/amba/bus.h> +#include <linux/amba/pl330.h> + +#define NR_DEFAULT_DESC 16 + +enum desc_status { + /* In the DMAC pool */ + FREE, + /* + * Allocated to some channel during prep_xxx + * Also may be sitting on the work_list.
+ */ + PREP, + /* + * Sitting on the work_list and already submitted + * to the PL330 core. Not more than two descriptors + * of a channel can be BUSY at any time. + */ + BUSY, + /* + * Sitting on the channel work_list but xfer done + * by PL330 core + */ + DONE, +}; + +struct dma_pl330_chan { + /* Schedule desc completion */ + struct tasklet_struct task; + + /* DMA-Engine Channel */ + struct dma_chan chan; + + /* Last completed cookie */ + dma_cookie_t completed; + + /* List of to be xfered descriptors */ + struct list_head work_list; + + /* Pointer to the DMAC that manages this channel, + * NULL if the channel is available to be acquired. + * As the parent, this DMAC also provides descriptors + * to the channel. + */ + struct dma_pl330_dmac *dmac; + + /* To protect channel manipulation */ + spinlock_t lock; + + /* Token of a hardware channel thread of PL330 DMAC + * NULL if the channel is available to be acquired. + */ + void *pl330_chid; +}; + +struct dma_pl330_dmac { + struct pl330_info pif; + + /* DMA-Engine Device */ + struct dma_device ddma; + + /* Pool of descriptors available for the DMAC's channels */ + struct list_head desc_pool; + /* To protect desc_pool manipulation */ + spinlock_t pool_lock; + + /* Peripheral channels connected to this DMAC */ + struct dma_pl330_chan peripherals[0]; /* keep at end */ +}; + +struct dma_pl330_desc { + /* To attach to a queue as child */ + struct list_head node; + + /* Descriptor for the DMA Engine API */ + struct dma_async_tx_descriptor txd; + + /* Xfer for PL330 core */ + struct pl330_xfer px; + + struct pl330_reqcfg rqcfg; + struct pl330_req req; + + enum desc_status status; + + /* The channel which currently holds this desc */ + struct dma_pl330_chan *pchan; +}; + +static inline struct dma_pl330_chan * +to_pchan(struct dma_chan *ch) +{ + if (!ch) + return NULL; + + return container_of(ch, struct dma_pl330_chan, chan); +} + +static inline struct dma_pl330_desc * +to_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct dma_pl330_desc, txd); +} + +static inline void free_desc_list(struct list_head *list) +{ + struct dma_pl330_dmac *pdmac; + struct dma_pl330_desc *desc; + struct dma_pl330_chan *pch; + unsigned long flags; + + if (list_empty(list)) + return; + + /* Finish off the work list */ + list_for_each_entry(desc, list, node) { + dma_async_tx_callback callback; + void *param; + + /* All desc in a list belong to same channel */ + pch = desc->pchan; + callback = desc->txd.callback; + param = desc->txd.callback_param; + + if (callback) + callback(param); + + desc->pchan = NULL; + } + + pdmac = pch->dmac; + + spin_lock_irqsave(&pdmac->pool_lock, flags); + list_splice_tail_init(list, &pdmac->desc_pool); + spin_unlock_irqrestore(&pdmac->pool_lock, flags); +} + +static inline void fill_queue(struct dma_pl330_chan *pch) +{ + struct dma_pl330_desc *desc; + int ret; + + list_for_each_entry(desc, &pch->work_list, node) { + + /* If already submitted */ + if (desc->status == BUSY) + break; + + ret = pl330_submit_req(pch->pl330_chid, + &desc->req); + if (!ret) { + desc->status = BUSY; + break; + } else if (ret == -EAGAIN) { + /* QFull or DMAC Dying */ + break; + } else { + /* Unacceptable request */ + desc->status = DONE; + dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n", + __func__, __LINE__, desc->txd.cookie); + tasklet_schedule(&pch->task); + } + } +} + +static void pl330_tasklet(unsigned long data) +{ + struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; + struct dma_pl330_desc *desc, *_dt; + unsigned long flags; + 
LIST_HEAD(list); + + spin_lock_irqsave(&pch->lock, flags); + + /* Pick up ripe tomatoes */ + list_for_each_entry_safe(desc, _dt, &pch->work_list, node) + if (desc->status == DONE) { + pch->completed = desc->txd.cookie; + list_move_tail(&desc->node, &list); + } + + /* Try to submit a req imm. next to the last completed cookie */ + fill_queue(pch); + + /* Make sure the PL330 Channel thread is active */ + pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START); + + spin_unlock_irqrestore(&pch->lock, flags); + + free_desc_list(&list); +} + +static void dma_pl330_rqcb(void *token, enum pl330_op_err err) +{ + struct dma_pl330_desc *desc = token; + struct dma_pl330_chan *pch = desc->pchan; + unsigned long flags; + + /* If desc aborted */ + if (!pch) + return; + + spin_lock_irqsave(&pch->lock, flags); + + desc->status = DONE; + + spin_unlock_irqrestore(&pch->lock, flags); + + tasklet_schedule(&pch->task); +} + +static int pl330_alloc_chan_resources(struct dma_chan *chan) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + struct dma_pl330_dmac *pdmac = pch->dmac; + unsigned long flags; + + spin_lock_irqsave(&pch->lock, flags); + + pch->completed = chan->cookie = 1; + + pch->pl330_chid = pl330_request_channel(&pdmac->pif); + if (!pch->pl330_chid) { + spin_unlock_irqrestore(&pch->lock, flags); + return 0; + } + + tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); + + spin_unlock_irqrestore(&pch->lock, flags); + + return 1; +} + +static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + struct dma_pl330_desc *desc; + unsigned long flags; + + /* Only supports DMA_TERMINATE_ALL */ + if (cmd != DMA_TERMINATE_ALL) + return -ENXIO; + + spin_lock_irqsave(&pch->lock, flags); + + /* FLUSH the PL330 Channel thread */ + pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); + + /* Mark all desc done */ + list_for_each_entry(desc, &pch->work_list, node) + desc->status = DONE; + + spin_unlock_irqrestore(&pch->lock, flags); + + pl330_tasklet((unsigned long) pch); + + return 0; +} + +static void pl330_free_chan_resources(struct dma_chan *chan) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + unsigned long flags; + + spin_lock_irqsave(&pch->lock, flags); + + tasklet_kill(&pch->task); + + pl330_release_channel(pch->pl330_chid); + pch->pl330_chid = NULL; + + spin_unlock_irqrestore(&pch->lock, flags); +} + +static enum dma_status +pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + dma_cookie_t last_done, last_used; + int ret; + + last_done = pch->completed; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_done, last_used); + + dma_set_tx_state(txstate, last_done, last_used, 0); + + return ret; +} + +static void pl330_issue_pending(struct dma_chan *chan) +{ + pl330_tasklet((unsigned long) to_pchan(chan)); +} + +/* + * We returned the last one of the circular list of descriptor(s) + * from prep_xxx, so the argument to submit corresponds to the last + * descriptor of the list. 
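+ *
+ * (Illustration, an editor's addition: if a prep call built the
+ * circular chain D1->D2->D3 and returned &D3->txd, submitting it
+ * moves D1 and D2 to the work_list first and D3 last, with
+ * consecutive cookies.)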
+ */ +static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct dma_pl330_desc *desc, *last = to_desc(tx); + struct dma_pl330_chan *pch = to_pchan(tx->chan); + dma_cookie_t cookie; + unsigned long flags; + + spin_lock_irqsave(&pch->lock, flags); + + /* Assign cookies to all nodes */ + cookie = tx->chan->cookie; + + while (!list_empty(&last->node)) { + desc = list_entry(last->node.next, struct dma_pl330_desc, node); + + if (++cookie < 0) + cookie = 1; + desc->txd.cookie = cookie; + + list_move_tail(&desc->node, &pch->work_list); + } + + if (++cookie < 0) + cookie = 1; + last->txd.cookie = cookie; + + list_add_tail(&last->node, &pch->work_list); + + tx->chan->cookie = cookie; + + spin_unlock_irqrestore(&pch->lock, flags); + + return cookie; +} + +static inline void _init_desc(struct dma_pl330_desc *desc) +{ + desc->pchan = NULL; + desc->req.x = &desc->px; + desc->req.token = desc; + desc->rqcfg.swap = SWAP_NO; + desc->rqcfg.privileged = 0; + desc->rqcfg.insnaccess = 0; + desc->rqcfg.scctl = SCCTRL0; + desc->rqcfg.dcctl = DCCTRL0; + desc->req.cfg = &desc->rqcfg; + desc->req.xfer_cb = dma_pl330_rqcb; + desc->txd.tx_submit = pl330_tx_submit; + + INIT_LIST_HEAD(&desc->node); +} + +/* Returns the number of descriptors added to the DMAC pool */ +int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) +{ + struct dma_pl330_desc *desc; + unsigned long flags; + int i; + + if (!pdmac) + return 0; + + desc = kmalloc(count * sizeof(*desc), flg); + if (!desc) + return 0; + + spin_lock_irqsave(&pdmac->pool_lock, flags); + + for (i = 0; i < count; i++) { + _init_desc(&desc[i]); + list_add_tail(&desc[i].node, &pdmac->desc_pool); + } + + spin_unlock_irqrestore(&pdmac->pool_lock, flags); + + return count; +} + +static struct dma_pl330_desc * +pluck_desc(struct dma_pl330_dmac *pdmac) +{ + struct dma_pl330_desc *desc = NULL; + unsigned long flags; + + if (!pdmac) + return NULL; + + spin_lock_irqsave(&pdmac->pool_lock, flags); + + if (!list_empty(&pdmac->desc_pool)) { + desc = list_entry(pdmac->desc_pool.next, + struct dma_pl330_desc, node); + + list_del_init(&desc->node); + + desc->status = PREP; + desc->txd.callback = NULL; + } + + spin_unlock_irqrestore(&pdmac->pool_lock, flags); + + return desc; +} + +static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) +{ + struct dma_pl330_dmac *pdmac = pch->dmac; + struct dma_pl330_peri *peri = pch->chan.private; + struct dma_pl330_desc *desc; + + /* Pluck one desc from the pool of DMAC */ + desc = pluck_desc(pdmac); + + /* If the DMAC pool is empty, alloc new */ + if (!desc) { + if (!add_desc(pdmac, GFP_ATOMIC, 1)) + return NULL; + + /* Try again */ + desc = pluck_desc(pdmac); + if (!desc) { + dev_err(pch->dmac->pif.dev, + "%s:%d ALERT!\n", __func__, __LINE__); + return NULL; + } + } + + /* Initialize the descriptor */ + desc->pchan = pch; + desc->txd.cookie = 0; + async_tx_ack(&desc->txd); + + desc->req.rqtype = peri->rqtype; + desc->req.peri = peri->peri_id; + + dma_async_tx_descriptor_init(&desc->txd, &pch->chan); + + return desc; +} + +static inline void fill_px(struct pl330_xfer *px, + dma_addr_t dst, dma_addr_t src, size_t len) +{ + px->next = NULL; + px->bytes = len; + px->dst_addr = dst; + px->src_addr = src; +} + +static struct dma_pl330_desc * +__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, + dma_addr_t src, size_t len) +{ + struct dma_pl330_desc *desc = pl330_get_desc(pch); + + if (!desc) { + dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", + __func__, __LINE__); + return 
NULL;
+ }
+
+ /*
+ * Ideally we should look out for reqs bigger than
+ * those that can be programmed with 256 bytes of
+ * MC buffer, but considering a req size is seldom
+ * going to be word-unaligned and more than 200MB,
+ * we take it easy.
+ * Also, should the limit be reached we'd rather
+ * have the platform increase MC buffer size than
+ * complicating this API driver.
+ */
+ fill_px(&desc->px, dst, src, len);
+
+ return desc;
+}
+
+/* Call after fixing burst size */
+static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
+{
+ struct dma_pl330_chan *pch = desc->pchan;
+ struct pl330_info *pi = &pch->dmac->pif;
+ int burst_len;
+
+ burst_len = pi->pcfg.data_bus_width / 8;
+ burst_len *= pi->pcfg.data_buf_dep;
+ burst_len >>= desc->rqcfg.brst_size;
+
+ /* src/dst_burst_len can't be more than 16 */
+ if (burst_len > 16)
+ burst_len = 16;
+
+ while (burst_len > 1) {
+ if (!(len % (burst_len << desc->rqcfg.brst_size)))
+ break;
+ burst_len--;
+ }
+
+ return burst_len;
+}
+
+static struct dma_async_tx_descriptor *
+pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_peri *peri = chan->private;
+ struct pl330_info *pi;
+ int burst;
+
+ if (unlikely(!pch || !len || !peri))
+ return NULL;
+
+ if (peri->rqtype != MEMTOMEM)
+ return NULL;
+
+ pi = &pch->dmac->pif;
+
+ desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
+ if (!desc)
+ return NULL;
+
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 1;
+
+ /* Select max possible burst size */
+ burst = pi->pcfg.data_bus_width / 8;
+
+ while (burst > 1) {
+ if (!(len % burst))
+ break;
+ burst /= 2;
+ }
+
+ desc->rqcfg.brst_size = 0;
+ while (burst != (1 << desc->rqcfg.brst_size))
+ desc->rqcfg.brst_size++;
+
+ desc->rqcfg.brst_len = get_burst_len(desc, len);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flg)
+{
+ struct dma_pl330_desc *first, *desc = NULL;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_peri *peri = chan->private;
+ struct scatterlist *sg;
+ unsigned long flags;
+ int i, burst_size;
+ dma_addr_t addr;
+
+ if (unlikely(!pch || !sgl || !sg_len || !peri))
+ return NULL;
+
+ /* Make sure the direction is consistent */
+ if ((direction == DMA_TO_DEVICE &&
+ peri->rqtype != MEMTODEV) ||
+ (direction == DMA_FROM_DEVICE &&
+ peri->rqtype != DEVTOMEM)) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ addr = peri->fifo_addr;
+ burst_size = peri->burst_sz;
+
+ first = NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+
+ desc = pl330_get_desc(pch);
+ if (!desc) {
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+
+ dev_err(pch->dmac->pif.dev,
+ "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+ if (!first)
+ return NULL;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return NULL;
+ }
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->node);
+
+ if (direction == DMA_TO_DEVICE) {
+ desc->rqcfg.src_inc = 1;
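+ /* mem-to-dev: the memory (source) address increments while
+ * the device FIFO address stays fixed; the dev-to-mem branch
+ * below mirrors this. (Editor's gloss.) */
+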
desc->rqcfg.dst_inc = 0; + fill_px(&desc->px, + addr, sg_dma_address(sg), sg_dma_len(sg)); + } else { + desc->rqcfg.src_inc = 0; + desc->rqcfg.dst_inc = 1; + fill_px(&desc->px, + sg_dma_address(sg), addr, sg_dma_len(sg)); + } + + desc->rqcfg.brst_size = burst_size; + desc->rqcfg.brst_len = 1; + } + + /* Return the last desc in the chain */ + desc->txd.flags = flg; + return &desc->txd; +} + +static irqreturn_t pl330_irq_handler(int irq, void *data) +{ + if (pl330_update(data)) + return IRQ_HANDLED; + else + return IRQ_NONE; +} + +static int __devinit +pl330_probe(struct amba_device *adev, struct amba_id *id) +{ + struct dma_pl330_platdata *pdat; + struct dma_pl330_dmac *pdmac; + struct dma_pl330_chan *pch; + struct pl330_info *pi; + struct dma_device *pd; + struct resource *res; + int i, ret, irq; + + pdat = adev->dev.platform_data; + + if (!pdat || !pdat->nr_valid_peri) { + dev_err(&adev->dev, "platform data missing\n"); + return -ENODEV; + } + + /* Allocate a new DMAC and its Channels */ + pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch) + + sizeof(*pdmac), GFP_KERNEL); + if (!pdmac) { + dev_err(&adev->dev, "unable to allocate mem\n"); + return -ENOMEM; + } + + pi = &pdmac->pif; + pi->dev = &adev->dev; + pi->pl330_data = NULL; + pi->mcbufsz = pdat->mcbuf_sz; + + res = &adev->res; + request_mem_region(res->start, resource_size(res), "dma-pl330"); + + pi->base = ioremap(res->start, resource_size(res)); + if (!pi->base) { + ret = -ENXIO; + goto probe_err1; + } + + irq = adev->irq[0]; + ret = request_irq(irq, pl330_irq_handler, 0, + dev_name(&adev->dev), pi); + if (ret) + goto probe_err2; + + ret = pl330_add(pi); + if (ret) + goto probe_err3; + + INIT_LIST_HEAD(&pdmac->desc_pool); + spin_lock_init(&pdmac->pool_lock); + + /* Create a descriptor pool of default size */ + if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC)) + dev_warn(&adev->dev, "unable to allocate desc\n"); + + pd = &pdmac->ddma; + INIT_LIST_HEAD(&pd->channels); + + /* Initialize channel parameters */ + for (i = 0; i < pdat->nr_valid_peri; i++) { + struct dma_pl330_peri *peri = &pdat->peri[i]; + pch = &pdmac->peripherals[i]; + + switch (peri->rqtype) { + case MEMTOMEM: + dma_cap_set(DMA_MEMCPY, pd->cap_mask); + break; + case MEMTODEV: + case DEVTOMEM: + dma_cap_set(DMA_SLAVE, pd->cap_mask); + break; + default: + dev_err(&adev->dev, "DEVTODEV Not Supported\n"); + continue; + } + + INIT_LIST_HEAD(&pch->work_list); + spin_lock_init(&pch->lock); + pch->pl330_chid = NULL; + pch->chan.private = peri; + pch->chan.device = pd; + pch->chan.chan_id = i; + pch->dmac = pdmac; + + /* Add the channel to the DMAC list */ + pd->chancnt++; + list_add_tail(&pch->chan.device_node, &pd->channels); + } + + pd->dev = &adev->dev; + + pd->device_alloc_chan_resources = pl330_alloc_chan_resources; + pd->device_free_chan_resources = pl330_free_chan_resources; + pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; + pd->device_tx_status = pl330_tx_status; + pd->device_prep_slave_sg = pl330_prep_slave_sg; + pd->device_control = pl330_control; + pd->device_issue_pending = pl330_issue_pending; + + ret = dma_async_device_register(pd); + if (ret) { + dev_err(&adev->dev, "unable to register DMAC\n"); + goto probe_err4; + } + + amba_set_drvdata(adev, pdmac); + + dev_info(&adev->dev, + "Loaded driver for PL330 DMAC-%d\n", adev->periphid); + dev_info(&adev->dev, + "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", + pi->pcfg.data_buf_dep, + pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, + pi->pcfg.num_peri, pi->pcfg.num_events); + + return 0; + 
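+/*
+ * The error labels below unwind in reverse order of the setup above:
+ * pl330_del(), free_irq(), iounmap(), then release_mem_region() and
+ * kfree(). (Editor's note, not part of the original patch.)
+ */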
+probe_err4: + pl330_del(pi); +probe_err3: + free_irq(irq, pi); +probe_err2: + iounmap(pi->base); +probe_err1: + release_mem_region(res->start, resource_size(res)); + kfree(pdmac); + + return ret; +} + +static int __devexit pl330_remove(struct amba_device *adev) +{ + struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); + struct dma_pl330_chan *pch, *_p; + struct pl330_info *pi; + struct resource *res; + int irq; + + if (!pdmac) + return 0; + + amba_set_drvdata(adev, NULL); + + /* Idle the DMAC */ + list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, + chan.device_node) { + + /* Remove the channel */ + list_del(&pch->chan.device_node); + + /* Flush the channel */ + pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); + pl330_free_chan_resources(&pch->chan); + } + + pi = &pdmac->pif; + + pl330_del(pi); + + irq = adev->irq[0]; + free_irq(irq, pi); + + iounmap(pi->base); + + res = &adev->res; + release_mem_region(res->start, resource_size(res)); + + kfree(pdmac); + + return 0; +} + +static struct amba_id pl330_ids[] = { + { + .id = 0x00041330, + .mask = 0x000fffff, + }, + { 0, 0 }, +}; + +static struct amba_driver pl330_driver = { + .drv = { + .owner = THIS_MODULE, + .name = "dma-pl330", + }, + .id_table = pl330_ids, + .probe = pl330_probe, + .remove = pl330_remove, +}; + +static int __init pl330_init(void) +{ + return amba_driver_register(&pl330_driver); +} +module_init(pl330_init); + +static void __exit pl330_exit(void) +{ + amba_driver_unregister(&pl330_driver); + return; +} +module_exit(pl330_exit); + +MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); +MODULE_DESCRIPTION("API Driver for PL330 DMAC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index c6079fcca13f..fa98abe4686f 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4944,12 +4944,12 @@ static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = { MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); static struct of_platform_driver ppc440spe_adma_driver = { - .match_table = ppc440spe_adma_of_match, .probe = ppc440spe_adma_probe, .remove = __devexit_p(ppc440spe_adma_remove), .driver = { .name = "PPC440SP(E)-ADMA", .owner = THIS_MODULE, + .of_match_table = ppc440spe_adma_of_match, }, }; diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index a2585c90a139..fb64cf36ba61 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -722,6 +722,10 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) { while (__ld_cleanup(sh_chan, all)) ; + + if (all) + /* Terminating - forgive uncompleted cookies */ + sh_chan->completed_cookie = sh_chan->common.cookie; } static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) @@ -1188,6 +1192,7 @@ static struct platform_driver sh_dmae_driver = { .remove = __exit_p(sh_dmae_remove), .shutdown = sh_dmae_shutdown, .driver = { + .owner = THIS_MODULE, .name = "sh-dma-engine", }, }; diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 0172fa3c7a2b..a1bf77c1993f 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -188,7 +188,7 @@ static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, struct scatterlist *sg, bool last) { - if (sg_dma_len(sg) > USHORT_MAX) { + if (sg_dma_len(sg) > USHRT_MAX) { dev_err(chan2dev(&td_chan->chan), "Too big sg element\n"); return -EINVAL; } diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 
adc10a2ac5f6..996c1bdb5a34 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c @@ -774,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci) static void i5000_check_error(struct mem_ctl_info *mci) { struct i5000_error_info info; - debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); i5000_get_error_info(mci, &info); i5000_process_error_info(mci, &info, 1); } @@ -1353,8 +1353,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) int num_dimms_per_channel; int num_csrows; - debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", - __func__, + debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", + __FILE__, __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); @@ -1389,7 +1389,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) return -ENOMEM; kobject_get(&mci->edac_mci_kobj); - debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); + debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); mci->dev = &pdev->dev; /* record ptr to the generic device */ @@ -1432,8 +1432,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) /* add this new MC control structure to EDAC's list of MCs */ if (edac_mc_add_mc(mci)) { - debugf0("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", __func__); + debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", + __FILE__, __func__); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ @@ -1478,7 +1478,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("MC: %s: %s()\n", __FILE__, __func__); /* wake up device */ rc = pci_enable_device(pdev); @@ -1497,7 +1497,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0(__FILE__ ": %s()\n", __func__); + debugf0("%s: %s()\n", __FILE__, __func__); if (i5000_pci) edac_pci_release_generic_ctl(i5000_pci); @@ -1544,7 +1544,7 @@ static int __init i5000_init(void) { int pci_rc; - debugf2("MC: " __FILE__ ": %s()\n", __func__); + debugf2("MC: %s: %s()\n", __FILE__, __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1560,7 +1560,7 @@ static int __init i5000_init(void) */ static void __exit i5000_exit(void) { - debugf2("MC: " __FILE__ ": %s()\n", __func__); + debugf2("MC: %s: %s()\n", __FILE__, __func__); pci_unregister_driver(&i5000_driver); } diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index f99d10655ed4..010c1d6526f5 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c @@ -694,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci) static void i5400_check_error(struct mem_ctl_info *mci) { struct i5400_error_info info; - debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); i5400_get_error_info(mci, &info); i5400_process_error_info(mci, &info); } @@ -1227,8 +1227,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) if (dev_idx >= ARRAY_SIZE(i5400_devs)) return -EINVAL; - debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", - __func__, + debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", + __FILE__, __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); @@ -1256,7 +1256,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - 
debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev; /* record ptr to the generic device */
@@ -1299,8 +1299,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
+ __FILE__, __func__);
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it */
@@ -1344,7 +1344,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* wake up device */
rc = pci_enable_device(pdev);
@@ -1363,7 +1363,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i5400_pci)
edac_pci_release_generic_ctl(i5400_pci);
@@ -1409,7 +1409,7 @@ static int __init i5400_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1425,7 +1425,7 @@ static int __init i5400_init(void)
*/
static void __exit i5400_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
pci_unregister_driver(&i5400_driver);
}
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c index 2bf2c5051bfe..a2fa1feed724 100644 --- a/drivers/edac/i82443bxgx_edac.c +++ b/drivers/edac/i82443bxgx_edac.c
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
{
struct i82443bxgx_edacmc_error_info info;
- debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i82443bxgx_edacmc_get_error_info(mci, &info);
i82443bxgx_edacmc_process_error_info(mci, &info, 1);
}
@@ -198,13 +198,13 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
- debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n",
- mci->mc_idx, __func__, index, drbar);
+ debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
+ mci->mc_idx, __FILE__, __func__, index, drbar);
row_high_limit = ((u32) drbar << 23);
/* find the DRAM Chip Select Base address and mask */
- debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
- "Boundry Address=%#0x, Last = %#0x \n",
- mci->mc_idx, __func__, index, row_high_limit,
+ debugf1("MC%d: %s: %s() Row=%d, "
+ "Boundary Address=%#0x, Last = %#0x\n",
+ mci->mc_idx, __FILE__, __func__, index, row_high_limit,
row_high_limit_last);
/* 440GX goes to 2GB, represented with a DRB of 0. */
@@ -237,7 +237,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
enum mem_type mtype;
enum edac_type edac_mode;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* Something is really hosed if PCI config space reads from
* the MC aren't working. 
@@ -250,7 +250,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
@@ -336,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
__func__);
}
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
return 0;
fail:
@@ -352,7 +352,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* don't need to call pci_enable_device() */
rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
@@ -367,7 +367,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i82443bxgx_pci)
edac_pci_release_generic_ctl(i82443bxgx_pci);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 4471647b4807..6c1886b497ff 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c
@@ -338,15 +338,13 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_pci_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_pci_err",
- .match_table = mpc85xx_pci_err_of_match,
.probe = mpc85xx_pci_err_probe,
.remove = __devexit_p(mpc85xx_pci_err_remove),
.driver = {
- .name = "mpc85xx_pci_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_pci_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_pci_err_of_match,
+ },
};
#endif /* CONFIG_PCI */
@@ -654,15 +652,13 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_l2_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_l2_err",
- .match_table = mpc85xx_l2_err_of_match,
.probe = mpc85xx_l2_err_probe,
.remove = mpc85xx_l2_err_remove,
.driver = {
- .name = "mpc85xx_l2_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_l2_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_l2_err_of_match,
+ },
};
/**************************** MC Err device ***************************/
@@ -1131,15 +1127,13 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_mc_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_mc_err",
- .match_table = mpc85xx_mc_err_of_match,
.probe = mpc85xx_mc_err_probe,
.remove = mpc85xx_mc_err_remove,
.driver = {
- .name = "mpc85xx_mc_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_mc_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_mc_err_of_match,
+ },
};
#ifdef CONFIG_MPC85xx
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 11f2172aa1e6..9d6f6783328c 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c
@@ -202,13 +202,13 @@ static struct of_device_id ppc4xx_edac_match[] = {
};
static struct of_platform_driver ppc4xx_edac_driver = {
- .match_table = ppc4xx_edac_match,
.probe = ppc4xx_edac_probe,
.remove = ppc4xx_edac_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = PPC4XX_EDAC_MODULE_NAME
- }
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = PPC4XX_EDAC_MODULE_NAME,
+ .of_match_table = ppc4xx_edac_match,
+ },
};
/* 
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 5045156c5313..9dcb30466ec0 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -30,7 +30,6 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/spinlock.h> -#include <linux/timer.h> #include <linux/workqueue.h> #include <asm/atomic.h> @@ -63,7 +62,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1; #define BIB_CRC(v) ((v) << 0) #define BIB_CRC_LENGTH(v) ((v) << 16) #define BIB_INFO_LENGTH(v) ((v) << 24) - +#define BIB_BUS_NAME 0x31333934 /* "1394" */ #define BIB_LINK_SPEED(v) ((v) << 0) #define BIB_GENERATION(v) ((v) << 4) #define BIB_MAX_ROM(v) ((v) << 8) @@ -73,7 +72,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1; #define BIB_BMC ((1) << 28) #define BIB_ISC ((1) << 29) #define BIB_CMC ((1) << 30) -#define BIB_IMC ((1) << 31) +#define BIB_IRMC ((1) << 31) +#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */ static void generate_config_rom(struct fw_card *card, __be32 *config_rom) { @@ -91,18 +91,18 @@ static void generate_config_rom(struct fw_card *card, __be32 *config_rom) config_rom[0] = cpu_to_be32( BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0)); - config_rom[1] = cpu_to_be32(0x31333934); + config_rom[1] = cpu_to_be32(BIB_BUS_NAME); config_rom[2] = cpu_to_be32( BIB_LINK_SPEED(card->link_speed) | BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | BIB_MAX_ROM(2) | BIB_MAX_RECEIVE(card->max_receive) | - BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC); + BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC); config_rom[3] = cpu_to_be32(card->guid >> 32); config_rom[4] = cpu_to_be32(card->guid); /* Generate root directory. */ - config_rom[6] = cpu_to_be32(0x0c0083c0); /* node capabilities */ + config_rom[6] = cpu_to_be32(NODE_CAPABILITIES); i = 7; j = 7 + descriptor_count; @@ -407,13 +407,6 @@ static void fw_card_bm_work(struct work_struct *work) fw_card_put(card); } -static void flush_timer_callback(unsigned long data) -{ - struct fw_card *card = (struct fw_card *)data; - - fw_flush_transactions(card); -} - void fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, struct device *device) @@ -432,8 +425,6 @@ void fw_card_initialize(struct fw_card *card, init_completion(&card->done); INIT_LIST_HEAD(&card->transaction_list); spin_lock_init(&card->lock); - setup_timer(&card->flush_timer, - flush_timer_callback, (unsigned long)card); card->local_node = NULL; @@ -558,7 +549,6 @@ void fw_core_remove_card(struct fw_card *card) wait_for_completion(&card->done); WARN_ON(!list_empty(&card->transaction_list)); - del_timer_sync(&card->flush_timer); } EXPORT_SYMBOL(fw_core_remove_card); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 14a34d99eea2..5bf106b9d791 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -227,7 +227,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file) list_add_tail(&client->link, &device->client_list); mutex_unlock(&device->client_list_mutex); - return 0; + return nonseekable_open(inode, file); } static void queue_event(struct client *client, struct event *event, @@ -1496,13 +1496,13 @@ static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) const struct file_operations fw_device_ops = { .owner = THIS_MODULE, + .llseek = no_llseek, .open = fw_device_op_open, .read = fw_device_op_read, .unlocked_ioctl = fw_device_op_ioctl, - .poll = fw_device_op_poll, - .release = fw_device_op_release, .mmap = fw_device_op_mmap, - + 
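/* these fops are nonseekable: fw_device_op_open() returns
+ * nonseekable_open() and .llseek is no_llseek (editor's note) */
+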
.release = fw_device_op_release, + .poll = fw_device_op_poll, #ifdef CONFIG_COMPAT .compat_ioctl = fw_device_op_compat_ioctl, #endif diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 673b03f8b4ec..fdc33ff06dc1 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -81,7 +81,7 @@ static int close_transaction(struct fw_transaction *transaction, spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t == transaction) { - list_del(&t->link); + list_del_init(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); break; } @@ -89,6 +89,7 @@ static int close_transaction(struct fw_transaction *transaction, spin_unlock_irqrestore(&card->lock, flags); if (&t->link != &card->transaction_list) { + del_timer_sync(&t->split_timeout_timer); t->callback(card, rcode, NULL, 0, t->callback_data); return 0; } @@ -121,6 +122,31 @@ int fw_cancel_transaction(struct fw_card *card, } EXPORT_SYMBOL(fw_cancel_transaction); +static void split_transaction_timeout_callback(unsigned long data) +{ + struct fw_transaction *t = (struct fw_transaction *)data; + struct fw_card *card = t->card; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + if (list_empty(&t->link)) { + spin_unlock_irqrestore(&card->lock, flags); + return; + } + list_del(&t->link); + card->tlabel_mask &= ~(1ULL << t->tlabel); + spin_unlock_irqrestore(&card->lock, flags); + + card->driver->cancel_packet(card, &t->packet); + + /* + * At this point cancel_packet will never call the transaction + * callback, since we just took the transaction out of the list. + * So do it here. + */ + t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); +} + static void transmit_complete_callback(struct fw_packet *packet, struct fw_card *card, int status) { @@ -229,6 +255,23 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, packet->payload_mapped = false; } +static int allocate_tlabel(struct fw_card *card) +{ + int tlabel; + + tlabel = card->current_tlabel; + while (card->tlabel_mask & (1ULL << tlabel)) { + tlabel = (tlabel + 1) & 0x3f; + if (tlabel == card->current_tlabel) + return -EBUSY; + } + + card->current_tlabel = (tlabel + 1) & 0x3f; + card->tlabel_mask |= 1ULL << tlabel; + + return tlabel; +} + /** * This function provides low-level access to the IEEE1394 transaction * logic. Most C programs would use either fw_read(), fw_write() or @@ -277,31 +320,26 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int tlabel; /* - * Bump the flush timer up 100ms first of all so we - * don't race with a flush timer callback. - */ - - mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10)); - - /* * Allocate tlabel from the bitmap and put the transaction on * the list while holding the card spinlock. 
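+ *
+ * (Worked example, an editor's addition: with current_tlabel == 62
+ * and tlabels 62 and 63 busy, allocate_tlabel() wraps around and
+ * returns 0; only when all 64 bits of tlabel_mask are set does it
+ * fail, and the request completes with RCODE_SEND_ERROR.)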
*/ spin_lock_irqsave(&card->lock, flags); - tlabel = card->current_tlabel; - if (card->tlabel_mask & (1ULL << tlabel)) { + tlabel = allocate_tlabel(card); + if (tlabel < 0) { spin_unlock_irqrestore(&card->lock, flags); callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); return; } - card->current_tlabel = (card->current_tlabel + 1) & 0x3f; - card->tlabel_mask |= (1ULL << tlabel); - t->node_id = destination_id; t->tlabel = tlabel; + t->card = card; + setup_timer(&t->split_timeout_timer, + split_transaction_timeout_callback, (unsigned long)t); + /* FIXME: start this timer later, relative to t->timestamp */ + mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10)); t->callback = callback; t->callback_data = callback_data; @@ -347,11 +385,13 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, struct transaction_callback_data d; struct fw_transaction t; + init_timer_on_stack(&t.split_timeout_timer); init_completion(&d.done); d.payload = payload; fw_send_request(card, &t, tcode, destination_id, generation, speed, offset, payload, length, transaction_callback, &d); wait_for_completion(&d.done); + destroy_timer_on_stack(&t.split_timeout_timer); return d.rcode; } @@ -394,30 +434,6 @@ void fw_send_phy_config(struct fw_card *card, mutex_unlock(&phy_config_mutex); } -void fw_flush_transactions(struct fw_card *card) -{ - struct fw_transaction *t, *next; - struct list_head list; - unsigned long flags; - - INIT_LIST_HEAD(&list); - spin_lock_irqsave(&card->lock, flags); - list_splice_init(&card->transaction_list, &list); - card->tlabel_mask = 0; - spin_unlock_irqrestore(&card->lock, flags); - - list_for_each_entry_safe(t, next, &list, link) { - card->driver->cancel_packet(card, &t->packet); - - /* - * At this point cancel_packet will never call the - * transaction callback, since we just took all the - * transactions out of the list. So do it here. - */ - t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); - } -} - static struct fw_address_handler *lookup_overlapping_address_handler( struct list_head *list, unsigned long long offset, size_t length) { @@ -827,8 +843,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t->node_id == source && t->tlabel == tlabel) { - list_del(&t->link); - card->tlabel_mask &= ~(1 << t->tlabel); + list_del_init(&t->link); + card->tlabel_mask &= ~(1ULL << t->tlabel); break; } } @@ -869,6 +885,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) break; } + del_timer_sync(&t->split_timeout_timer); + /* * The response handler may be executed while the request handler * is still pending. Cancel the request handler. 
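+ *
+ * (Editor's note: the del_timer_sync() above also guarantees that
+ * the split-timeout handler has finished before the response is
+ * delivered, so completion and timeout cannot race.)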
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index fb0321300cce..0ecfcd95f4c5 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -27,7 +27,12 @@ struct fw_packet; #define PHY_LINK_ACTIVE 0x80 #define PHY_CONTENDER 0x40 #define PHY_BUS_RESET 0x40 +#define PHY_EXTENDED_REGISTERS 0xe0 #define PHY_BUS_SHORT_RESET 0x40 +#define PHY_INT_STATUS_BITS 0x3c +#define PHY_ENABLE_ACCEL 0x02 +#define PHY_ENABLE_MULTI 0x01 +#define PHY_PAGE_SELECT 0xe0 #define BANDWIDTH_AVAILABLE_INITIAL 4915 #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) @@ -215,7 +220,6 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); void fw_fill_response(struct fw_packet *response, u32 *request_header, int rcode, void *payload, size_t length); -void fw_flush_transactions(struct fw_card *card); void fw_send_phy_config(struct fw_card *card, int node_id, int generation, int gap_count); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index a3b083a7403a..9f627e758cfc 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -236,13 +236,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define QUIRK_CYCLE_TIMER 1 #define QUIRK_RESET_PACKET 2 #define QUIRK_BE_HEADERS 4 +#define QUIRK_NO_1394A 8 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ static const struct { unsigned short vendor, device, flags; } ohci_quirks[] = { {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER | - QUIRK_RESET_PACKET}, + QUIRK_RESET_PACKET | + QUIRK_NO_1394A}, {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, @@ -257,15 +259,16 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) + ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) ")"); -#ifdef CONFIG_FIREWIRE_OHCI_DEBUG - #define OHCI_PARAM_DEBUG_AT_AR 1 #define OHCI_PARAM_DEBUG_SELFIDS 2 #define OHCI_PARAM_DEBUG_IRQS 4 #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ +#ifdef CONFIG_FIREWIRE_OHCI_DEBUG + static int param_debug; module_param_named(debug, param_debug, int, 0644); MODULE_PARM_DESC(debug, "Verbose logging (default = 0" @@ -438,9 +441,10 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt) #else -#define log_irqs(evt) -#define log_selfids(node_id, generation, self_id_count, sid) -#define log_ar_at_event(dir, speed, header, evt) +#define param_debug 0 +static inline void log_irqs(u32 evt) {} +static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {} +static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {} #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */ @@ -460,27 +464,71 @@ static inline void flush_writes(const struct fw_ohci *ohci) reg_read(ohci, OHCI1394_Version); } -static int ohci_update_phy_reg(struct fw_card *card, int addr, - int clear_bits, int set_bits) +static int read_phy_reg(struct fw_ohci *ohci, int addr) { - struct fw_ohci *ohci = fw_ohci(card); - u32 val, old; + u32 val; + int i; reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); - flush_writes(ohci); - msleep(2); - val = reg_read(ohci, OHCI1394_PhyControl); - if ((val & OHCI1394_PhyControl_ReadDone) == 0) { - 
fw_error("failed to set phy reg bits.\n"); - return -EBUSY; + for (i = 0; i < 10; i++) { + val = reg_read(ohci, OHCI1394_PhyControl); + if (val & OHCI1394_PhyControl_ReadDone) + return OHCI1394_PhyControl_ReadData(val); + + msleep(1); } + fw_error("failed to read phy reg\n"); + + return -EBUSY; +} + +static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) +{ + int i; - old = OHCI1394_PhyControl_ReadData(val); - old = (old & ~clear_bits) | set_bits; reg_write(ohci, OHCI1394_PhyControl, - OHCI1394_PhyControl_Write(addr, old)); + OHCI1394_PhyControl_Write(addr, val)); + for (i = 0; i < 100; i++) { + val = reg_read(ohci, OHCI1394_PhyControl); + if (!(val & OHCI1394_PhyControl_WritePending)) + return 0; - return 0; + msleep(1); + } + fw_error("failed to write phy reg\n"); + + return -EBUSY; +} + +static int ohci_update_phy_reg(struct fw_card *card, int addr, + int clear_bits, int set_bits) +{ + struct fw_ohci *ohci = fw_ohci(card); + int ret; + + ret = read_phy_reg(ohci, addr); + if (ret < 0) + return ret; + + /* + * The interrupt status bits are cleared by writing a one bit. + * Avoid clearing them unless explicitly requested in set_bits. + */ + if (addr == 5) + clear_bits |= PHY_INT_STATUS_BITS; + + return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits); +} + +static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) +{ + int ret; + + ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5); + if (ret < 0) + return ret; + + return read_phy_reg(ohci, addr); } static int ar_context_add_page(struct ar_context *ctx) @@ -1495,13 +1543,64 @@ static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) memset(&dest[length], 0, CONFIG_ROM_SIZE - size); } +static int configure_1394a_enhancements(struct fw_ohci *ohci) +{ + bool enable_1394a; + int ret, clear, set, offset; + + /* Check if the driver should configure link and PHY. */ + if (!(reg_read(ohci, OHCI1394_HCControlSet) & + OHCI1394_HCControl_programPhyEnable)) + return 0; + + /* Paranoia: check whether the PHY supports 1394a, too. */ + enable_1394a = false; + ret = read_phy_reg(ohci, 2); + if (ret < 0) + return ret; + if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { + ret = read_paged_phy_reg(ohci, 1, 8); + if (ret < 0) + return ret; + if (ret >= 1) + enable_1394a = true; + } + + if (ohci->quirks & QUIRK_NO_1394A) + enable_1394a = false; + + /* Configure PHY and link consistently. */ + if (enable_1394a) { + clear = 0; + set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; + } else { + clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; + set = 0; + } + ret = ohci_update_phy_reg(&ohci->card, 5, clear, set); + if (ret < 0) + return ret; + + if (enable_1394a) + offset = OHCI1394_HCControlSet; + else + offset = OHCI1394_HCControlClear; + reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); + + /* Clean up: configuration has been taken care of. 
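+ * Clearing HCControl.programPhyEnable records that the link and
+ * PHY have been configured, so this block is not run again.
+ * (Editor's gloss.)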
*/ + reg_write(ohci, OHCI1394_HCControlClear, + OHCI1394_HCControl_programPhyEnable); + + return 0; +} + static int ohci_enable(struct fw_card *card, const __be32 *config_rom, size_t length) { struct fw_ohci *ohci = fw_ohci(card); struct pci_dev *dev = to_pci_dev(card->device); u32 lps; - int i; + int i, ret; if (software_reset(ohci)) { fw_error("Failed to reset ohci card.\n"); @@ -1565,10 +1664,14 @@ static int ohci_enable(struct fw_card *card, if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); + ret = configure_1394a_enhancements(ohci); + if (ret < 0) + return ret; + /* Activate link_on bit and contender bit in our self ID packets.*/ - if (ohci_update_phy_reg(card, 4, 0, - PHY_LINK_ACTIVE | PHY_CONTENDER) < 0) - return -EIO; + ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); + if (ret < 0) + return ret; /* * When the link is not yet enabled, the atomic config rom @@ -2304,7 +2407,7 @@ static const struct fw_card_driver ohci_driver = { }; #ifdef CONFIG_PPC_PMAC -static void ohci_pmac_on(struct pci_dev *dev) +static void pmac_ohci_on(struct pci_dev *dev) { if (machine_is(powermac)) { struct device_node *ofn = pci_device_to_OF_node(dev); @@ -2316,7 +2419,7 @@ static void ohci_pmac_on(struct pci_dev *dev) } } -static void ohci_pmac_off(struct pci_dev *dev) +static void pmac_ohci_off(struct pci_dev *dev) { if (machine_is(powermac)) { struct device_node *ofn = pci_device_to_OF_node(dev); @@ -2328,15 +2431,15 @@ static void ohci_pmac_off(struct pci_dev *dev) } } #else -#define ohci_pmac_on(dev) -#define ohci_pmac_off(dev) +static inline void pmac_ohci_on(struct pci_dev *dev) {} +static inline void pmac_ohci_off(struct pci_dev *dev) {} #endif /* CONFIG_PPC_PMAC */ static int __devinit pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { struct fw_ohci *ohci; - u32 bus_options, max_receive, link_speed, version; + u32 bus_options, max_receive, link_speed, version, link_enh; u64 guid; int i, err, n_ir, n_it; size_t size; @@ -2349,7 +2452,7 @@ static int __devinit pci_probe(struct pci_dev *dev, fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); - ohci_pmac_on(dev); + pmac_ohci_on(dev); err = pci_enable_device(dev); if (err) { @@ -2389,6 +2492,23 @@ static int __devinit pci_probe(struct pci_dev *dev, if (param_quirks) ohci->quirks = param_quirks; + /* TI OHCI-Lynx and compatible: set recommended configuration bits. 
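+ * PCI_CFG_TI_LinkEnh sits at PCI config offset 0xf4; the bit
+ * definitions are added to ohci.h below.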
 */
+ if (dev->vendor == PCI_VENDOR_ID_TI) {
+ pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
+
+ /* adjust latency of ATx FIFO: use 1.7 KB threshold */
+ link_enh &= ~TI_LinkEnh_atx_thresh_mask;
+ link_enh |= TI_LinkEnh_atx_thresh_1_7K;
+
+ /* use priority arbitration for asynchronous responses */
+ link_enh |= TI_LinkEnh_enab_unfair;
+
+ /* required for aPhyEnhanceEnable to work */
+ link_enh |= TI_LinkEnh_enab_accel;
+
+ pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
+ }
+
ar_context_init(&ohci->ar_request_ctx, ohci, OHCI1394_AsReqRcvContextControlSet);
@@ -2466,7 +2586,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
pci_disable_device(dev);
fail_free:
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fail:
if (err == -ENOMEM)
fw_error("Out of memory\n");
@@ -2509,7 +2629,7 @@ static void pci_remove(struct pci_dev *dev)
pci_release_region(dev, 0);
pci_disable_device(dev);
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fw_notify("Removed fw-ohci device.\n");
}
@@ -2530,7 +2650,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
fw_error("pci_set_power_state failed with %d\n", err);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
return 0;
}
@@ -2540,7 +2660,7 @@ static int pci_resume(struct pci_dev *dev)
{
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
- ohci_pmac_on(dev);
+ pmac_ohci_on(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
err = pci_enable_device(dev);
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h index ba492d85c516..3bc9a5d744eb 100644 --- a/drivers/firewire/ohci.h +++ b/drivers/firewire/ohci.h
@@ -67,7 +67,7 @@
#define OHCI1394_PhyControl_ReadDone 0x80000000
#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
-#define OHCI1394_PhyControl_WriteDone 0x00004000
+#define OHCI1394_PhyControl_WritePending 0x00004000
#define OHCI1394_IsochronousCycleTimer 0x0F0
#define OHCI1394_AsReqFilterHiSet 0x100
#define OHCI1394_AsReqFilterHiClear 0x104
@@ -154,4 +154,12 @@
#define OHCI1394_phy_tcode 0xe
+/* TI extensions */
+
+#define PCI_CFG_TI_LinkEnh 0xf4
+#define TI_LinkEnh_enab_accel 0x00000002
+#define TI_LinkEnh_enab_unfair 0x00000080
+#define TI_LinkEnh_atx_thresh_mask 0x00003000
+#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
+
#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index fee678f74a19..724038dab4ca 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig
@@ -139,6 +139,13 @@ config GPIO_MAX732X
Board setup code must specify the model to use, and the start number for these GPIOs.
+config GPIO_MAX732X_IRQ
+ bool "Interrupt controller support for MAX732x"
+ depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
+ help
+ Say yes here to enable the max732x to be used as an interrupt
+ controller. It requires the driver to be built into the kernel.
+
config GPIO_PCA953X
tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
depends on I2C
@@ -188,6 +195,13 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using platform-neutral GPIO calls.
+config GPIO_TC35892
+ bool "TC35892 GPIOs"
+ depends on MFD_TC35892
+ help
+ This enables support for the GPIOs found on the TC35892
+ I/O Expander. 
+ config GPIO_TWL4030 tristate "TWL4030, TWL5030, and TPS659x0 GPIOs" depends on TWL4030_CORE @@ -264,10 +278,10 @@ config GPIO_BT8XX If unsure, say N. config GPIO_LANGWELL - bool "Intel Moorestown Platform Langwell GPIO support" + bool "Intel Langwell/Penwell GPIO support" depends on PCI help - Say Y here to support Intel Moorestown platform GPIO. + Say Y here to support Intel Langwell/Penwell GPIO. config GPIO_TIMBERDALE bool "Support for timberdale GPIO IP" @@ -275,6 +289,15 @@ config GPIO_TIMBERDALE ---help--- Add support for the GPIO IP in the timberdale FPGA. +config GPIO_RDC321X + tristate "RDC R-321x GPIO support" + depends on PCI && GPIOLIB + select MFD_CORE + select MFD_RDC321X + help + Support for the RDC R321x SoC GPIOs over southbridge + PCI configuration space. + comment "SPI GPIO expanders:" config GPIO_MAX7301 @@ -310,4 +333,14 @@ config GPIO_UCB1400 To compile this driver as a module, choose M here: the module will be called ucb1400_gpio. +comment "MODULbus GPIO expanders:" + +config GPIO_JANZ_TTL + tristate "Janz VMOD-TTL Digital IO Module" + depends on MFD_JANZ_CMODIO + help + This enables support for the Janz VMOD-TTL Digital IO module. + This driver provides support for driving the pins in output + mode only. Input mode is not supported. + endif diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 10f3f8d958b1..51c3cdd41b5a 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o obj-$(CONFIG_GPIO_PCA953X) += pca953x.o obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o obj-$(CONFIG_GPIO_PL061) += pl061.o +obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o @@ -27,4 +28,6 @@ obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o -obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
\ No newline at end of file
+obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
+obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c index 0c3c498f2260..f73a1555e49d 100644 --- a/drivers/gpio/cs5535-gpio.c +++ b/drivers/gpio/cs5535-gpio.c
@@ -197,7 +197,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
return 0;
}
-static char *cs5535_gpio_names[] = {
+static const char * const cs5535_gpio_names[] = {
"GPIO0", "GPIO1", "GPIO2", "GPIO3",
"GPIO4", "GPIO5", "GPIO6", "GPIO7",
"GPIO8", "GPIO9", "GPIO10", "GPIO11",
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index cae1b8c5b08c..3ca36542e338 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c
@@ -722,7 +722,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
unsigned long flags;
struct gpio_desc *desc;
int status = -EINVAL;
- char *ioname = NULL;
+ const char *ioname = NULL;
/* can't export until sysfs is available ... */
if (!gpio_class.p) {
@@ -753,7 +753,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
struct device *dev;
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%d", gpio);
+ desc, ioname ? ioname : "gpio%u", gpio);
if (!IS_ERR(dev)) {
status = sysfs_create_group(&dev->kobj,
&gpio_attr_group);
@@ -1106,7 +1106,7 @@ unlock:
fail:
/* failures here can mean systems won't boot... */
if (status)
- pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n",
+ pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
chip->base, chip->base + chip->ngpio - 1,
chip->label ? : "generic");
return status;
@@ -1447,6 +1447,49 @@ fail:
}
EXPORT_SYMBOL_GPL(gpio_direction_output);
+/**
+ * gpio_set_debounce - sets @debounce time for a @gpio
+ * @gpio: the gpio to set debounce time
+ * @debounce: debounce time in microseconds
+ */
+int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ unsigned long flags;
+ struct gpio_chip *chip;
+ struct gpio_desc *desc = &gpio_desc[gpio];
+ int status = -EINVAL;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (!gpio_is_valid(gpio))
+ goto fail;
+ chip = desc->chip;
+ if (!chip || !chip->set || !chip->set_debounce)
+ goto fail;
+ gpio -= chip->base;
+ if (gpio >= chip->ngpio)
+ goto fail;
+ status = gpio_ensure_requested(desc, gpio);
+ if (status < 0)
+ goto fail;
+
+ /* now we know the gpio is valid and chip won't vanish */
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ might_sleep_if(extra_checks && chip->can_sleep);
+
+ return chip->set_debounce(chip, gpio, debounce);
+
+fail:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n",
+ __func__, gpio, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_set_debounce);
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done. 
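+ *
+ * (Usage sketch, an editor's addition: on a chip whose driver
+ * implements .set_debounce, a consumer may request e.g. a 5 ms
+ * debounce with gpio_set_debounce(gpio, 5000); chips without
+ * .set_debounce get -EINVAL.)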
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c index 41a9388f2fde..48fc43c4bdd1 100644 --- a/drivers/gpio/it8761e_gpio.c +++ b/drivers/gpio/it8761e_gpio.c @@ -217,7 +217,10 @@ gpiochip_add_err: static void __exit it8761e_gpio_exit(void) { if (gpio_ba) { - gpiochip_remove(&it8761e_gpio_chip); + int ret = gpiochip_remove(&it8761e_gpio_chip); + + WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n", + __func__, ret); release_region(gpio_ba, GPIO_IOSIZE); gpio_ba = 0; diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c new file mode 100644 index 000000000000..813ac077e5d7 --- /dev/null +++ b/drivers/gpio/janz-ttl.c @@ -0,0 +1,258 @@ +/* + * Janz MODULbus VMOD-TTL GPIO Driver + * + * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/slab.h> + +#include <linux/mfd/janz.h> + +#define DRV_NAME "janz-ttl" + +#define PORTA_DIRECTION 0x23 +#define PORTB_DIRECTION 0x2B +#define PORTC_DIRECTION 0x06 +#define PORTA_IOCTL 0x24 +#define PORTB_IOCTL 0x2C +#define PORTC_IOCTL 0x07 + +#define MASTER_INT_CTL 0x00 +#define MASTER_CONF_CTL 0x01 + +#define CONF_PAE (1 << 2) +#define CONF_PBE (1 << 7) +#define CONF_PCE (1 << 4) + +struct ttl_control_regs { + __be16 portc; + __be16 portb; + __be16 porta; + __be16 control; +}; + +struct ttl_module { + struct gpio_chip gpio; + + /* base address of registers */ + struct ttl_control_regs __iomem *regs; + + u8 portc_shadow; + u8 portb_shadow; + u8 porta_shadow; + + spinlock_t lock; +}; + +static int ttl_get_value(struct gpio_chip *gpio, unsigned offset) +{ + struct ttl_module *mod = dev_get_drvdata(gpio->dev); + u8 *shadow; + int ret; + + if (offset < 8) { + shadow = &mod->porta_shadow; + } else if (offset < 16) { + shadow = &mod->portb_shadow; + offset -= 8; + } else { + shadow = &mod->portc_shadow; + offset -= 16; + } + + spin_lock(&mod->lock); + ret = *shadow & (1 << offset); + spin_unlock(&mod->lock); + return ret; +} + +static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value) +{ + struct ttl_module *mod = dev_get_drvdata(gpio->dev); + void __iomem *port; + u8 *shadow; + + if (offset < 8) { + port = &mod->regs->porta; + shadow = &mod->porta_shadow; + } else if (offset < 16) { + port = &mod->regs->portb; + shadow = &mod->portb_shadow; + offset -= 8; + } else { + port = &mod->regs->portc; + shadow = &mod->portc_shadow; + offset -= 16; + } + + spin_lock(&mod->lock); + if (value) + *shadow |= (1 << offset); + else + *shadow &= ~(1 << offset); + + iowrite16be(*shadow, port); + spin_unlock(&mod->lock); +} + +static void __devinit ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val) +{ + iowrite16be(reg, &mod->regs->control); + iowrite16be(val, &mod->regs->control); +} + +static void __devinit ttl_setup_device(struct ttl_module *mod) +{ + /* reset the device to a known state */ + iowrite16be(0x0000, &mod->regs->control); + iowrite16be(0x0001, &mod->regs->control); + iowrite16be(0x0000, &mod->regs->control); + + /* put all ports in open-drain mode */ + ttl_write_reg(mod, PORTA_IOCTL, 0x00ff); + ttl_write_reg(mod, 
PORTB_IOCTL, 0x00ff); + ttl_write_reg(mod, PORTC_IOCTL, 0x000f); + + /* set all ports as outputs */ + ttl_write_reg(mod, PORTA_DIRECTION, 0x0000); + ttl_write_reg(mod, PORTB_DIRECTION, 0x0000); + ttl_write_reg(mod, PORTC_DIRECTION, 0x0000); + + /* set all ports to drive zeroes */ + iowrite16be(0x0000, &mod->regs->porta); + iowrite16be(0x0000, &mod->regs->portb); + iowrite16be(0x0000, &mod->regs->portc); + + /* enable all ports */ + ttl_write_reg(mod, MASTER_CONF_CTL, CONF_PAE | CONF_PBE | CONF_PCE); +} + +static int __devinit ttl_probe(struct platform_device *pdev) +{ + struct janz_platform_data *pdata; + struct device *dev = &pdev->dev; + struct ttl_module *mod; + struct gpio_chip *gpio; + struct resource *res; + int ret; + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(dev, "no platform data\n"); + ret = -ENXIO; + goto out_return; + } + + mod = kzalloc(sizeof(*mod), GFP_KERNEL); + if (!mod) { + dev_err(dev, "unable to allocate private data\n"); + ret = -ENOMEM; + goto out_return; + } + + platform_set_drvdata(pdev, mod); + spin_lock_init(&mod->lock); + + /* get access to the MODULbus registers for this module */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "MODULbus registers not found\n"); + ret = -ENODEV; + goto out_free_mod; + } + + mod->regs = ioremap(res->start, resource_size(res)); + if (!mod->regs) { + dev_err(dev, "unable to ioremap MODULbus registers\n"); + ret = -ENOMEM; + goto out_free_mod; + } + + ttl_setup_device(mod); + + /* Initialize the GPIO data structures */ + gpio = &mod->gpio; + gpio->dev = &pdev->dev; + gpio->label = pdev->name; + gpio->get = ttl_get_value; + gpio->set = ttl_set_value; + gpio->owner = THIS_MODULE; + + /* request dynamic allocation */ + gpio->base = -1; + gpio->ngpio = 20; + + ret = gpiochip_add(gpio); + if (ret) { + dev_err(dev, "unable to add GPIO chip\n"); + goto out_iounmap_regs; + } + + dev_info(&pdev->dev, "module %d: registered GPIO device\n", + pdata->modno); + return 0; + +out_iounmap_regs: + iounmap(mod->regs); +out_free_mod: + kfree(mod); +out_return: + return ret; +} + +static int __devexit ttl_remove(struct platform_device *pdev) +{ + struct ttl_module *mod = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int ret; + + ret = gpiochip_remove(&mod->gpio); + if (ret) { + dev_err(dev, "unable to remove GPIO chip\n"); + return ret; + } + + iounmap(mod->regs); + kfree(mod); + return 0; +} + +static struct platform_driver ttl_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = ttl_probe, + .remove = __devexit_p(ttl_remove), +}; + +static int __init ttl_init(void) +{ + return platform_driver_register(&ttl_driver); +} + +static void __exit ttl_exit(void) +{ + platform_driver_unregister(&ttl_driver); +} + +MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); +MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:janz-ttl"); + +module_init(ttl_init); +module_exit(ttl_exit); diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c index 00c3a14127af..8383a8d7f994 100644 --- a/drivers/gpio/langwell_gpio.c +++ b/drivers/gpio/langwell_gpio.c @@ -17,6 +17,7 @@ /* Supports: * Moorestown platform Langwell chip. + * Medfield platform Penwell chip.
*/ #include <linux/module.h> @@ -31,44 +32,65 @@ #include <linux/gpio.h> #include <linux/slab.h> -struct lnw_gpio_register { - u32 GPLR[2]; - u32 GPDR[2]; - u32 GPSR[2]; - u32 GPCR[2]; - u32 GRER[2]; - u32 GFER[2]; - u32 GEDR[2]; +/* + * The Langwell chip has 64 pins, so there are 2 32-bit registers to control + * each feature, while the Penwell chip has 96 pins per block and needs 3 32-bit + * registers to control them; we therefore only define the register order here + * instead of a structure, and compute the bit offset for a pin (using GPDR as + * an example): + * + * nreg = ngpio / 32; + * reg = offset / 32; + * bit = offset % 32; + * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4; + * + * so the bit at reg_addr controls the GPDR feature of the pin at offset +*/ + +enum GPIO_REG { + GPLR = 0, /* pin level read-only */ + GPDR, /* pin direction */ + GPSR, /* pin set */ + GPCR, /* pin clear */ + GRER, /* rising edge detect */ + GFER, /* falling edge detect */ + GEDR, /* edge detect result */ }; struct lnw_gpio { struct gpio_chip chip; - struct lnw_gpio_register *reg_base; + void *reg_base; spinlock_t lock; unsigned irq_base; }; -static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset) +static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset, + enum GPIO_REG reg_type) { struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); + unsigned nreg = chip->ngpio / 32; u8 reg = offset / 32; - void __iomem *gplr; + void __iomem *ptr; + + ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4); + return ptr; +} + +static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + void __iomem *gplr = gpio_reg(chip, offset, GPLR); - gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]); return readl(gplr) & BIT(offset % 32); } static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { - struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); - u8 reg = offset / 32; void __iomem *gpsr, *gpcr; if (value) { - gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]); + gpsr = gpio_reg(chip, offset, GPSR); writel(BIT(offset % 32), gpsr); } else { - gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]); + gpcr = gpio_reg(chip, offset, GPCR); writel(BIT(offset % 32), gpcr); } } @@ -76,12 +98,10 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value) static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); - u8 reg = offset / 32; + void __iomem *gpdr = gpio_reg(chip, offset, GPDR); u32 value; unsigned long flags; - void __iomem *gpdr; - gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]); spin_lock_irqsave(&lnw->lock, flags); value = readl(gpdr); value &= ~BIT(offset % 32); @@ -94,12 +114,10 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); - u8 reg = offset / 32; + void __iomem *gpdr = gpio_reg(chip, offset, GPDR); unsigned long flags; - void __iomem *gpdr; lnw_gpio_set(chip, offset, value); - gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]); spin_lock_irqsave(&lnw->lock, flags); value = readl(gpdr); value |= BIT(offset % 32);
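To make the offset arithmetic in the comment above concrete, a worked example with assumed values: a Penwell block (ngpio = 96), pin offset 70, and the GPDR feature (value 1 per the enum):

	/*
	 * nreg     = 96 / 32 = 3
	 * reg      = 70 / 32 = 2
	 * bit      = 70 % 32 = 6
	 * reg_addr = reg_base + 1 * 3 * 4 + 2 * 4 = reg_base + 0x14
	 *
	 * i.e. gpio_reg(chip, 70, GPDR) points at the 32-bit register at
	 * reg_base + 0x14, and bit 6 of that register is pin 70's
	 * direction bit.
	 */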
@@ -118,11 +136,10 @@ static int lnw_irq_type(unsigned irq, unsigned type) { struct lnw_gpio *lnw = get_irq_chip_data(irq); u32 gpio = irq - lnw->irq_base; - u8 reg = gpio / 32; unsigned long flags; u32 value; - void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]); - void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]); + void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER); + void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER); if (gpio >= lnw->chip.ngpio) return -EINVAL; @@ -158,8 +175,10 @@ static struct irq_chip lnw_irqchip = { .set_type = lnw_irq_type, }; -static struct pci_device_id lnw_gpio_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) }, +static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 }, { 0, } }; MODULE_DEVICE_TABLE(pci, lnw_gpio_ids); @@ -167,17 +186,17 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids); static void lnw_irq_handler(unsigned irq, struct irq_desc *desc) { struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq); - u32 reg, gpio; + u32 base, gpio; void __iomem *gedr; u32 gedr_v; /* check GPIO controller to check which pin triggered the interrupt */ - for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) { - gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]); + for (base = 0; base < lnw->chip.ngpio; base += 32) { + gedr = gpio_reg(&lnw->chip, base, GEDR); gedr_v = readl(gedr); if (!gedr_v) continue; - for (gpio = reg*32; gpio < reg*32+32; gpio++) + for (gpio = base; gpio < base + 32; gpio++) if (gedr_v & BIT(gpio % 32)) { pr_debug("pin %d triggered\n", gpio); generic_handle_irq(lnw->irq_base + gpio); @@ -245,7 +264,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, lnw->chip.set = lnw_gpio_set; lnw->chip.to_irq = lnw_gpio_to_irq; lnw->chip.base = gpio_base; - lnw->chip.ngpio = 64; + lnw->chip.ngpio = id->driver_data; lnw->chip.can_sleep = 0; pci_set_drvdata(pdev, lnw); retval = gpiochip_add(&lnw->chip); diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c index f7868243af89..9cad60f9e962 100644 --- a/drivers/gpio/max732x.c +++ b/drivers/gpio/max732x.c @@ -17,7 +17,8 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/gpio.h> - +#include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/i2c.h> #include <linux/i2c/max732x.h> @@ -31,7 +32,8 @@ * - Open Drain I/O * * designated by 'O', 'I' and 'P' individually according to MAXIM's - * datasheets. + * datasheets. 'I' and 'P' ports are interrupt capable, some with + * a dedicated interrupt mask. * * There are two groups of I/O ports, each group usually includes * up to 8 I/O ports, and is accessed by a specific I2C address: @@ -44,7 +46,8 @@ * * Within each group of ports, there are five known combinations of * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for - * the detailed organization of these ports. + * the detailed organization of these ports. Only Group A is interrupt + * capable.
* * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16', * and GPIOs from GROUP_A are numbered before those from GROUP_B @@ -68,16 +71,47 @@ #define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */ #define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */ +#define INT_NONE 0x0 /* No interrupt capability */ +#define INT_NO_MASK 0x1 /* Has interrupts, no mask */ +#define INT_INDEP_MASK 0x2 /* Has interrupts, independent mask */ +#define INT_MERGED_MASK 0x3 /* Has interrupts, merged mask */ + +#define INT_CAPS(x) (((uint64_t)(x)) << 32) + +enum { + MAX7319, + MAX7320, + MAX7321, + MAX7322, + MAX7323, + MAX7324, + MAX7325, + MAX7326, + MAX7327, +}; + +static uint64_t max732x_features[] = { + [MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK), + [MAX7320] = GROUP_B(IO_8O), + [MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK), + [MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK), + [MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK), + [MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK), + [MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK), + [MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK), + [MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK), +}; + static const struct i2c_device_id max732x_id[] = { - { "max7319", GROUP_A(IO_8I) }, - { "max7320", GROUP_B(IO_8O) }, - { "max7321", GROUP_A(IO_8P) }, - { "max7322", GROUP_A(IO_4I4O) }, - { "max7323", GROUP_A(IO_4P4O) }, - { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) }, - { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) }, - { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) }, - { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) }, + { "max7319", MAX7319 }, + { "max7320", MAX7320 }, + { "max7321", MAX7321 }, + { "max7322", MAX7322 }, + { "max7323", MAX7323 }, + { "max7324", MAX7324 }, + { "max7325", MAX7325 }, + { "max7326", MAX7326 }, + { "max7327", MAX7327 }, { }, }; MODULE_DEVICE_TABLE(i2c, max732x_id); @@ -96,9 +130,19 @@ struct max732x_chip { struct mutex lock; uint8_t reg_out[2]; + +#ifdef CONFIG_GPIO_MAX732X_IRQ + struct mutex irq_lock; + int irq_base; + uint8_t irq_mask; + uint8_t irq_mask_cur; + uint8_t irq_trig_raise; + uint8_t irq_trig_fall; + uint8_t irq_features; +#endif }; -static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val) +static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val) { struct i2c_client *client; int ret; @@ -113,7 +157,7 @@ static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val) return 0; } -static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val) +static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val) { struct i2c_client *client; int ret; @@ -142,7 +186,7 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off) chip = container_of(gc, struct max732x_chip, gpio_chip); - ret = max732x_read(chip, is_group_a(chip, off), ®_val); + ret = max732x_readb(chip, is_group_a(chip, off), ®_val); if (ret < 0) return 0; @@ -162,7 +206,7 @@ static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0]; reg_out = (val) ? 
reg_out | mask : reg_out & ~mask; - ret = max732x_write(chip, is_group_a(chip, off), reg_out); + ret = max732x_writeb(chip, is_group_a(chip, off), reg_out); if (ret < 0) goto out; @@ -188,6 +232,13 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off) return -EACCES; } + /* + * Open-drain pins must be set to high impedance (which is + * equivalent to output-high) to be turned into an input. + */ + if ((mask & chip->dir_output)) + max732x_gpio_set_value(gc, off, 1); + return 0; } @@ -209,12 +260,278 @@ static int max732x_gpio_direction_output(struct gpio_chip *gc, return 0; } +#ifdef CONFIG_GPIO_MAX732X_IRQ +static int max732x_writew(struct max732x_chip *chip, uint16_t val) +{ + int ret; + + val = cpu_to_le16(val); + + ret = i2c_master_send(chip->client_group_a, (char *)&val, 2); + if (ret < 0) { + dev_err(&chip->client_group_a->dev, "failed writing\n"); + return ret; + } + + return 0; +} + +static int max732x_readw(struct max732x_chip *chip, uint16_t *val) +{ + int ret; + + ret = i2c_master_recv(chip->client_group_a, (char *)val, 2); + if (ret < 0) { + dev_err(&chip->client_group_a->dev, "failed reading\n"); + return ret; + } + + *val = le16_to_cpu(*val); + return 0; +} + +static void max732x_irq_update_mask(struct max732x_chip *chip) +{ + uint16_t msg; + + if (chip->irq_mask == chip->irq_mask_cur) + return; + + chip->irq_mask = chip->irq_mask_cur; + + if (chip->irq_features == INT_NO_MASK) + return; + + mutex_lock(&chip->lock); + + switch (chip->irq_features) { + case INT_INDEP_MASK: + msg = (chip->irq_mask << 8) | chip->reg_out[0]; + max732x_writew(chip, msg); + break; + + case INT_MERGED_MASK: + msg = chip->irq_mask | chip->reg_out[0]; + max732x_writeb(chip, 1, (uint8_t)msg); + break; + } + + mutex_unlock(&chip->lock); +} + +static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off) +{ + struct max732x_chip *chip; + + chip = container_of(gc, struct max732x_chip, gpio_chip); + return chip->irq_base + off; +} + +static void max732x_irq_mask(unsigned int irq) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + + chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base)); +} + +static void max732x_irq_unmask(unsigned int irq) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + + chip->irq_mask_cur |= 1 << (irq - chip->irq_base); +} + +static void max732x_irq_bus_lock(unsigned int irq) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + + mutex_lock(&chip->irq_lock); + chip->irq_mask_cur = chip->irq_mask; +} + +static void max732x_irq_bus_sync_unlock(unsigned int irq) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + + max732x_irq_update_mask(chip); + mutex_unlock(&chip->irq_lock); +} + +static int max732x_irq_set_type(unsigned int irq, unsigned int type) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + uint16_t off = irq - chip->irq_base; + uint16_t mask = 1 << off; + + if (!(mask & chip->dir_input)) { + dev_dbg(&chip->client->dev, "%s port %d is output only\n", + chip->client->name, off); + return -EACCES; + } + + if (!(type & IRQ_TYPE_EDGE_BOTH)) { + dev_err(&chip->client->dev, "irq %d: unsupported type %d\n", + irq, type); + return -EINVAL; + } + + if (type & IRQ_TYPE_EDGE_FALLING) + chip->irq_trig_fall |= mask; + else + chip->irq_trig_fall &= ~mask; + + if (type & IRQ_TYPE_EDGE_RISING) + chip->irq_trig_raise |= mask; + else + chip->irq_trig_raise &= ~mask; + + return max732x_gpio_direction_input(&chip->gpio_chip, off); +} + +static struct irq_chip max732x_irq_chip = { + .name = "max732x", + .mask = max732x_irq_mask, + 
.unmask = max732x_irq_unmask, + .bus_lock = max732x_irq_bus_lock, + .bus_sync_unlock = max732x_irq_bus_sync_unlock, + .set_type = max732x_irq_set_type, +}; + +static uint8_t max732x_irq_pending(struct max732x_chip *chip) +{ + uint8_t cur_stat; + uint8_t old_stat; + uint8_t trigger; + uint8_t pending; + uint16_t status; + int ret; + + ret = max732x_readw(chip, &status); + if (ret) + return 0; + + trigger = status >> 8; + trigger &= chip->irq_mask; + + if (!trigger) + return 0; + + cur_stat = status & 0xFF; + cur_stat &= chip->irq_mask; + + old_stat = cur_stat ^ trigger; + + pending = (old_stat & chip->irq_trig_fall) | + (cur_stat & chip->irq_trig_raise); + pending &= trigger; + + return pending; +} + +static irqreturn_t max732x_irq_handler(int irq, void *devid) +{ + struct max732x_chip *chip = devid; + uint8_t pending; + uint8_t level; + + pending = max732x_irq_pending(chip); + + if (!pending) + return IRQ_HANDLED; + + do { + level = __ffs(pending); + handle_nested_irq(level + chip->irq_base); + + pending &= ~(1 << level); + } while (pending); + + return IRQ_HANDLED; +} + +static int max732x_irq_setup(struct max732x_chip *chip, + const struct i2c_device_id *id) +{ + struct i2c_client *client = chip->client; + struct max732x_platform_data *pdata = client->dev.platform_data; + int has_irq = max732x_features[id->driver_data] >> 32; + int ret; + + if (pdata->irq_base && has_irq != INT_NONE) { + int lvl; + + chip->irq_base = pdata->irq_base; + chip->irq_features = has_irq; + mutex_init(&chip->irq_lock); + + for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) { + int irq = lvl + chip->irq_base; + + if (!(chip->dir_input & (1 << lvl))) + continue; + + set_irq_chip_data(irq, chip); + set_irq_chip_and_handler(irq, &max732x_irq_chip, + handle_edge_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + ret = request_threaded_irq(client->irq, + NULL, + max732x_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + dev_name(&client->dev), chip); + if (ret) { + dev_err(&client->dev, "failed to request irq %d\n", + client->irq); + goto out_failed; + } + + chip->gpio_chip.to_irq = max732x_gpio_to_irq; + } + + return 0; + +out_failed: + chip->irq_base = 0; + return ret; +} + +static void max732x_irq_teardown(struct max732x_chip *chip) +{ + if (chip->irq_base) + free_irq(chip->client->irq, chip); +} +#else /* CONFIG_GPIO_MAX732X_IRQ */ +static int max732x_irq_setup(struct max732x_chip *chip, + const struct i2c_device_id *id) +{ + struct i2c_client *client = chip->client; + struct max732x_platform_data *pdata = client->dev.platform_data; + int has_irq = max732x_features[id->driver_data] >> 32; + + if (pdata->irq_base && has_irq != INT_NONE) + dev_warn(&client->dev, "interrupt support not compiled in\n"); + + return 0; +} + +static void max732x_irq_teardown(struct max732x_chip *chip) +{ +} +#endif + static int __devinit max732x_setup_gpio(struct max732x_chip *chip, const struct i2c_device_id *id, unsigned gpio_start) { struct gpio_chip *gc = &chip->gpio_chip; - uint32_t id_data = id->driver_data; + uint32_t id_data = (uint32_t)max732x_features[id->driver_data]; int i, port = 0; for (i = 0; i < 16; i++, id_data >>= 2) { @@ -285,14 +602,14 @@ static int __devinit max732x_probe(struct i2c_client *client, switch (client->addr & 0x70) { case 0x60: chip->client_group_a = client; - if (nr_port > 7) { + if (nr_port > 8) { c = i2c_new_dummy(client->adapter, addr_b); chip->client_group_b = chip->client_dummy = c; } break; case 0x50: 
chip->client_group_b = client; - if (nr_port > 7) { + if (nr_port > 8) { c = i2c_new_dummy(client->adapter, addr_a); chip->client_group_a = chip->client_dummy = c; } @@ -306,9 +623,13 @@ static int __devinit max732x_probe(struct i2c_client *client, mutex_init(&chip->lock); - max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]); - if (nr_port > 7) - max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]); + max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]); + if (nr_port > 8) + max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]); + + ret = max732x_irq_setup(chip, id); + if (ret) + goto out_failed; ret = gpiochip_add(&chip->gpio_chip); if (ret) @@ -325,6 +646,7 @@ static int __devinit max732x_probe(struct i2c_client *client, return 0; out_failed: + max732x_irq_teardown(chip); kfree(chip); return ret; } @@ -352,6 +674,8 @@ static int __devexit max732x_remove(struct i2c_client *client) return ret; } + max732x_irq_teardown(chip); + /* unregister any dummy i2c_client */ if (chip->client_dummy) i2c_unregister_device(chip->client_dummy); diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index b827c976dc62..a2b12aa1f2b9 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c @@ -73,7 +73,7 @@ struct pca953x_chip { struct i2c_client *client; struct pca953x_platform_data *dyn_pdata; struct gpio_chip gpio_chip; - char **names; + const char *const *names; }; static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val) @@ -449,7 +449,7 @@ pca953x_get_alt_pdata(struct i2c_client *client) struct device_node *node; const uint16_t *val; - node = dev_archdata_get_node(&client->dev.archdata); + node = client->dev.of_node; if (node == NULL) return NULL; diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c index 105701a1f05b..ee568c8fcbd0 100644 --- a/drivers/gpio/pl061.c +++ b/drivers/gpio/pl061.c @@ -164,7 +164,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger) unsigned long flags; u8 gpiois, gpioibe, gpioiev; - if (offset < 0 || offset > PL061_GPIO_NR) + if (offset < 0 || offset >= PL061_GPIO_NR) return -EINVAL; spin_lock_irqsave(&chip->irq_lock, flags); diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c new file mode 100644 index 000000000000..2762698e0204 --- /dev/null +++ b/drivers/gpio/rdc321x-gpio.c @@ -0,0 +1,246 @@ +/* + * RDC321x GPIO driver + * + * Copyright (C) 2008, Volker Weiss <dev@tintuc.de> + * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/pci.h> +#include <linux/gpio.h> +#include <linux/mfd/rdc321x.h> +#include <linux/slab.h> + +struct rdc321x_gpio { + spinlock_t lock; + struct pci_dev *sb_pdev; + u32 data_reg[2]; + int reg1_ctrl_base; + int reg1_data_base; + int reg2_ctrl_base; + int reg2_data_base; + struct gpio_chip chip; +}; + +/* read GPIO pin */ +static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio) +{ + struct rdc321x_gpio *gpch; + u32 value = 0; + int reg; + + gpch = container_of(chip, struct rdc321x_gpio, chip); + reg = gpio < 32 ? gpch->reg1_data_base : gpch->reg2_data_base; + + spin_lock(&gpch->lock); + pci_write_config_dword(gpch->sb_pdev, reg, + gpch->data_reg[gpio < 32 ? 0 : 1]); + pci_read_config_dword(gpch->sb_pdev, reg, &value); + spin_unlock(&gpch->lock); + + return (1 << (gpio & 0x1f)) & value ? 1 : 0; +} + +static void rdc_gpio_set_value_impl(struct gpio_chip *chip, + unsigned gpio, int value) +{ + struct rdc321x_gpio *gpch; + int reg = (gpio < 32) ? 0 : 1; + + gpch = container_of(chip, struct rdc321x_gpio, chip); + + if (value) + gpch->data_reg[reg] |= 1 << (gpio & 0x1f); + else + gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f)); + + pci_write_config_dword(gpch->sb_pdev, + reg ? gpch->reg2_data_base : gpch->reg1_data_base, + gpch->data_reg[reg]); +} + +/* set GPIO pin to value */ +static void rdc_gpio_set_value(struct gpio_chip *chip, + unsigned gpio, int value) +{ + struct rdc321x_gpio *gpch; + + gpch = container_of(chip, struct rdc321x_gpio, chip); + spin_lock(&gpch->lock); + rdc_gpio_set_value_impl(chip, gpio, value); + spin_unlock(&gpch->lock); +} + +static int rdc_gpio_config(struct gpio_chip *chip, + unsigned gpio, int value) +{ + struct rdc321x_gpio *gpch; + int err; + u32 reg; + + gpch = container_of(chip, struct rdc321x_gpio, chip); + + spin_lock(&gpch->lock); + err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ? + gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, ®); + if (err) + goto unlock; + + reg |= 1 << (gpio & 0x1f); + + err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ? 
+ gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg); + if (err) + goto unlock; + + rdc_gpio_set_value_impl(chip, gpio, value); + +unlock: + spin_unlock(&gpch->lock); + + return err; +} + +/* configure GPIO pin as input */ +static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) +{ + return rdc_gpio_config(chip, gpio, 1); +} + +/* + * Cache the initial value of both GPIO data registers + */ +static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) +{ + int err; + struct resource *r; + struct rdc321x_gpio *rdc321x_gpio_dev; + struct rdc321x_gpio_pdata *pdata; + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "no platform data supplied\n"); + return -ENODEV; + } + + rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL); + if (!rdc321x_gpio_dev) { + dev_err(&pdev->dev, "failed to allocate private data\n"); + return -ENOMEM; + } + + r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1"); + if (!r) { + dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n"); + err = -ENODEV; + goto out_free; + } + + spin_lock_init(&rdc321x_gpio_dev->lock); + rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev; + rdc321x_gpio_dev->reg1_ctrl_base = r->start; + rdc321x_gpio_dev->reg1_data_base = r->start + 0x4; + + r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2"); + if (!r) { + dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n"); + err = -ENODEV; + goto out_free; + } + + rdc321x_gpio_dev->reg2_ctrl_base = r->start; + rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; + + rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; + rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; + rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; + rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; + rdc321x_gpio_dev->chip.set = rdc_gpio_set_value; + rdc321x_gpio_dev->chip.base = 0; + rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios; + + platform_set_drvdata(pdev, rdc321x_gpio_dev); + + /* This might not be what others (BIOS, bootloader, etc.) + wrote to these registers before, but it's a good guess. Still + better than just using 0xffffffff.
*/ + err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, + rdc321x_gpio_dev->reg1_data_base, + &rdc321x_gpio_dev->data_reg[0]); + if (err) + goto out_drvdata; + + err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, + rdc321x_gpio_dev->reg2_data_base, + &rdc321x_gpio_dev->data_reg[1]); + if (err) + goto out_drvdata; + + dev_info(&pdev->dev, "registering %d GPIOs\n", + rdc321x_gpio_dev->chip.ngpio); + return gpiochip_add(&rdc321x_gpio_dev->chip); + +out_drvdata: + platform_set_drvdata(pdev, NULL); +out_free: + kfree(rdc321x_gpio_dev); + return err; +} + +static int __devexit rdc321x_gpio_remove(struct platform_device *pdev) +{ + int ret; + struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev); + + ret = gpiochip_remove(&rdc321x_gpio_dev->chip); + if (ret) + dev_err(&pdev->dev, "failed to unregister chip\n"); + + kfree(rdc321x_gpio_dev); + platform_set_drvdata(pdev, NULL); + + return ret; +} + +static struct platform_driver rdc321x_gpio_driver = { + .driver.name = "rdc321x-gpio", + .driver.owner = THIS_MODULE, + .probe = rdc321x_gpio_probe, + .remove = __devexit_p(rdc321x_gpio_remove), +}; + +static int __init rdc321x_gpio_init(void) +{ + return platform_driver_register(&rdc321x_gpio_driver); +} + +static void __exit rdc321x_gpio_exit(void) +{ + platform_driver_unregister(&rdc321x_gpio_driver); +} + +module_init(rdc321x_gpio_init); +module_exit(rdc321x_gpio_exit); + +MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); +MODULE_DESCRIPTION("RDC321x GPIO driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rdc321x-gpio"); diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c new file mode 100644 index 000000000000..1be6288780de --- /dev/null +++ b/drivers/gpio/tc35892-gpio.c @@ -0,0 +1,381 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson + * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/gpio.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/mfd/tc35892.h> + +/* + * These registers are modified under the irq bus lock and cached to avoid + * unnecessary writes in bus_sync_unlock. 
+ */ +enum { REG_IBE, REG_IEV, REG_IS, REG_IE }; + +#define CACHE_NR_REGS 4 +#define CACHE_NR_BANKS 3 + +struct tc35892_gpio { + struct gpio_chip chip; + struct tc35892 *tc35892; + struct device *dev; + struct mutex irq_lock; + + int irq_base; + + /* Caches of interrupt control registers for bus_lock */ + u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS]; + u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS]; +}; + +static inline struct tc35892_gpio *to_tc35892_gpio(struct gpio_chip *chip) +{ + return container_of(chip, struct tc35892_gpio, chip); +} + +static int tc35892_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2; + u8 mask = 1 << (offset % 8); + int ret; + + ret = tc35892_reg_read(tc35892, reg); + if (ret < 0) + return ret; + + return ret & mask; +} + +static void tc35892_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2; + unsigned pos = offset % 8; + u8 data[] = {!!val << pos, 1 << pos}; + + tc35892_block_write(tc35892, reg, ARRAY_SIZE(data), data); +} + +static int tc35892_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int val) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 reg = TC35892_GPIODIR0 + offset / 8; + unsigned pos = offset % 8; + + tc35892_gpio_set(chip, offset, val); + + return tc35892_set_bits(tc35892, reg, 1 << pos, 1 << pos); +} + +static int tc35892_gpio_direction_input(struct gpio_chip *chip, + unsigned offset) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 reg = TC35892_GPIODIR0 + offset / 8; + unsigned pos = offset % 8; + + return tc35892_set_bits(tc35892, reg, 1 << pos, 0); +} + +static int tc35892_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + + return tc35892_gpio->irq_base + offset; +} + +static struct gpio_chip template_chip = { + .label = "tc35892", + .owner = THIS_MODULE, + .direction_input = tc35892_gpio_direction_input, + .get = tc35892_gpio_get, + .direction_output = tc35892_gpio_direction_output, + .set = tc35892_gpio_set, + .to_irq = tc35892_gpio_to_irq, + .can_sleep = 1, +}; + +static int tc35892_gpio_irq_set_type(unsigned int irq, unsigned int type) +{ + struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq); + int offset = irq - tc35892_gpio->irq_base; + int regoffset = offset / 8; + int mask = 1 << (offset % 8); + + if (type == IRQ_TYPE_EDGE_BOTH) { + tc35892_gpio->regs[REG_IBE][regoffset] |= mask; + return 0; + } + + tc35892_gpio->regs[REG_IBE][regoffset] &= ~mask; + + if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) + tc35892_gpio->regs[REG_IS][regoffset] |= mask; + else + tc35892_gpio->regs[REG_IS][regoffset] &= ~mask; + + if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH) + tc35892_gpio->regs[REG_IEV][regoffset] |= mask; + else + tc35892_gpio->regs[REG_IEV][regoffset] &= ~mask; + + return 0; +} + +static void tc35892_gpio_irq_lock(unsigned int irq) +{ + struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq); + + mutex_lock(&tc35892_gpio->irq_lock); +} + +static void tc35892_gpio_irq_sync_unlock(unsigned int irq) +{ + struct tc35892_gpio *tc35892_gpio = 
get_irq_chip_data(irq); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + static const u8 regmap[] = { + [REG_IBE] = TC35892_GPIOIBE0, + [REG_IEV] = TC35892_GPIOIEV0, + [REG_IS] = TC35892_GPIOIS0, + [REG_IE] = TC35892_GPIOIE0, + }; + int i, j; + + for (i = 0; i < CACHE_NR_REGS; i++) { + for (j = 0; j < CACHE_NR_BANKS; j++) { + u8 old = tc35892_gpio->oldregs[i][j]; + u8 new = tc35892_gpio->regs[i][j]; + + if (new == old) + continue; + + tc35892_gpio->oldregs[i][j] = new; + tc35892_reg_write(tc35892, regmap[i] + j * 8, new); + } + } + + mutex_unlock(&tc35892_gpio->irq_lock); +} + +static void tc35892_gpio_irq_mask(unsigned int irq) +{ + struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq); + int offset = irq - tc35892_gpio->irq_base; + int regoffset = offset / 8; + int mask = 1 << (offset % 8); + + tc35892_gpio->regs[REG_IE][regoffset] &= ~mask; +} + +static void tc35892_gpio_irq_unmask(unsigned int irq) +{ + struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq); + int offset = irq - tc35892_gpio->irq_base; + int regoffset = offset / 8; + int mask = 1 << (offset % 8); + + tc35892_gpio->regs[REG_IE][regoffset] |= mask; +} + +static struct irq_chip tc35892_gpio_irq_chip = { + .name = "tc35892-gpio", + .bus_lock = tc35892_gpio_irq_lock, + .bus_sync_unlock = tc35892_gpio_irq_sync_unlock, + .mask = tc35892_gpio_irq_mask, + .unmask = tc35892_gpio_irq_unmask, + .set_type = tc35892_gpio_irq_set_type, +}; + +static irqreturn_t tc35892_gpio_irq(int irq, void *dev) +{ + struct tc35892_gpio *tc35892_gpio = dev; + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 status[CACHE_NR_BANKS]; + int ret; + int i; + + ret = tc35892_block_read(tc35892, TC35892_GPIOMIS0, + ARRAY_SIZE(status), status); + if (ret < 0) + return IRQ_NONE; + + for (i = 0; i < ARRAY_SIZE(status); i++) { + unsigned int stat = status[i]; + if (!stat) + continue; + + while (stat) { + int bit = __ffs(stat); + int line = i * 8 + bit; + + handle_nested_irq(tc35892_gpio->irq_base + line); + stat &= ~(1 << bit); + } + + tc35892_reg_write(tc35892, TC35892_GPIOIC0 + i, status[i]); + } + + return IRQ_HANDLED; +} + +static int tc35892_gpio_irq_init(struct tc35892_gpio *tc35892_gpio) +{ + int base = tc35892_gpio->irq_base; + int irq; + + for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) { + set_irq_chip_data(irq, tc35892_gpio); + set_irq_chip_and_handler(irq, &tc35892_gpio_irq_chip, + handle_simple_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + return 0; +} + +static void tc35892_gpio_irq_remove(struct tc35892_gpio *tc35892_gpio) +{ + int base = tc35892_gpio->irq_base; + int irq; + + for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) { +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#endif + set_irq_chip_and_handler(irq, NULL, NULL); + set_irq_chip_data(irq, NULL); + } +} + +static int __devinit tc35892_gpio_probe(struct platform_device *pdev) +{ + struct tc35892 *tc35892 = dev_get_drvdata(pdev->dev.parent); + struct tc35892_gpio_platform_data *pdata; + struct tc35892_gpio *tc35892_gpio; + int ret; + int irq; + + pdata = tc35892->pdata->gpio; + if (!pdata) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + tc35892_gpio = kzalloc(sizeof(struct tc35892_gpio), GFP_KERNEL); + if (!tc35892_gpio) + return -ENOMEM; + + mutex_init(&tc35892_gpio->irq_lock); + + tc35892_gpio->dev = &pdev->dev; + tc35892_gpio->tc35892 = tc35892; + + tc35892_gpio->chip = template_chip; + tc35892_gpio->chip.ngpio 
= tc35892->num_gpio; + tc35892_gpio->chip.dev = &pdev->dev; + tc35892_gpio->chip.base = pdata->gpio_base; + + tc35892_gpio->irq_base = tc35892->irq_base + TC35892_INT_GPIO(0); + + /* Bring the GPIO module out of reset */ + ret = tc35892_set_bits(tc35892, TC35892_RSTCTRL, + TC35892_RSTCTRL_GPIRST, 0); + if (ret < 0) + goto out_free; + + ret = tc35892_gpio_irq_init(tc35892_gpio); + if (ret) + goto out_free; + + ret = request_threaded_irq(irq, NULL, tc35892_gpio_irq, IRQF_ONESHOT, + "tc35892-gpio", tc35892_gpio); + if (ret) { + dev_err(&pdev->dev, "unable to get irq: %d\n", ret); + goto out_removeirq; + } + + ret = gpiochip_add(&tc35892_gpio->chip); + if (ret) { + dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret); + goto out_freeirq; + } + + platform_set_drvdata(pdev, tc35892_gpio); + + return 0; + +out_freeirq: + free_irq(irq, tc35892_gpio); +out_removeirq: + tc35892_gpio_irq_remove(tc35892_gpio); +out_free: + kfree(tc35892_gpio); + return ret; +} + +static int __devexit tc35892_gpio_remove(struct platform_device *pdev) +{ + struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + int ret; + + ret = gpiochip_remove(&tc35892_gpio->chip); + if (ret < 0) { + dev_err(tc35892_gpio->dev, + "unable to remove gpiochip: %d\n", ret); + return ret; + } + + free_irq(irq, tc35892_gpio); + tc35892_gpio_irq_remove(tc35892_gpio); + + platform_set_drvdata(pdev, NULL); + kfree(tc35892_gpio); + + return 0; +} + +static struct platform_driver tc35892_gpio_driver = { + .driver.name = "tc35892-gpio", + .driver.owner = THIS_MODULE, + .probe = tc35892_gpio_probe, + .remove = __devexit_p(tc35892_gpio_remove), +}; + +static int __init tc35892_gpio_init(void) +{ + return platform_driver_register(&tc35892_gpio_driver); +} +subsys_initcall(tc35892_gpio_init); + +static void __exit tc35892_gpio_exit(void) +{ + platform_driver_unregister(&tc35892_gpio_driver); +} +module_exit(tc35892_gpio_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TC35892 GPIO driver"); +MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent"); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f569ae88ab38..c1981861bbbd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -147,7 +147,10 @@ drm_edid_block_valid(u8 *raw_edid) csum += raw_edid[i]; if (csum) { DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); - goto bad; + + /* allow CEA to slide through, switches mangle this */ + if (raw_edid[0] != 0x02) + goto bad; } /* per-block-type checks */ diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7e663a79829f..266b0ff441af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector) if (nv_encoder && nv_connector->native_mode) { unsigned status = connector_status_connected; -#ifdef CONFIG_ACPI +#if defined(CONFIG_ACPI_BUTTON) || \ + (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) if (!nouveau_ignorelid && !acpi_lid_open()) status = connector_status_unknown; #endif diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 0616c96e4b67..704a25d04ac9 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev) if (!dev_priv->engine.graph.ctxprog) { struct nouveau_grctx ctx = {}; - uint32_t cp[256]; + uint32_t *cp; + + cp = 
kmalloc(sizeof(*cp) * 256, GFP_KERNEL); + if (!cp) + return -ENOMEM; ctx.dev = dev; ctx.mode = NOUVEAU_GRCTX_PROG; @@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev) nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); for (i = 0; i < ctx.ctxprog_len; i++) nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); + + kfree(cp); } /* No context present currently */ diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 03dd6c41dc19..f3f2827017ef 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -707,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode break; case ATOM_DCPLL: case ATOM_PPLL_INVALID: + default: pll = &rdev->clock.dcpll; break; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 66a37fb75839..669feb689bfc 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -576,6 +576,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, */ int radeon_agp_init(struct radeon_device *rdev); void radeon_agp_resume(struct radeon_device *rdev); +void radeon_agp_suspend(struct radeon_device *rdev); void radeon_agp_fini(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 28e473f1f56f..f40dfb77f9b1 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev) } #endif } + +void radeon_agp_suspend(struct radeon_device *rdev) +{ + radeon_agp_fini(rdev); +} diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 6e733fdc3349..24ea683f7cf5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -680,10 +680,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct uint8_t dac; union atom_supported_devices *supported_devices; int i, j, max_device; - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; + struct bios_connector *bios_connectors; + size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; - if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) + bios_connectors = kzalloc(bc_size, GFP_KERNEL); + if (!bios_connectors) + return false; + + if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, + &data_offset)) { + kfree(bios_connectors); return false; + } supported_devices = (union atom_supported_devices *)(ctx->bios + data_offset); @@ -851,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct radeon_link_encoder_connector(dev); + kfree(bios_connectors); return true; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index a20b612ffe75..fdc3fdf78acb 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -754,6 +754,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) /* evict remaining vram memory */ radeon_bo_evict_vram(rdev); + radeon_agp_suspend(rdev); + pci_save_state(dev->pdev); if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index cc5316dcf580..b3ba44c0a818 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -900,9 +900,10 @@ static void 
radeon_cp_dispatch_clear(struct drm_device * dev, flags |= RADEON_FRONT; } if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { - if (!dev_priv->have_z_offset) + if (!dev_priv->have_z_offset) { printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); - flags &= ~(RADEON_DEPTH | RADEON_STENCIL); + flags &= ~(RADEON_DEPTH | RADEON_STENCIL); + } } if (flags & (RADEON_FRONT | RADEON_BACK)) { diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 76ba59b9fea1..132278fa6240 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -347,6 +347,14 @@ config HID_QUANTA ---help--- Support for Quanta Optical Touch dual-touch panels. +config HID_ROCCAT + tristate "Roccat special event support" + depends on USB_HID + ---help--- + Support for Roccat special events. + Say Y here if you have a Roccat mouse or keyboard and want OSD or + macro execution support. + config HID_ROCCAT_KONE tristate "Roccat Kone Mouse support" depends on USB_HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 22e47eaeea32..987fa0627367 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_HID_QUANTA) += hid-quanta.o obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o +obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e10e314d38cc..aa0f7dcabcd7 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1301,6 +1301,7 @@ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 56f314fbd4f9..c94026768570 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -811,7 +811,7 @@ static const char *relatives[REL_MAX + 1] = { [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc", }; -static const char *absolutes[ABS_MAX + 1] = { +static const char *absolutes[ABS_CNT] = { [ABS_X] = "X", [ABS_Y] = "Y", [ABS_Z] = "Z", [ABS_RX] = "Rx", [ABS_RY] = "Ry", [ABS_RZ] = "Rz", diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c index 62416e6baeca..3975e039c3dd 100644 --- a/drivers/hid/hid-gyration.c +++ b/drivers/hid/hid-gyration.c @@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id gyration_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, { } }; MODULE_DEVICE_TABLE(hid, gyration_devices); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 9776896cc4fc..6af77ed0b555 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -282,6 +282,7 @@ #define USB_VENDOR_ID_GYRATION 0x0c16 #define 
USB_DEVICE_ID_GYRATION_REMOTE 0x0002 #define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003 +#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008 #define USB_VENDOR_ID_HAPP 0x078b #define USB_DEVICE_ID_UGCI_DRIVING 0x0010 diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index 66e694054ba2..17f2dc04f883 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -37,6 +37,7 @@ #include <linux/module.h> #include <linux/slab.h> #include "hid-ids.h" +#include "hid-roccat.h" #include "hid-roccat-kone.h" static void kone_set_settings_checksum(struct kone_settings *settings) @@ -263,7 +264,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result) return 0; } -static ssize_t kone_sysfs_read_settings(struct kobject *kobj, +static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); @@ -287,7 +288,7 @@ static ssize_t kone_sysfs_read_settings(struct kobject *kobj, * This function keeps values in kone_device up to date and assumes that in * case of error the old data is still valid */ -static ssize_t kone_sysfs_write_settings(struct kobject *kobj, +static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); @@ -342,31 +343,31 @@ static ssize_t kone_sysfs_read_profilex(struct kobject *kobj, return count; } -static ssize_t kone_sysfs_read_profile1(struct kobject *kobj, +static ssize_t kone_sysfs_read_profile1(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1); } -static ssize_t kone_sysfs_read_profile2(struct kobject *kobj, +static ssize_t kone_sysfs_read_profile2(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2); } -static ssize_t kone_sysfs_read_profile3(struct kobject *kobj, +static ssize_t kone_sysfs_read_profile3(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3); } -static ssize_t kone_sysfs_read_profile4(struct kobject *kobj, +static ssize_t kone_sysfs_read_profile4(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4); } -static ssize_t kone_sysfs_read_profile5(struct kobject *kobj, +static ssize_t kone_sysfs_read_profile5(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5); @@ -404,31 +405,31 @@ static ssize_t kone_sysfs_write_profilex(struct kobject *kobj, return sizeof(struct kone_profile); } -static ssize_t kone_sysfs_write_profile1(struct kobject *kobj, +static ssize_t kone_sysfs_write_profile1(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1); } -static ssize_t kone_sysfs_write_profile2(struct kobject *kobj, +static ssize_t kone_sysfs_write_profile2(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, 
size_t count) { return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2); } -static ssize_t kone_sysfs_write_profile3(struct kobject *kobj, +static ssize_t kone_sysfs_write_profile3(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3); } -static ssize_t kone_sysfs_write_profile4(struct kobject *kobj, +static ssize_t kone_sysfs_write_profile4(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4); } -static ssize_t kone_sysfs_write_profile5(struct kobject *kobj, +static ssize_t kone_sysfs_write_profile5(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5); @@ -849,6 +850,16 @@ static int kone_init_specials(struct hid_device *hdev) "couldn't init struct kone_device\n"); goto exit_free; } + + retval = roccat_connect(hdev); + if (retval < 0) { + dev_err(&hdev->dev, "couldn't init char dev\n"); + /* be tolerant about not getting chrdev */ + } else { + kone->roccat_claimed = 1; + kone->chrdev_minor = retval; + } + retval = kone_create_sysfs_attributes(intf); if (retval) { dev_err(&hdev->dev, "cannot create sysfs files\n"); @@ -868,10 +879,14 @@ exit_free: static void kone_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct kone_device *kone; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE) { kone_remove_sysfs_attributes(intf); + kone = hid_get_drvdata(hdev); + if (kone->roccat_claimed) + roccat_disconnect(kone->chrdev_minor); kfree(hid_get_drvdata(hdev)); } } @@ -930,6 +945,37 @@ static void kone_keep_values_up_to_date(struct kone_device *kone, } } +static void kone_report_to_chrdev(struct kone_device const *kone, + struct kone_mouse_event const *event) +{ + struct kone_roccat_report roccat_report; + + switch (event->event) { + case kone_mouse_event_switch_profile: + case kone_mouse_event_switch_dpi: + case kone_mouse_event_osd_profile: + case kone_mouse_event_osd_dpi: + roccat_report.event = event->event; + roccat_report.value = event->value; + roccat_report.key = 0; + roccat_report_event(kone->chrdev_minor, + (uint8_t *)&roccat_report, + sizeof(struct kone_roccat_report)); + break; + case kone_mouse_event_call_overlong_macro: + if (event->value == kone_keystroke_action_press) { + roccat_report.event = kone_mouse_event_call_overlong_macro; + roccat_report.value = kone->actual_profile; + roccat_report.key = event->macro_key; + roccat_report_event(kone->chrdev_minor, + (uint8_t *)&roccat_report, + sizeof(struct kone_roccat_report)); + } + break; + } + +} + /* * Is called for keyboard- and mousepart. 
* Only mousepart gets information about special events in its extended event @@ -958,6 +1004,9 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report, kone_keep_values_up_to_date(kone, event); + if (kone->roccat_claimed) + kone_report_to_chrdev(kone, event); + return 0; /* always do further processing */ } diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h index b413b10a7f8a..003e6f81c195 100644 --- a/drivers/hid/hid-roccat-kone.h +++ b/drivers/hid/hid-roccat-kone.h @@ -189,6 +189,12 @@ enum kone_commands { kone_command_firmware = 0xe5a }; +struct kone_roccat_report { + uint8_t event; + uint8_t value; /* holds dpi or profile value */ + uint8_t key; /* macro key on overlong macro execution */ +}; + #pragma pack(pop) struct kone_device { @@ -219,6 +225,9 @@ struct kone_device { * so it's read only once */ int firmware_version; + + int roccat_claimed; + int chrdev_minor; }; #endif diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c new file mode 100644 index 000000000000..e05d48edb66f --- /dev/null +++ b/drivers/hid/hid-roccat.c @@ -0,0 +1,428 @@ +/* + * Roccat driver for Linux + * + * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +/* + * Module roccat is a char device used to report special events of roccat + * hardware to userland. These events include requests for on-screen-display of + * profile or dpi settings or requests for execution of macro sequences that are + * not stored in the device. The information in these events depends on the hid + * device implementation and contains data that is not available in a single + * hid event, or else hidraw could have been used. + * It is inspired by hidraw, but uses only one circular buffer for all readers. + */ + +#include <linux/cdev.h> +#include <linux/poll.h> +#include <linux/sched.h> + +#include "hid-roccat.h" + +#define ROCCAT_FIRST_MINOR 0 +#define ROCCAT_MAX_DEVICES 8 + +/* should be a power of 2 for performance reasons */ +#define ROCCAT_CBUF_SIZE 16 + +struct roccat_report { + uint8_t *value; + int len; +}; + +struct roccat_device { + unsigned int minor; + int open; + int exist; + wait_queue_head_t wait; + struct device *dev; + struct hid_device *hid; + struct list_head readers; + /* protects modifications of readers list */ + struct mutex readers_lock; + + /* + * circular_buffer has one writer and multiple readers with their own + * read pointers + */ + struct roccat_report cbuf[ROCCAT_CBUF_SIZE]; + int cbuf_end; + struct mutex cbuf_lock; +}; + +struct roccat_reader { + struct list_head node; + struct roccat_device *device; + int cbuf_start; +}; + +static int roccat_major; +static struct class *roccat_class; +static struct cdev roccat_cdev; + +static struct roccat_device *devices[ROCCAT_MAX_DEVICES]; +/* protects modifications of devices array */ +static DEFINE_MUTEX(devices_lock);
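The comments above describe the buffering design (one writer, per-reader read pointers), but roccat_report_event() itself lies outside this excerpt, so the following is only a hedged sketch of what the writer side implied by these structures could look like. The function name and body are assumptions for illustration, not the driver's actual code, and kmemdup() would additionally need <linux/slab.h>:

	/* assumed writer-side sketch (not the driver's actual code):
	 * overwrite the oldest slot, advance the single write index
	 * cbuf_end, and wake any sleeping readers; each reader trails
	 * behind with its own cbuf_start (see roccat_read() below) */
	static void example_report_event(struct roccat_device *device,
					 uint8_t const *data, int len)
	{
		struct roccat_report *report;

		mutex_lock(&device->cbuf_lock);
		report = &device->cbuf[device->cbuf_end];
		kfree(report->value);	/* slot may hold an old, unread report */
		report->value = kmemdup(data, len, GFP_KERNEL);
		report->len = len;
		device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE;
		mutex_unlock(&device->cbuf_lock);

		wake_up_interruptible(&device->wait);
	}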
*/ + if (reader->cbuf_start == device->cbuf_end) { + add_wait_queue(&device->wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + /* wait for data */ + while (reader->cbuf_start == device->cbuf_end) { + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + break; + } + if (signal_pending(current)) { + retval = -ERESTARTSYS; + break; + } + if (!device->exist) { + retval = -EIO; + break; + } + + mutex_unlock(&device->cbuf_lock); + schedule(); + mutex_lock(&device->cbuf_lock); + set_current_state(TASK_INTERRUPTIBLE); + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&device->wait, &wait); + } + + /* here we either have data or a reason to return if retval is set */ + if (retval) + goto exit_unlock; + + report = &device->cbuf[reader->cbuf_start]; + /* + * If report is larger than requested amount of data, rest of report + * is lost! + */ + len = report->len > count ? count : report->len; + + if (copy_to_user(buffer, report->value, len)) { + retval = -EFAULT; + goto exit_unlock; + } + retval += len; + reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE; + +exit_unlock: + mutex_unlock(&device->cbuf_lock); + return retval; +} + +static unsigned int roccat_poll(struct file *file, poll_table *wait) +{ + struct roccat_reader *reader = file->private_data; + poll_wait(file, &reader->device->wait, wait); + if (reader->cbuf_start != reader->device->cbuf_end) + return POLLIN | POLLRDNORM; + if (!reader->device->exist) + return POLLERR | POLLHUP; + return 0; +} + +static int roccat_open(struct inode *inode, struct file *file) +{ + unsigned int minor = iminor(inode); + struct roccat_reader *reader; + struct roccat_device *device; + int error = 0; + + reader = kzalloc(sizeof(struct roccat_reader), GFP_KERNEL); + if (!reader) + return -ENOMEM; + + mutex_lock(&devices_lock); + + device = devices[minor]; + + mutex_lock(&device->readers_lock); + + if (!device) { + printk(KERN_EMERG "roccat device with minor %d doesn't exist\n", + minor); + error = -ENODEV; + goto exit_unlock; + } + + if (!device->open++) { + /* power on device on adding first reader */ + if (device->hid->ll_driver->power) { + error = device->hid->ll_driver->power(device->hid, + PM_HINT_FULLON); + if (error < 0) { + --device->open; + goto exit_unlock; + } + } + error = device->hid->ll_driver->open(device->hid); + if (error < 0) { + if (device->hid->ll_driver->power) + device->hid->ll_driver->power(device->hid, + PM_HINT_NORMAL); + --device->open; + goto exit_unlock; + } + } + + reader->device = device; + /* new reader doesn't get old events */ + reader->cbuf_start = device->cbuf_end; + + list_add_tail(&reader->node, &device->readers); + file->private_data = reader; + +exit_unlock: + mutex_unlock(&device->readers_lock); + mutex_unlock(&devices_lock); + return error; +} + +static int roccat_release(struct inode *inode, struct file *file) +{ + unsigned int minor = iminor(inode); + struct roccat_reader *reader = file->private_data; + struct roccat_device *device; + + mutex_lock(&devices_lock); + + device = devices[minor]; + if (!device) { + mutex_unlock(&devices_lock); + printk(KERN_EMERG "roccat device with minor %d doesn't exist\n", + minor); + return -ENODEV; + } + + mutex_lock(&device->readers_lock); + list_del(&reader->node); + mutex_unlock(&device->readers_lock); + kfree(reader); + + if (!--device->open) { + /* removing last reader */ + if (device->exist) { + if (device->hid->ll_driver->power) + device->hid->ll_driver->power(device->hid, + PM_HINT_NORMAL); + device->hid->ll_driver->close(device->hid); + } else { 
+ kfree(device); + } + } + + mutex_unlock(&devices_lock); + + return 0; +} + +/* + * roccat_report_event() - output data to readers + * @minor: minor device number returned by roccat_connect() + * @data: pointer to data + * @len: size of data + * + * Return value is zero on success, a negative error code on failure. + * + * This is called from interrupt handler. + */ +int roccat_report_event(int minor, u8 const *data, int len) +{ + struct roccat_device *device; + struct roccat_reader *reader; + struct roccat_report *report; + uint8_t *new_value; + + new_value = kmemdup(data, len, GFP_ATOMIC); + if (!new_value) + return -ENOMEM; + + device = devices[minor]; + + report = &device->cbuf[device->cbuf_end]; + + /* passing NULL is safe */ + kfree(report->value); + + report->value = new_value; + report->len = len; + device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE; + + list_for_each_entry(reader, &device->readers, node) { + /* + * As we already inserted one element, the buffer can't be + * empty. If start and end are equal, buffer is full and we + * increase start, so that slow reader misses one event, but + * gets the newer ones in the right order. + */ + if (reader->cbuf_start == device->cbuf_end) + reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE; + } + + wake_up_interruptible(&device->wait); + return 0; +} +EXPORT_SYMBOL_GPL(roccat_report_event); + +/* + * roccat_connect() - create a char device for special event output + * @hid: the hid device the char device should be connected to. + * + * Return value is minor device number in Range [0, ROCCAT_MAX_DEVICES] on + * success, a negative error code on failure. + */ +int roccat_connect(struct hid_device *hid) +{ + unsigned int minor; + struct roccat_device *device; + int temp; + + device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL); + if (!device) + return -ENOMEM; + + mutex_lock(&devices_lock); + + for (minor = 0; minor < ROCCAT_MAX_DEVICES; ++minor) { + if (devices[minor]) + continue; + break; + } + + if (minor < ROCCAT_MAX_DEVICES) { + devices[minor] = device; + } else { + mutex_unlock(&devices_lock); + kfree(device); + return -EINVAL; + } + + device->dev = device_create(roccat_class, &hid->dev, + MKDEV(roccat_major, minor), NULL, + "%s%s%d", "roccat", hid->driver->name, minor); + + if (IS_ERR(device->dev)) { + devices[minor] = NULL; + mutex_unlock(&devices_lock); + temp = PTR_ERR(device->dev); + kfree(device); + return temp; + } + + mutex_unlock(&devices_lock); + + init_waitqueue_head(&device->wait); + INIT_LIST_HEAD(&device->readers); + mutex_init(&device->readers_lock); + mutex_init(&device->cbuf_lock); + device->minor = minor; + device->hid = hid; + device->exist = 1; + device->cbuf_end = 0; + + return minor; +} +EXPORT_SYMBOL_GPL(roccat_connect); + +/* roccat_disconnect() - remove char device from hid device + * @minor: the minor device number returned by roccat_connect() + */ +void roccat_disconnect(int minor) +{ + struct roccat_device *device; + + mutex_lock(&devices_lock); + device = devices[minor]; + devices[minor] = NULL; + mutex_unlock(&devices_lock); + + device->exist = 0; /* TODO exist maybe not needed */ + + device_destroy(roccat_class, MKDEV(roccat_major, minor)); + + if (device->open) { + device->hid->ll_driver->close(device->hid); + wake_up_interruptible(&device->wait); + } else { + kfree(device); + } +} +EXPORT_SYMBOL_GPL(roccat_disconnect); + +static const struct file_operations roccat_ops = { + .owner = THIS_MODULE, + .read = roccat_read, + .poll = roccat_poll, + .open = roccat_open, + 
.release = roccat_release, +}; + +static int __init roccat_init(void) +{ + int retval; + dev_t dev_id; + + retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR, + ROCCAT_MAX_DEVICES, "roccat"); + + roccat_major = MAJOR(dev_id); + + if (retval < 0) { + printk(KERN_WARNING "roccat: can't get major number\n"); + return retval; + } + + roccat_class = class_create(THIS_MODULE, "roccat"); + if (IS_ERR(roccat_class)) { + retval = PTR_ERR(roccat_class); + unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES); + return retval; + } + + cdev_init(&roccat_cdev, &roccat_ops); + cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES); + + return 0; +} + +static void __exit roccat_exit(void) +{ + dev_t dev_id = MKDEV(roccat_major, 0); + + cdev_del(&roccat_cdev); + class_destroy(roccat_class); + unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES); +} + +module_init(roccat_init); +module_exit(roccat_exit); + +MODULE_AUTHOR("Stefan Achatz"); +MODULE_DESCRIPTION("USB Roccat char device"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h new file mode 100644 index 000000000000..d8aae0c1fa7e --- /dev/null +++ b/drivers/hid/hid-roccat.h @@ -0,0 +1,31 @@ +#ifndef __HID_ROCCAT_H +#define __HID_ROCCAT_H + +/* + * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/hid.h> +#include <linux/types.h> + +#if defined(CONFIG_HID_ROCCAT) || defined (CONFIG_HID_ROCCAT_MODULE) +int roccat_connect(struct hid_device *hid); +void roccat_disconnect(int minor); +int roccat_report_event(int minor, u8 const *data, int len); +#else +static inline int roccat_connect(struct hid_device *hid) { return -1; } +static inline void roccat_disconnect(int minor) {} +static inline int roccat_report_event(int minor, u8 const *data, int len) +{ + return 0; +} +#endif + +#endif diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 9be8e1754a0b..e19cf8eb6ccf 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -447,13 +447,14 @@ config SENSORS_IT87 will be called it87. config SENSORS_LM63 - tristate "National Semiconductor LM63" + tristate "National Semiconductor LM63 and LM64" depends on I2C help - If you say yes here you get support for the National Semiconductor - LM63 remote diode digital temperature sensor with integrated fan - control. Such chips are found on the Tyan S4882 (Thunder K8QS Pro) - motherboard, among others. + If you say yes here you get support for the National + Semiconductor LM63 and LM64 remote diode digital temperature + sensors with integrated fan control. Such chips are found + on the Tyan S4882 (Thunder K8QS Pro) motherboard, among + others. This driver can also be built as a module. If so, the module will be called lm63. @@ -492,7 +493,8 @@ config SENSORS_LM75 - NXP's LM75A - ST Microelectronics STDS75 - TelCom (now Microchip) TCN75 - - Texas Instruments TMP100, TMP101, TMP75, TMP175, TMP275 + - Texas Instruments TMP100, TMP101, TMP105, TMP75, TMP175, + TMP275 This driver supports driver model based binding through board specific I2C device tables. @@ -749,6 +751,16 @@ config SENSORS_DME1737 This driver can also be built as a module. If so, the module will be called dme1737. 
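The hid-roccat core introduced above keeps one circular buffer per device with a single writer and a private read pointer per open reader; when the writer laps a slow reader, that reader's pointer is pushed forward so it loses exactly the oldest event but sees the newer ones in order. The following self-contained sketch isolates that overflow policy (the kernel's locking and wait-queue wakeups are deliberately omitted):

#include <stdio.h>

#define CBUF_SIZE 16	/* power of two, as in ROCCAT_CBUF_SIZE */

struct cbuf {
	int slot[CBUF_SIZE];
	int end;		/* writer position */
};

struct reader {
	int start;		/* this reader's private position */
};

/* single writer: insert one event, bumping any reader we just lapped */
static void cbuf_write(struct cbuf *b, struct reader *rd, int n, int ev)
{
	b->slot[b->end] = ev;
	b->end = (b->end + 1) % CBUF_SIZE;
	for (int i = 0; i < n; i++)
		if (rd[i].start == b->end)	/* buffer now full for reader i */
			rd[i].start = (rd[i].start + 1) % CBUF_SIZE;
}

/* reader: returns 1 and stores an event, or 0 if nothing is pending */
static int cbuf_read(struct cbuf *b, struct reader *rd, int *ev)
{
	if (rd->start == b->end)
		return 0;
	*ev = b->slot[rd->start];
	rd->start = (rd->start + 1) % CBUF_SIZE;
	return 1;
}

int main(void)
{
	struct cbuf b = { .end = 0 };
	struct reader rd[2] = { { 0 }, { 0 } };	/* rd[0] fast, rd[1] slow */
	int ev;

	for (int i = 1; i <= 20; i++) {	/* push 20 events through 16 slots */
		cbuf_write(&b, rd, 2, i);
		cbuf_read(&b, &rd[0], &ev);	/* fast reader keeps up */
	}
	while (cbuf_read(&b, &rd[1], &ev))	/* slow reader drains at the end */
		printf("%d ", ev);	/* prints 6..20: events 1-5 were dropped */
	printf("\n");
	return 0;
}

Because only the oldest unread slots are sacrificed, a reader that falls behind resynchronizes automatically instead of stalling the writer or the other readers.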
+config SENSORS_EMC1403 + tristate "SMSC EMC1403 thermal sensor" + depends on I2C + help + If you say yes here you get support for the SMSC EMC1403 + temperature monitoring chip. + + Threshold values can be configured using sysfs. + Data from the different diodes are accessible via sysfs. + config SENSORS_SMSC47M1 tristate "SMSC LPC47M10x and compatibles" help @@ -802,6 +814,15 @@ config SENSORS_ADS7828 This driver can also be built as a module. If so, the module will be called ads7828. +config SENSORS_ADS7871 + tristate "Texas Instruments ADS7871 A/D converter" + depends on SPI + help + If you say yes here you get support for TI ADS7871 & ADS7870 + + This driver can also be built as a module. If so, the module + will be called ads7871. + config SENSORS_AMC6821 tristate "Texas Instruments AMC6821" depends on I2C && EXPERIMENTAL @@ -822,6 +843,16 @@ config SENSORS_THMC50 This driver can also be built as a module. If so, the module will be called thmc50. +config SENSORS_TMP102 + tristate "Texas Instruments TMP102" + depends on I2C && EXPERIMENTAL + help + If you say yes here you get support for Texas Instruments TMP102 + sensor chips. + + This driver can also be built as a module. If so, the module + will be called tmp102. + config SENSORS_TMP401 tristate "Texas Instruments TMP401 and compatibles" depends on I2C && EXPERIMENTAL diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 4aa1a3d112ad..2138ceb1a713 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o +obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o @@ -40,6 +41,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o obj-$(CONFIG_SENSORS_DME1737) += dme1737.o obj-$(CONFIG_SENSORS_DS1621) += ds1621.o +obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o obj-$(CONFIG_SENSORS_F71805F) += f71805f.o obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o obj-$(CONFIG_SENSORS_F75375S) += f75375s.o @@ -89,6 +91,7 @@ obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o obj-$(CONFIG_SENSORS_THMC50) += thmc50.o +obj-$(CONFIG_SENSORS_TMP102) += tmp102.o obj-$(CONFIG_SENSORS_TMP401) += tmp401.o obj-$(CONFIG_SENSORS_TMP421) += tmp421.o obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index 1644b92e7cc4..15c1a9616af3 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -36,6 +36,7 @@ #define ADM1031_REG_FAN_DIV(nr) (0x20 + (nr)) #define ADM1031_REG_PWM (0x22) #define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr)) +#define ADM1031_REG_FAN_FILTER (0x23) #define ADM1031_REG_TEMP_OFFSET(nr) (0x0d + (nr)) #define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4 * (nr)) @@ -61,6 +62,9 @@ #define ADM1031_CONF2_TACH2_ENABLE 0x08 #define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan)) +#define ADM1031_UPDATE_RATE_MASK 0x1c +#define ADM1031_UPDATE_RATE_SHIFT 2 + /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; @@ -75,6 +79,7 @@ struct adm1031_data { int chip_type; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ + unsigned int update_rate; /* In milliseconds */ /* The 
chan_select_table contains the possible configurations for * auto fan control. */ @@ -738,6 +743,57 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); +/* Update Rate */ +static const unsigned int update_rates[] = { + 16000, 8000, 4000, 2000, 1000, 500, 250, 125, +}; + +static ssize_t show_update_rate(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%u\n", data->update_rate); +} + +static ssize_t set_update_rate(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + unsigned long val; + int i, err; + u8 reg; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; + + /* find the nearest update rate from the table */ + for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) { + if (val >= update_rates[i]) + break; + } + /* if not found, we point to the last entry (lowest update rate) */ + + /* set the new update rate while preserving other settings */ + reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); + reg &= ~ADM1031_UPDATE_RATE_MASK; + reg |= i << ADM1031_UPDATE_RATE_SHIFT; + adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg); + + mutex_lock(&data->update_lock); + data->update_rate = update_rates[i]; + mutex_unlock(&data->update_lock); + + return count; +} + +static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate, + set_update_rate); + static struct attribute *adm1031_attributes[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, @@ -774,6 +830,7 @@ static struct attribute *adm1031_attributes[] = { &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr, + &dev_attr_update_rate.attr, &dev_attr_alarms.attr, NULL @@ -900,6 +957,7 @@ static void adm1031_init_client(struct i2c_client *client) { unsigned int read_val; unsigned int mask; + int i; struct adm1031_data *data = i2c_get_clientdata(client); mask = (ADM1031_CONF2_PWM1_ENABLE | ADM1031_CONF2_TACH1_ENABLE); @@ -919,18 +977,24 @@ static void adm1031_init_client(struct i2c_client *client) ADM1031_CONF1_MONITOR_ENABLE); } + /* Read the chip's update rate */ + mask = ADM1031_UPDATE_RATE_MASK; + read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); + i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT; + data->update_rate = update_rates[i]; } static struct adm1031_data *adm1031_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); + unsigned long next_update; int chan; mutex_lock(&data->update_lock); - if (time_after(jiffies, data->last_updated + HZ + HZ / 2) - || !data->valid) { + next_update = data->last_updated + msecs_to_jiffies(data->update_rate); + if (time_after(jiffies, next_update) || !data->valid) { dev_dbg(&client->dev, "Starting adm1031 update\n"); for (chan = 0; diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c new file mode 100644 index 000000000000..b300a2048af1 --- /dev/null +++ b/drivers/hwmon/ads7871.c @@ -0,0 +1,253 @@ +/* + * ads7871 - driver for TI ADS7871 A/D converter + * + * Copyright (c) 2010 Paul Thomas <pthomas8589@gmail.com> + * + * This program is distributed in the hope that it will 
be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * later as publishhed by the Free Software Foundation. + * + * You need to have something like this in struct spi_board_info + * { + * .modalias = "ads7871", + * .max_speed_hz = 2*1000*1000, + * .chip_select = 0, + * .bus_num = 1, + * }, + */ + +/*From figure 18 in the datasheet*/ +/*Register addresses*/ +#define REG_LS_BYTE 0 /*A/D Output Data, LS Byte*/ +#define REG_MS_BYTE 1 /*A/D Output Data, MS Byte*/ +#define REG_PGA_VALID 2 /*PGA Valid Register*/ +#define REG_AD_CONTROL 3 /*A/D Control Register*/ +#define REG_GAIN_MUX 4 /*Gain/Mux Register*/ +#define REG_IO_STATE 5 /*Digital I/O State Register*/ +#define REG_IO_CONTROL 6 /*Digital I/O Control Register*/ +#define REG_OSC_CONTROL 7 /*Rev/Oscillator Control Register*/ +#define REG_SER_CONTROL 24 /*Serial Interface Control Register*/ +#define REG_ID 31 /*ID Register*/ + +/*From figure 17 in the datasheet +* These bits get ORed with the address to form +* the instruction byte */ +/*Instruction Bit masks*/ +#define INST_MODE_bm (1<<7) +#define INST_READ_bm (1<<6) +#define INST_16BIT_bm (1<<5) + +/*From figure 18 in the datasheet*/ +/*bit masks for Rev/Oscillator Control Register*/ +#define MUX_CNV_bv 7 +#define MUX_CNV_bm (1<<MUX_CNV_bv) +#define MUX_M3_bm (1<<3) /*M3 selects single ended*/ +#define MUX_G_bv 4 /*allows for reg = (gain << MUX_G_bv) | ...*/ + +/*From figure 18 in the datasheet*/ +/*bit masks for Rev/Oscillator Control Register*/ +#define OSC_OSCR_bm (1<<5) +#define OSC_OSCE_bm (1<<4) +#define OSC_REFE_bm (1<<3) +#define OSC_BUFE_bm (1<<2) +#define OSC_R2V_bm (1<<1) +#define OSC_RBG_bm (1<<0) + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/spi/spi.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/mutex.h> +#include <linux/delay.h> + +#define DEVICE_NAME "ads7871" + +struct ads7871_data { + struct device *hwmon_dev; + struct mutex update_lock; +}; + +static int ads7871_read_reg8(struct spi_device *spi, int reg) +{ + int ret; + reg = reg | INST_READ_bm; + ret = spi_w8r8(spi, reg); + return ret; +} + +static int ads7871_read_reg16(struct spi_device *spi, int reg) +{ + int ret; + reg = reg | INST_READ_bm | INST_16BIT_bm; + ret = spi_w8r16(spi, reg); + return ret; +} + +static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val) +{ + u8 tmp[2] = {reg, val}; + return spi_write(spi, tmp, sizeof(tmp)); +} + +static ssize_t show_voltage(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct spi_device *spi = to_spi_device(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + int ret, val, i = 0; + uint8_t channel, mux_cnv; + + channel = attr->index; + /*TODO: add support for conversions + *other than single ended with a gain of 1*/ + /*MUX_M3_bm forces single ended*/ + /*This is also where the gain of the PGA would be set*/ + ads7871_write_reg8(spi, REG_GAIN_MUX, + (MUX_CNV_bm | MUX_M3_bm | channel)); + + ret = ads7871_read_reg8(spi, REG_GAIN_MUX); + mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv); + /*on 400MHz arm9 platform the conversion + *is already done when we do this test*/ + while ((i < 2) && mux_cnv) { + i++; + ret = ads7871_read_reg8(spi, REG_GAIN_MUX); + mux_cnv = ((ret 
& MUX_CNV_bm)>>MUX_CNV_bv); + msleep_interruptible(1); + } + + if (mux_cnv == 0) { + val = ads7871_read_reg16(spi, REG_LS_BYTE); + /*result in volts*10000 = (val/8192)*2.5*10000*/ + val = ((val>>2) * 25000) / 8192; + return sprintf(buf, "%d\n", val); + } else { + return -1; + } +} + +static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0); +static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1); +static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2); +static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 3); +static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 4); +static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5); +static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6); +static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7); + +static struct attribute *ads7871_attributes[] = { + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in3_input.dev_attr.attr, + &sensor_dev_attr_in4_input.dev_attr.attr, + &sensor_dev_attr_in5_input.dev_attr.attr, + &sensor_dev_attr_in6_input.dev_attr.attr, + &sensor_dev_attr_in7_input.dev_attr.attr, + NULL +}; + +static const struct attribute_group ads7871_group = { + .attrs = ads7871_attributes, +}; + +static int __devinit ads7871_probe(struct spi_device *spi) +{ + int status, ret, err = 0; + uint8_t val; + struct ads7871_data *pdata; + + dev_dbg(&spi->dev, "probe\n"); + + pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL); + if (!pdata) { + err = -ENOMEM; + goto exit; + } + + status = sysfs_create_group(&spi->dev.kobj, &ads7871_group); + if (status < 0) + goto error_free; + + pdata->hwmon_dev = hwmon_device_register(&spi->dev); + if (IS_ERR(pdata->hwmon_dev)) { + err = PTR_ERR(pdata->hwmon_dev); + goto error_remove; + } + + spi_set_drvdata(spi, pdata); + + /* Configure the SPI bus */ + spi->mode = (SPI_MODE_0); + spi->bits_per_word = 8; + spi_setup(spi); + + ads7871_write_reg8(spi, REG_SER_CONTROL, 0); + ads7871_write_reg8(spi, REG_AD_CONTROL, 0); + + val = (OSC_OSCR_bm | OSC_OSCE_bm | OSC_REFE_bm | OSC_BUFE_bm); + ads7871_write_reg8(spi, REG_OSC_CONTROL, val); + ret = ads7871_read_reg8(spi, REG_OSC_CONTROL); + + dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret); + /*because there is no other error checking on an SPI bus + we need to make sure we really have a chip*/ + if (val != ret) { + err = -ENODEV; + goto error_remove; + } + + return 0; + +error_remove: + sysfs_remove_group(&spi->dev.kobj, &ads7871_group); +error_free: + kfree(pdata); +exit: + return err; +} + +static int __devexit ads7871_remove(struct spi_device *spi) +{ + struct ads7871_data *pdata = spi_get_drvdata(spi); + + hwmon_device_unregister(pdata->hwmon_dev); + sysfs_remove_group(&spi->dev.kobj, &ads7871_group); + kfree(pdata); + return 0; +} + +static struct spi_driver ads7871_driver = { + .driver = { + .name = DEVICE_NAME, + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + + .probe = ads7871_probe, + .remove = __devexit_p(ads7871_remove), +}; + +static int __init ads7871_init(void) +{ + return spi_register_driver(&ads7871_driver); +} + +static void __exit ads7871_exit(void) +{ + spi_unregister_driver(&ads7871_driver); +} + +module_init(ads7871_init); +module_exit(ads7871_exit); + +MODULE_AUTHOR("Paul Thomas <pthomas8589@gmail.com>"); +MODULE_DESCRIPTION("TI ADS7871 A/D driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/applesmc.c 
b/drivers/hwmon/applesmc.c index f085c18d2905..b6598aa557a0 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -148,6 +148,20 @@ static const char *temperature_sensors_sets[][41] = { /* Set 18: MacBook Pro 2,2 */ { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0", "Th0H", "Th1H", "Tm0P", "Ts0P", NULL }, +/* Set 19: Macbook Pro 5,3 */ + { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D", + "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H", + "Tm0P", "Ts0P", "Ts0S", NULL }, +/* Set 20: MacBook Pro 5,4 */ + { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D", + "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL }, +/* Set 21: MacBook Pro 6,2 */ + { "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D", + "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P", + "Ts0P", "Ts0S", NULL }, +/* Set 22: MacBook Pro 7,1 */ + { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S", + "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL }, }; /* List of keys used to read/write fan speeds */ @@ -646,6 +660,17 @@ out: return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right); } +/* Displays sensor key as label */ +static ssize_t applesmc_show_sensor_label(struct device *dev, + struct device_attribute *devattr, char *sysfsbuf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + const char *key = + temperature_sensors_sets[applesmc_temperature_set][attr->index]; + + return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key); +} + /* Displays degree Celsius * 1000 */ static ssize_t applesmc_show_temperature(struct device *dev, struct device_attribute *devattr, char *sysfsbuf) @@ -1113,6 +1138,86 @@ static const struct attribute_group fan_attribute_groups[] = { /* * Temperature sensors sysfs entries. 
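The new applesmc_show_sensor_label() above does nothing more than index the per-model key table, so each temp<N>_label file in sysfs reports the raw SMC key (for example "TC0D") whose reading appears in the matching temp<N>_input file. A stripped-down sketch of that table-driven mapping follows; the two model rows are invented for illustration and do not correspond to a specific Mac.

#include <stdio.h>

/* hypothetical per-model key sets, NULL-terminated as in applesmc */
static const char *sensor_sets[][5] = {
	{ "TB0T", "TC0D", "TC0P", NULL },		/* model 0 */
	{ "TC0D", "TN0D", "Ts0P", "Th1H", NULL },	/* model 1 */
};

static int active_set = 1;	/* selected at probe time via DMI matching */

/* analogue of applesmc_show_sensor_label(): attribute index -> key name */
static const char *sensor_label(int index)
{
	return sensor_sets[active_set][index];
}

int main(void)
{
	for (int i = 0; sensor_sets[active_set][i]; i++)
		printf("temp%d_label = %s\n", i + 1, sensor_label(i));
	return 0;
}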
*/ +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 5); +static SENSOR_DEVICE_ATTR(temp7_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 6); +static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 7); +static SENSOR_DEVICE_ATTR(temp9_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 8); +static SENSOR_DEVICE_ATTR(temp10_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 9); +static SENSOR_DEVICE_ATTR(temp11_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 10); +static SENSOR_DEVICE_ATTR(temp12_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 11); +static SENSOR_DEVICE_ATTR(temp13_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 12); +static SENSOR_DEVICE_ATTR(temp14_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 13); +static SENSOR_DEVICE_ATTR(temp15_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 14); +static SENSOR_DEVICE_ATTR(temp16_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 15); +static SENSOR_DEVICE_ATTR(temp17_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 16); +static SENSOR_DEVICE_ATTR(temp18_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 17); +static SENSOR_DEVICE_ATTR(temp19_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 18); +static SENSOR_DEVICE_ATTR(temp20_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 19); +static SENSOR_DEVICE_ATTR(temp21_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 20); +static SENSOR_DEVICE_ATTR(temp22_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 21); +static SENSOR_DEVICE_ATTR(temp23_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 22); +static SENSOR_DEVICE_ATTR(temp24_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 23); +static SENSOR_DEVICE_ATTR(temp25_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 24); +static SENSOR_DEVICE_ATTR(temp26_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 25); +static SENSOR_DEVICE_ATTR(temp27_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 26); +static SENSOR_DEVICE_ATTR(temp28_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 27); +static SENSOR_DEVICE_ATTR(temp29_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 28); +static SENSOR_DEVICE_ATTR(temp30_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 29); +static SENSOR_DEVICE_ATTR(temp31_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 30); +static SENSOR_DEVICE_ATTR(temp32_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 31); +static SENSOR_DEVICE_ATTR(temp33_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 32); +static SENSOR_DEVICE_ATTR(temp34_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 33); +static SENSOR_DEVICE_ATTR(temp35_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 34); +static SENSOR_DEVICE_ATTR(temp36_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 35); +static SENSOR_DEVICE_ATTR(temp37_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 36); +static SENSOR_DEVICE_ATTR(temp38_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 37); +static SENSOR_DEVICE_ATTR(temp39_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 38); +static 
SENSOR_DEVICE_ATTR(temp40_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 39); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, applesmc_show_temperature, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, @@ -1194,6 +1299,50 @@ static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO, static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO, applesmc_show_temperature, NULL, 39); +static struct attribute *label_attributes[] = { + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp2_label.dev_attr.attr, + &sensor_dev_attr_temp3_label.dev_attr.attr, + &sensor_dev_attr_temp4_label.dev_attr.attr, + &sensor_dev_attr_temp5_label.dev_attr.attr, + &sensor_dev_attr_temp6_label.dev_attr.attr, + &sensor_dev_attr_temp7_label.dev_attr.attr, + &sensor_dev_attr_temp8_label.dev_attr.attr, + &sensor_dev_attr_temp9_label.dev_attr.attr, + &sensor_dev_attr_temp10_label.dev_attr.attr, + &sensor_dev_attr_temp11_label.dev_attr.attr, + &sensor_dev_attr_temp12_label.dev_attr.attr, + &sensor_dev_attr_temp13_label.dev_attr.attr, + &sensor_dev_attr_temp14_label.dev_attr.attr, + &sensor_dev_attr_temp15_label.dev_attr.attr, + &sensor_dev_attr_temp16_label.dev_attr.attr, + &sensor_dev_attr_temp17_label.dev_attr.attr, + &sensor_dev_attr_temp18_label.dev_attr.attr, + &sensor_dev_attr_temp19_label.dev_attr.attr, + &sensor_dev_attr_temp20_label.dev_attr.attr, + &sensor_dev_attr_temp21_label.dev_attr.attr, + &sensor_dev_attr_temp22_label.dev_attr.attr, + &sensor_dev_attr_temp23_label.dev_attr.attr, + &sensor_dev_attr_temp24_label.dev_attr.attr, + &sensor_dev_attr_temp25_label.dev_attr.attr, + &sensor_dev_attr_temp26_label.dev_attr.attr, + &sensor_dev_attr_temp27_label.dev_attr.attr, + &sensor_dev_attr_temp28_label.dev_attr.attr, + &sensor_dev_attr_temp29_label.dev_attr.attr, + &sensor_dev_attr_temp30_label.dev_attr.attr, + &sensor_dev_attr_temp31_label.dev_attr.attr, + &sensor_dev_attr_temp32_label.dev_attr.attr, + &sensor_dev_attr_temp33_label.dev_attr.attr, + &sensor_dev_attr_temp34_label.dev_attr.attr, + &sensor_dev_attr_temp35_label.dev_attr.attr, + &sensor_dev_attr_temp36_label.dev_attr.attr, + &sensor_dev_attr_temp37_label.dev_attr.attr, + &sensor_dev_attr_temp38_label.dev_attr.attr, + &sensor_dev_attr_temp39_label.dev_attr.attr, + &sensor_dev_attr_temp40_label.dev_attr.attr, + NULL +}; + static struct attribute *temperature_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, @@ -1241,6 +1390,10 @@ static struct attribute *temperature_attributes[] = { static const struct attribute_group temperature_attributes_group = { .attrs = temperature_attributes }; +static const struct attribute_group label_attributes_group = { + .attrs = label_attributes +}; + /* Module stuff */ /* @@ -1363,6 +1516,14 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = { { .accelerometer = 0, .light = 0, .temperature_set = 17 }, /* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */ { .accelerometer = 1, .light = 1, .temperature_set = 18 }, +/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */ + { .accelerometer = 1, .light = 1, .temperature_set = 19 }, +/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */ + { .accelerometer = 1, .light = 1, .temperature_set = 20 }, +/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */ + { .accelerometer = 1, .light = 1, .temperature_set = 21 }, +/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */ + { .accelerometer = 1, .light = 1, .temperature_set = 
22 }, }; /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". @@ -1376,6 +1537,22 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, &applesmc_dmi_data[7]}, + { applesmc_dmi_match, "Apple MacBook Pro 7", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") }, + &applesmc_dmi_data[22]}, + { applesmc_dmi_match, "Apple MacBook Pro 5,4", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") }, + &applesmc_dmi_data[20]}, + { applesmc_dmi_match, "Apple MacBook Pro 5,3", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") }, + &applesmc_dmi_data[19]}, + { applesmc_dmi_match, "Apple MacBook Pro 6", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") }, + &applesmc_dmi_data[21]}, { applesmc_dmi_match, "Apple MacBook Pro 5", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") }, @@ -1518,7 +1695,8 @@ static int __init applesmc_init(void) for (i = 0; temperature_sensors_sets[applesmc_temperature_set][i] != NULL; i++) { - if (temperature_attributes[i] == NULL) { + if (temperature_attributes[i] == NULL || + label_attributes[i] == NULL) { printk(KERN_ERR "applesmc: More temperature sensors " "in temperature_sensors_sets (at least %i)" "than available sysfs files in " @@ -1530,6 +1708,10 @@ static int __init applesmc_init(void) temperature_attributes[i]); if (ret) goto out_temperature; + ret = sysfs_create_file(&pdev->dev.kobj, + label_attributes[i]); + if (ret) + goto out_temperature; } if (applesmc_accelerometer) { @@ -1580,6 +1762,7 @@ out_accelerometer: if (applesmc_accelerometer) applesmc_release_accelerometer(); out_temperature: + sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group); sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); out_fans: while (fans_handled) @@ -1609,6 +1792,7 @@ static void __exit applesmc_exit(void) } if (applesmc_accelerometer) applesmc_release_accelerometer(); + sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group); sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); while (fans_handled) sysfs_remove_group(&pdev->dev.kobj, diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 16c420240724..653db1bda934 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -1411,6 +1411,13 @@ static int __init atk0110_init(void) { int ret; + /* Make sure it's safe to access the device through ACPI */ + if (!acpi_resources_are_enforced()) { + pr_err("atk: Resources not safely usable due to " + "acpi_enforce_resources kernel parameter\n"); + return -EBUSY; + } + ret = acpi_bus_register_driver(&atk_driver); if (ret) pr_info("atk: acpi_bus_register_driver failed: %d\n", ret); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index e9b7fbc5a447..2988da150ed6 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -241,6 +241,55 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device * return tjmax; } +static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id, + struct device *dev) +{ + /* The 100C is default for both mobile and non mobile CPUs */ + int err; + u32 eax, edx; + u32 val; + + /* A new feature of current Intel(R) processors, the + IA32_TEMPERATURE_TARGET contains the TjMax value */ + err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, 
&edx); + if (err) { + dev_warn(dev, "Unable to read TjMax from CPU.\n"); + } else { + val = (eax >> 16) & 0xff; + /* + * If the TjMax is not plausible, an assumption + * will be used + */ + if ((val > 80) && (val < 120)) { + dev_info(dev, "TjMax is %d C.\n", val); + return val * 1000; + } + } + + /* + * An assumption is made for early CPUs and unreadable MSR. + * NOTE: the given value may not be correct. + */ + + switch (c->x86_model) { + case 0xe: + case 0xf: + case 0x16: + case 0x1a: + dev_warn(dev, "TjMax is assumed as 100 C!\n"); + return 100000; + break; + case 0x17: + case 0x1c: /* Atom CPUs */ + return adjust_tjmax(c, id, dev); + break; + default: + dev_warn(dev, "CPU (model=0x%x) is not supported yet," + " using default TjMax of 100C.\n", c->x86_model); + return 100000; + } +} + static int __devinit coretemp_probe(struct platform_device *pdev) { struct coretemp_data *data; @@ -283,14 +332,18 @@ static int __devinit coretemp_probe(struct platform_device *pdev) } } - data->tjmax = adjust_tjmax(c, data->id, &pdev->dev); + data->tjmax = get_tjmax(c, data->id, &pdev->dev); platform_set_drvdata(pdev, data); - /* read the still undocumented IA32_TEMPERATURE_TARGET it exists - on older CPUs but not in this register, Atoms don't have it either */ + /* + * read the still undocumented IA32_TEMPERATURE_TARGET. It exists + * on older CPUs but not in this register, + * Atoms don't have it either. + */ if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) { - err = rdmsr_safe_on_cpu(data->id, 0x1a2, &eax, &edx); + err = rdmsr_safe_on_cpu(data->id, MSR_IA32_TEMPERATURE_TARGET, + &eax, &edx); if (err) { dev_warn(&pdev->dev, "Unable to read" " IA32_TEMPERATURE_TARGET MSR\n"); @@ -451,28 +504,20 @@ static int __init coretemp_init(void) for_each_online_cpu(i) { struct cpuinfo_x86 *c = &cpu_data(i); + /* + * CPUID.06H.EAX[0] indicates whether the CPU has thermal + * sensors. We check this bit only, all the early CPUs + * without thermal sensors will be filtered out. + */ + if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) { + err = coretemp_device_add(i); + if (err) + goto exit_devices_unreg; - /* check if family 6, models 0xe (Pentium M DC), - 0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm), - 0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom), - 0x1e (Lynnfield) */ - if ((c->cpuid_level < 0) || (c->x86 != 0x6) || - !((c->x86_model == 0xe) || (c->x86_model == 0xf) || - (c->x86_model == 0x16) || (c->x86_model == 0x17) || - (c->x86_model == 0x1a) || (c->x86_model == 0x1c) || - (c->x86_model == 0x1e))) { - - /* supported CPU not found, but report the unknown - family 6 CPU */ - if ((c->x86 == 0x6) && (c->x86_model > 0xf)) - printk(KERN_WARNING DRVNAME ": Unknown CPU " - "model 0x%x\n", c->x86_model); - continue; + } else { + printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" + " has no thermal sensor.\n", c->x86_model); } - - err = coretemp_device_add(i); - if (err) - goto exit_devices_unreg; } if (list_empty(&pdev_list)) { err = -ENODEV; diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index 823dd28a902c..980c17d5eeae 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -1,12 +1,14 @@ /* - * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x and - * SCH5027 Super-I/O chips integrated hardware monitoring features. - * Copyright (c) 2007, 2008 Juerg Haefliger <juergh@gmail.com> + * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x, SCH5027, + * and SCH5127 Super-I/O chips integrated hardware monitoring + * features. 
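The get_tjmax() logic added to coretemp above is worth a worked example: the target temperature sits in bits 23:16 of the low word of MSR_IA32_TEMPERATURE_TARGET, and anything outside the 80-120 degree plausibility window falls back to an assumed default. The sketch below is a hedged, userspace-only restatement of that extraction; the model-specific adjust_tjmax() branches are omitted, and the 100 C fallback is, as the driver itself warns, only an assumption.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the core of get_tjmax(): derive TjMax in millidegrees C from
 * the low 32 bits of MSR_IA32_TEMPERATURE_TARGET, or fall back to the
 * 100 C assumption when the MSR is unreadable or implausible. */
static int tjmax_from_msr(uint32_t eax, int msr_ok)
{
	if (msr_ok) {
		uint32_t val = (eax >> 16) & 0xff;	/* bits 23:16 */
		if (val > 80 && val < 120)
			return (int)val * 1000;
	}
	return 100000;	/* assumed default; may be wrong for some models */
}

int main(void)
{
	printf("%d\n", tjmax_from_msr(0x00630000, 1));	/* 0x63 = 99 -> 99000 */
	printf("%d\n", tjmax_from_msr(0x00200000, 1));	/* 32 C implausible */
	printf("%d\n", tjmax_from_msr(0, 0));		/* MSR unreadable */
	return 0;
}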
+ * Copyright (c) 2007, 2008, 2009, 2010 Juerg Haefliger <juergh@gmail.com> * * This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access * the chip registers if a DME1737, A8000, or SCH5027 is found and the ISA bus - * if a SCH311x chip is found. Both types of chips have very similar hardware - * monitoring capabilities but differ in the way they can be accessed. + * if a SCH311x or SCH5127 chip is found. Both types of chips have very + * similar hardware monitoring capabilities but differ in the way they can be + * accessed. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -57,7 +59,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC " /* Addresses to scan */ static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END}; -enum chips { dme1737, sch5027, sch311x }; +enum chips { dme1737, sch5027, sch311x, sch5127 }; /* --------------------------------------------------------------------- * Registers @@ -164,10 +166,29 @@ static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23}; #define DME1737_VERSTEP_MASK 0xf8 #define SCH311X_DEVICE 0x8c #define SCH5027_VERSTEP 0x69 +#define SCH5127_DEVICE 0x8e + +/* Device ID values (global configuration register index 0x20) */ +#define DME1737_ID_1 0x77 +#define DME1737_ID_2 0x78 +#define SCH3112_ID 0x7c +#define SCH3114_ID 0x7d +#define SCH3116_ID 0x7f +#define SCH5027_ID 0x89 +#define SCH5127_ID 0x86 /* Length of ISA address segment */ #define DME1737_EXTENT 2 +/* chip-dependent features */ +#define HAS_TEMP_OFFSET (1 << 0) /* bit 0 */ +#define HAS_VID (1 << 1) /* bit 1 */ +#define HAS_ZONE3 (1 << 2) /* bit 2 */ +#define HAS_ZONE_HYST (1 << 3) /* bit 3 */ +#define HAS_PWM_MIN (1 << 4) /* bit 4 */ +#define HAS_FAN(ix) (1 << ((ix) + 5)) /* bits 5-10 */ +#define HAS_PWM(ix) (1 << ((ix) + 11)) /* bits 11-16 */ + /* --------------------------------------------------------------------- * Data structures and manipulation thereof * --------------------------------------------------------------------- */ @@ -187,8 +208,7 @@ struct dme1737_data { u8 vid; u8 pwm_rr_en; - u8 has_pwm; - u8 has_fan; + u32 has_features; /* Register values */ u16 in[7]; @@ -224,8 +244,11 @@ static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300, 3300}; static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300, 3300}; +static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300, + 3300}; #define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \ (type) == sch5027 ? IN_NOMINAL_SCH5027 : \ + (type) == sch5127 ? 
IN_NOMINAL_SCH5127 : \ IN_NOMINAL_DME1737) /* Voltage input @@ -568,7 +591,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) /* Sample register contents every 1 sec */ if (time_after(jiffies, data->last_update + HZ) || !data->valid) { - if (data->type == dme1737) { + if (data->has_features & HAS_VID) { data->vid = dme1737_read(data, DME1737_REG_VID) & 0x3f; } @@ -599,7 +622,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) DME1737_REG_TEMP_MIN(ix)); data->temp_max[ix] = dme1737_read(data, DME1737_REG_TEMP_MAX(ix)); - if (data->type != sch5027) { + if (data->has_features & HAS_TEMP_OFFSET) { data->temp_offset[ix] = dme1737_read(data, DME1737_REG_TEMP_OFFSET(ix)); } @@ -626,7 +649,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) { /* Skip reading registers if optional fans are not * present */ - if (!(data->has_fan & (1 << ix))) { + if (!(data->has_features & HAS_FAN(ix))) { continue; } data->fan[ix] = dme1737_read(data, @@ -650,7 +673,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) for (ix = 0; ix < ARRAY_SIZE(data->pwm); ix++) { /* Skip reading registers if optional PWMs are not * present */ - if (!(data->has_pwm & (1 << ix))) { + if (!(data->has_features & HAS_PWM(ix))) { continue; } data->pwm[ix] = dme1737_read(data, @@ -672,12 +695,24 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) /* Thermal zone registers */ for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) { - data->zone_low[ix] = dme1737_read(data, - DME1737_REG_ZONE_LOW(ix)); - data->zone_abs[ix] = dme1737_read(data, - DME1737_REG_ZONE_ABS(ix)); + /* Skip reading registers if zone3 is not present */ + if ((ix == 2) && !(data->has_features & HAS_ZONE3)) { + continue; + } + /* sch5127 zone2 registers are special */ + if ((ix == 1) && (data->type == sch5127)) { + data->zone_low[1] = dme1737_read(data, + DME1737_REG_ZONE_LOW(2)); + data->zone_abs[1] = dme1737_read(data, + DME1737_REG_ZONE_ABS(2)); + } else { + data->zone_low[ix] = dme1737_read(data, + DME1737_REG_ZONE_LOW(ix)); + data->zone_abs[ix] = dme1737_read(data, + DME1737_REG_ZONE_ABS(ix)); + } } - if (data->type != sch5027) { + if (data->has_features & HAS_ZONE_HYST) { for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) { data->zone_hyst[ix] = dme1737_read(data, DME1737_REG_ZONE_HYST(ix)); @@ -1594,10 +1629,6 @@ static struct attribute *dme1737_attr[] ={ &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_channels_temp.dev_attr.attr, - &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr, - &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr, - &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr, - &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr, NULL }; @@ -1605,27 +1636,23 @@ static const struct attribute_group dme1737_group = { .attrs = dme1737_attr, }; -/* The following struct holds misc attributes, which are not available in all - * chips. Their creation depends on the chip type which is determined during - * module load. */ -static struct attribute *dme1737_misc_attr[] = { - /* Temperatures */ +/* The following struct holds temp offset attributes, which are not available + * in all chips. 
The following chips support them: + * DME1737, SCH311x */ +static struct attribute *dme1737_temp_offset_attr[] = { &sensor_dev_attr_temp1_offset.dev_attr.attr, &sensor_dev_attr_temp2_offset.dev_attr.attr, &sensor_dev_attr_temp3_offset.dev_attr.attr, - /* Zones */ - &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr, - &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr, - &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr, NULL }; -static const struct attribute_group dme1737_misc_group = { - .attrs = dme1737_misc_attr, +static const struct attribute_group dme1737_temp_offset_group = { + .attrs = dme1737_temp_offset_attr, }; -/* The following struct holds VID-related attributes. Their creation - depends on the chip type which is determined during module load. */ +/* The following struct holds VID related attributes, which are not available + * in all chips. The following chips support them: + * DME1737 */ static struct attribute *dme1737_vid_attr[] = { &dev_attr_vrm.attr, &dev_attr_cpu0_vid.attr, @@ -1636,6 +1663,36 @@ static const struct attribute_group dme1737_vid_group = { .attrs = dme1737_vid_attr, }; +/* The following struct holds temp zone 3 related attributes, which are not + * available in all chips. The following chips support them: + * DME1737, SCH311x, SCH5027 */ +static struct attribute *dme1737_zone3_attr[] = { + &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr, + &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr, + &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr, + &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr, + NULL +}; + +static const struct attribute_group dme1737_zone3_group = { + .attrs = dme1737_zone3_attr, +}; + + +/* The following struct holds temp zone hysteresis related attributes, which + * are not available in all chips. The following chips support them: + * DME1737, SCH311x */ +static struct attribute *dme1737_zone_hyst_attr[] = { + &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr, + &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr, + &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr, + NULL +}; + +static const struct attribute_group dme1737_zone_hyst_group = { + .attrs = dme1737_zone_hyst_attr, +}; + /* The following structs hold the PWM attributes, some of which are optional. * Their creation depends on the chip configuration which is determined during * module load. */ @@ -1691,10 +1748,10 @@ static const struct attribute_group dme1737_pwm_group[] = { { .attrs = dme1737_pwm6_attr }, }; -/* The following struct holds misc PWM attributes, which are not available in - * all chips. Their creation depends on the chip type which is determined +/* The following struct holds auto PWM min attributes, which are not available + * in all chips. Their creation depends on the chip type which is determined * during module load. 
*/ -static struct attribute *dme1737_pwm_misc_attr[] = { +static struct attribute *dme1737_auto_pwm_min_attr[] = { &sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr, @@ -1764,14 +1821,25 @@ static struct attribute *dme1737_zone_chmod_attr[] = { &sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr, + NULL +}; + +static const struct attribute_group dme1737_zone_chmod_group = { + .attrs = dme1737_zone_chmod_attr, +}; + + +/* The permissions of the following zone 3 attributes are changed to read- + * writeable if the chip is *not* locked. Otherwise they stay read-only. */ +static struct attribute *dme1737_zone3_chmod_attr[] = { &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr, NULL }; -static const struct attribute_group dme1737_zone_chmod_group = { - .attrs = dme1737_zone_chmod_attr, +static const struct attribute_group dme1737_zone3_chmod_group = { + .attrs = dme1737_zone3_chmod_attr, }; /* The permissions of the following PWM attributes are changed to read- @@ -1887,30 +1955,35 @@ static void dme1737_remove_files(struct device *dev) int ix; for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { - if (data->has_fan & (1 << ix)) { + if (data->has_features & HAS_FAN(ix)) { sysfs_remove_group(&dev->kobj, &dme1737_fan_group[ix]); } } for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) { - if (data->has_pwm & (1 << ix)) { + if (data->has_features & HAS_PWM(ix)) { sysfs_remove_group(&dev->kobj, &dme1737_pwm_group[ix]); - if (data->type != sch5027 && ix < 3) { + if ((data->has_features & HAS_PWM_MIN) && ix < 3) { sysfs_remove_file(&dev->kobj, - dme1737_pwm_misc_attr[ix]); + dme1737_auto_pwm_min_attr[ix]); } } } - if (data->type != sch5027) { - sysfs_remove_group(&dev->kobj, &dme1737_misc_group); + if (data->has_features & HAS_TEMP_OFFSET) { + sysfs_remove_group(&dev->kobj, &dme1737_temp_offset_group); } - if (data->type == dme1737) { + if (data->has_features & HAS_VID) { sysfs_remove_group(&dev->kobj, &dme1737_vid_group); } - + if (data->has_features & HAS_ZONE3) { + sysfs_remove_group(&dev->kobj, &dme1737_zone3_group); + } + if (data->has_features & HAS_ZONE_HYST) { + sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group); + } sysfs_remove_group(&dev->kobj, &dme1737_group); if (!data->client) { @@ -1934,23 +2007,31 @@ static int dme1737_create_files(struct device *dev) goto exit_remove; } - /* Create misc sysfs attributes */ - if ((data->type != sch5027) && + /* Create chip-dependent sysfs attributes */ + if ((data->has_features & HAS_TEMP_OFFSET) && (err = sysfs_create_group(&dev->kobj, - &dme1737_misc_group))) { + &dme1737_temp_offset_group))) { goto exit_remove; } - - /* Create VID-related sysfs attributes */ - if ((data->type == dme1737) && + if ((data->has_features & HAS_VID) && (err = sysfs_create_group(&dev->kobj, &dme1737_vid_group))) { goto exit_remove; } + if ((data->has_features & HAS_ZONE3) && + (err = sysfs_create_group(&dev->kobj, + &dme1737_zone3_group))) { + goto exit_remove; + } + if ((data->has_features & HAS_ZONE_HYST) && + (err = sysfs_create_group(&dev->kobj, + &dme1737_zone_hyst_group))) { + goto exit_remove; + } /* Create fan sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { - if (data->has_fan & (1 << ix)) { + if 
(data->has_features & HAS_FAN(ix)) { if ((err = sysfs_create_group(&dev->kobj, &dme1737_fan_group[ix]))) { goto exit_remove; @@ -1960,14 +2041,14 @@ static int dme1737_create_files(struct device *dev) /* Create PWM sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) { - if (data->has_pwm & (1 << ix)) { + if (data->has_features & HAS_PWM(ix)) { if ((err = sysfs_create_group(&dev->kobj, &dme1737_pwm_group[ix]))) { goto exit_remove; } - if (data->type != sch5027 && ix < 3 && + if ((data->has_features & HAS_PWM_MIN) && ix < 3 && (err = sysfs_create_file(&dev->kobj, - dme1737_pwm_misc_attr[ix]))) { + dme1737_auto_pwm_min_attr[ix]))) { goto exit_remove; } } @@ -1983,21 +2064,30 @@ static int dme1737_create_files(struct device *dev) dme1737_chmod_group(dev, &dme1737_zone_chmod_group, S_IRUGO | S_IWUSR); - /* Change permissions of misc sysfs attributes */ - if (data->type != sch5027) { - dme1737_chmod_group(dev, &dme1737_misc_group, + /* Change permissions of chip-dependent sysfs attributes */ + if (data->has_features & HAS_TEMP_OFFSET) { + dme1737_chmod_group(dev, &dme1737_temp_offset_group, + S_IRUGO | S_IWUSR); + } + if (data->has_features & HAS_ZONE3) { + dme1737_chmod_group(dev, &dme1737_zone3_chmod_group, + S_IRUGO | S_IWUSR); + } + if (data->has_features & HAS_ZONE_HYST) { + dme1737_chmod_group(dev, &dme1737_zone_hyst_group, S_IRUGO | S_IWUSR); } /* Change permissions of PWM sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_chmod_group); ix++) { - if (data->has_pwm & (1 << ix)) { + if (data->has_features & HAS_PWM(ix)) { dme1737_chmod_group(dev, &dme1737_pwm_chmod_group[ix], S_IRUGO | S_IWUSR); - if (data->type != sch5027 && ix < 3) { + if ((data->has_features & HAS_PWM_MIN) && + ix < 3) { dme1737_chmod_file(dev, - dme1737_pwm_misc_attr[ix], + dme1737_auto_pwm_min_attr[ix], S_IRUGO | S_IWUSR); } } @@ -2005,7 +2095,7 @@ static int dme1737_create_files(struct device *dev) /* Change permissions of pwm[1-3] if in manual mode */ for (ix = 0; ix < 3; ix++) { - if ((data->has_pwm & (1 << ix)) && + if ((data->has_features & HAS_PWM(ix)) && (PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) { dme1737_chmod_file(dev, dme1737_pwm_chmod_attr[ix], @@ -2052,20 +2142,20 @@ static int dme1737_init_device(struct device *dev) return -EFAULT; } - /* Determine which optional fan and pwm features are enabled/present */ + /* Determine which optional fan and pwm features are enabled (only + * valid for I2C devices) */ if (client) { /* I2C chip */ data->config2 = dme1737_read(data, DME1737_REG_CONFIG2); /* Check if optional fan3 input is enabled */ if (data->config2 & 0x04) { - data->has_fan |= (1 << 2); + data->has_features |= HAS_FAN(2); } /* Fan4 and pwm3 are only available if the client's I2C address * is the default 0x2e. Otherwise the I/Os associated with * these functions are used for addr enable/select. */ if (client->addr == 0x2e) { - data->has_fan |= (1 << 3); - data->has_pwm |= (1 << 2); + data->has_features |= HAS_FAN(3) | HAS_PWM(2); } /* Determine which of the optional fan[5-6] and pwm[5-6] @@ -2077,26 +2167,40 @@ static int dme1737_init_device(struct device *dev) dev_warn(dev, "Failed to query Super-IO for optional " "features.\n"); } - } else { /* ISA chip */ - /* Fan3 and pwm3 are always available. Fan[4-5] and pwm[5-6] - * don't exist in the ISA chip. 
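The dme1737 rework running through this hunk folds the old has_fan/has_pwm bytes and the scattered data->type checks into a single has_features word tested with the HAS_* masks. The pattern in isolation, using the bit layout and the per-chip feature sets exactly as this patch defines them:

#include <stdio.h>
#include <stdint.h>

/* bit layout as defined in dme1737.c */
#define HAS_TEMP_OFFSET	(1 << 0)
#define HAS_VID		(1 << 1)
#define HAS_ZONE3	(1 << 2)
#define HAS_ZONE_HYST	(1 << 3)
#define HAS_PWM_MIN	(1 << 4)
#define HAS_FAN(ix)	(1 << ((ix) + 5))	/* bits 5-10 */
#define HAS_PWM(ix)	(1 << ((ix) + 11))	/* bits 11-16 */

enum chips { dme1737, sch5027, sch311x, sch5127 };

static uint32_t chip_features(enum chips type)
{
	/* fan[1-2] and pwm[1-2] are present in all chips */
	uint32_t f = HAS_FAN(0) | HAS_FAN(1) | HAS_PWM(0) | HAS_PWM(1);

	switch (type) {
	case dme1737:
		f |= HAS_TEMP_OFFSET | HAS_VID | HAS_ZONE3 |
		     HAS_ZONE_HYST | HAS_PWM_MIN;
		break;
	case sch311x:
		f |= HAS_TEMP_OFFSET | HAS_ZONE3 | HAS_ZONE_HYST |
		     HAS_PWM_MIN | HAS_FAN(2) | HAS_PWM(2);
		break;
	case sch5027:
		f |= HAS_ZONE3;
		break;
	case sch5127:
		f |= HAS_FAN(2) | HAS_PWM(2);
		break;
	}
	return f;
}

int main(void)
{
	uint32_t f = chip_features(sch5127);

	for (int ix = 0; ix < 6; ix++)
		printf("fan%d: %s\n", ix + 1,
		       (f & HAS_FAN(ix)) ? "yes" : "no");
	printf("vid: %s\n", (f & HAS_VID) ? "yes" : "no");
	return 0;
}

Collapsing everything into one word means each sysfs-creation and removal site needs only a mask test, and adding the SCH5127 reduces to one new case in chip_features() rather than another round of type comparisons.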
*/ - data->has_fan |= (1 << 2); - data->has_pwm |= (1 << 2); } - /* Fan1, fan2, pwm1, and pwm2 are always present */ - data->has_fan |= 0x03; - data->has_pwm |= 0x03; + /* Fan[1-2] and pwm[1-2] are present in all chips */ + data->has_features |= HAS_FAN(0) | HAS_FAN(1) | HAS_PWM(0) | HAS_PWM(1); + + /* Chip-dependent features */ + switch (data->type) { + case dme1737: + data->has_features |= HAS_TEMP_OFFSET | HAS_VID | HAS_ZONE3 | + HAS_ZONE_HYST | HAS_PWM_MIN; + break; + case sch311x: + data->has_features |= HAS_TEMP_OFFSET | HAS_ZONE3 | + HAS_ZONE_HYST | HAS_PWM_MIN | HAS_FAN(2) | HAS_PWM(2); + break; + case sch5027: + data->has_features |= HAS_ZONE3; + break; + case sch5127: + data->has_features |= HAS_FAN(2) | HAS_PWM(2); + break; + default: + break; + } dev_info(dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, " "fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n", - (data->has_pwm & (1 << 2)) ? "yes" : "no", - (data->has_pwm & (1 << 4)) ? "yes" : "no", - (data->has_pwm & (1 << 5)) ? "yes" : "no", - (data->has_fan & (1 << 2)) ? "yes" : "no", - (data->has_fan & (1 << 3)) ? "yes" : "no", - (data->has_fan & (1 << 4)) ? "yes" : "no", - (data->has_fan & (1 << 5)) ? "yes" : "no"); + (data->has_features & HAS_PWM(2)) ? "yes" : "no", + (data->has_features & HAS_PWM(4)) ? "yes" : "no", + (data->has_features & HAS_PWM(5)) ? "yes" : "no", + (data->has_features & HAS_FAN(2)) ? "yes" : "no", + (data->has_features & HAS_FAN(3)) ? "yes" : "no", + (data->has_features & HAS_FAN(4)) ? "yes" : "no", + (data->has_features & HAS_FAN(5)) ? "yes" : "no"); reg = dme1737_read(data, DME1737_REG_TACH_PWM); /* Inform if fan-to-pwm mapping differs from the default */ @@ -2122,7 +2226,7 @@ static int dme1737_init_device(struct device *dev) for (ix = 0; ix < 3; ix++) { data->pwm_config[ix] = dme1737_read(data, DME1737_REG_PWM_CONFIG(ix)); - if ((data->has_pwm & (1 << ix)) && + if ((data->has_features & HAS_PWM(ix)) && (PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) { dev_info(dev, "Switching pwm%d to " "manual mode.\n", ix + 1); @@ -2142,7 +2246,7 @@ static int dme1737_init_device(struct device *dev) data->pwm_acz[2] = 4; /* pwm3 -> zone3 */ /* Set VRM */ - if (data->type == dme1737) { + if (data->has_features & HAS_VID) { data->vrm = vid_which_vrm(); } @@ -2163,10 +2267,10 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data) dme1737_sio_enter(sio_cip); /* Check device ID - * The DME1737 can return either 0x78 or 0x77 as its device ID. - * The SCH5027 returns 0x89 as its device ID. */ + * We currently know about two kinds of DME1737 and SCH5027. */ reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20); - if (!(reg == 0x77 || reg == 0x78 || reg == 0x89)) { + if (!(reg == DME1737_ID_1 || reg == DME1737_ID_2 || + reg == SCH5027_ID)) { err = -ENODEV; goto exit; } @@ -2185,16 +2289,16 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data) * are enabled and available. Bits [3:2] of registers 0x43-0x46 are set * to '10' if the respective feature is enabled. 
*/ if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */ - data->has_fan |= (1 << 5); + data->has_features |= HAS_FAN(5); } if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */ - data->has_pwm |= (1 << 5); + data->has_features |= HAS_PWM(5); } if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */ - data->has_fan |= (1 << 4); + data->has_features |= HAS_FAN(4); } if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */ - data->has_pwm |= (1 << 4); + data->has_features |= HAS_PWM(4); } exit: @@ -2222,7 +2326,6 @@ static int dme1737_i2c_detect(struct i2c_client *client, if (company == DME1737_COMPANY_SMSC && verstep == SCH5027_VERSTEP) { name = "sch5027"; - } else if (company == DME1737_COMPANY_SMSC && (verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP) { name = "dme1737"; @@ -2329,10 +2432,10 @@ static int __init dme1737_isa_detect(int sio_cip, unsigned short *addr) dme1737_sio_enter(sio_cip); /* Check device ID - * We currently know about SCH3112 (0x7c), SCH3114 (0x7d), and - * SCH3116 (0x7f). */ + * We currently know about SCH3112, SCH3114, SCH3116, and SCH5127 */ reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20); - if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) { + if (!(reg == SCH3112_ID || reg == SCH3114_ID || reg == SCH3116_ID || + reg == SCH5127_ID)) { err = -ENODEV; goto exit; } @@ -2424,23 +2527,42 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); /* Skip chip detection if module is loaded with force_id parameter */ - if (!force_id) { + switch (force_id) { + case SCH3112_ID: + case SCH3114_ID: + case SCH3116_ID: + data->type = sch311x; + break; + case SCH5127_ID: + data->type = sch5127; + break; + default: company = dme1737_read(data, DME1737_REG_COMPANY); device = dme1737_read(data, DME1737_REG_DEVICE); - if (!((company == DME1737_COMPANY_SMSC) && - (device == SCH311X_DEVICE))) { + if ((company == DME1737_COMPANY_SMSC) && + (device == SCH311X_DEVICE)) { + data->type = sch311x; + } else if ((company == DME1737_COMPANY_SMSC) && + (device == SCH5127_DEVICE)) { + data->type = sch5127; + } else { err = -ENODEV; goto exit_kfree; } } - data->type = sch311x; - /* Fill in the remaining client fields and initialize the mutex */ - data->name = "sch311x"; + if (data->type == sch5127) { + data->name = "sch5127"; + } else { + data->name = "sch311x"; + } + + /* Initialize the mutex */ mutex_init(&data->update_lock); - dev_info(dev, "Found a SCH311x chip at 0x%04x\n", data->addr); + dev_info(dev, "Found a %s chip at 0x%04x\n", + data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr); /* Initialize the chip */ if ((err = dme1737_init_device(dev))) { diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c new file mode 100644 index 000000000000..0e4b5642638d --- /dev/null +++ b/drivers/hwmon/emc1403.c @@ -0,0 +1,344 @@ +/* + * emc1403.c - SMSC Thermal Driver + * + * Copyright (C) 2008 Intel Corp + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * TODO + * - cache alarm and critical limit registers + * - add emc1404 support + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/sysfs.h> +#include <linux/mutex.h> + +#define THERMAL_PID_REG 0xfd +#define THERMAL_SMSC_ID_REG 0xfe +#define THERMAL_REVISION_REG 0xff + +struct thermal_data { + struct device *hwmon_dev; + struct mutex mutex; + /* Cache the hyst value so we don't keep re-reading it. In theory + we could cache it forever as nobody else should be writing it. */ + u8 cached_hyst; + unsigned long hyst_valid; +}; + +static ssize_t show_temp(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + int retval = i2c_smbus_read_byte_data(client, sda->index); + + if (retval < 0) + return retval; + return sprintf(buf, "%d000\n", retval); +} + +static ssize_t show_bit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr); + int retval = i2c_smbus_read_byte_data(client, sda->nr); + + if (retval < 0) + return retval; + retval &= sda->index; + return sprintf(buf, "%d\n", retval ? 1 : 0); +} + +static ssize_t store_temp(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + struct i2c_client *client = to_i2c_client(dev); + unsigned long val; + int retval; + + if (strict_strtoul(buf, 10, &val)) + return -EINVAL; + retval = i2c_smbus_write_byte_data(client, sda->index, + DIV_ROUND_CLOSEST(val, 1000)); + if (retval < 0) + return retval; + return count; +} + +static ssize_t show_hyst(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct thermal_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + int retval; + int hyst; + + retval = i2c_smbus_read_byte_data(client, sda->index); + if (retval < 0) + return retval; + + if (time_after(jiffies, data->hyst_valid)) { + hyst = i2c_smbus_read_byte_data(client, 0x21); + if (hyst < 0) + return hyst; + data->cached_hyst = hyst; + data->hyst_valid = jiffies + HZ; + } + return sprintf(buf, "%d000\n", retval - data->cached_hyst); +} + +static ssize_t store_hyst(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct thermal_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + int retval; + int hyst; + unsigned long val; + + if (strict_strtoul(buf, 10, &val)) + return -EINVAL; + + mutex_lock(&data->mutex); + retval = i2c_smbus_read_byte_data(client, sda->index); + if (retval < 0) + goto fail; + + hyst = retval * 1000 - val; + hyst = DIV_ROUND_CLOSEST(hyst, 1000); + if (hyst < 0 || hyst > 255) { + retval = -ERANGE; + goto fail; + } + + retval = i2c_smbus_write_byte_data(client, 0x21, hyst);
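	/* Worked example of the arithmetic above: with temp1_crit = 85000
	 * (the limit register in sda->index reads back 85), writing 75000
	 * to temp1_crit_hyst gives hyst = DIV_ROUND_CLOSEST(85000 - 75000,
	 * 1000) = 10, i.e. register 0x21 stores the hysteresis as a
	 * positive offset in degrees C below the limit, shared by all
	 * channels. */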
+ if (retval == 0) { + retval = count; + data->cached_hyst = hyst; + data->hyst_valid = jiffies + HZ; + } +fail: + mutex_unlock(&data->mutex); + return retval; +} + +/* + * Sensors. We pass the actual i2c register to the methods. + */ + +static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x06); +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x05); +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x20); +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00); +static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO, + show_bit, NULL, 0x36, 0x01); +static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO, + show_bit, NULL, 0x35, 0x01); +static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, + show_bit, NULL, 0x37, 0x01); +static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR, + show_hyst, store_hyst, 0x20); + +static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x08); +static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x07); +static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x19); +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01); +static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO, + show_bit, NULL, 0x36, 0x02); +static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO, + show_bit, NULL, 0x35, 0x02); +static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO, + show_bit, NULL, 0x37, 0x02); +static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO | S_IWUSR, + show_hyst, store_hyst, 0x19); + +static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x16); +static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x15); +static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x1A); +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23); +static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO, + show_bit, NULL, 0x36, 0x04); +static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO, + show_bit, NULL, 0x35, 0x04); +static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO, + show_bit, NULL, 0x37, 0x04); +static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR, + show_hyst, store_hyst, 0x1A); + +static struct attribute *mid_att_thermal[] = { + &sensor_dev_attr_temp1_min.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, + &sensor_dev_attr_temp2_min.dev_attr.attr, + &sensor_dev_attr_temp2_max.dev_attr.attr, + &sensor_dev_attr_temp2_crit.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, + &sensor_dev_attr_temp3_min.dev_attr.attr, + &sensor_dev_attr_temp3_max.dev_attr.attr, + &sensor_dev_attr_temp3_crit.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, + NULL +}; + +static const struct 
attribute_group m_thermal_gr = { + .attrs = mid_att_thermal +}; + +static int emc1403_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + int id; + /* Check if thermal chip is SMSC and EMC1403 */ + + id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG); + if (id != 0x5d) + return -ENODEV; + + /* Note: 0x25 is the 1404 which is very similar and this + driver could be extended */ + id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG); + if (id != 0x21) + return -ENODEV; + + id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); + if (id != 0x01) + return -ENODEV; + + strlcpy(info->type, "emc1403", I2C_NAME_SIZE); + return 0; +} + +static int emc1403_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int res; + struct thermal_data *data; + + data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL); + if (data == NULL) { + dev_warn(&client->dev, "out of memory\n"); + return -ENOMEM; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->mutex); + data->hyst_valid = jiffies - 1; /* Expired */ + + res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); + if (res) { + dev_warn(&client->dev, "create group failed\n"); + goto thermal_error1; + } + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + res = PTR_ERR(data->hwmon_dev); + dev_warn(&client->dev, "register hwmon dev failed\n"); + goto thermal_error2; + } + dev_info(&client->dev, "EMC1403 Thermal chip found\n"); + return res; + +thermal_error2: + sysfs_remove_group(&client->dev.kobj, &m_thermal_gr); +thermal_error1: + kfree(data); + return res; +} + +static int emc1403_remove(struct i2c_client *client) +{ + struct thermal_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &m_thermal_gr); + kfree(data); + return 0; +} + +static const unsigned short emc1403_address_list[] = { + 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END +}; + +static const struct i2c_device_id emc1403_idtable[] = { + { "emc1403", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, emc1403_idtable); + +static struct i2c_driver sensor_emc1403 = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "emc1403", + }, + .detect = emc1403_detect, + .probe = emc1403_probe, + .remove = emc1403_remove, + .id_table = emc1403_idtable, + .address_list = emc1403_address_list, +}; + +static int __init sensor_emc1403_init(void) +{ + return i2c_add_driver(&sensor_emc1403); +} + +static void __exit sensor_emc1403_exit(void) +{ + i2c_del_driver(&sensor_emc1403); +} + +module_init(sensor_emc1403_init); +module_exit(sensor_emc1403_exit); + +MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>"); +MODULE_DESCRIPTION("emc1403 Thermal Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index a95fa4256caa..537841ef44b9 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -856,21 +856,19 @@ static inline int superio_inb(int base, int reg) static int superio_inw(int base, int reg) { int val; - outb(reg++, base); - val = inb(base + 1) << 8; - outb(reg, base); - val |= inb(base + 1); + val = superio_inb(base, reg) << 8; + val |= superio_inb(base, reg + 1); return val; } static inline void superio_enter(int base) { /* according to the datasheet the key must be sent twice!
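 * (On Fintek-style Super-I/O chips like this one the unlock key is
 * conventionally 0x87, written twice, and a single lock write ends
 * the configuration session. A sketch of the counterpart, assuming
 * the driver's existing SIO_LOCK_KEY define:
 *
 *	static inline void superio_exit(int base)
 *	{
 *		outb(SIO_LOCK_KEY, base);
 *	}
 *
 * f71882fg_find() below relies on exactly this enter/exit pairing
 * around its detection reads.)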
*/ - outb( SIO_UNLOCK_KEY, base); - outb( SIO_UNLOCK_KEY, base); + outb(SIO_UNLOCK_KEY, base); + outb(SIO_UNLOCK_KEY, base); } -static inline void superio_select( int base, int ld) +static inline void superio_select(int base, int ld) { outb(SIO_REG_LDSEL, base); outb(ld, base + 1); @@ -905,10 +903,8 @@ static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg) { u16 val; - outb(reg++, data->addr + ADDR_REG_OFFSET); - val = inb(data->addr + DATA_REG_OFFSET) << 8; - outb(reg, data->addr + ADDR_REG_OFFSET); - val |= inb(data->addr + DATA_REG_OFFSET); + val = f71882fg_read8(data, reg) << 8; + val |= f71882fg_read8(data, reg + 1); return val; } @@ -921,10 +917,8 @@ static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val) static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val) { - outb(reg++, data->addr + ADDR_REG_OFFSET); - outb(val >> 8, data->addr + DATA_REG_OFFSET); - outb(reg, data->addr + ADDR_REG_OFFSET); - outb(val & 255, data->addr + DATA_REG_OFFSET); + f71882fg_write8(data, reg, val >> 8); + f71882fg_write8(data, reg + 1, val & 0xff); } static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr) @@ -945,7 +939,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) mutex_lock(&data->update_lock); /* Update once every 60 seconds */ - if ( time_after(jiffies, data->last_limits + 60 * HZ ) || + if (time_after(jiffies, data->last_limits + 60 * HZ) || !data->valid) { if (data->type == f71882fg || data->type == f71889fg) { data->in1_max = @@ -1127,8 +1121,12 @@ static ssize_t store_fan_full_speed(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; val = SENSORS_LIMIT(val, 23, 1500000); val = fan_to_reg(val); @@ -1157,8 +1155,12 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - unsigned long val = simple_strtoul(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + unsigned long val; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP); @@ -1206,7 +1208,14 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - long val = simple_strtol(buf, NULL, 10) / 8; + int err; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 8; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); @@ -1233,8 +1242,12 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - unsigned long val = simple_strtoul(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + unsigned long val; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP); @@ -1299,8 +1312,14 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, 
size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10) / 1000; + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); @@ -1333,10 +1352,16 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10) / 1000; + int err, nr = to_sensor_dev_attr_2(devattr)->index; ssize_t ret = count; u8 reg; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; mutex_lock(&data->update_lock); @@ -1372,8 +1397,14 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10) / 1000; + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); @@ -1427,8 +1458,12 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - unsigned long val = simple_strtoul(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + unsigned long val; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP); @@ -1490,8 +1525,13 @@ static ssize_t store_pwm(struct device *dev, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); @@ -1551,8 +1591,12 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; /* Special case for F8000 pwm channel 3 which only does auto mode */ if (data->type == f8000 && nr == 2 && val != 2) @@ -1626,9 +1670,14 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int pwm = to_sensor_dev_attr_2(devattr)->index; + int err, pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; - long val = simple_strtol(buf, NULL, 10); + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); @@ -1674,10 +1723,16 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = 
dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; + int err, nr = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; - long val = simple_strtol(buf, NULL, 10) / 1000; u8 reg; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; mutex_lock(&data->update_lock); data->pwm_auto_point_temp[nr][point] = @@ -1716,8 +1771,12 @@ static ssize_t store_pwm_interpolate(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - unsigned long val = simple_strtoul(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + unsigned long val; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); data->pwm_auto_point_mapping[nr] = @@ -1752,8 +1811,12 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; switch (val) { case 1: @@ -1798,9 +1861,15 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int pwm = to_sensor_dev_attr_2(devattr)->index; + int err, pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; - long val = simple_strtol(buf, NULL, 10) / 1000; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; if (data->type == f71889fg) val = SENSORS_LIMIT(val, -128, 127); @@ -2109,6 +2178,13 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, int err = -ENODEV; u16 devid; + /* Don't step on other drivers' I/O space by accident */ + if (!request_region(sioaddr, 2, DRVNAME)) { + printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", + (int)sioaddr); + return -EBUSY; + } + superio_enter(sioaddr); devid = superio_inw(sioaddr, SIO_REG_MANID); @@ -2151,8 +2227,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, } *address = superio_inw(sioaddr, SIO_REG_ADDR); - if (*address == 0) - { + if (*address == 0) { printk(KERN_WARNING DRVNAME ": Base address not set\n"); goto exit; } @@ -2164,6 +2239,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, (int)superio_inb(sioaddr, SIO_REG_DEVREV)); exit: superio_exit(sioaddr); + release_region(sioaddr, 2); return err; } diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index 0627f7a5b9b8..b7ca2a9676cf 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -38,6 +38,7 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> +#include <linux/smp_lock.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> @@ -847,8 +848,7 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf, return count; } -static int watchdog_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { static struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | @@ -858,6 +858,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp, int i, ret = 0; struct 
fschmd_data *data = filp->private_data; + lock_kernel(); switch (cmd) { case WDIOC_GETSUPPORT: ident.firmware_version = data->revision; @@ -914,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp, default: ret = -ENOTTY; } - + unlock_kernel(); return ret; } @@ -924,7 +925,7 @@ static const struct file_operations watchdog_fops = { .open = watchdog_open, .release = watchdog_release, .write = watchdog_write, - .ioctl = watchdog_ioctl, + .unlocked_ioctl = watchdog_ioctl, }; diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c index b2f2277cad3c..6138f036b159 100644 --- a/drivers/hwmon/lis3lv02d.c +++ b/drivers/hwmon/lis3lv02d.c @@ -41,6 +41,8 @@ /* joystick device poll interval in milliseconds */ #define MDPS_POLL_INTERVAL 50 +#define MDPS_POLL_MIN 0 +#define MDPS_POLL_MAX 2000 /* * The sensor can also generate interrupts (DRDY) but it's pretty pointless * because they are generated even if the data do not change. So it's better @@ -121,11 +123,9 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z) int position[3]; int i; - mutex_lock(&lis3->mutex); position[0] = lis3->read_data(lis3, OUTX); position[1] = lis3->read_data(lis3, OUTY); position[2] = lis3->read_data(lis3, OUTZ); - mutex_unlock(&lis3->mutex); for (i = 0; i < 3; i++) position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY; @@ -249,8 +249,24 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3) EXPORT_SYMBOL_GPL(lis3lv02d_poweron); +static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev) +{ + int x, y, z; + + mutex_lock(&lis3_dev.mutex); + lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z); + input_report_abs(pidev->input, ABS_X, x); + input_report_abs(pidev->input, ABS_Y, y); + input_report_abs(pidev->input, ABS_Z, z); + input_sync(pidev->input); + mutex_unlock(&lis3_dev.mutex); +} + static irqreturn_t lis302dl_interrupt(int irq, void *dummy) { + if (!test_bit(0, &lis3_dev.misc_opened)) + goto out; + /* * Be careful: on some HP laptops the bios force DD when on battery and * the lid is closed. This leads to interrupts as soon as a little move @@ -260,44 +276,93 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy) wake_up_interruptible(&lis3_dev.misc_wait); kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); +out: + if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && + lis3_dev.idev->input->users) + return IRQ_WAKE_THREAD; return IRQ_HANDLED; } -static int lis3lv02d_misc_open(struct inode *inode, struct file *file) +static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3) { - int ret; + struct input_dev *dev = lis3->idev->input; + u8 click_src; - if (test_and_set_bit(0, &lis3_dev.misc_opened)) - return -EBUSY; /* already open */ + mutex_lock(&lis3->mutex); + lis3->read(lis3, CLICK_SRC, &click_src); - atomic_set(&lis3_dev.count, 0); + if (click_src & CLICK_SINGLE_X) { + input_report_key(dev, lis3->mapped_btns[0], 1); + input_report_key(dev, lis3->mapped_btns[0], 0); + } - /* - * The sensor can generate interrupts for free-fall and direction - * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep - * the things simple and _fast_ we activate it only for free-fall, so - * no need to read register (very slow with ACPI). For the same reason, - * we forbid shared interrupts. - * - * IRQF_TRIGGER_RISING seems pointless on HP laptops because the - * io-apic is not configurable (and generates a warning) but I keep it - * in case of support for other hardware. 
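(This comment block is moved rather than dropped — it resurfaces in lis3lv02d_init_device() further down, where the plain request_irq() becomes request_threaded_irq(). A minimal self-contained sketch of that split-handler pattern, with hypothetical demo_* names:

	#include <linux/interrupt.h>

	/* hard handler: interrupt context, must not sleep */
	static irqreturn_t demo_quick_check(int irq, void *data)
	{
		return IRQ_WAKE_THREAD;		/* defer the slow bus I/O */
	}

	/* threaded handler: process context, may sleep, so I2C/SPI
	 * register reads are safe here */
	static irqreturn_t demo_slow_work(int irq, void *data)
	{
		/* read status registers, report input events, ... */
		return IRQ_HANDLED;
	}

	static int demo_attach(unsigned int irq, void *dev)
	{
		return request_threaded_irq(irq, demo_quick_check,
					    demo_slow_work,
					    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					    "demo", dev);
	}

IRQF_ONESHOT keeps the line masked until the thread finishes, which is what allows the 8-bit chips' click and wakeup sources to be read outside hard-irq context.)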
- */ - ret = request_irq(lis3_dev.irq, lis302dl_interrupt, IRQF_TRIGGER_RISING, - DRIVER_NAME, &lis3_dev); + if (click_src & CLICK_SINGLE_Y) { + input_report_key(dev, lis3->mapped_btns[1], 1); + input_report_key(dev, lis3->mapped_btns[1], 0); + } - if (ret) { - clear_bit(0, &lis3_dev.misc_opened); - printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq); - return -EBUSY; + if (click_src & CLICK_SINGLE_Z) { + input_report_key(dev, lis3->mapped_btns[2], 1); + input_report_key(dev, lis3->mapped_btns[2], 0); } + input_sync(dev); + mutex_unlock(&lis3->mutex); +} + +static void lis302dl_interrupt_handle_ff_wu(struct lis3lv02d *lis3) +{ + u8 wu1_src; + u8 wu2_src; + + lis3->read(lis3, FF_WU_SRC_1, &wu1_src); + lis3->read(lis3, FF_WU_SRC_2, &wu2_src); + + wu1_src = wu1_src & FF_WU_SRC_IA ? wu1_src : 0; + wu2_src = wu2_src & FF_WU_SRC_IA ? wu2_src : 0; + + /* joystick poll is internally protected by the lis3->mutex. */ + if (wu1_src || wu2_src) + lis3lv02d_joystick_poll(lis3_dev.idev); +} + +static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data) +{ + + struct lis3lv02d *lis3 = data; + + if ((lis3->pdata->irq_cfg & LIS3_IRQ1_MASK) == LIS3_IRQ1_CLICK) + lis302dl_interrupt_handle_click(lis3); + else + lis302dl_interrupt_handle_ff_wu(lis3); + + return IRQ_HANDLED; +} + +static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data) +{ + + struct lis3lv02d *lis3 = data; + + if ((lis3->pdata->irq_cfg & LIS3_IRQ2_MASK) == LIS3_IRQ2_CLICK) + lis302dl_interrupt_handle_click(lis3); + else + lis302dl_interrupt_handle_ff_wu(lis3); + + return IRQ_HANDLED; +} + +static int lis3lv02d_misc_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(0, &lis3_dev.misc_opened)) + return -EBUSY; /* already open */ + + atomic_set(&lis3_dev.count, 0); return 0; } static int lis3lv02d_misc_release(struct inode *inode, struct file *file) { fasync_helper(-1, file, 0, &lis3_dev.async_queue); - free_irq(lis3_dev.irq, &lis3_dev); clear_bit(0, &lis3_dev.misc_opened); /* release the device */ return 0; } @@ -380,22 +445,12 @@ static struct miscdevice lis3lv02d_misc_device = { .fops = &lis3lv02d_misc_fops, }; -static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev) -{ - int x, y, z; - - lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z); - input_report_abs(pidev->input, ABS_X, x); - input_report_abs(pidev->input, ABS_Y, y); - input_report_abs(pidev->input, ABS_Z, z); - input_sync(pidev->input); -} - int lis3lv02d_joystick_enable(void) { struct input_dev *input_dev; int err; int max_val, fuzz, flat; + int btns[] = {BTN_X, BTN_Y, BTN_Z}; if (lis3_dev.idev) return -EINVAL; @@ -406,6 +461,8 @@ int lis3lv02d_joystick_enable(void) lis3_dev.idev->poll = lis3lv02d_joystick_poll; lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL; + lis3_dev.idev->poll_interval_min = MDPS_POLL_MIN; + lis3_dev.idev->poll_interval_max = MDPS_POLL_MAX; input_dev = lis3_dev.idev->input; input_dev->name = "ST LIS3LV02DL Accelerometer"; @@ -422,6 +479,10 @@ int lis3lv02d_joystick_enable(void) input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat); input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat); + lis3_dev.mapped_btns[0] = lis3lv02d_get_axis(abs(lis3_dev.ac.x), btns); + lis3_dev.mapped_btns[1] = lis3lv02d_get_axis(abs(lis3_dev.ac.y), btns); + lis3_dev.mapped_btns[2] = lis3lv02d_get_axis(abs(lis3_dev.ac.z), btns); + err = input_register_polled_device(lis3_dev.idev); if (err) { input_free_polled_device(lis3_dev.idev); @@ -434,6 +495,11 @@ 
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable); void lis3lv02d_joystick_disable(void) { + if (lis3_dev.irq) + free_irq(lis3_dev.irq, &lis3_dev); + if (lis3_dev.pdata && lis3_dev.pdata->irq2) + free_irq(lis3_dev.pdata->irq2, &lis3_dev); + if (!lis3_dev.idev) return; @@ -462,7 +528,9 @@ static ssize_t lis3lv02d_position_show(struct device *dev, { int x, y, z; + mutex_lock(&lis3_dev.mutex); lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z); + mutex_unlock(&lis3_dev.mutex); return sprintf(buf, "(%d,%d,%d)\n", x, y, z); } @@ -521,12 +589,70 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3) } EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs); +static void lis3lv02d_8b_configure(struct lis3lv02d *dev, + struct lis3lv02d_platform_data *p) +{ + int err; + int ctrl2 = p->hipass_ctrl; + + if (p->click_flags) { + dev->write(dev, CLICK_CFG, p->click_flags); + dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit); + dev->write(dev, CLICK_LATENCY, p->click_latency); + dev->write(dev, CLICK_WINDOW, p->click_window); + dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf); + dev->write(dev, CLICK_THSY_X, + (p->click_thresh_x & 0xf) | + (p->click_thresh_y << 4)); + + if (dev->idev) { + struct input_dev *input_dev = lis3_dev.idev->input; + input_set_capability(input_dev, EV_KEY, BTN_X); + input_set_capability(input_dev, EV_KEY, BTN_Y); + input_set_capability(input_dev, EV_KEY, BTN_Z); + } + } + + if (p->wakeup_flags) { + dev->write(dev, FF_WU_CFG_1, p->wakeup_flags); + dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f); + /* default to 2.5ms for now */ + dev->write(dev, FF_WU_DURATION_1, 1); + ctrl2 ^= HP_FF_WU1; /* Xor to keep compatible with old pdata*/ + } + + if (p->wakeup_flags2) { + dev->write(dev, FF_WU_CFG_2, p->wakeup_flags2); + dev->write(dev, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f); + /* default to 2.5ms for now */ + dev->write(dev, FF_WU_DURATION_2, 1); + ctrl2 ^= HP_FF_WU2; /* Xor to keep compatible with old pdata*/ + } + /* Configure hipass filters */ + dev->write(dev, CTRL_REG2, ctrl2); + + if (p->irq2) { + err = request_threaded_irq(p->irq2, + NULL, + lis302dl_interrupt_thread2_8b, + IRQF_TRIGGER_RISING | + IRQF_ONESHOT, + DRIVER_NAME, &lis3_dev); + if (err < 0) + printk(KERN_ERR DRIVER_NAME + "No second IRQ. Limited functionality\n"); + } +} + /* * Initialise the accelerometer and the various subsystems. * Should be rather independent of the bus system. 
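(The ctrl2 ^= HP_FF_WU1 / HP_FF_WU2 XORs in lis3lv02d_8b_configure() above preserve compatibility: legacy platform data predates the hipass_ctrl field and so passes 0, and 0 ^ HP_FF_WUx still sets the filter bit, reproducing the old unconditional CTRL_REG2 write. A worked example:

	/* old pdata: hipass_ctrl == 0, wakeup_flags set
	 *	ctrl2 = 0 ^ HP_FF_WU1 = HP_FF_WU1	-> filter on, as before
	 * new pdata: hipass_ctrl = HP_FF_WU1
	 *	ctrl2 = HP_FF_WU1 ^ HP_FF_WU1 = 0	-> filter off for WU1 */
)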
*/ int lis3lv02d_init_device(struct lis3lv02d *dev) { + int err; + irq_handler_t thread_fn; + dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I); switch (dev->whoami) { @@ -567,25 +693,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) if (dev->pdata) { struct lis3lv02d_platform_data *p = dev->pdata; - if (p->click_flags && (dev->whoami == WAI_8B)) { - dev->write(dev, CLICK_CFG, p->click_flags); - dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit); - dev->write(dev, CLICK_LATENCY, p->click_latency); - dev->write(dev, CLICK_WINDOW, p->click_window); - dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf); - dev->write(dev, CLICK_THSY_X, - (p->click_thresh_x & 0xf) | - (p->click_thresh_y << 4)); - } - - if (p->wakeup_flags && (dev->whoami == WAI_8B)) { - dev->write(dev, FF_WU_CFG_1, p->wakeup_flags); - dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f); - /* default to 2.5ms for now */ - dev->write(dev, FF_WU_DURATION_1, 1); - /* enable high pass filter for both free-fall units */ - dev->write(dev, CTRL_REG2, HP_FF_WU1 | HP_FF_WU2); - } + if (dev->whoami == WAI_8B) + lis3lv02d_8b_configure(dev, p); if (p->irq_cfg) dev->write(dev, CTRL_REG3, p->irq_cfg); @@ -598,6 +707,32 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) goto out; } + /* + * The sensor can generate interrupts for free-fall and direction + * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep + * the things simple and _fast_ we activate it only for free-fall, so + * no need to read register (very slow with ACPI). For the same reason, + * we forbid shared interrupts. + * + * IRQF_TRIGGER_RISING seems pointless on HP laptops because the + * io-apic is not configurable (and generates a warning) but I keep it + * in case of support for other hardware. + */ + if (dev->whoami == WAI_8B) + thread_fn = lis302dl_interrupt_thread1_8b; + else + thread_fn = NULL; + + err = request_threaded_irq(dev->irq, lis302dl_interrupt, + thread_fn, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + DRIVER_NAME, &lis3_dev); + + if (err < 0) { + printk(KERN_ERR DRIVER_NAME "Cannot get IRQ\n"); + goto out; + } + if (misc_register(&lis3lv02d_misc_device)) printk(KERN_ERR DRIVER_NAME ": misc_register failed\n"); out: diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h index e6a01f44709b..854091380e33 100644 --- a/drivers/hwmon/lis3lv02d.h +++ b/drivers/hwmon/lis3lv02d.h @@ -196,6 +196,16 @@ enum lis3lv02d_dd_src { DD_SRC_IA = 0x40, }; +enum lis3lv02d_click_src_8b { + CLICK_SINGLE_X = 0x01, + CLICK_DOUBLE_X = 0x02, + CLICK_SINGLE_Y = 0x04, + CLICK_DOUBLE_Y = 0x08, + CLICK_SINGLE_Z = 0x10, + CLICK_DOUBLE_Z = 0x20, + CLICK_IA = 0x40, +}; + struct axis_conversion { s8 x; s8 y; @@ -223,6 +233,7 @@ struct lis3lv02d { struct platform_device *pdev; /* platform device */ atomic_t count; /* interrupt count after last read */ struct axis_conversion ac; /* hw -> logical axis */ + int mapped_btns[3]; u32 irq; /* IRQ number */ struct fasync_struct *async_queue; /* queue for the misc device */ diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index bf81aff7051d..776aeb3019d2 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -53,7 +53,7 @@ * Address is fully defined internally and cannot be changed. 
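(The widened address list below reflects that the LM64 responds at 0x18 or 0x4e while the LM63 remains fixed at 0x4c. Boards of this era that prefer explicit binding over autodetection would declare the sensor up front — a sketch, with a hypothetical board file and bus number:

	#include <linux/i2c.h>

	static struct i2c_board_info mainboard_sensors[] __initdata = {
		{ I2C_BOARD_INFO("lm64", 0x18) },	/* name from lm63_id[] */
	};

	static int __init mainboard_sensors_init(void)
	{
		/* bus 0 is an assumption; must run before the adapter probes */
		return i2c_register_board_info(0, mainboard_sensors,
					       ARRAY_SIZE(mainboard_sensors));
	}
)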
*/ -static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; +static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; /* * The LM63 registers @@ -131,12 +131,15 @@ static struct lm63_data *lm63_update_device(struct device *dev); static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm63_init_client(struct i2c_client *client); +enum chips { lm63, lm64 }; + /* * Driver data (common to all clients) */ static const struct i2c_device_id lm63_id[] = { - { "lm63", 0 }, + { "lm63", lm63 }, + { "lm64", lm64 }, { } }; MODULE_DEVICE_TABLE(i2c, lm63_id); @@ -422,6 +425,7 @@ static int lm63_detect(struct i2c_client *new_client, struct i2c_adapter *adapter = new_client->adapter; u8 man_id, chip_id, reg_config1, reg_config2; u8 reg_alert_status, reg_alert_mask; + int address = new_client->addr; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; @@ -439,7 +443,6 @@ static int lm63_detect(struct i2c_client *new_client, LM63_REG_ALERT_MASK); if (man_id != 0x01 /* National Semiconductor */ - || chip_id != 0x41 /* LM63 */ || (reg_config1 & 0x18) != 0x00 || (reg_config2 & 0xF8) != 0x00 || (reg_alert_status & 0x20) != 0x00 @@ -450,7 +453,12 @@ static int lm63_detect(struct i2c_client *new_client, return -ENODEV; } - strlcpy(info->type, "lm63", I2C_NAME_SIZE); + if (chip_id == 0x41 && address == 0x4c) + strlcpy(info->type, "lm63", I2C_NAME_SIZE); + else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e)) + strlcpy(info->type, "lm64", I2C_NAME_SIZE); + else + return -ENODEV; return 0; } diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index 8ae2cfe2d827..56463428a419 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -46,6 +46,7 @@ enum lm75_type { /* keep sorted in alphabetical order */ tcn75, tmp100, tmp101, + tmp105, tmp175, tmp275, tmp75, @@ -220,6 +221,7 @@ static const struct i2c_device_id lm75_ids[] = { { "tcn75", tcn75, }, { "tmp100", tmp100, }, { "tmp101", tmp101, }, + { "tmp105", tmp105, }, { "tmp175", tmp175, }, { "tmp275", tmp275, }, { "tmp75", tmp75, }, diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 7cc2708871ab..760ef72eea56 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -982,7 +982,8 @@ static struct lm90_data *lm90_update_device(struct device *dev) mutex_lock(&data->update_lock); - if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { + if (time_after(jiffies, data->last_updated + HZ / 2 + HZ / 10) + || !data->valid) { u8 h, l; dev_dbg(&client->dev, "Updating lm90 data.\n"); diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c index 65c232a9d0c5..21d201befc2c 100644 --- a/drivers/hwmon/ltc4245.c +++ b/drivers/hwmon/ltc4245.c @@ -45,9 +45,7 @@ enum ltc4245_cmd { LTC4245_VEEIN = 0x19, LTC4245_VEESENSE = 0x1a, LTC4245_VEEOUT = 0x1b, - LTC4245_GPIOADC1 = 0x1c, - LTC4245_GPIOADC2 = 0x1d, - LTC4245_GPIOADC3 = 0x1e, + LTC4245_GPIOADC = 0x1c, }; struct ltc4245_data { @@ -61,7 +59,7 @@ struct ltc4245_data { u8 cregs[0x08]; /* Voltage registers */ - u8 vregs[0x0f]; + u8 vregs[0x0d]; }; static struct ltc4245_data *ltc4245_update_device(struct device *dev) @@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev) data->cregs[i] = val; } - /* Read voltage registers -- 0x10 to 0x1f */ + /* Read voltage registers -- 0x10 to 0x1c */ for (i = 0; i < ARRAY_SIZE(data->vregs); i++) { val = i2c_smbus_read_byte_data(client, i+0x10); if (unlikely(val < 0)) @@ -128,9 +126,7 @@ static int 
ltc4245_get_voltage(struct device *dev, u8 reg) case LTC4245_VEEOUT: voltage = regval * -55; break; - case LTC4245_GPIOADC1: - case LTC4245_GPIOADC2: - case LTC4245_GPIOADC3: + case LTC4245_GPIOADC: voltage = regval * 10; break; default: @@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2); LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2); /* GPIO voltages */ -LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1); -LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2); -LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3); +LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC); /* Power Consumption (virtual) */ LTC4245_POWER(power1_input, LTC4245_12VSENSE); @@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = { &sensor_dev_attr_in8_min_alarm.dev_attr.attr, &sensor_dev_attr_in9_input.dev_attr.attr, - &sensor_dev_attr_in10_input.dev_attr.attr, - &sensor_dev_attr_in11_input.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, &sensor_dev_attr_power2_input.dev_attr.attr, diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c new file mode 100644 index 000000000000..8013895a1faf --- /dev/null +++ b/drivers/hwmon/tmp102.c @@ -0,0 +1,321 @@ +/* Texas Instruments TMP102 SMBus temperature sensor driver + * + * Copyright (C) 2010 Steven King <sfking@fdwdc.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/mutex.h> +#include <linux/device.h> + +#define DRIVER_NAME "tmp102" + +#define TMP102_TEMP_REG 0x00 +#define TMP102_CONF_REG 0x01 +/* note: these bit definitions are byte swapped */ +#define TMP102_CONF_SD 0x0100 +#define TMP102_CONF_TM 0x0200 +#define TMP102_CONF_POL 0x0400 +#define TMP102_CONF_F0 0x0800 +#define TMP102_CONF_F1 0x1000 +#define TMP102_CONF_R0 0x2000 +#define TMP102_CONF_R1 0x4000 +#define TMP102_CONF_OS 0x8000 +#define TMP102_CONF_EM 0x0010 +#define TMP102_CONF_AL 0x0020 +#define TMP102_CONF_CR0 0x0040 +#define TMP102_CONF_CR1 0x0080 +#define TMP102_TLOW_REG 0x02 +#define TMP102_THIGH_REG 0x03 + +struct tmp102 { + struct device *hwmon_dev; + struct mutex lock; + u16 config_orig; + unsigned long last_update; + int temp[3]; +}; + +/* SMBus specifies low byte first, but the TMP102 returns high byte first, + * so we have to swab16 the values */ +static inline int tmp102_read_reg(struct i2c_client *client, u8 reg) +{ + int result = i2c_smbus_read_word_data(client, reg); + return result < 0 ? 
result : swab16(result); +} + +static inline int tmp102_write_reg(struct i2c_client *client, u8 reg, u16 val) +{ + return i2c_smbus_write_word_data(client, reg, swab16(val)); +} + +/* convert left adjusted 13-bit TMP102 register value to milliCelsius */ +static inline int tmp102_reg_to_mC(s16 val) +{ + return ((val & ~0x01) * 1000) / 128; +} + +/* convert milliCelsius to left adjusted 13-bit TMP102 register value */ +static inline u16 tmp102_mC_to_reg(int val) +{ + return (val * 128) / 1000; +} + +static const u8 tmp102_reg[] = { + TMP102_TEMP_REG, + TMP102_TLOW_REG, + TMP102_THIGH_REG, +}; + +static struct tmp102 *tmp102_update_device(struct i2c_client *client) +{ + struct tmp102 *tmp102 = i2c_get_clientdata(client); + + mutex_lock(&tmp102->lock); + if (time_after(jiffies, tmp102->last_update + HZ / 3)) { + int i; + for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) { + int status = tmp102_read_reg(client, tmp102_reg[i]); + if (status > -1) + tmp102->temp[i] = tmp102_reg_to_mC(status); + } + tmp102->last_update = jiffies; + } + mutex_unlock(&tmp102->lock); + return tmp102; +} + +static ssize_t tmp102_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev)); + + return sprintf(buf, "%d\n", tmp102->temp[sda->index]); +} + +static ssize_t tmp102_set_temp(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + struct i2c_client *client = to_i2c_client(dev); + struct tmp102 *tmp102 = i2c_get_clientdata(client); + long val; + int status; + + if (strict_strtol(buf, 10, &val) < 0) + return -EINVAL; + val = SENSORS_LIMIT(val, -256000, 255000); + + mutex_lock(&tmp102->lock); + tmp102->temp[sda->index] = val; + status = tmp102_write_reg(client, tmp102_reg[sda->index], + tmp102_mC_to_reg(val)); + mutex_unlock(&tmp102->lock); + return status ? 
: count; +} + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL , 0); + +static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp, + tmp102_set_temp, 1); + +static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp, + tmp102_set_temp, 2); + +static struct attribute *tmp102_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + NULL +}; + +static const struct attribute_group tmp102_attr_group = { + .attrs = tmp102_attributes, +}; + +#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1) +#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL) + +static int __devinit tmp102_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct tmp102 *tmp102; + int status; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WORD_DATA)) { + dev_err(&client->dev, "adapter doesnt support SMBus word " + "transactions\n"); + return -ENODEV; + } + + tmp102 = kzalloc(sizeof(*tmp102), GFP_KERNEL); + if (!tmp102) { + dev_dbg(&client->dev, "kzalloc failed\n"); + return -ENOMEM; + } + i2c_set_clientdata(client, tmp102); + + status = tmp102_read_reg(client, TMP102_CONF_REG); + if (status < 0) { + dev_err(&client->dev, "error reading config register\n"); + goto fail_free; + } + tmp102->config_orig = status; + status = tmp102_write_reg(client, TMP102_CONF_REG, TMP102_CONFIG); + if (status < 0) { + dev_err(&client->dev, "error writing config register\n"); + goto fail_restore_config; + } + status = tmp102_read_reg(client, TMP102_CONF_REG); + if (status < 0) { + dev_err(&client->dev, "error reading config register\n"); + goto fail_restore_config; + } + status &= ~TMP102_CONFIG_RD_ONLY; + if (status != TMP102_CONFIG) { + dev_err(&client->dev, "config settings did not stick\n"); + status = -ENODEV; + goto fail_restore_config; + } + tmp102->last_update = jiffies - HZ; + mutex_init(&tmp102->lock); + + status = sysfs_create_group(&client->dev.kobj, &tmp102_attr_group); + if (status) { + dev_dbg(&client->dev, "could not create sysfs files\n"); + goto fail_restore_config; + } + tmp102->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(tmp102->hwmon_dev)) { + dev_dbg(&client->dev, "unable to register hwmon device\n"); + status = PTR_ERR(tmp102->hwmon_dev); + goto fail_remove_sysfs; + } + + dev_info(&client->dev, "initialized\n"); + + return 0; + +fail_remove_sysfs: + sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group); +fail_restore_config: + tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig); +fail_free: + i2c_set_clientdata(client, NULL); + kfree(tmp102); + + return status; +} + +static int __devexit tmp102_remove(struct i2c_client *client) +{ + struct tmp102 *tmp102 = i2c_get_clientdata(client); + + hwmon_device_unregister(tmp102->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group); + + /* Stop monitoring if device was stopped originally */ + if (tmp102->config_orig & TMP102_CONF_SD) { + int config; + + config = tmp102_read_reg(client, TMP102_CONF_REG); + if (config >= 0) + tmp102_write_reg(client, TMP102_CONF_REG, + config | TMP102_CONF_SD); + } + + i2c_set_clientdata(client, NULL); + kfree(tmp102); + + return 0; +} + +#ifdef CONFIG_PM +static int tmp102_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + int config; + + config = tmp102_read_reg(client, TMP102_CONF_REG); + if (config < 0) + return config; + + 
config |= TMP102_CONF_SD; + return tmp102_write_reg(client, TMP102_CONF_REG, config); +} + +static int tmp102_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + int config; + + config = tmp102_read_reg(client, TMP102_CONF_REG); + if (config < 0) + return config; + + config &= ~TMP102_CONF_SD; + return tmp102_write_reg(client, TMP102_CONF_REG, config); +} + +static const struct dev_pm_ops tmp102_dev_pm_ops = { + .suspend = tmp102_suspend, + .resume = tmp102_resume, +}; + +#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops) +#else +#define TMP102_DEV_PM_OPS NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id tmp102_id[] = { + { "tmp102", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tmp102_id); + +static struct i2c_driver tmp102_driver = { + .driver.name = DRIVER_NAME, + .driver.pm = TMP102_DEV_PM_OPS, + .probe = tmp102_probe, + .remove = __devexit_p(tmp102_remove), + .id_table = tmp102_id, +}; + +static int __init tmp102_init(void) +{ + return i2c_add_driver(&tmp102_driver); +} +module_init(tmp102_init); + +static void __exit tmp102_exit(void) +{ + i2c_del_driver(&tmp102_driver); +} +module_exit(tmp102_exit); + +MODULE_AUTHOR("Steven King <sfking@fdwdc.com>"); +MODULE_DESCRIPTION("Texas Instruments TMP102 temperature sensor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index d14a1af9f550..ad8d535235c5 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -92,17 +92,6 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 }; #define TMP411_DEVICE_ID 0x12 /* - * Functions declarations - */ - -static int tmp401_probe(struct i2c_client *client, - const struct i2c_device_id *id); -static int tmp401_detect(struct i2c_client *client, - struct i2c_board_info *info); -static int tmp401_remove(struct i2c_client *client); -static struct tmp401_data *tmp401_update_device(struct device *dev); - -/* * Driver data (common to all clients) */ @@ -113,18 +102,6 @@ static const struct i2c_device_id tmp401_id[] = { }; MODULE_DEVICE_TABLE(i2c, tmp401_id); -static struct i2c_driver tmp401_driver = { - .class = I2C_CLASS_HWMON, - .driver = { - .name = "tmp401", - }, - .probe = tmp401_probe, - .remove = tmp401_remove, - .id_table = tmp401_id, - .detect = tmp401_detect, - .address_list = normal_i2c, -}; - /* * Client data (each client gets its own) */ @@ -194,6 +171,71 @@ static u8 tmp401_crit_temp_to_register(long temp, u8 config) return (temp + 500) / 1000; } +static struct tmp401_data *tmp401_update_device_reg16( + struct i2c_client *client, struct tmp401_data *data) +{ + int i; + + for (i = 0; i < 2; i++) { + /* + * High byte must be read first immediately followed + * by the low byte + */ + data->temp[i] = i2c_smbus_read_byte_data(client, + TMP401_TEMP_MSB[i]) << 8; + data->temp[i] |= i2c_smbus_read_byte_data(client, + TMP401_TEMP_LSB[i]); + data->temp_low[i] = i2c_smbus_read_byte_data(client, + TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8; + data->temp_low[i] |= i2c_smbus_read_byte_data(client, + TMP401_TEMP_LOW_LIMIT_LSB[i]); + data->temp_high[i] = i2c_smbus_read_byte_data(client, + TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8; + data->temp_high[i] |= i2c_smbus_read_byte_data(client, + TMP401_TEMP_HIGH_LIMIT_LSB[i]); + data->temp_crit[i] = i2c_smbus_read_byte_data(client, + TMP401_TEMP_CRIT_LIMIT[i]); + + if (data->kind == tmp411) { + data->temp_lowest[i] = i2c_smbus_read_byte_data(client, + TMP411_TEMP_LOWEST_MSB[i]) << 8; + data->temp_lowest[i] |= i2c_smbus_read_byte_data( + client, TMP411_TEMP_LOWEST_LSB[i]); + 
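		/* The MSB-then-LSB ordering above is load-bearing: on this
		 * register scheme reading the high byte typically latches
		 * the matching low byte, so each 16-bit value is sampled
		 * atomically; the reverse order could pair bytes from two
		 * different conversions. */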
+ data->temp_highest[i] = i2c_smbus_read_byte_data( + client, TMP411_TEMP_HIGHEST_MSB[i]) << 8; + data->temp_highest[i] |= i2c_smbus_read_byte_data( + client, TMP411_TEMP_HIGHEST_LSB[i]); + } + } + return data; +} + +static struct tmp401_data *tmp401_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct tmp401_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { + data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS); + data->config = i2c_smbus_read_byte_data(client, + TMP401_CONFIG_READ); + tmp401_update_device_reg16(client, data); + + data->temp_crit_hyst = i2c_smbus_read_byte_data(client, + TMP401_TEMP_CRIT_HYST); + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + static ssize_t show_temp_value(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -420,30 +462,36 @@ static ssize_t reset_temp_history(struct device *dev, } static struct sensor_device_attribute tmp401_attr[] = { - SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0), - SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0), - SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0), - SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0), - SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst, + SENSOR_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0), + SENSOR_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min, + store_temp_min, 0), + SENSOR_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max, + store_temp_max, 0), + SENSOR_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit, + store_temp_crit, 0), + SENSOR_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_crit_hyst, store_temp_crit_hyst, 0), - SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp1_min_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_LOCAL_LOW), - SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp1_max_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_LOCAL_HIGH), - SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp1_crit_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_LOCAL_CRIT), - SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1), - SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1), - SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1), - SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1), - SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1), - SENSOR_ATTR(temp2_fault, 0444, show_status, NULL, + SENSOR_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1), + SENSOR_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min, + store_temp_min, 1), + SENSOR_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max, + store_temp_max, 1), + SENSOR_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit, + store_temp_crit, 1), + SENSOR_ATTR(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 1), + SENSOR_ATTR(temp2_fault, S_IRUGO, show_status, NULL, TMP401_STATUS_REMOTE_OPEN), - SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp2_min_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_REMOTE_LOW), - SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp2_max_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_REMOTE_HIGH), - SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp2_crit_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_REMOTE_CRIT), }; @@ 
-455,11 +503,11 @@ static struct sensor_device_attribute tmp401_attr[] = { * and remote channels. */ static struct sensor_device_attribute tmp411_attr[] = { - SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0), - SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0), - SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1), - SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1), - SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0), + SENSOR_ATTR(temp1_highest, S_IRUGO, show_temp_highest, NULL, 0), + SENSOR_ATTR(temp1_lowest, S_IRUGO, show_temp_lowest, NULL, 0), + SENSOR_ATTR(temp2_highest, S_IRUGO, show_temp_highest, NULL, 1), + SENSOR_ATTR(temp2_lowest, S_IRUGO, show_temp_lowest, NULL, 1), + SENSOR_ATTR(temp_reset_history, S_IWUSR, NULL, reset_temp_history, 0), }; /* @@ -529,6 +577,27 @@ static int tmp401_detect(struct i2c_client *client, return 0; } +static int tmp401_remove(struct i2c_client *client) +{ + struct tmp401_data *data = i2c_get_clientdata(client); + int i; + + if (data->hwmon_dev) + hwmon_device_unregister(data->hwmon_dev); + + for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++) + device_remove_file(&client->dev, &tmp401_attr[i].dev_attr); + + if (data->kind == tmp411) { + for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++) + device_remove_file(&client->dev, + &tmp411_attr[i].dev_attr); + } + + kfree(data); + return 0; +} + static int tmp401_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -581,91 +650,17 @@ exit_remove: return err; } -static int tmp401_remove(struct i2c_client *client) -{ - struct tmp401_data *data = i2c_get_clientdata(client); - int i; - - if (data->hwmon_dev) - hwmon_device_unregister(data->hwmon_dev); - - for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++) - device_remove_file(&client->dev, &tmp401_attr[i].dev_attr); - - if (data->kind == tmp411) { - for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++) - device_remove_file(&client->dev, - &tmp411_attr[i].dev_attr); - } - - kfree(data); - return 0; -} - -static struct tmp401_data *tmp401_update_device_reg16( - struct i2c_client *client, struct tmp401_data *data) -{ - int i; - - for (i = 0; i < 2; i++) { - /* - * High byte must be read first immediately followed - * by the low byte - */ - data->temp[i] = i2c_smbus_read_byte_data(client, - TMP401_TEMP_MSB[i]) << 8; - data->temp[i] |= i2c_smbus_read_byte_data(client, - TMP401_TEMP_LSB[i]); - data->temp_low[i] = i2c_smbus_read_byte_data(client, - TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8; - data->temp_low[i] |= i2c_smbus_read_byte_data(client, - TMP401_TEMP_LOW_LIMIT_LSB[i]); - data->temp_high[i] = i2c_smbus_read_byte_data(client, - TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8; - data->temp_high[i] |= i2c_smbus_read_byte_data(client, - TMP401_TEMP_HIGH_LIMIT_LSB[i]); - data->temp_crit[i] = i2c_smbus_read_byte_data(client, - TMP401_TEMP_CRIT_LIMIT[i]); - - if (data->kind == tmp411) { - data->temp_lowest[i] = i2c_smbus_read_byte_data(client, - TMP411_TEMP_LOWEST_MSB[i]) << 8; - data->temp_lowest[i] |= i2c_smbus_read_byte_data( - client, TMP411_TEMP_LOWEST_LSB[i]); - - data->temp_highest[i] = i2c_smbus_read_byte_data( - client, TMP411_TEMP_HIGHEST_MSB[i]) << 8; - data->temp_highest[i] |= i2c_smbus_read_byte_data( - client, TMP411_TEMP_HIGHEST_LSB[i]); - } - } - return data; -} - -static struct tmp401_data *tmp401_update_device(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct tmp401_data *data = i2c_get_clientdata(client); - - mutex_lock(&data->update_lock); - - if (time_after(jiffies, 
data->last_updated + HZ) || !data->valid) { - data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS); - data->config = i2c_smbus_read_byte_data(client, - TMP401_CONFIG_READ); - tmp401_update_device_reg16(client, data); - - data->temp_crit_hyst = i2c_smbus_read_byte_data(client, - TMP401_TEMP_CRIT_HYST); - - data->last_updated = jiffies; - data->valid = 1; - } - - mutex_unlock(&data->update_lock); - - return data; -} +static struct i2c_driver tmp401_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "tmp401", + }, + .probe = tmp401_probe, + .remove = tmp401_remove, + .id_table = tmp401_id, + .detect = tmp401_detect, + .address_list = normal_i2c, +}; static int __init tmp401_init(void) { diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c index 68e90abeba96..5da5942cf970 100644 --- a/drivers/hwmon/ultra45_env.c +++ b/drivers/hwmon/ultra45_env.c @@ -300,8 +300,11 @@ static const struct of_device_id env_match[] = { MODULE_DEVICE_TABLE(of, env_match); static struct of_platform_driver env_driver = { - .name = "ultra45_env", - .match_table = env_match, + .driver = { + .name = "ultra45_env", + .owner = THIS_MODULE, + .of_match_table = env_match, + }, .probe = env_probe, .remove = __devexit_p(env_remove), }; diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index 612807d97155..697202e27891 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> +#include <linux/smp_lock.h> #include <linux/hwmon-vid.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> @@ -1319,8 +1320,8 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf, return count; } -static int watchdog_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static long watchdog_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) { static struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | @@ -1332,6 +1333,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp, int val, ret = 0; struct w83793_data *data = filp->private_data; + lock_kernel(); switch (cmd) { case WDIOC_GETSUPPORT: if (!nowayout) @@ -1385,7 +1387,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp, default: ret = -ENOTTY; } - + unlock_kernel(); return ret; } @@ -1395,7 +1397,7 @@ static const struct file_operations watchdog_fops = { .open = watchdog_open, .release = watchdog_close, .write = watchdog_write, - .ioctl = watchdog_ioctl, + .unlocked_ioctl = watchdog_ioctl, }; /* diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 16948db38973..b02b4533651d 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -440,7 +440,7 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) init_waitqueue_head(&cpm->i2c_wait); - cpm->irq = of_irq_to_resource(ofdev->node, 0, NULL); + cpm->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); if (!cpm->irq) return -EINVAL; @@ -451,13 +451,13 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) return ret; /* I2C parameter RAM */ - i2c_base = of_iomap(ofdev->node, 1); + i2c_base = of_iomap(ofdev->dev.of_node, 1); if (i2c_base == NULL) { ret = -EINVAL; goto out_irq; } - if (of_device_is_compatible(ofdev->node, "fsl,cpm1-i2c")) { + if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm1-i2c")) { /* Check for and use a microcode relocation patch. 
*/ cpm->i2c_ram = i2c_base; @@ -474,7 +474,7 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) cpm->version = 1; - } else if (of_device_is_compatible(ofdev->node, "fsl,cpm2-i2c")) { + } else if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm2-i2c")) { cpm->i2c_addr = cpm_muram_alloc(sizeof(struct i2c_ram), 64); cpm->i2c_ram = cpm_muram_addr(cpm->i2c_addr); out_be16(i2c_base, cpm->i2c_addr); @@ -489,24 +489,24 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) } /* I2C control/status registers */ - cpm->i2c_reg = of_iomap(ofdev->node, 0); + cpm->i2c_reg = of_iomap(ofdev->dev.of_node, 0); if (cpm->i2c_reg == NULL) { ret = -EINVAL; goto out_ram; } - data = of_get_property(ofdev->node, "fsl,cpm-command", &len); + data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); if (!data || len != 4) { ret = -EINVAL; goto out_reg; } cpm->cp_command = *data; - data = of_get_property(ofdev->node, "linux,i2c-class", &len); + data = of_get_property(ofdev->dev.of_node, "linux,i2c-class", &len); if (data && len == 4) cpm->adap.class = *data; - data = of_get_property(ofdev->node, "clock-frequency", &len); + data = of_get_property(ofdev->dev.of_node, "clock-frequency", &len); if (data && len == 4) cpm->freq = *data; else @@ -661,7 +661,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev, /* register new adapter to i2c module... */ - data = of_get_property(ofdev->node, "linux,i2c-index", &len); + data = of_get_property(ofdev->dev.of_node, "linux,i2c-index", &len); if (data && len == 4) { cpm->adap.nr = *data; result = i2c_add_numbered_adapter(&cpm->adap); @@ -679,7 +679,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev, /* * register OF I2C devices */ - of_register_i2c_devices(&cpm->adap, ofdev->node); + of_register_i2c_devices(&cpm->adap, ofdev->dev.of_node); return 0; out_shut: @@ -718,13 +718,13 @@ static const struct of_device_id cpm_i2c_match[] = { MODULE_DEVICE_TABLE(of, cpm_i2c_match); static struct of_platform_driver cpm_i2c_driver = { - .match_table = cpm_i2c_match, .probe = cpm_i2c_probe, .remove = __devexit_p(cpm_i2c_remove), - .driver = { - .name = "fsl-i2c-cpm", - .owner = THIS_MODULE, - } + .driver = { + .name = "fsl-i2c-cpm", + .owner = THIS_MODULE, + .of_match_table = cpm_i2c_match, + }, }; static int __init cpm_i2c_init(void) diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index f8ccc0fe95a8..bf344135647a 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -664,7 +664,7 @@ static inline u8 iic_clckdiv(unsigned int opb) static int __devinit iic_request_irq(struct of_device *ofdev, struct ibm_iic_private *dev) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; int irq; if (iic_force_poll) @@ -695,7 +695,7 @@ static int __devinit iic_request_irq(struct of_device *ofdev, static int __devinit iic_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct ibm_iic_private *dev; struct i2c_adapter *adap; const u32 *freq; @@ -807,8 +807,11 @@ static const struct of_device_id ibm_iic_match[] = { }; static struct of_platform_driver ibm_iic_driver = { - .name = "ibm-iic", - .match_table = ibm_iic_match, + .driver = { + .name = "ibm-iic", + .owner = THIS_MODULE, + .of_match_table = ibm_iic_match, + }, .probe = iic_probe, .remove = __devexit_p(iic_remove), }; diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 
e86cef300c7d..df00eb1f11f9 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -560,14 +560,14 @@ static int __devinit fsl_i2c_probe(struct of_device *op, init_waitqueue_head(&i2c->queue); - i2c->base = of_iomap(op->node, 0); + i2c->base = of_iomap(op->dev.of_node, 0); if (!i2c->base) { dev_err(i2c->dev, "failed to map controller\n"); result = -ENOMEM; goto fail_map; } - i2c->irq = irq_of_parse_and_map(op->node, 0); + i2c->irq = irq_of_parse_and_map(op->dev.of_node, 0); if (i2c->irq) { /* no i2c->irq implies polling */ result = request_irq(i2c->irq, mpc_i2c_isr, IRQF_SHARED, "i2c-mpc", i2c); @@ -577,21 +577,22 @@ static int __devinit fsl_i2c_probe(struct of_device *op, } } - if (of_get_property(op->node, "fsl,preserve-clocking", NULL)) { + if (of_get_property(op->dev.of_node, "fsl,preserve-clocking", NULL)) { clock = MPC_I2C_CLOCK_PRESERVE; } else { - prop = of_get_property(op->node, "clock-frequency", &plen); + prop = of_get_property(op->dev.of_node, "clock-frequency", + &plen); if (prop && plen == sizeof(u32)) clock = *prop; } if (match->data) { struct mpc_i2c_data *data = match->data; - data->setup(op->node, i2c, clock, data->prescaler); + data->setup(op->dev.of_node, i2c, clock, data->prescaler); } else { /* Backwards compatibility */ - if (of_get_property(op->node, "dfsrr", NULL)) - mpc_i2c_setup_8xxx(op->node, i2c, clock, 0); + if (of_get_property(op->dev.of_node, "dfsrr", NULL)) + mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock, 0); } dev_set_drvdata(&op->dev, i2c); @@ -605,7 +606,7 @@ static int __devinit fsl_i2c_probe(struct of_device *op, dev_err(i2c->dev, "failed to add adapter\n"); goto fail_add; } - of_register_i2c_devices(&i2c->adap, op->node); + of_register_i2c_devices(&i2c->adap, op->dev.of_node); return result; @@ -674,12 +675,12 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match); /* Structure for a device driver */ static struct of_platform_driver mpc_i2c_driver = { - .match_table = mpc_i2c_of_match, .probe = fsl_i2c_probe, .remove = __devexit_p(fsl_i2c_remove), - .driver = { - .owner = THIS_MODULE, - .name = DRV_NAME, + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .of_match_table = mpc_i2c_of_match, }, }; diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index db3c9f3a7647..e0f833cca3f1 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -418,6 +418,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) client->dev.parent = &client->adapter->dev; client->dev.bus = &i2c_bus_type; client->dev.type = &i2c_client_type; +#ifdef CONFIG_OF + client->dev.of_node = info->of_node; +#endif dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), client->addr); diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c index d2b8b272bc27..cb10201a15ed 100644 --- a/drivers/ide/cmd640.c +++ b/drivers/ide/cmd640.c @@ -633,12 +633,10 @@ static void __init cmd640_init_dev(ide_drive_t *drive) static int cmd640_test_irq(ide_hwif_t *hwif) { - struct pci_dev *dev = to_pci_dev(hwif->dev); int irq_reg = hwif->channel ? ARTTIM23 : CFR; - u8 irq_stat, irq_mask = hwif->channel ? ARTTIM23_IDE23INTR : + u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR : CFR_IDE01INTR; - - pci_read_config_byte(dev, irq_reg, &irq_stat); + u8 irq_stat = get_cmd640_reg(irq_reg); return (irq_stat & irq_mask) ? 
1 : 0; } diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c index b9e517de6a82..3feaa26410be 100644 --- a/drivers/ide/gayle.c +++ b/drivers/ide/gayle.c @@ -16,6 +16,7 @@ #include <linux/init.h> #include <linux/zorro.h> #include <linux/module.h> +#include <linux/platform_device.h> #include <asm/setup.h> #include <asm/amigahw.h> @@ -24,15 +25,6 @@ /* - * Bases of the IDE interfaces - */ - -#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */ -#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */ - -#define GAYLE_IDEREG_SIZE 0x2000 - - /* * Offsets from one of the above bases */ @@ -68,20 +60,20 @@ MODULE_PARM_DESC(doubler, "enable support for IDE doublers"); static int gayle_test_irq(ide_hwif_t *hwif) { - unsigned char ch; + unsigned char ch; - ch = z_readb(hwif->io_ports.irq_addr); - if (!(ch & GAYLE_IRQ_IDE)) - return 0; - return 1; + ch = z_readb(hwif->io_ports.irq_addr); + if (!(ch & GAYLE_IRQ_IDE)) + return 0; + return 1; } static void gayle_a1200_clear_irq(ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; + ide_hwif_t *hwif = drive->hwif; - (void)z_readb(hwif->io_ports.status_addr); - z_writeb(0x7c, hwif->io_ports.irq_addr); + (void)z_readb(hwif->io_ports.status_addr); + z_writeb(0x7c, hwif->io_ports.irq_addr); } static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base, @@ -122,64 +114,89 @@ static const struct ide_port_info gayle_port_info = { * Probe for a Gayle IDE interface (and optionally for an IDE doubler) */ -static int __init gayle_init(void) +static int __init amiga_gayle_ide_probe(struct platform_device *pdev) { - unsigned long phys_base, res_start, res_n; - unsigned long base, ctrlport, irqport; - int a4000, i, rc; - struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; - struct ide_port_info d = gayle_port_info; - - if (!MACH_IS_AMIGA) - return -ENODEV; - - if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE)) - goto found; - -#ifdef CONFIG_ZORRO - if (zorro_find_device(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE, - NULL)) - goto found; -#endif - return -ENODEV; - -found: - printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n", - a4000 ? 4000 : 1200, - ide_doubler ? ", IDE doubler" : ""); - - if (a4000) { - phys_base = GAYLE_BASE_4000; - irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000); - d.port_ops = &gayle_a4000_port_ops; - } else { - phys_base = GAYLE_BASE_1200; - irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_1200); - d.port_ops = &gayle_a1200_port_ops; + struct resource *res; + struct gayle_ide_platform_data *pdata; + unsigned long base, ctrlport, irqport; + unsigned int i; + int error; + struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; + struct ide_port_info d = gayle_port_info; + struct ide_host *host; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (!request_mem_region(res->start, resource_size(res), "IDE")) + return -EBUSY; + + pdata = pdev->dev.platform_data; + pr_info("ide: Gayle IDE controller (A%u style%s)\n", + pdata->explicit_ack ? 1200 : 4000, + ide_doubler ? 
", IDE doubler" : ""); + + base = (unsigned long)ZTWO_VADDR(pdata->base); + ctrlport = 0; + irqport = (unsigned long)ZTWO_VADDR(pdata->irqport); + if (pdata->explicit_ack) + d.port_ops = &gayle_a1200_port_ops; + else + d.port_ops = &gayle_a4000_port_ops; + + for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) { + if (GAYLE_HAS_CONTROL_REG) + ctrlport = base + GAYLE_CONTROL; + + gayle_setup_ports(&hw[i], base, ctrlport, irqport); + hws[i] = &hw[i]; } - res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1); - res_n = GAYLE_IDEREG_SIZE; + error = ide_host_add(&d, hws, i, &host); + if (error) + goto out; - if (!request_mem_region(res_start, res_n, "IDE")) - return -EBUSY; + platform_set_drvdata(pdev, host); + return 0; - for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) { - base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT); - ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0; +out: + release_mem_region(res->start, resource_size(res)); + return error; +} + +static int __exit amiga_gayle_ide_remove(struct platform_device *pdev) +{ + struct ide_host *host = platform_get_drvdata(pdev); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + ide_host_remove(host); + release_mem_region(res->start, resource_size(res)); + return 0; +} - gayle_setup_ports(&hw[i], base, ctrlport, irqport); +static struct platform_driver amiga_gayle_ide_driver = { + .remove = __exit_p(amiga_gayle_ide_remove), + .driver = { + .name = "amiga-gayle-ide", + .owner = THIS_MODULE, + }, +}; - hws[i] = &hw[i]; - } +static int __init amiga_gayle_ide_init(void) +{ + return platform_driver_probe(&amiga_gayle_ide_driver, + amiga_gayle_ide_probe); +} - rc = ide_host_add(&d, hws, i, NULL); - if (rc) - release_mem_region(res_start, res_n); +module_init(amiga_gayle_ide_init); - return rc; +static void __exit amiga_gayle_ide_exit(void) +{ + platform_driver_unregister(&amiga_gayle_ide_driver); } -module_init(gayle_init); +module_exit(amiga_gayle_ide_exit); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-gayle-ide"); diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c index 42965b3e30b9..542603b394e4 100644 --- a/drivers/ide/ide_platform.c +++ b/drivers/ide/ide_platform.c @@ -95,6 +95,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); hw.dev = &pdev->dev; + d.irq_flags = res_irq->flags; if (mmio) d.host_flags |= IDE_HFLAG_MMIO; diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c index c5f3841af360..3a35ec6193d2 100644 --- a/drivers/ide/pdc202xx_old.c +++ b/drivers/ide/pdc202xx_old.c @@ -93,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif) * bit 7: error, bit 6: interrupting, * bit 5: FIFO full, bit 4: FIFO empty */ - return ((sc1d & 0x50) == 0x50) ? 1 : 0; + return (sc1d & 0x40) ? 1 : 0; } else { /* * bit 3: error, bit 2: interrupting, * bit 1: FIFO full, bit 0: FIFO empty */ - return ((sc1d & 0x05) == 0x05) ? 1 : 0; + return (sc1d & 0x04) ? 
1 : 0; } } @@ -241,6 +241,7 @@ static const struct ide_port_ops pdc20246_port_ops = { static const struct ide_port_ops pdc2026x_port_ops = { .set_pio_mode = pdc202xx_set_pio_mode, .set_dma_mode = pdc202xx_set_mode, + .test_irq = pdc202xx_test_irq, .cable_detect = pdc2026x_cable_detect, }; diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index 159955d16c47..183fa38760d8 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c @@ -1153,7 +1153,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) if (macio_resource_count(mdev) == 0) { printk(KERN_WARNING "ide-pmac: no address for %s\n", - mdev->ofdev.node->full_name); + mdev->ofdev.dev.of_node->full_name); rc = -ENXIO; goto out_free_pmif; } @@ -1161,7 +1161,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) /* Request memory resource for IO ports */ if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) { printk(KERN_ERR "ide-pmac: can't request MMIO resource for " - "%s!\n", mdev->ofdev.node->full_name); + "%s!\n", mdev->ofdev.dev.of_node->full_name); rc = -EBUSY; goto out_free_pmif; } @@ -1173,7 +1173,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) */ if (macio_irq_count(mdev) == 0) { printk(KERN_WARNING "ide-pmac: no intrs for device %s, using " - "13\n", mdev->ofdev.node->full_name); + "13\n", mdev->ofdev.dev.of_node->full_name); irq = irq_create_mapping(NULL, 13); } else irq = macio_irq(mdev, 0); @@ -1182,7 +1182,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) regbase = (unsigned long) base; pmif->mdev = mdev; - pmif->node = mdev->ofdev.node; + pmif->node = mdev->ofdev.dev.of_node; pmif->regbase = regbase; pmif->irq = irq; pmif->kauai_fcr = NULL; @@ -1191,7 +1191,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) if (macio_request_resource(mdev, 1, "ide-pmac (dma)")) printk(KERN_WARNING "ide-pmac: can't request DMA " "resource for %s!\n", - mdev->ofdev.node->full_name); + mdev->ofdev.dev.of_node->full_name); else pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000); } else diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig index f15e90a453d1..fb5c5186d4aa 100644 --- a/drivers/idle/Kconfig +++ b/drivers/idle/Kconfig @@ -1,3 +1,14 @@ +config INTEL_IDLE + tristate "Cpuidle Driver for Intel Processors" + depends on CPU_IDLE + depends on X86 + depends on CPU_SUP_INTEL + depends on EXPERIMENTAL + help + Enable intel_idle, a cpuidle driver that includes knowledge of + native Intel hardware idle features. The acpi_idle driver + can be configured at the same time, in order to handle + processors intel_idle does not support. menu "Memory power savings" depends on X86_64 diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile index 5f68fc377e21..23d295cf10f2 100644 --- a/drivers/idle/Makefile +++ b/drivers/idle/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_I7300_IDLE) += i7300_idle.o +obj-$(CONFIG_INTEL_IDLE) += intel_idle.o diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c new file mode 100755 index 000000000000..54f0fb4cd5d2 --- /dev/null +++ b/drivers/idle/intel_idle.c @@ -0,0 +1,461 @@ +/* + * intel_idle.c - native hardware idle loop for modern Intel processors + * + * Copyright (c) 2010, Intel Corporation. 
+ * Len Brown <len.brown@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* + * intel_idle is a cpuidle driver that loads on specific Intel processors + * in lieu of the legacy ACPI processor_idle driver. The intent is to + * make Linux more efficient on these processors, as intel_idle knows + * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs. + */ + +/* + * Design Assumptions + * + * All CPUs have same idle states as boot CPU + * + * Chipset BM_STS (bus master status) bit is a NOP + * for preventing entry into deep C-states + */ + +/* + * Known limitations + * + * The driver currently initializes for_each_online_cpu() upon modprobe. + * It is unaware of subsequent processors hot-added to the system. + * This means that if you boot with maxcpus=n and later online + * processors above n, those processors will use C1 only. + * + * ACPI has a .suspend hack to turn off deep c-states during suspend + * to avoid complications with the lapic timer workaround. + * Have not seen issues with suspend, but may need same workaround here. + * + * There is currently no kernel-based automatic probing/loading mechanism + * if the driver is built as a module. + */ + +/* un-comment DEBUG to enable pr_debug() statements */ +#define DEBUG + +#include <linux/kernel.h> +#include <linux/cpuidle.h> +#include <linux/clockchips.h> +#include <linux/hrtimer.h> /* ktime_get_real() */ +#include <trace/events/power.h> +#include <linux/sched.h> + +#define INTEL_IDLE_VERSION "0.4" +#define PREFIX "intel_idle: " + +#define MWAIT_SUBSTATE_MASK (0xf) +#define MWAIT_CSTATE_MASK (0xf) +#define MWAIT_SUBSTATE_SIZE (4) +#define MWAIT_MAX_NUM_CSTATES 8 +#define CPUID_MWAIT_LEAF (5) +#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) +#define CPUID5_ECX_INTERRUPT_BREAK (0x2) + +static struct cpuidle_driver intel_idle_driver = { + .name = "intel_idle", + .owner = THIS_MODULE, +}; +/* intel_idle.max_cstate=0 disables driver */ +static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; +static int power_policy = 7; /* 0 = max perf; 15 = max powersave */ + +static unsigned int substates; +static int (*choose_substate)(int); + +/* Reliable LAPIC Timer States, bit 1 for C1 etc. */ +static unsigned int lapic_timer_reliable_states; + +static struct cpuidle_device *intel_idle_cpuidle_devices; +static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); + +static struct cpuidle_state *cpuidle_state_table; + +/* + * States are indexed by the cstate number, + * which is also the index into the MWAIT hint array. + * Thus C0 is a dummy. 
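As a standalone check of the indexing described here: the upper nibble of an MWAIT hint names the C-state, offset by one because C0 has no hint. A minimal userspace sketch using the MWAIT_* constants from this patch; hint_to_cstate() is an illustrative helper, not part of the driver:

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK	(0xf)
#define MWAIT_CSTATE_MASK	(0xf)
#define MWAIT_SUBSTATE_SIZE	(4)

/* Recover the state-table index from an MWAIT hint: shift off the
 * substate nibble, mask the C-state nibble, add one for dummy C0. */
static unsigned int hint_to_cstate(unsigned long eax)
{
	return ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
}

int main(void)
{
	unsigned long hints[] = { 0x00, 0x10, 0x20 };	/* NHM C1/C3/C6 */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("hint 0x%02lx -> cstate index %u\n",
		       hints[i], hint_to_cstate(hints[i]));
	return 0;
}

This is the same computation intel_idle() performs below when it derives cstate from the hint stashed in driver_data; hints 0x00, 0x10 and 0x20 map to table indices 1, 2 and 3, matching the nehalem_cstates entries.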
+ */ +static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { + { /* MWAIT C0 */ }, + { /* MWAIT C1 */ + .name = "NHM-C1", + .desc = "MWAIT 0x00", + .driver_data = (void *) 0x00, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 3, + .power_usage = 1000, + .target_residency = 6, + .enter = &intel_idle }, + { /* MWAIT C2 */ + .name = "NHM-C3", + .desc = "MWAIT 0x10", + .driver_data = (void *) 0x10, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 20, + .power_usage = 500, + .target_residency = 80, + .enter = &intel_idle }, + { /* MWAIT C3 */ + .name = "NHM-C6", + .desc = "MWAIT 0x20", + .driver_data = (void *) 0x20, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 200, + .power_usage = 350, + .target_residency = 800, + .enter = &intel_idle }, +}; + +static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { + { /* MWAIT C0 */ }, + { /* MWAIT C1 */ + .name = "ATM-C1", + .desc = "MWAIT 0x00", + .driver_data = (void *) 0x00, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 1, + .power_usage = 1000, + .target_residency = 4, + .enter = &intel_idle }, + { /* MWAIT C2 */ + .name = "ATM-C2", + .desc = "MWAIT 0x10", + .driver_data = (void *) 0x10, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 20, + .power_usage = 500, + .target_residency = 80, + .enter = &intel_idle }, + { /* MWAIT C3 */ }, + { /* MWAIT C4 */ + .name = "ATM-C4", + .desc = "MWAIT 0x30", + .driver_data = (void *) 0x30, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 100, + .power_usage = 250, + .target_residency = 400, + .enter = &intel_idle }, + { /* MWAIT C5 */ }, + { /* MWAIT C6 */ + .name = "ATM-C6", + .desc = "MWAIT 0x40", + .driver_data = (void *) 0x40, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 200, + .power_usage = 150, + .target_residency = 800, + .enter = NULL }, /* disabled */ +}; + +/* + * choose_tunable_substate() + * + * Run-time decision on which C-state substate to invoke + * If power_policy = 0, choose shallowest substate (0) + * If power_policy = 15, choose deepest substate + * If power_policy = middle, choose middle substate etc. 
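Concretely, the selection arithmetic described here maps power_policy (0..15) onto 0..num_substates-1. A standalone userspace rendering for checking the endpoints; choose() mirrors choose_tunable_substate() with the driver globals turned into parameters:

#include <stdio.h>

/* Same arithmetic as choose_tunable_substate(): 0 = max perf,
 * 15 = max powersave, scaled onto the available substates. */
static int choose(int power_policy, int num_substates)
{
	if (num_substates <= 1)
		return 0;
	return (power_policy + (power_policy + 1) *
		(num_substates - 1)) / 16;
}

int main(void)
{
	/* with 3 substates: 0 -> 0 (shallowest), 7 -> 1 (middle),
	 * 15 -> 2 (deepest) */
	int policies[] = { 0, 7, 15 };
	int i;

	for (i = 0; i < 3; i++)
		printf("power_policy %2d -> substate %d\n",
		       policies[i], choose(policies[i], 3));
	return 0;
}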
+ */ +static int choose_tunable_substate(int cstate) +{ + unsigned int num_substates; + unsigned int substate_choice; + + power_policy &= 0xF; /* valid range: 0-15 */ + cstate &= 7; /* valid range: 0-7 */ + + num_substates = (substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK; + + if (num_substates <= 1) + return 0; + + substate_choice = ((power_policy + (power_policy + 1) * + (num_substates - 1)) / 16); + + return substate_choice; +} + +/* + * choose_zero_substate() + */ +static int choose_zero_substate(int cstate) +{ + return 0; +} + +/** + * intel_idle + * @dev: cpuidle_device + * @state: cpuidle state + * + */ +static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) +{ + unsigned long ecx = 1; /* break on interrupt flag */ + unsigned long eax = (unsigned long)cpuidle_get_statedata(state); + unsigned int cstate; + ktime_t kt_before, kt_after; + s64 usec_delta; + int cpu = smp_processor_id(); + + cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; + + eax = eax + (choose_substate)(cstate); + + local_irq_disable(); + + if (!(lapic_timer_reliable_states & (1 << (cstate)))) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); + + kt_before = ktime_get_real(); + + stop_critical_timings(); +#ifndef MODULE + trace_power_start(POWER_CSTATE, (eax >> 4) + 1); +#endif + if (!need_resched()) { + + __monitor((void *)&current_thread_info()->flags, 0, 0); + smp_mb(); + if (!need_resched()) + __mwait(eax, ecx); + } + + start_critical_timings(); + + kt_after = ktime_get_real(); + usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); + + local_irq_enable(); + + if (!(lapic_timer_reliable_states & (1 << (cstate)))) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); + + return usec_delta; +} + +/* + * intel_idle_probe() + */ +static int intel_idle_probe(void) +{ + unsigned int eax, ebx, ecx, edx; + + if (max_cstate == 0) { + pr_debug(PREFIX "disabled\n"); + return -EPERM; + } + + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_MWAIT)) + return -ENODEV; + + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) + return -ENODEV; + + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); + + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) + return -ENODEV; +#ifdef DEBUG + if (substates == 0) /* can over-ride via modparam */ +#endif + substates = edx; + + pr_debug(PREFIX "MWAIT substates: 0x%x\n", substates); + + if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ + lapic_timer_reliable_states = 0xFFFFFFFF; + + if (boot_cpu_data.x86 != 6) /* family 6 */ + return -ENODEV; + + switch (boot_cpu_data.x86_model) { + + case 0x1A: /* Core i7, Xeon 5500 series */ + case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */ + case 0x1F: /* Core i7 and i5 Processor - Nehalem */ + case 0x2E: /* Nehalem-EX Xeon */ + lapic_timer_reliable_states = (1 << 1); /* C1 */ + + case 0x25: /* Westmere */ + case 0x2C: /* Westmere */ + cpuidle_state_table = nehalem_cstates; + choose_substate = choose_tunable_substate; + break; + + case 0x1C: /* 28 - Atom Processor */ + lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ + cpuidle_state_table = atom_cstates; + choose_substate = choose_zero_substate; + break; +#ifdef FUTURE_USE + case 0x17: /* 23 - Core 2 Duo */ + lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ +#endif + + default: + pr_debug(PREFIX "does not run on family %d model %d\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + return 
-ENODEV; + } + + pr_debug(PREFIX "v" INTEL_IDLE_VERSION + " model 0x%X\n", boot_cpu_data.x86_model); + + pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", + lapic_timer_reliable_states); + return 0; +} + +/* + * intel_idle_cpuidle_devices_uninit() + * unregister, free cpuidle_devices + */ +static void intel_idle_cpuidle_devices_uninit(void) +{ + int i; + struct cpuidle_device *dev; + + for_each_online_cpu(i) { + dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); + cpuidle_unregister_device(dev); + } + + free_percpu(intel_idle_cpuidle_devices); + return; +} +/* + * intel_idle_cpuidle_devices_init() + * allocate, initialize, register cpuidle_devices + */ +static int intel_idle_cpuidle_devices_init(void) +{ + int i, cstate; + struct cpuidle_device *dev; + + intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); + if (intel_idle_cpuidle_devices == NULL) + return -ENOMEM; + + for_each_online_cpu(i) { + dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); + + dev->state_count = 1; + + for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { + int num_substates; + + if (cstate > max_cstate) { + printk(PREFIX "max_cstate %d reached\n", + max_cstate); + break; + } + + /* does the state exist in CPUID.MWAIT? */ + num_substates = (substates >> ((cstate) * 4)) + & MWAIT_SUBSTATE_MASK; + if (num_substates == 0) + continue; + /* is the state not enabled? */ + if (cpuidle_state_table[cstate].enter == NULL) { + /* does the driver not know about the state? */ + if (*cpuidle_state_table[cstate].name == '\0') + pr_debug(PREFIX "unaware of model 0x%x" + " MWAIT %d please" + " contact lenb@kernel.org", + boot_cpu_data.x86_model, cstate); + continue; + } + + if ((cstate > 2) && + !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle" + " states deeper than C2"); + + dev->states[dev->state_count] = /* structure copy */ + cpuidle_state_table[cstate]; + + dev->state_count += 1; + } + + dev->cpu = i; + if (cpuidle_register_device(dev)) { + pr_debug(PREFIX "cpuidle_register_device %d failed!\n", + i); + intel_idle_cpuidle_devices_uninit(); + return -EIO; + } + } + + return 0; +} + + +static int __init intel_idle_init(void) +{ + int retval; + + retval = intel_idle_probe(); + if (retval) + return retval; + + retval = cpuidle_register_driver(&intel_idle_driver); + if (retval) { + printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", + cpuidle_get_driver()->name); + return retval; + } + + retval = intel_idle_cpuidle_devices_init(); + if (retval) { + cpuidle_unregister_driver(&intel_idle_driver); + return retval; + } + + return 0; +} + +static void __exit intel_idle_exit(void) +{ + intel_idle_cpuidle_devices_uninit(); + cpuidle_unregister_driver(&intel_idle_driver); + + return; +} + +module_init(intel_idle_init); +module_exit(intel_idle_exit); + +module_param(power_policy, int, 0644); +module_param(max_cstate, int, 0444); +#ifdef DEBUG +module_param(substates, int, 0444); +#endif + +MODULE_AUTHOR("Len Brown <len.brown@intel.com>"); +MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index 9fd4a0d3206e..adaefabc40e9 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -1824,7 +1824,7 @@ static int dv1394_open(struct inode *inode, struct file *file) "and will not be available in the new firewire driver stack. 
" "Try libraw1394 based programs instead.\n", current->comm); - return 0; + return nonseekable_open(inode, file); } @@ -2153,17 +2153,18 @@ static struct cdev dv1394_cdev; static const struct file_operations dv1394_fops= { .owner = THIS_MODULE, - .poll = dv1394_poll, + .poll = dv1394_poll, .unlocked_ioctl = dv1394_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = dv1394_compat_ioctl, #endif .mmap = dv1394_mmap, .open = dv1394_open, - .write = dv1394_write, - .read = dv1394_read, + .write = dv1394_write, + .read = dv1394_read, .release = dv1394_release, - .fasync = dv1394_fasync, + .fasync = dv1394_fasync, + .llseek = no_llseek, }; diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index 8aa56ac07e29..b563d5e9fa2e 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -2834,7 +2834,7 @@ static int raw1394_open(struct inode *inode, struct file *file) file->private_data = fi; - return 0; + return nonseekable_open(inode, file); } static int raw1394_release(struct inode *inode, struct file *file) @@ -3035,6 +3035,7 @@ static const struct file_operations raw1394_fops = { .poll = raw1394_poll, .open = raw1394_open, .release = raw1394_release, + .llseek = no_llseek, }; static int __init init_raw1394(void) diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 949064a05675..a42bd6893bcf 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -1239,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file) ctx->current_ctx = NULL; file->private_data = ctx; - return 0; + return nonseekable_open(inode, file); } static int video1394_release(struct inode *inode, struct file *file) @@ -1287,7 +1287,8 @@ static const struct file_operations video1394_fops= .poll = video1394_poll, .mmap = video1394_mmap, .open = video1394_open, - .release = video1394_release + .release = video1394_release, + .llseek = no_llseek, }; /*** HOTPLUG STUFF **********************************************************/ diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 330d2a423362..89d70de5e235 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -43,6 +43,7 @@ config INFINIBAND_ADDR_TRANS source "drivers/infiniband/hw/mthca/Kconfig" source "drivers/infiniband/hw/ipath/Kconfig" +source "drivers/infiniband/hw/qib/Kconfig" source "drivers/infiniband/hw/ehca/Kconfig" source "drivers/infiniband/hw/amso1100/Kconfig" source "drivers/infiniband/hw/cxgb3/Kconfig" diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile index 0c4e589d746e..9cc7a47d3e67 100644 --- a/drivers/infiniband/Makefile +++ b/drivers/infiniband/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_INFINIBAND) += core/ obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/ +obj-$(CONFIG_INFINIBAND_QIB) += hw/qib/ obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/ obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 05ac36e6acdb..a565af5c2d2e 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -38,7 +38,9 @@ #include <rdma/ib_verbs.h> -int ib_device_register_sysfs(struct ib_device *device); +int ib_device_register_sysfs(struct ib_device *device, + int (*port_callback)(struct ib_device *, + u8, struct kobject *)); void ib_device_unregister_sysfs(struct ib_device *device); int ib_sysfs_setup(void); diff --git 
a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index d1fba4153332..a19effad0811 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -267,7 +267,9 @@ out: * callback for each device that is added. @device must be allocated * with ib_alloc_device(). */ -int ib_register_device(struct ib_device *device) +int ib_register_device(struct ib_device *device, + int (*port_callback)(struct ib_device *, + u8, struct kobject *)) { int ret; @@ -296,7 +298,7 @@ int ib_register_device(struct ib_device *device) goto out; } - ret = ib_device_register_sysfs(device); + ret = ib_device_register_sysfs(device, port_callback); if (ret) { printk(KERN_WARNING "Couldn't register device %s with driver model\n", device->name); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 6dc7b77d5d29..ef1304f151dc 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -47,8 +47,8 @@ MODULE_DESCRIPTION("kernel IB MAD API"); MODULE_AUTHOR("Hal Rosenstock"); MODULE_AUTHOR("Sean Hefty"); -int mad_sendq_size = IB_MAD_QP_SEND_SIZE; -int mad_recvq_size = IB_MAD_QP_RECV_SIZE; +static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; +static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; module_param_named(send_queue_size, mad_sendq_size, int, 0444); MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests"); diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index f901957abc8b..3627300e2a10 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -475,7 +475,9 @@ err: return NULL; } -static int add_port(struct ib_device *device, int port_num) +static int add_port(struct ib_device *device, int port_num, + int (*port_callback)(struct ib_device *, + u8, struct kobject *)) { struct ib_port *p; struct ib_port_attr attr; @@ -522,11 +524,20 @@ static int add_port(struct ib_device *device, int port_num) if (ret) goto err_free_pkey; + if (port_callback) { + ret = port_callback(device, port_num, &p->kobj); + if (ret) + goto err_remove_pkey; + } + list_add_tail(&p->kobj.entry, &device->port_list); kobject_uevent(&p->kobj, KOBJ_ADD); return 0; +err_remove_pkey: + sysfs_remove_group(&p->kobj, &p->pkey_group); + err_free_pkey: for (i = 0; i < attr.pkey_tbl_len; ++i) kfree(p->pkey_group.attrs[i]); @@ -754,7 +765,9 @@ static struct attribute_group iw_stats_group = { .attrs = iw_proto_stats_attrs, }; -int ib_device_register_sysfs(struct ib_device *device) +int ib_device_register_sysfs(struct ib_device *device, + int (*port_callback)(struct ib_device *, + u8, struct kobject *)) { struct device *class_dev = &device->dev; int ret; @@ -785,12 +798,12 @@ int ib_device_register_sysfs(struct ib_device *device) } if (device->node_type == RDMA_NODE_IB_SWITCH) { - ret = add_port(device, 0); + ret = add_port(device, 0, port_callback); if (ret) goto err_put; } else { for (i = 1; i <= device->phys_port_cnt; ++i) { - ret = add_port(device, i); + ret = add_port(device, i, port_callback); if (ret) goto err_put; } diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 46474842cfe9..08f948df8fa9 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -706,14 +706,9 @@ static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len) if (!len) return 0; - data = kmalloc(len, GFP_KERNEL); - if (!data) - return -ENOMEM; - - if (copy_from_user(data, (void __user *)(unsigned long)src, len)) { - kfree(data); - return -EFAULT; - } + data = 
memdup_user((void __user *)(unsigned long)src, len); + if (IS_ERR(data)) + return PTR_ERR(data); *dest = data; return 0; diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index c47f618d12e8..aeebc4d37e33 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -865,7 +865,7 @@ int c2_register_device(struct c2_dev *dev) dev->ibdev.iwcm->create_listen = c2_service_create; dev->ibdev.iwcm->destroy_listen = c2_service_destroy; - ret = ib_register_device(&dev->ibdev); + ret = ib_register_device(&dev->ibdev, NULL); if (ret) goto out_free_iwcm; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 19b1c4a62a23..fca0b4b747e4 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1428,7 +1428,7 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; dev->ibdev.iwcm->get_qp = iwch_get_qp; - ret = ib_register_device(&dev->ibdev); + ret = ib_register_device(&dev->ibdev, NULL); if (ret) goto bail1; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index fb1aafcc294f..2447f5295482 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -373,6 +373,7 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, V_CQE_SWCQE(SW_CQE(hw_cqe)) | V_CQE_OPCODE(FW_RI_READ_REQ) | V_CQE_TYPE(1)); + read_cqe->bits_type_ts = hw_cqe->bits_type_ts; } /* @@ -780,6 +781,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, /* account for the status page. */ entries++; + /* IQ needs one extra entry to differentiate full vs empty. */ + entries++; + /* * entries must be multiple of 16 for HW. 
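Putting the two extra slots added in this hunk together with the multiple-of-16 rule just stated: a back-of-the-envelope userspace sketch of how a user-requested CQ size grows before allocation. cq_hw_entries() is illustrative only, assuming the usual power-of-two-free round-up; it is not driver code:

#include <stdio.h>

/* One slot for the status page, one so a full IQ can be told apart
 * from an empty one, then round up to a multiple of 16 for the HW. */
static int cq_hw_entries(int user_entries)
{
	int entries = user_entries;

	entries++;			/* status page */
	entries++;			/* full-vs-empty sentinel */
	return (entries + 15) & ~15;	/* round up to multiple of 16 */
}

int main(void)
{
	printf("user asks for 100 entries -> allocate %d\n",
	       cq_hw_entries(100));	/* prints 112 */
	return 0;
}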
*/ @@ -801,7 +805,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, chp->rhp = rhp; chp->cq.size--; /* status page */ - chp->ibcq.cqe = chp->cq.size; + chp->ibcq.cqe = chp->cq.size - 1; spin_lock_init(&chp->lock); atomic_set(&chp->refcnt, 1); init_waitqueue_head(&chp->wait); diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index be23b5eab13b..d870f9c17c1e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -306,7 +306,8 @@ static void c4iw_remove(struct c4iw_dev *dev) PDBG("%s c4iw_dev %p\n", __func__, dev); cancel_delayed_work_sync(&dev->db_drop_task); list_del(&dev->entry); - c4iw_unregister_device(dev); + if (dev->registered) + c4iw_unregister_device(dev); c4iw_rdev_close(&dev->rdev); idr_destroy(&dev->cqidr); idr_destroy(&dev->qpidr); @@ -343,12 +344,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) list_add_tail(&devp->entry, &dev_list); mutex_unlock(&dev_mutex); - if (c4iw_register_device(devp)) { - printk(KERN_ERR MOD "Unable to register device\n"); - mutex_lock(&dev_mutex); - c4iw_remove(devp); - mutex_unlock(&dev_mutex); - } if (c4iw_debugfs_root) { devp->debugfs_root = debugfs_create_dir( pci_name(devp->rdev.lldi.pdev), @@ -379,9 +374,6 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) for (i = 0; i < dev->rdev.lldi.nrxq; i++) PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]); - - printk(KERN_INFO MOD "Initialized device %s\n", - pci_name(dev->rdev.lldi.pdev)); out: return dev; } @@ -471,7 +463,41 @@ nomem: static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) { + struct c4iw_dev *dev = handle; + PDBG("%s new_state %u\n", __func__, new_state); + switch (new_state) { + case CXGB4_STATE_UP: + printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev)); + if (!dev->registered) { + int ret; + ret = c4iw_register_device(dev); + if (ret) + printk(KERN_ERR MOD + "%s: RDMA registration failed: %d\n", + pci_name(dev->rdev.lldi.pdev), ret); + } + break; + case CXGB4_STATE_DOWN: + printk(KERN_INFO MOD "%s: Down\n", + pci_name(dev->rdev.lldi.pdev)); + if (dev->registered) + c4iw_unregister_device(dev); + break; + case CXGB4_STATE_START_RECOVERY: + printk(KERN_INFO MOD "%s: Fatal Error\n", + pci_name(dev->rdev.lldi.pdev)); + if (dev->registered) + c4iw_unregister_device(dev); + break; + case CXGB4_STATE_DETACH: + printk(KERN_INFO MOD "%s: Detach\n", + pci_name(dev->rdev.lldi.pdev)); + mutex_lock(&dev_mutex); + c4iw_remove(dev); + mutex_unlock(&dev_mutex); + break; + } return 0; } @@ -504,14 +530,12 @@ static void __exit c4iw_exit_module(void) { struct c4iw_dev *dev, *tmp; - cxgb4_unregister_uld(CXGB4_ULD_RDMA); - mutex_lock(&dev_mutex); list_for_each_entry_safe(dev, tmp, &dev_list, entry) { c4iw_remove(dev); } mutex_unlock(&dev_mutex); - + cxgb4_unregister_uld(CXGB4_ULD_RDMA); c4iw_cm_term(); debugfs_remove_recursive(c4iw_debugfs_root); } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index a6269981e815..277ab589b44d 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -152,6 +152,7 @@ struct c4iw_dev { struct list_head entry; struct delayed_work db_drop_task; struct dentry *debugfs_root; + u8 registered; }; static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index e54ff6d25691..7f94da1a2437 100644 --- 
a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -712,8 +712,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); - if (!mhp) + if (!mhp) { + ret = -ENOMEM; goto err; + } mhp->rhp = rhp; ret = alloc_pbl(mhp, pbl_depth); @@ -730,8 +732,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) mhp->attr.state = 1; mmid = (stag) >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; - if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) + if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) { + ret = -ENOMEM; goto err3; + } PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); return &(mhp->ibmr); @@ -755,9 +759,6 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device, dma_addr_t dma_addr; int size = sizeof *c4pl + page_list_len * sizeof(u64); - if (page_list_len > T4_MAX_FR_DEPTH) - return ERR_PTR(-EINVAL); - c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size, &dma_addr, GFP_KERNEL); if (!c4pl) diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index dfc49020bb9c..8f645c83a125 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -486,7 +486,7 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; dev->ibdev.iwcm->get_qp = c4iw_get_qp; - ret = ib_register_device(&dev->ibdev); + ret = ib_register_device(&dev->ibdev, NULL); if (ret) goto bail1; @@ -496,6 +496,7 @@ int c4iw_register_device(struct c4iw_dev *dev) if (ret) goto bail2; } + dev->registered = 1; return 0; bail2: ib_unregister_device(&dev->ibdev); @@ -514,5 +515,6 @@ void c4iw_unregister_device(struct c4iw_dev *dev) c4iw_class_attributes[i]); ib_unregister_device(&dev->ibdev); kfree(dev->ibdev.iwcm); + dev->registered = 0; return; } diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 83a01dc0c4c1..0c28ed1eafa6 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -572,9 +572,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, err = build_rdma_write(wqe, wr, &len16); break; case IB_WR_RDMA_READ: + case IB_WR_RDMA_READ_WITH_INV: fw_opcode = FW_RI_RDMA_READ_WR; swsqe->opcode = FW_RI_READ_REQ; - fw_flags = 0; + if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) + fw_flags |= FW_RI_RDMA_READ_INVALIDATE; + else + fw_flags = 0; err = build_rdma_read(wqe, wr, &len16); if (err) break; @@ -588,6 +592,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, err = build_fastreg(wqe, wr, &len16); break; case IB_WR_LOCAL_INV: + if (wr->send_flags & IB_SEND_FENCE) + fw_flags |= FW_RI_LOCAL_FENCE_FLAG; fw_opcode = FW_RI_INV_LSTAG_WR; swsqe->opcode = FW_RI_LOCAL_INV; err = build_inv_stag(wqe, wr, &len16); @@ -1339,7 +1345,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) wait_event(qhp->wait, !qhp->ep); remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); - remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid); atomic_dec(&qhp->refcnt); wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); @@ -1442,30 +1447,26 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, if (ret) goto err2; - ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid); - if (ret) - goto err3; - if (udata) { mm1 = kmalloc(sizeof *mm1, GFP_KERNEL); if (!mm1) { ret = -ENOMEM; - goto err4; + goto err3; } mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); if (!mm2) { ret = -ENOMEM; - goto err5; + 
goto err4; } mm3 = kmalloc(sizeof *mm3, GFP_KERNEL); if (!mm3) { ret = -ENOMEM; - goto err6; + goto err5; } mm4 = kmalloc(sizeof *mm4, GFP_KERNEL); if (!mm4) { ret = -ENOMEM; - goto err7; + goto err6; } uresp.qid_mask = rhp->rdev.qpmask; @@ -1487,7 +1488,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, spin_unlock(&ucontext->mmap_lock); ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); if (ret) - goto err8; + goto err7; mm1->key = uresp.sq_key; mm1->addr = virt_to_phys(qhp->wq.sq.queue); mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); @@ -1511,16 +1512,14 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries, qhp->wq.sq.qid); return &qhp->ibqp; -err8: - kfree(mm4); err7: - kfree(mm3); + kfree(mm4); err6: - kfree(mm2); + kfree(mm3); err5: - kfree(mm1); + kfree(mm2); err4: - remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid); + kfree(mm1); err3: remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); err2: diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index d0e8af352408..1057cb96302e 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -41,11 +41,13 @@ #define T4_MAX_NUM_QP (1<<16) #define T4_MAX_NUM_CQ (1<<15) #define T4_MAX_NUM_PD (1<<15) -#define T4_MAX_PBL_SIZE 256 -#define T4_MAX_RQ_SIZE 1024 -#define T4_MAX_SQ_SIZE 1024 -#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1) -#define T4_MAX_CQ_DEPTH 8192 +#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1) +#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES) +#define T4_MAX_IQ_SIZE (65520 - 1) +#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES) +#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1) +#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1) +#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1) #define T4_MAX_NUM_STAG (1<<15) #define T4_MAX_MR_SIZE (~0ULL - 1) #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ @@ -79,12 +81,11 @@ struct t4_status_page { sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ sizeof(struct fw_ri_immd))) -#define T4_MAX_FR_DEPTH 255 +#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64)) #define T4_RQ_NUM_SLOTS 2 #define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS) -#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \ - sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) +#define T4_MAX_RECV_SGE 4 union t4_wr { struct fw_ri_res_wr res; @@ -434,7 +435,7 @@ struct t4_cq { struct c4iw_rdev *rdev; u64 ugts; size_t memsize; - u64 timestamp; + __be64 bits_type_ts; u32 cqid; u16 size; /* including status page */ u16 cidx; @@ -449,25 +450,17 @@ struct t4_cq { static inline int t4_arm_cq(struct t4_cq *cq, int se) { u32 val; - u16 inc; - - do { - /* - * inc must be less the both the max update value -and- - * the size of the CQ. - */ - inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc : - CIDXINC_MASK; - inc = inc <= (cq->size - 1) ? 
inc : (cq->size - 1); - if (inc == cq->cidx_inc) - val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) | - INGRESSQID(cq->cqid); - else - val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) | - INGRESSQID(cq->cqid); - cq->cidx_inc -= inc; + + while (cq->cidx_inc > CIDXINC_MASK) { + val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) | + INGRESSQID(cq->cqid); writel(val, cq->gts); - } while (cq->cidx_inc); + cq->cidx_inc -= CIDXINC_MASK; + } + val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) | + INGRESSQID(cq->cqid); + writel(val, cq->gts); + cq->cidx_inc = 0; return 0; } @@ -487,7 +480,9 @@ static inline void t4_swcq_consume(struct t4_cq *cq) static inline void t4_hwcq_consume(struct t4_cq *cq) { - cq->cidx_inc++; + cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts; + if (++cq->cidx_inc == cq->size) + cq->cidx_inc = 0; if (++cq->cidx == cq->size) { cq->cidx = 0; cq->gen ^= 1; @@ -501,20 +496,23 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) { - int ret = 0; - u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts); + int ret; + u16 prev_cidx; - if (G_CQE_GENBIT(bits_type_ts) == cq->gen) { - *cqe = &cq->queue[cq->cidx]; - cq->timestamp = G_CQE_TS(bits_type_ts); - } else if (G_CQE_TS(bits_type_ts) > cq->timestamp) - ret = -EOVERFLOW; + if (cq->cidx == 0) + prev_cidx = cq->size - 1; else - ret = -ENODATA; - if (ret == -EOVERFLOW) { - printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid); + prev_cidx = cq->cidx - 1; + + if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) { + ret = -EOVERFLOW; cq->error = 1; - } + printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid); + } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { + *cqe = &cq->queue[cq->cidx]; + ret = 0; + } else + ret = -ENODATA; return ret; } diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index 07cae552cafb..e571e60ecb88 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c @@ -847,7 +847,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb, ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); if (!create_comp_task(pool, cpu)) { ehca_gen_err("Can't create comp_task for cpu: %x", cpu); - return NOTIFY_BAD; + return notifier_from_errno(-ENOMEM); } break; case CPU_UP_CANCELED: diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 129a6bebd6e3..ecb51b396c42 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -291,8 +291,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca) }; ehca_gen_dbg("Probing adapter %s...", - shca->ofdev->node->full_name); - loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL); + shca->ofdev->dev.of_node->full_name); + loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code", + NULL); if (loc_code) ehca_gen_dbg(" ... 
location lode=%s", loc_code); @@ -720,16 +721,16 @@ static int __devinit ehca_probe(struct of_device *dev, int ret, i, eq_size; unsigned long flags; - handle = of_get_property(dev->node, "ibm,hca-handle", NULL); + handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL); if (!handle) { ehca_gen_err("Cannot get eHCA handle for adapter: %s.", - dev->node->full_name); + dev->dev.of_node->full_name); return -ENODEV; } if (!(*handle)) { ehca_gen_err("Wrong eHCA handle for adapter: %s.", - dev->node->full_name); + dev->dev.of_node->full_name); return -ENODEV; } @@ -798,7 +799,7 @@ static int __devinit ehca_probe(struct of_device *dev, goto probe5; } - ret = ib_register_device(&shca->ib_device); + ret = ib_register_device(&shca->ib_device, NULL); if (ret) { ehca_err(&shca->ib_device, "ib_register_device() failed ret=%i", ret); @@ -936,12 +937,13 @@ static struct of_device_id ehca_device_table[] = MODULE_DEVICE_TABLE(of, ehca_device_table); static struct of_platform_driver ehca_driver = { - .name = "ehca", - .match_table = ehca_device_table, .probe = ehca_probe, .remove = ehca_remove, - .driver = { + .driver = { + .name = "ehca", + .owner = THIS_MODULE, .groups = ehca_drv_attr_groups, + .of_match_table = ehca_device_table, }, }; diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig index 3c7968f25ec2..1d9bb115cbf6 100644 --- a/drivers/infiniband/hw/ipath/Kconfig +++ b/drivers/infiniband/hw/ipath/Kconfig @@ -1,9 +1,11 @@ config INFINIBAND_IPATH - tristate "QLogic InfiniPath Driver" - depends on 64BIT && NET + tristate "QLogic HTX HCA support" + depends on 64BIT && NET && HT_IRQ ---help--- - This is a driver for QLogic InfiniPath host channel adapters, + This is a driver for the obsolete QLogic Hyper-Transport + IB host channel adapter (model QHT7140), including InfiniBand verbs support. This driver allows these devices to be used with both kernel upper level protocols such as IP-over-InfiniBand as well as with userspace applications (in conjunction with InfiniBand userspace access). + For QLogic PCIe QLE based cards, use the QIB driver instead. diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile index bf9450061986..fa3df82681df 100644 --- a/drivers/infiniband/hw/ipath/Makefile +++ b/drivers/infiniband/hw/ipath/Makefile @@ -29,13 +29,9 @@ ib_ipath-y := \ ipath_user_pages.o \ ipath_user_sdma.o \ ipath_verbs_mcast.o \ - ipath_verbs.o \ - ipath_iba7220.o \ - ipath_sd7220.o \ - ipath_sd7220_img.o + ipath_verbs.o ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o -ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 6302626d17f0..21337468c652 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -132,18 +132,13 @@ static int __devinit ipath_init_one(struct pci_dev *, /* Only needed for registration, nothing else needs this info */ #define PCI_VENDOR_ID_PATHSCALE 0x1fc1 -#define PCI_VENDOR_ID_QLOGIC 0x1077 #define PCI_DEVICE_ID_INFINIPATH_HT 0xd -#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 -#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220 /* Number of seconds before our card status check... 
*/ #define STATUS_TIMEOUT 60 static const struct pci_device_id ipath_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) }, - { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) }, - { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) }, { 0, } }; @@ -521,30 +516,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, /* setup the chip-specific functions, as early as possible. */ switch (ent->device) { case PCI_DEVICE_ID_INFINIPATH_HT: -#ifdef CONFIG_HT_IRQ ipath_init_iba6110_funcs(dd); break; -#else - ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if " - "CONFIG_HT_IRQ is not enabled\n", ent->device); - return -ENODEV; -#endif - case PCI_DEVICE_ID_INFINIPATH_PE800: -#ifdef CONFIG_PCI_MSI - ipath_init_iba6120_funcs(dd); - break; -#else - ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if " - "CONFIG_PCI_MSI is not enabled\n", ent->device); - return -ENODEV; -#endif - case PCI_DEVICE_ID_INFINIPATH_7220: -#ifndef CONFIG_PCI_MSI - ipath_dbg("CONFIG_PCI_MSI is not enabled, " - "using INTx for unit %u\n", dd->ipath_unit); -#endif - ipath_init_iba7220_funcs(dd); - break; + default: ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " "failing\n", ent->device); diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c deleted file mode 100644 index 4b4a30b0dabd..000000000000 --- a/drivers/infiniband/hw/ipath/ipath_iba6120.c +++ /dev/null @@ -1,1862 +0,0 @@ -/* - * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. - * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -/* - * This file contains all of the code that is specific to the - * InfiniPath PCIe chip. - */ - -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <rdma/ib_verbs.h> - -#include "ipath_kernel.h" -#include "ipath_registers.h" - -static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64); - -/* - * This file contains all the chip-specific register information and - * access functions for the QLogic InfiniPath PCI-Express chip. - * - * This lists the InfiniPath registers, in the actual chip layout. 
- * This structure should never be directly accessed. - */ -struct _infinipath_do_not_use_kernel_regs { - unsigned long long Revision; - unsigned long long Control; - unsigned long long PageAlign; - unsigned long long PortCnt; - unsigned long long DebugPortSelect; - unsigned long long Reserved0; - unsigned long long SendRegBase; - unsigned long long UserRegBase; - unsigned long long CounterRegBase; - unsigned long long Scratch; - unsigned long long Reserved1; - unsigned long long Reserved2; - unsigned long long IntBlocked; - unsigned long long IntMask; - unsigned long long IntStatus; - unsigned long long IntClear; - unsigned long long ErrorMask; - unsigned long long ErrorStatus; - unsigned long long ErrorClear; - unsigned long long HwErrMask; - unsigned long long HwErrStatus; - unsigned long long HwErrClear; - unsigned long long HwDiagCtrl; - unsigned long long MDIO; - unsigned long long IBCStatus; - unsigned long long IBCCtrl; - unsigned long long ExtStatus; - unsigned long long ExtCtrl; - unsigned long long GPIOOut; - unsigned long long GPIOMask; - unsigned long long GPIOStatus; - unsigned long long GPIOClear; - unsigned long long RcvCtrl; - unsigned long long RcvBTHQP; - unsigned long long RcvHdrSize; - unsigned long long RcvHdrCnt; - unsigned long long RcvHdrEntSize; - unsigned long long RcvTIDBase; - unsigned long long RcvTIDCnt; - unsigned long long RcvEgrBase; - unsigned long long RcvEgrCnt; - unsigned long long RcvBufBase; - unsigned long long RcvBufSize; - unsigned long long RxIntMemBase; - unsigned long long RxIntMemSize; - unsigned long long RcvPartitionKey; - unsigned long long Reserved3; - unsigned long long RcvPktLEDCnt; - unsigned long long Reserved4[8]; - unsigned long long SendCtrl; - unsigned long long SendPIOBufBase; - unsigned long long SendPIOSize; - unsigned long long SendPIOBufCnt; - unsigned long long SendPIOAvailAddr; - unsigned long long TxIntMemBase; - unsigned long long TxIntMemSize; - unsigned long long Reserved5; - unsigned long long PCIeRBufTestReg0; - unsigned long long PCIeRBufTestReg1; - unsigned long long Reserved51[6]; - unsigned long long SendBufferError; - unsigned long long SendBufferErrorCONT1; - unsigned long long Reserved6SBE[6]; - unsigned long long RcvHdrAddr0; - unsigned long long RcvHdrAddr1; - unsigned long long RcvHdrAddr2; - unsigned long long RcvHdrAddr3; - unsigned long long RcvHdrAddr4; - unsigned long long Reserved7RHA[11]; - unsigned long long RcvHdrTailAddr0; - unsigned long long RcvHdrTailAddr1; - unsigned long long RcvHdrTailAddr2; - unsigned long long RcvHdrTailAddr3; - unsigned long long RcvHdrTailAddr4; - unsigned long long Reserved8RHTA[11]; - unsigned long long Reserved9SW[8]; - unsigned long long SerdesConfig0; - unsigned long long SerdesConfig1; - unsigned long long SerdesStatus; - unsigned long long XGXSConfig; - unsigned long long IBPLLCfg; - unsigned long long Reserved10SW2[3]; - unsigned long long PCIEQ0SerdesConfig0; - unsigned long long PCIEQ0SerdesConfig1; - unsigned long long PCIEQ0SerdesStatus; - unsigned long long Reserved11; - unsigned long long PCIEQ1SerdesConfig0; - unsigned long long PCIEQ1SerdesConfig1; - unsigned long long PCIEQ1SerdesStatus; - unsigned long long Reserved12; -}; - -struct _infinipath_do_not_use_counters { - __u64 LBIntCnt; - __u64 LBFlowStallCnt; - __u64 Reserved1; - __u64 TxUnsupVLErrCnt; - __u64 TxDataPktCnt; - __u64 TxFlowPktCnt; - __u64 TxDwordCnt; - __u64 TxLenErrCnt; - __u64 TxMaxMinLenErrCnt; - __u64 TxUnderrunCnt; - __u64 TxFlowStallCnt; - __u64 TxDroppedPktCnt; - __u64 RxDroppedPktCnt; 
- __u64 RxDataPktCnt; - __u64 RxFlowPktCnt; - __u64 RxDwordCnt; - __u64 RxLenErrCnt; - __u64 RxMaxMinLenErrCnt; - __u64 RxICRCErrCnt; - __u64 RxVCRCErrCnt; - __u64 RxFlowCtrlErrCnt; - __u64 RxBadFormatCnt; - __u64 RxLinkProblemCnt; - __u64 RxEBPCnt; - __u64 RxLPCRCErrCnt; - __u64 RxBufOvflCnt; - __u64 RxTIDFullErrCnt; - __u64 RxTIDValidErrCnt; - __u64 RxPKeyMismatchCnt; - __u64 RxP0HdrEgrOvflCnt; - __u64 RxP1HdrEgrOvflCnt; - __u64 RxP2HdrEgrOvflCnt; - __u64 RxP3HdrEgrOvflCnt; - __u64 RxP4HdrEgrOvflCnt; - __u64 RxP5HdrEgrOvflCnt; - __u64 RxP6HdrEgrOvflCnt; - __u64 RxP7HdrEgrOvflCnt; - __u64 RxP8HdrEgrOvflCnt; - __u64 Reserved6; - __u64 Reserved7; - __u64 IBStatusChangeCnt; - __u64 IBLinkErrRecoveryCnt; - __u64 IBLinkDownedCnt; - __u64 IBSymbolErrCnt; -}; - -#define IPATH_KREG_OFFSET(field) (offsetof( \ - struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64)) -#define IPATH_CREG_OFFSET(field) (offsetof( \ - struct _infinipath_do_not_use_counters, field) / sizeof(u64)) - -static const struct ipath_kregs ipath_pe_kregs = { - .kr_control = IPATH_KREG_OFFSET(Control), - .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase), - .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect), - .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear), - .kr_errormask = IPATH_KREG_OFFSET(ErrorMask), - .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus), - .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl), - .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus), - .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear), - .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask), - .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut), - .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus), - .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl), - .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear), - .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask), - .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus), - .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl), - .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus), - .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked), - .kr_intclear = IPATH_KREG_OFFSET(IntClear), - .kr_intmask = IPATH_KREG_OFFSET(IntMask), - .kr_intstatus = IPATH_KREG_OFFSET(IntStatus), - .kr_mdio = IPATH_KREG_OFFSET(MDIO), - .kr_pagealign = IPATH_KREG_OFFSET(PageAlign), - .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey), - .kr_portcnt = IPATH_KREG_OFFSET(PortCnt), - .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP), - .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase), - .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize), - .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl), - .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase), - .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt), - .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt), - .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize), - .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize), - .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase), - .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize), - .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase), - .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt), - .kr_revision = IPATH_KREG_OFFSET(Revision), - .kr_scratch = IPATH_KREG_OFFSET(Scratch), - .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError), - .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl), - .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr), - .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase), - .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt), - .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize), - .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase), - .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase), - .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize), - 
.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase), - .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0), - .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1), - .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus), - .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig), - .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg), - - /* - * These should not be used directly via ipath_write_kreg64(), - * use them with ipath_write_kreg64_port(), - */ - .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), - .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), - - /* The rcvpktled register controls one of the debug port signals, so - * a packet activity LED can be connected to it. */ - .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt), - .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0), - .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1), - .kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0), - .kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1), - .kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus), - .kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0), - .kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1), - .kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus) -}; - -static const struct ipath_cregs ipath_pe_cregs = { - .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt), - .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt), - .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt), - .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt), - .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt), - .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt), - .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt), - .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt), - .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt), - .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt), - .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt), - .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt), - .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt), - .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt), - .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt), - .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt), - .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt), - .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt), - .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt), - .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt), - .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt), - .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt), - .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt), - .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt), - .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt), - .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt), - .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt), - .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt), - .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt), - .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt), - .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt), - .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt), - .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt) -}; - -/* kr_control bits */ -#define INFINIPATH_C_RESET 1U - -/* kr_intstatus, kr_intclear, kr_intmask bits */ -#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1) -#define INFINIPATH_I_RCVURG_SHIFT 0 -#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1) -#define INFINIPATH_I_RCVAVAIL_SHIFT 12 - -/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ -#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL -#define 
INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0 -#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL -#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL -#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL -#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL -#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL -#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL -#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL -#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL -#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL -#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL - -#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf -#define IBA6120_IBCS_LINKSTATE_SHIFT 4 - -/* kr_extstatus bits */ -#define INFINIPATH_EXTS_FREQSEL 0x2 -#define INFINIPATH_EXTS_SERDESSEL 0x4 -#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 -#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000 - -/* kr_xgxsconfig bits */ -#define INFINIPATH_XGXS_RESET 0x5ULL - -#define _IPATH_GPIO_SDA_NUM 1 -#define _IPATH_GPIO_SCL_NUM 0 - -#define IPATH_GPIO_SDA (1ULL << \ - (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) -#define IPATH_GPIO_SCL (1ULL << \ - (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) - -#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL -#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \ - ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1) -#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid)) -#define INFINIPATH_RT_IS_VALID(tid) \ - (((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \ - ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK))) -#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */ -#define INFINIPATH_RT_ADDR_SHIFT 10 - -#define INFINIPATH_R_INTRAVAIL_SHIFT 16 -#define INFINIPATH_R_TAILUPD_SHIFT 31 - -/* 6120 specific hardware errors... */ -static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = { - INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"), - INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"), - /* - * In practice, it's unlikely that we'll see PCIe PLL, or bus - * parity or memory parity error failures, because most likely we - * won't be able to talk to the core of the chip. Nonetheless, we - * might see them, if they are in parts of the PCIe core that aren't - * essential. - */ - INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"), - INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"), - INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"), - INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"), - INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"), - INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"), - INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), -}; - -#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \ - INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \ - << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) -#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \ - << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) - -static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *, - u32, unsigned long); - -/* - * On platforms using this chip, and not having ordered WC stores, we - * can get TXE parity errors due to speculative reads to the PIO buffers, - * and this, due to a chip bug, can result in (many) false parity error - * reports. So it's a debug print on those, and an info print on systems - * where the speculative reads don't occur.
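The ipath_6120_hwerror_msgs table above pairs each hardware-error bit with a short human-readable string, and ipath_format_hwerrors() walks such a table to build a single log message. A minimal standalone sketch of that table-driven pattern (struct and function names here are illustrative, not the driver's own API):

	#include <stdio.h>
	#include <string.h>

	struct hwe_msg {
		unsigned long long mask;   /* error bit(s) this entry covers */
		const char *msg;           /* text to append when a bit is set */
	};

	static const struct hwe_msg msgs[] = {
		{ 0x0000000010000000ULL, "PCIe Poisoned TLP" },
		{ 0x0000000020000000ULL, "PCIe completion timeout" },
	};

	/* append the message for every set bit, comma separated */
	static void format_hwerrors(unsigned long long hwerrs, char *buf, size_t len)
	{
		size_t i;

		buf[0] = '\0';
		for (i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++) {
			if (!(hwerrs & msgs[i].mask))
				continue;
			if (buf[0])
				strncat(buf, ", ", len - strlen(buf) - 1);
			strncat(buf, msgs[i].msg, len - strlen(buf) - 1);
		}
	}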
- */ -static void ipath_pe_txe_recover(struct ipath_devdata *dd) -{ - if (ipath_unordered_wc()) - ipath_dbg("Recovering from TXE PIO parity error\n"); - else { - ++ipath_stats.sps_txeparity; - dev_info(&dd->pcidev->dev, - "Recovering from TXE PIO parity error\n"); - } -} - - -/** - * ipath_pe_handle_hwerrors - display hardware errors. - * @dd: the infinipath device - * @msg: the output buffer - * @msgl: the size of the output buffer - * - * Use same msg buffer as regular errors to avoid excessive stack - * use. Most hardware errors are catastrophic, but for right now, - * we'll print them and continue. We reuse the same message buffer as - * ipath_handle_errors() to avoid excessive stack usage. - */ -static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, - size_t msgl) -{ - ipath_err_t hwerrs; - u32 bits, ctrl; - int isfatal = 0; - char bitsmsg[64]; - int log_idx; - - hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); - if (!hwerrs) { - /* - * better than printing confusing messages - * This seems to be related to clearing the crc error, or - * the pll error during init. - */ - ipath_cdbg(VERBOSE, "Called but no hardware errors set\n"); - return; - } else if (hwerrs == ~0ULL) { - ipath_dev_err(dd, "Read of hardware error status failed " - "(all bits set); ignoring\n"); - return; - } - ipath_stats.sps_hwerrs++; - - /* Always clear the error status register, except MEMBISTFAIL, - * regardless of whether we continue or stop using the chip. - * We want that set so we know it failed, even across driver reload. - * We'll still ignore it in the hwerrmask. We do this partly for - * diagnostics, but also for support */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, - hwerrs&~INFINIPATH_HWE_MEMBISTFAILED); - - hwerrs &= dd->ipath_hwerrmask; - - /* We log some errors to EEPROM, check if we have any of those. */ - for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) - if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log) - ipath_inc_eeprom_err(dd, log_idx, 1); - - /* - * make sure we get this much out, unless told to be quiet, - * or it's occurred within the last 5 seconds - */ - if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY | - RXE_EAGER_PARITY)) || - (ipath_debug & __IPATH_VERBDBG)) - dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " - "(cleared)\n", (unsigned long long) hwerrs); - dd->ipath_lasthwerror |= hwerrs; - - if (hwerrs & ~dd->ipath_hwe_bitsextant) - ipath_dev_err(dd, "hwerror interrupt with unknown errors " - "%llx set\n", (unsigned long long) - (hwerrs & ~dd->ipath_hwe_bitsextant)); - - ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); - if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) { - /* - * parity errors in send memory are recoverable, - * just cancel the send (if indicated in * sendbuffererror), - * count the occurrence, unfreeze (if no other handled - * hardware error bits are set), and continue. They can - * occur if a processor speculative read is done to the PIO - * buffer while we are sending a packet, for example.
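The freeze-mode branch that follows strips the recoverable send-buffer parity bits out of hwerrs, counts them, and only escalates whatever is left. A condensed sketch of that strip-and-continue flow (the mask value and counter are stand-ins, not the driver's):

	#define TXE_PIO_PARITY_EXAMPLE 0x0000300000000000ULL /* stand-in mask */

	static unsigned long txeparity_recovered;

	/* returns the error bits that still need fatal handling */
	static unsigned long long filter_recoverable(unsigned long long hwerrs)
	{
		if (hwerrs & TXE_PIO_PARITY_EXAMPLE) {
			txeparity_recovered++;          /* recovered; just count it */
			hwerrs &= ~TXE_PIO_PARITY_EXAMPLE;
		}
		return hwerrs;  /* zero here means it is safe to clear freeze mode */
	}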
- */ - if (hwerrs & TXE_PIO_PARITY) { - ipath_pe_txe_recover(dd); - hwerrs &= ~TXE_PIO_PARITY; - } - if (!hwerrs) { - static u32 freeze_cnt; - - freeze_cnt++; - ipath_dbg("Clearing freezemode on ignored or recovered " - "hardware error (%u)\n", freeze_cnt); - ipath_clear_freeze(dd); - } - } - - *msg = '\0'; - - if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { - strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", - msgl); - /* ignore from now on, so disable until driver reloaded */ - *dd->ipath_statusp |= IPATH_STATUS_HWERROR; - dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, - dd->ipath_hwerrmask); - } - - ipath_format_hwerrors(hwerrs, - ipath_6120_hwerror_msgs, - sizeof(ipath_6120_hwerror_msgs)/ - sizeof(ipath_6120_hwerror_msgs[0]), - msg, msgl); - - if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK - << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) { - bits = (u32) ((hwerrs >> - INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) & - INFINIPATH_HWE_PCIEMEMPARITYERR_MASK); - snprintf(bitsmsg, sizeof bitsmsg, - "[PCIe Mem Parity Errs %x] ", bits); - strlcat(msg, bitsmsg, msgl); - } - -#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \ - INFINIPATH_HWE_COREPLL_RFSLIP ) - - if (hwerrs & _IPATH_PLL_FAIL) { - snprintf(bitsmsg, sizeof bitsmsg, - "[PLL failed (%llx), InfiniPath hardware unusable]", - (unsigned long long) hwerrs & _IPATH_PLL_FAIL); - strlcat(msg, bitsmsg, msgl); - /* ignore from now on, so disable until driver reloaded */ - dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, - dd->ipath_hwerrmask); - } - - if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) { - /* - * If it occurs, it is left masked since the external - * interface is unused - */ - dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED; - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, - dd->ipath_hwerrmask); - } - - if (hwerrs) { - /* - * if any set that we aren't ignoring; only - * make the complaint once, in case it's stuck - * or recurring, and we get here multiple - * times. - */ - ipath_dev_err(dd, "%s hardware error\n", msg); - if (dd->ipath_flags & IPATH_INITTED) { - ipath_set_linkstate(dd, IPATH_IB_LINKDOWN); - ipath_setup_pe_setextled(dd, - INFINIPATH_IBCS_L_STATE_DOWN, - INFINIPATH_IBCS_LT_STATE_DISABLED); - ipath_dev_err(dd, "Fatal Hardware Error (freeze " - "mode), no longer usable, SN %.16s\n", - dd->ipath_serial); - isfatal = 1; - } - *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; - /* mark as having had error */ - *dd->ipath_statusp |= IPATH_STATUS_HWERROR; - /* - * mark as not usable, at a minimum until driver - * is reloaded, probably until reboot, since no - * other reset is possible. - */ - dd->ipath_flags &= ~IPATH_INITTED; - } else - *msg = 0; /* recovered from all of them */ - - if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) { - /* - * for /sys status file ; if no trailing brace is copied, - * we'll know it was truncated. 
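The brace trick mentioned just above, and used by the snprintf below, is worth spelling out: because the closing brace is the last byte copied, a reader of the /sys file can tell a truncated message from a complete one. A tiny self-contained illustration:

	#include <stdio.h>

	int main(void)
	{
		char freezemsg[16];

		/* with a too-small buffer the '}' is cut off, flagging truncation */
		snprintf(freezemsg, sizeof(freezemsg), "{%s}", "Memory BIST test failed");
		printf("%s\n", freezemsg);      /* prints "{Memory BIST te" */
		return 0;
	}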
- */ - snprintf(dd->ipath_freezemsg, dd->ipath_freezelen, - "{%s}", msg); - } -} - -/** - * ipath_pe_boardname - fill in the board name - * @dd: the infinipath device - * @name: the output buffer - * @namelen: the size of the output buffer - * - * info is based on the board revision register - */ -static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, - size_t namelen) -{ - char *n = NULL; - u8 boardrev = dd->ipath_boardrev; - int ret; - - switch (boardrev) { - case 0: - n = "InfiniPath_Emulation"; - break; - case 1: - n = "InfiniPath_QLE7140-Bringup"; - break; - case 2: - n = "InfiniPath_QLE7140"; - break; - case 3: - n = "InfiniPath_QMI7140"; - break; - case 4: - n = "InfiniPath_QEM7140"; - break; - case 5: - n = "InfiniPath_QMH7140"; - break; - case 6: - n = "InfiniPath_QLE7142"; - break; - default: - ipath_dev_err(dd, - "Don't yet know about board with ID %u\n", - boardrev); - snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u", - boardrev); - break; - } - if (n) - snprintf(name, namelen, "%s", n); - - if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) { - ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n", - dd->ipath_majrev, dd->ipath_minrev); - ret = 1; - } else { - ret = 0; - if (dd->ipath_minrev >= 2) - dd->ipath_f_put_tid = ipath_pe_put_tid_2; - } - - /* - * set here, not in ipath_init_*_funcs because we have to do - * it after we can read chip registers. - */ - dd->ipath_ureg_align = - ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign); - - return ret; -} - -/** - * ipath_pe_init_hwerrors - enable hardware errors - * @dd: the infinipath device - * - * now that we have finished initializing everything that might reasonably - * cause a hardware error, and cleared those error bits as they occur, - * we can enable hardware errors in the mask (potentially enabling - * freeze mode), and enable hardware errors as errors (along with - * everything else) in errormask - */ -static void ipath_pe_init_hwerrors(struct ipath_devdata *dd) -{ - ipath_err_t val; - u64 extsval; - - extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus); - - if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) - ipath_dev_err(dd, "MemBIST did not complete!\n"); - if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND) - ipath_dbg("MemBIST corrected\n"); - - val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */ - - if (!dd->ipath_boardrev) // no PLL for Emulator - val &= ~INFINIPATH_HWE_SERDESPLLFAILED; - - if (dd->ipath_minrev < 2) { - /* workaround bug 9460 in internal interface bus parity - * checking. Fixed (HW bug 9490) in Rev2.
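ipath_pe_init_hwerrors() builds the enable mask subtractively: start from all-ones so every error interrupts, then knock out the bits known to misfire on a given board or revision, as with the emulator PLL and the Rev1 parity erratum here. A sketch of the idiom (bit values are made up for illustration):

	#define HWE_SERDESPLLFAILED_X  (1ULL << 60)    /* illustrative bits */
	#define HWE_BUSPARITYRADM_X    (1ULL << 32)

	static unsigned long long build_hwerrmask(unsigned boardrev, unsigned minrev)
	{
		/* default: every hardware error becomes an interrupt */
		unsigned long long val = ~0ULL;

		if (!boardrev)          /* emulator board has no SerDes PLL */
			val &= ~HWE_SERDESPLLFAILED_X;
		if (minrev < 2)         /* Rev1 bus-parity checking erratum */
			val &= ~HWE_BUSPARITYRADM_X;
		return val;
	}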
- */ - val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM; - } - dd->ipath_hwerrmask = val; -} - -/** - * ipath_pe_bringup_serdes - bring up the serdes - * @dd: the infinipath device - */ -static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) -{ - u64 val, config1, prev_val; - int ret = 0; - - ipath_dbg("Trying to bringup serdes\n"); - - if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) & - INFINIPATH_HWE_SERDESPLLFAILED) { - ipath_dbg("At start, serdes PLL failed bit set " - "in hwerrstatus, clearing and continuing\n"); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, - INFINIPATH_HWE_SERDESPLLFAILED); - } - - dd->ibdeltainprog = 1; - dd->ibsymsnap = - ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = - ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); - - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); - config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1); - - ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, " - "xgxsconfig %llx\n", (unsigned long long) val, - (unsigned long long) config1, (unsigned long long) - ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); - - /* - * Force reset on, also set rxdetect enable. Must do before reading - * serdesstatus at least for simulation, or some of the bits in - * serdes status will come back as undefined and cause simulation - * failures - */ - val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN - | INFINIPATH_SERDC0_L1PWR_DN; - ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); - /* be sure chip saw it */ - ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); - udelay(5); /* need pll reset set at least for a bit */ - /* - * after PLL is reset, set the per-lane Resets and TxIdle and - * clear the PLL reset and rxdetect (to get falling edge). 
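Each step of the serdes bringup above follows the same MMIO discipline: write the config register, read the scratch register back so the posted write is known to have reached the chip, then delay while the analog side settles. A generic sketch of one such step, assuming a 64-bit platform where readq/writeq are available (register offsets are placeholders):

	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/types.h>

	#define KR_SERDESCONFIG0_X 0x2d8   /* placeholder register offsets */
	#define KR_SCRATCH_X       0x048

	static void serdes_write_step(void __iomem *kregbase, u64 val)
	{
		writeq(val, kregbase + KR_SERDESCONFIG0_X);
		(void) readq(kregbase + KR_SCRATCH_X);  /* flush the posted write */
		udelay(5);                              /* PLL reset hold time */
	}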
- * Leave L1PWR bits set (permanently) - */ - val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL - | INFINIPATH_SERDC0_L1PWR_DN); - val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE; - ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets " - "and txidle (%llx)\n", (unsigned long long) val); - ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); - /* be sure chip saw it */ - ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); - /* need PLL reset clear for at least 11 usec before lane - * resets cleared; give it a few more to be sure */ - udelay(15); - val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE); - - ipath_cdbg(VERBOSE, "Clearing lane resets and txidle " - "(writing %llx)\n", (unsigned long long) val); - ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); - /* be sure chip saw it */ - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); - - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); - prev_val = val; - if (val & INFINIPATH_XGXS_RESET) - val &= ~INFINIPATH_XGXS_RESET; - if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & - INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { - /* need to compensate for Tx inversion in partner */ - val &= ~(INFINIPATH_XGXS_RX_POL_MASK << - INFINIPATH_XGXS_RX_POL_SHIFT); - val |= dd->ipath_rx_pol_inv << - INFINIPATH_XGXS_RX_POL_SHIFT; - } - if (val != prev_val) - ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); - - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); - - /* clear current and de-emphasis bits */ - config1 &= ~0x0ffffffff00ULL; - /* set current to 20ma */ - config1 |= 0x00000000000ULL; - /* set de-emphasis to -5.68dB */ - config1 |= 0x0cccc000000ULL; - ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1); - - ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx " - "config1=%llx, sstatus=%llx xgxs=%llx\n", - (unsigned long long) val, (unsigned long long) config1, - (unsigned long long) - ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus), - (unsigned long long) - ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); - - return ret; -} - -/** - * ipath_pe_quiet_serdes - set serdes to txidle - * @dd: the infinipath device - * Called when driver is being unloaded - */ -static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) -{ - u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); - - if (dd->ibsymdelta || dd->iblnkerrdelta || - dd->ibdeltainprog) { - u64 diagc; - /* enable counter writes */ - diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, - diagc | INFINIPATH_DC_COUNTERWREN); - - if (dd->ibsymdelta || dd->ibdeltainprog) { - val = ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - if (dd->ibdeltainprog) - val -= val - dd->ibsymsnap; - val -= dd->ibsymdelta; - ipath_write_creg(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt, val); - } - if (dd->iblnkerrdelta || dd->ibdeltainprog) { - val = ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - if (dd->ibdeltainprog) - val -= val - dd->iblnkerrsnap; - val -= dd->iblnkerrdelta; - ipath_write_creg(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt, val); - } - - /* and disable counter writes */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); - } - val |= INFINIPATH_SERDC0_TXIDLE; - ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n", - (unsigned long long) val); - ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); -} - -static int 
ipath_pe_intconfig(struct ipath_devdata *dd) -{ - u32 chiprev; - - /* - * If the chip supports added error indication via GPIO pins, - * enable interrupts on those bits so the interrupt routine - * can count the events. Also set flag so interrupt routine - * can know they are expected. - */ - chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT; - if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) { - /* Rev2+ reports extra errors via internal GPIO pins */ - dd->ipath_flags |= IPATH_GPIO_ERRINTRS; - dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK; - ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, - dd->ipath_gpio_mask); - } - return 0; -} - -/** - * ipath_setup_pe_setextled - set the state of the two external LEDs - * @dd: the infinipath device - * @lst: the L state - * @ltst: the LT state - - * These LEDs indicate the physical and logical state of IB link. - * For this chip (at least with recommended board pinouts), LED1 - * is Yellow (logical state) and LED2 is Green (physical state), - * - * Note: We try to match the Mellanox HCA LED behavior as best - * we can. Green indicates physical link state is OK (something is - * plugged in, and we can train). - * Amber indicates the link is logically up (ACTIVE). - * Mellanox further blinks the amber LED to indicate data packet - * activity, but we have no hardware support for that, so it would - * require waking up every 10-20 msecs and checking the counters - * on the chip, and then turning the LED off if appropriate. That's - * visible overhead, so not something we will do. - * - */ -static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst, - u64 ltst) -{ - u64 extctl; - unsigned long flags = 0; - - /* the diags use the LED to indicate diag info, so we leave - * the external LED alone when the diags are running */ - if (ipath_diag_inuse) - return; - - /* Allow override of LED display for, e.g. Locating system in rack */ - if (dd->ipath_led_override) { - ltst = (dd->ipath_led_override & IPATH_LED_PHYS) - ? INFINIPATH_IBCS_LT_STATE_LINKUP - : INFINIPATH_IBCS_LT_STATE_DISABLED; - lst = (dd->ipath_led_override & IPATH_LED_LOG) - ? INFINIPATH_IBCS_L_STATE_ACTIVE - : INFINIPATH_IBCS_L_STATE_DOWN; - } - - spin_lock_irqsave(&dd->ipath_gpio_lock, flags); - extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON | - INFINIPATH_EXTC_LED2PRIPORT_ON); - - if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) - extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON; - if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE) - extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON; - dd->ipath_extctrl = extctl; - ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); - spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags); -} - -/** - * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff - * @dd: the infinipath device - * - * This is called during driver unload. - * We do the pci_disable_msi here, not in generic code, because it - * isn't used for the HT chips. If we do end up needing pci_enable_msi - * at some point in the future for HT, we'll move the call back - * into the main init_one code. 
- */ -static void ipath_setup_pe_cleanup(struct ipath_devdata *dd) -{ - dd->ipath_msi_lo = 0; /* just in case unload fails */ - pci_disable_msi(dd->pcidev); -} - -static void ipath_6120_pcie_params(struct ipath_devdata *dd) -{ - u16 linkstat, speed; - int pos; - - pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP); - if (!pos) { - ipath_dev_err(dd, "Can't find PCI Express capability!\n"); - goto bail; - } - - pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA, - &linkstat); - /* - * speed is bits 0-4, linkwidth is bits 4-8 - * no defines for them in headers - */ - speed = linkstat & 0xf; - linkstat >>= 4; - linkstat &= 0x1f; - dd->ipath_lbus_width = linkstat; - - switch (speed) { - case 1: - dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */ - break; - case 2: - dd->ipath_lbus_speed = 5000; /* Gen1, 5GHz */ - break; - default: /* not defined, assume gen1 */ - dd->ipath_lbus_speed = 2500; - break; - } - - if (linkstat < 8) - ipath_dev_err(dd, - "PCIe width %u (x8 HCA), performance reduced\n", - linkstat); - else - ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n", - dd->ipath_lbus_speed, linkstat); - - if (speed != 1) - ipath_dev_err(dd, - "PCIe linkspeed %u is incorrect; " - "should be 1 (2500)!\n", speed); -bail: - /* fill in string, even on errors */ - snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info), - "PCIe,%uMHz,x%u\n", - dd->ipath_lbus_speed, - dd->ipath_lbus_width); - - return; -} - -/** - * ipath_setup_pe_config - setup PCIe config related stuff - * @dd: the infinipath device - * @pdev: the PCI device - * - * The pci_enable_msi() call will fail on systems with MSI quirks - * such as those with AMD8131, even if the device of interest is not - * attached to that device, (in the 2.6.13 - 2.6.15 kernels, at least, fixed - * late in 2.6.16). - * All that can be done is to edit the kernel source to remove the quirk - * check until that is fixed. - * We do not need to call enable_msi() for our HyperTransport chip, - * even though it uses MSI, and we want to avoid the quirk warning, so - * we call enable_msi only for PCIe. If we do end up needing - * pci_enable_msi at some point in the future for HT, we'll move the - * call back into the main init_one code. - * We save the msi lo and hi values, so we can restore them after - * chip reset (the kernel PCI infrastructure doesn't yet handle that - * correctly). - */ -static int ipath_setup_pe_config(struct ipath_devdata *dd, - struct pci_dev *pdev) -{ - int pos, ret; - - dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ - ret = pci_enable_msi(dd->pcidev); - if (ret) - ipath_dev_err(dd, "pci_enable_msi failed: %d, " - "interrupts may not work\n", ret); - /* continue even if it fails, we may still be OK... */ - dd->ipath_irq = pdev->irq; - - if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { - u16 control; - pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO, - &dd->ipath_msi_lo); - pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI, - &dd->ipath_msi_hi); - pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, - &control); - /* now save the data (vector) info */ - pci_read_config_word(dd->pcidev, - pos + ((control & PCI_MSI_FLAGS_64BIT) - ? 12 : 8), - &dd->ipath_msi_data); - ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset " - "0x%x, control=0x%x\n", dd->ipath_msi_data, - pos + ((control & PCI_MSI_FLAGS_64BIT) ?
12 : 8), - control); - /* we save the cachelinesize also, although it doesn't - * really matter */ - pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, - &dd->ipath_pci_cacheline); - } else - ipath_dev_err(dd, "Can't find MSI capability, " - "can't save MSI settings for reset\n"); - - ipath_6120_pcie_params(dd); - - dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; - dd->ipath_link_speed_supported = IPATH_IB_SDR; - dd->ipath_link_width_enabled = IB_WIDTH_4X; - dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported; - /* these can't change for this chip, so set once */ - dd->ipath_link_width_active = dd->ipath_link_width_enabled; - dd->ipath_link_speed_active = dd->ipath_link_speed_enabled; - return 0; -} - -static void ipath_init_pe_variables(struct ipath_devdata *dd) -{ - /* - * setup the register offsets, since they are different for each - * chip - */ - dd->ipath_kregs = &ipath_pe_kregs; - dd->ipath_cregs = &ipath_pe_cregs; - - /* - * bits for selecting i2c direction and values, - * used for I2C serial flash - */ - dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; - dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; - dd->ipath_gpio_sda = IPATH_GPIO_SDA; - dd->ipath_gpio_scl = IPATH_GPIO_SCL; - - /* - * Fill in data for field-values that change in newer chips. - * We dynamically specify only the mask for LINKTRAININGSTATE - * and only the shift for LINKSTATE, as they are the only ones - * that change. Also precalculate the 3 link states of interest - * and the combined mask. - */ - dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT; - dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK; - dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK << - dd->ibcs_ls_shift) | dd->ibcs_lts_mask; - dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP << - INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | - (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift); - dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP << - INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | - (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift); - dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP << - INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | - (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift); - - /* - * Fill in data for ibcc field-values that change in newer chips. - * We dynamically specify only the mask for LINKINITCMD - * and only the shift for LINKCMD and MAXPKTLEN, as they are - * the only ones that change. - */ - dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK; - dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT; - dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT; - - /* Fill in shifts for RcvCtrl. 
*/ - dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT; - dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT; - dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT; - dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */ - - /* variables for sanity checking interrupt and errors */ - dd->ipath_hwe_bitsextant = - (INFINIPATH_HWE_RXEMEMPARITYERR_MASK << - INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) | - (INFINIPATH_HWE_TXEMEMPARITYERR_MASK << - INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) | - (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK << - INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) | - INFINIPATH_HWE_PCIE1PLLFAILED | - INFINIPATH_HWE_PCIE0PLLFAILED | - INFINIPATH_HWE_PCIEPOISONEDTLP | - INFINIPATH_HWE_PCIECPLTIMEOUT | - INFINIPATH_HWE_PCIEBUSPARITYXTLH | - INFINIPATH_HWE_PCIEBUSPARITYXADM | - INFINIPATH_HWE_PCIEBUSPARITYRADM | - INFINIPATH_HWE_MEMBISTFAILED | - INFINIPATH_HWE_COREPLL_FBSLIP | - INFINIPATH_HWE_COREPLL_RFSLIP | - INFINIPATH_HWE_SERDESPLLFAILED | - INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | - INFINIPATH_HWE_IBCBUSFRSPCPARITYERR; - dd->ipath_i_bitsextant = - (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | - (INFINIPATH_I_RCVAVAIL_MASK << - INFINIPATH_I_RCVAVAIL_SHIFT) | - INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | - INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO; - dd->ipath_e_bitsextant = - INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | - INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | - INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | - INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR | - INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP | - INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION | - INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL | - INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN | - INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK | - INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN | - INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN | - INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT | - INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | - INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED | - INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET | - INFINIPATH_E_HARDWARE; - - dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; - dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; - dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT; - dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT; - - /* - * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. - * 2 is Some Misc, 3 is reserved for future. - */ - dd->ipath_eep_st_masks[0].hwerrs_to_log = - INFINIPATH_HWE_TXEMEMPARITYERR_MASK << - INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT; - - /* Ignore errors in PIO/PBC on systems with unordered write-combining */ - if (ipath_unordered_wc()) - dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY; - - dd->ipath_eep_st_masks[1].hwerrs_to_log = - INFINIPATH_HWE_RXEMEMPARITYERR_MASK << - INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT; - - dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET; - dd->delay_mult = 2; /* SDR, 4X, can't change */ -} - -/* setup the MSI stuff again after a reset. I'd like to just call - * pci_enable_msi() and request_irq() again, but when I do that, - * the MSI enable bit doesn't get set in the command word, and - * we switch to a different interrupt vector, which is confusing, - * so I instead just do it all inline. Perhaps somehow can tie this - * into the PCIe hotplug support at some point - * Note, because I'm doing it all here, I don't call pci_disable_msi() - * or free_irq() at the start of ipath_setup_pe_reset().
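The restore code below only works because the probe path saved the MSI address and data words; a chip reset clears them, and the PCI core of that era did not reprogram them. A sketch of the matching save half, using only standard config-space accessors (the struct is a stand-in for the driver's dd fields):

	#include <linux/pci.h>

	struct msi_save { u32 lo, hi; u16 data; };  /* stand-in for dd fields */

	static void save_msi_state(struct pci_dev *pdev, struct msi_save *s)
	{
		int pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
		u16 control;

		if (!pos)
			return;         /* no MSI capability to save */
		pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, &s->lo);
		pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, &s->hi);
		pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
		/* the data word sits at +8, or +12 with 64-bit addressing */
		pci_read_config_word(pdev,
				     pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
				     &s->data);
	}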
- */ -static int ipath_reinit_msi(struct ipath_devdata *dd) -{ - int pos; - u16 control; - int ret; - - if (!dd->ipath_msi_lo) { - dev_info(&dd->pcidev->dev, "Can't restore MSI config, " - "initial setup failed?\n"); - ret = 0; - goto bail; - } - - if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { - ipath_dev_err(dd, "Can't find MSI capability, " - "can't restore MSI settings\n"); - ret = 0; - goto bail; - } - ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n", - dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO); - pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO, - dd->ipath_msi_lo); - ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n", - dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI); - pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI, - dd->ipath_msi_hi); - pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control); - if (!(control & PCI_MSI_FLAGS_ENABLE)) { - ipath_cdbg(VERBOSE, "MSI control at off %x was %x, " - "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS, - control, control | PCI_MSI_FLAGS_ENABLE); - control |= PCI_MSI_FLAGS_ENABLE; - pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, - control); - } - /* now rewrite the data (vector) info */ - pci_write_config_word(dd->pcidev, pos + - ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8), - dd->ipath_msi_data); - /* we restore the cachelinesize also, although it doesn't really - * matter */ - pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, - dd->ipath_pci_cacheline); - /* and now set the pci master bit again */ - pci_set_master(dd->pcidev); - ret = 1; - -bail: - return ret; -} - -/* This routine sleeps, so it can only be called from user context, not - * from interrupt context. If we need interrupt context, we can split - * it into two routines. -*/ -static int ipath_setup_pe_reset(struct ipath_devdata *dd) -{ - u64 val; - int i; - int ret; - u16 cmdval; - - pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval); - - /* Use ERROR so it shows up in logs, etc. */ - ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit); - /* keep chip from being accessed in a few places */ - dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); - val = dd->ipath_control | INFINIPATH_C_RESET; - ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); - mb(); - - for (i = 1; i <= 5; i++) { - int r; - /* allow MBIST, etc. to complete; longer on each retry. - * We sometimes get machine checks from bus timeout if no - * response, so for now, make it *really* long. 
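The loop below is the heart of the reset: sleep longer on every pass, rewrite the BARs, re-enable the device, and treat the revision register as a liveness probe, since a dead chip reads back as all-ones. A condensed, self-contained sketch of that back-off-and-probe shape (the callbacks stand in for the BAR-restore/enable and register-read steps):

	#include <errno.h>

	static int wait_for_chip(void (*restore_and_enable)(void),
				 unsigned long long (*read_revision)(void),
				 unsigned long long expected,
				 void (*sleep_ms)(unsigned int))
	{
		int i;

		for (i = 1; i <= 5; i++) {
			/* allow MBIST etc. to finish; back off on each retry */
			sleep_ms(1000 + (1 + i) * 2000);
			restore_and_enable();
			if (read_revision() == expected)
				return 0;       /* chip responded; reinit MSI next */
		}
		return -ENODEV;                 /* chip never came back */
	}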
- */ - msleep(1000 + (1 + i) * 2000); - if ((r = - pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, - dd->ipath_pcibar0))) - ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", - r); - if ((r = - pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, - dd->ipath_pcibar1))) - ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", - r); - /* now re-enable memory access */ - pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval); - if ((r = pci_enable_device(dd->pcidev))) - ipath_dev_err(dd, "pci_enable_device failed after " - "reset: %d\n", r); - /* - * whether it fully enabled or not, mark as present, - * again (but not INITTED) - */ - dd->ipath_flags |= IPATH_PRESENT; - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); - if (val == dd->ipath_revision) { - ipath_cdbg(VERBOSE, "Got matching revision " - "register %llx on try %d\n", - (unsigned long long) val, i); - ret = ipath_reinit_msi(dd); - goto bail; - } - /* Probably getting -1 back */ - ipath_dbg("Didn't get expected revision register, " - "got %llx, try %d\n", (unsigned long long) val, - i + 1); - } - ret = 0; /* failed */ - -bail: - if (ret) - ipath_6120_pcie_params(dd); - return ret; -} - -/** - * ipath_pe_put_tid - write a TID in chip - * @dd: the infinipath device - * @tidptr: pointer to the expected TID (in chip) to update - * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected - * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing - * - * This exists as a separate routine to allow for special locking etc. - * It's used for both the full cleanup on exit, as well as the normal - * setup and teardown. - */ -static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, - u32 type, unsigned long pa) -{ - u32 __iomem *tidp32 = (u32 __iomem *)tidptr; - unsigned long flags = 0; /* keep gcc quiet */ - int tidx; - spinlock_t *tidlockp; - - if (!dd->ipath_kregbase) - return; - - if (pa != dd->ipath_tidinvalid) { - if (pa & ((1U << 11) - 1)) { - dev_info(&dd->pcidev->dev, "BUG: physaddr %lx " - "not 2KB aligned!\n", pa); - return; - } - pa >>= 11; - /* paranoia check */ - if (pa & ~INFINIPATH_RT_ADDR_MASK) - ipath_dev_err(dd, - "BUG: Physical page address 0x%lx " - "has bits set in 31-29\n", pa); - - if (type == RCVHQ_RCV_TYPE_EAGER) - pa |= dd->ipath_tidtemplate; - else /* for now, always full 4KB page */ - pa |= 2 << 29; - } - - /* - * Workaround chip bug 9437 by writing the scratch register - * before and after the TID, and with an io write barrier. - * We use a spinlock around the writes, so they can't intermix - * with other TID (eager or expected) writes (the chip bug - * is triggered by back to back TID writes). Unfortunately, this - * call can be done from interrupt level for the port 0 eager TIDs, - * so we have to use irqsave locks. - */ - /* - * Assumes tidptr always > ipath_egrtidbase - * if type == RCVHQ_RCV_TYPE_EAGER. - */ - tidx = tidptr - dd->ipath_egrtidbase; - - tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt) - ? 
&dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock; - spin_lock_irqsave(tidlockp, flags); - ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf); - writel(pa, tidp32); - ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef); - mmiowb(); - spin_unlock_irqrestore(tidlockp, flags); -} - -/** - * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher - * @dd: the infinipath device - * @tidptr: pointer to the expected TID (in chip) to update - * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected - * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing - * - * This exists as a separate routine to allow for selection of the - * appropriate "flavor". The static calls in cleanup just use the - * revision-agnostic form, as they are not performance critical. - */ -static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr, - u32 type, unsigned long pa) -{ - u32 __iomem *tidp32 = (u32 __iomem *)tidptr; - u32 tidx; - - if (!dd->ipath_kregbase) - return; - - if (pa != dd->ipath_tidinvalid) { - if (pa & ((1U << 11) - 1)) { - dev_info(&dd->pcidev->dev, "BUG: physaddr %lx " - "not 2KB aligned!\n", pa); - return; - } - pa >>= 11; - /* paranoia check */ - if (pa & ~INFINIPATH_RT_ADDR_MASK) - ipath_dev_err(dd, - "BUG: Physical page address 0x%lx " - "has bits set in 31-29\n", pa); - - if (type == RCVHQ_RCV_TYPE_EAGER) - pa |= dd->ipath_tidtemplate; - else /* for now, always full 4KB page */ - pa |= 2 << 29; - } - tidx = tidptr - dd->ipath_egrtidbase; - writel(pa, tidp32); - mmiowb(); -} - - -/** - * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager - * @dd: the infinipath device - * @port: the port - * - * clear all TID entries for a port, expected and eager. - * Used from ipath_close(). On this chip, TIDs are only 32 bits, - * not 64, but they are still on 64 bit boundaries, so tidbase - * is declared as u64 * for the pointer math, even though we write 32 bits - */ -static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port) -{ - u64 __iomem *tidbase; - unsigned long tidinv; - int i; - - if (!dd->ipath_kregbase) - return; - - ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port); - - tidinv = dd->ipath_tidinvalid; - tidbase = (u64 __iomem *) - ((char __iomem *)(dd->ipath_kregbase) + - dd->ipath_rcvtidbase + - port * dd->ipath_rcvtidcnt * sizeof(*tidbase)); - - for (i = 0; i < dd->ipath_rcvtidcnt; i++) - dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, - tidinv); - - tidbase = (u64 __iomem *) - ((char __iomem *)(dd->ipath_kregbase) + - dd->ipath_rcvegrbase + - port * dd->ipath_rcvegrcnt * sizeof(*tidbase)); - - for (i = 0; i < dd->ipath_rcvegrcnt; i++) - dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, - tidinv); -} - -/** - * ipath_pe_tidtemplate - setup constants for TID updates - * @dd: the infinipath device - * - * We set up stuff that we use a lot, to avoid calculating each time - */ -static void ipath_pe_tidtemplate(struct ipath_devdata *dd) -{ - u32 egrsize = dd->ipath_rcvegrbufsize; - - /* For now, we always allocate 4KB buffers (at init) so we can - * receive max size packets. We may want a module parameter to - * specify 2KB or 4KB and/or make it per port instead of per device - * for those who want to reduce memory footprint. Note that the - * ipath_rcvhdrentsize size must be large enough to hold the largest - * IB header (currently 96 bytes) that we expect to handle (plus of - * course the 2 dwords of RHF).
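ipath_pe_tidtemplate() below caches the buffer-size code once so every TID write can simply OR it in. Per the deleted INFINIPATH_RT_BUFSIZE defines, the code in bits 31:29 means a buffer of 1 << (code + 10) bytes, so 2KB encodes as 1 and 4KB as 2. A sketch of that encoding:

	/* size code lives in bits 31:29; buffer size is 1 << (code + 10) bytes */
	static unsigned int make_tidtemplate(unsigned int egrsize)
	{
		switch (egrsize) {
		case 2048:
			return 1U << 29;
		case 4096:
			return 2U << 29;
		default:
			/* fall back to 4KB, as the driver below does */
			return 2U << 29;
		}
	}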
- */ - if (egrsize == 2048) - dd->ipath_tidtemplate = 1U << 29; - else if (egrsize == 4096) - dd->ipath_tidtemplate = 2U << 29; - else { - egrsize = 4096; - dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize " - "%u, using %u\n", dd->ipath_rcvegrbufsize, - egrsize); - dd->ipath_tidtemplate = 2U << 29; - } - dd->ipath_tidinvalid = 0; -} - -static int ipath_pe_early_init(struct ipath_devdata *dd) -{ - dd->ipath_flags |= IPATH_4BYTE_TID; - if (ipath_unordered_wc()) - dd->ipath_flags |= IPATH_PIO_FLUSH_WC; - - /* - * For openfabrics, we need to be able to handle an IB header of - * 24 dwords. HT chip has arbitrary sized receive buffers, so we - * made them the same size as the PIO buffers. This chip does not - * handle arbitrary size buffers, so we need the header large enough - * to handle largest IB header, but still have room for a 2KB MTU - * standard IB packet. - */ - dd->ipath_rcvhdrentsize = 24; - dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; - dd->ipath_rhf_offset = 0; - dd->ipath_egrtidbase = (u64 __iomem *) - ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase); - - dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048; - /* - * the min() check here is currently a nop, but it may not always - * be, depending on just how we do ipath_rcvegrbufsize - */ - dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k : - dd->ipath_piosize2k, - dd->ipath_rcvegrbufsize + - (dd->ipath_rcvhdrentsize << 2)); - dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; - - /* - * We can request a receive interrupt for 1 or - * more packets from current offset. For now, we set this - * up for a single packet. - */ - dd->ipath_rhdrhead_intr_off = 1ULL<<32; - - ipath_get_eeprom_info(dd); - - return 0; -} - -int __attribute__((weak)) ipath_unordered_wc(void) -{ - return 0; -} - -/** - * ipath_init_pe_get_base_info - set chip-specific flags for user code - * @pd: the infinipath port - * @kbase: ipath_base_info pointer - * - * We set the PCIE flag because the lower bandwidth on PCIe vs - * HyperTransport can affect some user packet algorithms. 
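get_base_info(), next, is where the chip layer advertises its quirks to userspace: capability bits are ORed into spi_runtime_flags in the shared base-info page, and the user library adapts (here, forcing ordered copies on unordered-write-combining hosts). A sketch of the flag-export pattern (flag values and names are placeholders):

	#define RUNTIME_PCIE_X           0x2   /* placeholder flag values */
	#define RUNTIME_FORCE_WC_ORDER_X 0x4

	struct base_info_x {
		unsigned int spi_runtime_flags;  /* read by the userspace library */
	};

	static void export_chip_quirks(struct base_info_x *kinfo, int unordered_wc)
	{
		if (unordered_wc)       /* user code must order its PIO copies */
			kinfo->spi_runtime_flags |= RUNTIME_FORCE_WC_ORDER_X;
		kinfo->spi_runtime_flags |= RUNTIME_PCIE_X;
	}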
- */ -static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase) -{ - struct ipath_base_info *kinfo = kbase; - struct ipath_devdata *dd; - - if (ipath_unordered_wc()) { - kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER; - ipath_cdbg(PROC, "Intel processor, forcing WC order\n"); - } - else - ipath_cdbg(PROC, "Not Intel processor, WC ordered\n"); - - if (pd == NULL) - goto done; - - dd = pd->port_dd; - -done: - kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE | - IPATH_RUNTIME_FORCE_PIOAVAIL | IPATH_RUNTIME_PIO_REGSWAPPED; - return 0; -} - -static void ipath_pe_free_irq(struct ipath_devdata *dd) -{ - free_irq(dd->ipath_irq, dd); - dd->ipath_irq = 0; -} - - -static struct ipath_message_header * -ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr) -{ - return (struct ipath_message_header *) - &rhf_addr[sizeof(u64) / sizeof(u32)]; -} - -static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports) -{ - dd->ipath_portcnt = - ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt); - dd->ipath_p0_rcvegrcnt = - ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); -} - -static void ipath_pe_read_counters(struct ipath_devdata *dd, - struct infinipath_counters *cntrs) -{ - cntrs->LBIntCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt)); - cntrs->LBFlowStallCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt)); - cntrs->TxSDmaDescCnt = 0; - cntrs->TxUnsupVLErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt)); - cntrs->TxDataPktCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt)); - cntrs->TxFlowPktCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt)); - cntrs->TxDwordCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt)); - cntrs->TxLenErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt)); - cntrs->TxMaxMinLenErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt)); - cntrs->TxUnderrunCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt)); - cntrs->TxFlowStallCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt)); - cntrs->TxDroppedPktCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt)); - cntrs->RxDroppedPktCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt)); - cntrs->RxDataPktCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt)); - cntrs->RxFlowPktCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt)); - cntrs->RxDwordCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt)); - cntrs->RxLenErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt)); - cntrs->RxMaxMinLenErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt)); - cntrs->RxICRCErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt)); - cntrs->RxVCRCErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt)); - cntrs->RxFlowCtrlErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt)); - cntrs->RxBadFormatCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt)); - cntrs->RxLinkProblemCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt)); - cntrs->RxEBPCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt)); - cntrs->RxLPCRCErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt)); - cntrs->RxBufOvflCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt)); - cntrs->RxTIDFullErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt)); - cntrs->RxTIDValidErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt)); - cntrs->RxPKeyMismatchCnt = - ipath_snap_cntr(dd, 
IPATH_CREG_OFFSET(RxPKeyMismatchCnt)); - cntrs->RxP0HdrEgrOvflCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt)); - cntrs->RxP1HdrEgrOvflCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt)); - cntrs->RxP2HdrEgrOvflCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt)); - cntrs->RxP3HdrEgrOvflCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt)); - cntrs->RxP4HdrEgrOvflCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt)); - cntrs->RxP5HdrEgrOvflCnt = 0; - cntrs->RxP6HdrEgrOvflCnt = 0; - cntrs->RxP7HdrEgrOvflCnt = 0; - cntrs->RxP8HdrEgrOvflCnt = 0; - cntrs->RxP9HdrEgrOvflCnt = 0; - cntrs->RxP10HdrEgrOvflCnt = 0; - cntrs->RxP11HdrEgrOvflCnt = 0; - cntrs->RxP12HdrEgrOvflCnt = 0; - cntrs->RxP13HdrEgrOvflCnt = 0; - cntrs->RxP14HdrEgrOvflCnt = 0; - cntrs->RxP15HdrEgrOvflCnt = 0; - cntrs->RxP16HdrEgrOvflCnt = 0; - cntrs->IBStatusChangeCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt)); - cntrs->IBLinkErrRecoveryCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt)); - cntrs->IBLinkDownedCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt)); - cntrs->IBSymbolErrCnt = - ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt)); - cntrs->RxVL15DroppedPktCnt = 0; - cntrs->RxOtherLocalPhyErrCnt = 0; - cntrs->PcieRetryBufDiagQwordCnt = 0; - cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs; - cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs; - cntrs->RxVlErrCnt = 0; - cntrs->RxDlidFltrCnt = 0; -} - - -/* no interrupt fallback for these chips */ -static int ipath_pe_nointr_fallback(struct ipath_devdata *dd) -{ - return 0; -} - - -/* - * reset the XGXS (between serdes and IBC). Slightly less intrusive - * than resetting the IBC or external link state, and useful in some - * cases to cause some retraining. To do this right, we reset IBC - * as well. 
- */ -static void ipath_pe_xgxs_reset(struct ipath_devdata *dd) -{ - u64 val, prev_val; - - prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); - val = prev_val | INFINIPATH_XGXS_RESET; - prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_control, - dd->ipath_control & ~INFINIPATH_C_LINKENABLE); - ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); - ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val); - ipath_write_kreg(dd, dd->ipath_kregs->kr_control, - dd->ipath_control); -} - - -static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which) -{ - int ret; - - switch (which) { - case IPATH_IB_CFG_LWID: - ret = dd->ipath_link_width_active; - break; - case IPATH_IB_CFG_SPD: - ret = dd->ipath_link_speed_active; - break; - case IPATH_IB_CFG_LWID_ENB: - ret = dd->ipath_link_width_enabled; - break; - case IPATH_IB_CFG_SPD_ENB: - ret = dd->ipath_link_speed_enabled; - break; - default: - ret = -ENOTSUPP; - break; - } - return ret; -} - - -/* we assume range checking is already done, if needed */ -static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val) -{ - int ret = 0; - - if (which == IPATH_IB_CFG_LWID_ENB) - dd->ipath_link_width_enabled = val; - else if (which == IPATH_IB_CFG_SPD_ENB) - dd->ipath_link_speed_enabled = val; - else - ret = -ENOTSUPP; - return ret; -} - -static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b) -{ -} - - -static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) -{ - if (ibup) { - if (dd->ibdeltainprog) { - dd->ibdeltainprog = 0; - dd->ibsymdelta += - ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt) - - dd->ibsymsnap; - dd->iblnkerrdelta += - ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt) - - dd->iblnkerrsnap; - } - } else { - dd->ipath_lli_counter = 0; - if (!dd->ibdeltainprog) { - dd->ibdeltainprog = 1; - dd->ibsymsnap = - ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = - ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - } - } - - ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs), - ipath_ib_linktrstate(dd, ibcs)); - return 0; -} - - -/** - * ipath_init_iba6120_funcs - set up the chip-specific function pointers - * @dd: the infinipath device - * - * This is global, and is called directly at init to set up the - * chip-specific function pointers for later use. 
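The long block of ipath_f_* assignments that follows is the driver's ops-table idiom: chip-agnostic core code calls through per-chip function pointers filled in once at init. A minimal sketch of the pattern (a hypothetical two-entry table, not the real struct ipath_devdata layout):

#include <stdio.h>

struct chip_ops {			/* stands in for the ipath_f_* fields */
	int  (*early_init)(void *dd);
	void (*free_irq)(void *dd);
};

static int pe_early_init(void *dd) { (void)dd; return 0; }
static void pe_free_irq(void *dd)  { (void)dd; }

/* analogous to ipath_init_iba6120_funcs(): fill the table at init */
static void init_funcs(struct chip_ops *ops)
{
	ops->early_init = pe_early_init;
	ops->free_irq   = pe_free_irq;
}

int main(void)
{
	struct chip_ops ops;

	init_funcs(&ops);
	return ops.early_init(NULL);	/* core code never names the chip */
}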
- */ -void ipath_init_iba6120_funcs(struct ipath_devdata *dd) -{ - dd->ipath_f_intrsetup = ipath_pe_intconfig; - dd->ipath_f_bus = ipath_setup_pe_config; - dd->ipath_f_reset = ipath_setup_pe_reset; - dd->ipath_f_get_boardname = ipath_pe_boardname; - dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors; - dd->ipath_f_early_init = ipath_pe_early_init; - dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors; - dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes; - dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes; - dd->ipath_f_clear_tids = ipath_pe_clear_tids; - /* - * _f_put_tid may get changed after we read the chip revision, - * but we start with the safe version for all revs - */ - dd->ipath_f_put_tid = ipath_pe_put_tid; - dd->ipath_f_cleanup = ipath_setup_pe_cleanup; - dd->ipath_f_setextled = ipath_setup_pe_setextled; - dd->ipath_f_get_base_info = ipath_pe_get_base_info; - dd->ipath_f_free_irq = ipath_pe_free_irq; - dd->ipath_f_tidtemplate = ipath_pe_tidtemplate; - dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback; - dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset; - dd->ipath_f_get_msgheader = ipath_pe_get_msgheader; - dd->ipath_f_config_ports = ipath_pe_config_ports; - dd->ipath_f_read_counters = ipath_pe_read_counters; - dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg; - dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg; - dd->ipath_f_config_jint = ipath_pe_config_jint; - dd->ipath_f_ib_updown = ipath_pe_ib_updown; - - - /* initialize chip-specific variables */ - ipath_init_pe_variables(dd); -} - diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c deleted file mode 100644 index 34b778ed97fc..000000000000 --- a/drivers/infiniband/hw/ipath/ipath_iba7220.c +++ /dev/null @@ -1,2631 +0,0 @@ -/* - * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. - * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -/* - * This file contains all of the code that is specific to the - * InfiniPath 7220 chip (except that specific to the SerDes) - */ - -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/sched.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <rdma/ib_verbs.h> - -#include "ipath_kernel.h" -#include "ipath_registers.h" -#include "ipath_7220.h" - -static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64); - -static unsigned ipath_compat_ddr_negotiate = 1; - -module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint, - S_IWUSR | S_IRUGO); -MODULE_PARM_DESC(compat_ddr_negotiate, - "Attempt pre-IBTA 1.2 DDR speed negotiation"); - -static unsigned ipath_sdma_fetch_arb = 1; -module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO); -MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration"); - -/* - * This file contains almost all the chip-specific register information and - * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the - * exception of SerDes support, which is in ipath_sd7220.c. - * - * This lists the InfiniPath registers, in the actual chip layout. - * This structure should never be directly accessed. - */ -struct _infinipath_do_not_use_kernel_regs { - unsigned long long Revision; - unsigned long long Control; - unsigned long long PageAlign; - unsigned long long PortCnt; - unsigned long long DebugPortSelect; - unsigned long long DebugSigsIntSel; /* was Reserved0;*/ - unsigned long long SendRegBase; - unsigned long long UserRegBase; - unsigned long long CounterRegBase; - unsigned long long Scratch; - unsigned long long EEPROMAddrCmd; /* was Reserved1; */ - unsigned long long EEPROMData; /* was Reserved2; */ - unsigned long long IntBlocked; - unsigned long long IntMask; - unsigned long long IntStatus; - unsigned long long IntClear; - unsigned long long ErrorMask; - unsigned long long ErrorStatus; - unsigned long long ErrorClear; - unsigned long long HwErrMask; - unsigned long long HwErrStatus; - unsigned long long HwErrClear; - unsigned long long HwDiagCtrl; - unsigned long long MDIO; - unsigned long long IBCStatus; - unsigned long long IBCCtrl; - unsigned long long ExtStatus; - unsigned long long ExtCtrl; - unsigned long long GPIOOut; - unsigned long long GPIOMask; - unsigned long long GPIOStatus; - unsigned long long GPIOClear; - unsigned long long RcvCtrl; - unsigned long long RcvBTHQP; - unsigned long long RcvHdrSize; - unsigned long long RcvHdrCnt; - unsigned long long RcvHdrEntSize; - unsigned long long RcvTIDBase; - unsigned long long RcvTIDCnt; - unsigned long long RcvEgrBase; - unsigned long long RcvEgrCnt; - unsigned long long RcvBufBase; - unsigned long long RcvBufSize; - unsigned long long RxIntMemBase; - unsigned long long RxIntMemSize; - unsigned long long RcvPartitionKey; - unsigned long long RcvQPMulticastPort; - unsigned long long RcvPktLEDCnt; - unsigned long long IBCDDRCtrl; - unsigned long long HRTBT_GUID; - unsigned long long IB_SDTEST_IF_TX; - unsigned long long IB_SDTEST_IF_RX; - unsigned long long IBCDDRCtrl2; - unsigned long long IBCDDRStatus; - unsigned long long JIntReload; - unsigned long long IBNCModeCtrl; - unsigned long long SendCtrl; - unsigned long long SendBufBase; - unsigned long long SendBufSize; - unsigned long long SendBufCnt; - unsigned long long SendAvailAddr; - unsigned long long TxIntMemBase; - unsigned long long TxIntMemSize; - unsigned long long SendDmaBase; - unsigned long long SendDmaLenGen; - unsigned long long SendDmaTail; -
unsigned long long SendDmaHead; - unsigned long long SendDmaHeadAddr; - unsigned long long SendDmaBufMask0; - unsigned long long SendDmaBufMask1; - unsigned long long SendDmaBufMask2; - unsigned long long SendDmaStatus; - unsigned long long SendBufferError; - unsigned long long SendBufferErrorCONT1; - unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */ - unsigned long long Reserved6L[2]; - unsigned long long AvailUpdCount; - unsigned long long RcvHdrAddr0; - unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */ - unsigned long long Reserved7hdtl; /* Align next to 300 */ - unsigned long long RcvHdrTailAddr0; /* 300, like others */ - unsigned long long RcvHdrTailAddrs[16]; - unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */ - unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */ - unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */ - unsigned long long Reserved10sds; /* was SerdesStatus on */ - unsigned long long XGXSConfig; - unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */ - unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */ - unsigned long long EEPAddrCmd; - unsigned long long EEPData; - unsigned long long PcieEpbAccCtl; - unsigned long long PcieEpbTransCtl; - unsigned long long EfuseCtl; /* E-Fuse control */ - unsigned long long EfuseData[4]; - unsigned long long ProcMon; - /* this chip moves following two from previous 200, 208 */ - unsigned long long PCIeRBufTestReg0; - unsigned long long PCIeRBufTestReg1; - /* added for this chip */ - unsigned long long PCIeRBufTestReg2; - unsigned long long PCIeRBufTestReg3; - /* added for this chip, debug only */ - unsigned long long SPC_JTAG_ACCESS_REG; - unsigned long long LAControlReg; - unsigned long long GPIODebugSelReg; - unsigned long long DebugPortValueReg; - /* added for this chip, DMA */ - unsigned long long SendDmaBufUsed[3]; - unsigned long long SendDmaReqTagUsed; - /* - * added for this chip, EFUSE: note that these program 64-bit - * words 2 and 3 */ - unsigned long long efuse_pgm_data[2]; - unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */ - /* we have 30 regs for DDS and RXEQ in IB SERDES */ - unsigned long long SerDesDDSRXEQ[30]; - unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */ - /* added for LA debug support */ - unsigned long long LAMemory[32]; -}; - -struct _infinipath_do_not_use_counters { - __u64 LBIntCnt; - __u64 LBFlowStallCnt; - __u64 TxSDmaDescCnt; /* was Reserved1 */ - __u64 TxUnsupVLErrCnt; - __u64 TxDataPktCnt; - __u64 TxFlowPktCnt; - __u64 TxDwordCnt; - __u64 TxLenErrCnt; - __u64 TxMaxMinLenErrCnt; - __u64 TxUnderrunCnt; - __u64 TxFlowStallCnt; - __u64 TxDroppedPktCnt; - __u64 RxDroppedPktCnt; - __u64 RxDataPktCnt; - __u64 RxFlowPktCnt; - __u64 RxDwordCnt; - __u64 RxLenErrCnt; - __u64 RxMaxMinLenErrCnt; - __u64 RxICRCErrCnt; - __u64 RxVCRCErrCnt; - __u64 RxFlowCtrlErrCnt; - __u64 RxBadFormatCnt; - __u64 RxLinkProblemCnt; - __u64 RxEBPCnt; - __u64 RxLPCRCErrCnt; - __u64 RxBufOvflCnt; - __u64 RxTIDFullErrCnt; - __u64 RxTIDValidErrCnt; - __u64 RxPKeyMismatchCnt; - __u64 RxP0HdrEgrOvflCnt; - __u64 RxP1HdrEgrOvflCnt; - __u64 RxP2HdrEgrOvflCnt; - __u64 RxP3HdrEgrOvflCnt; - __u64 RxP4HdrEgrOvflCnt; - __u64 RxP5HdrEgrOvflCnt; - __u64 RxP6HdrEgrOvflCnt; - __u64 RxP7HdrEgrOvflCnt; - __u64 RxP8HdrEgrOvflCnt; - __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */ - __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */ - __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */ - __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */ - __u64 RxP13HdrEgrOvflCnt; 
/* new for IBA7220 */ - __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */ - __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */ - __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */ - __u64 IBStatusChangeCnt; - __u64 IBLinkErrRecoveryCnt; - __u64 IBLinkDownedCnt; - __u64 IBSymbolErrCnt; - /* The following are new for IBA7220 */ - __u64 RxVL15DroppedPktCnt; - __u64 RxOtherLocalPhyErrCnt; - __u64 PcieRetryBufDiagQwordCnt; - __u64 ExcessBufferOvflCnt; - __u64 LocalLinkIntegrityErrCnt; - __u64 RxVlErrCnt; - __u64 RxDlidFltrCnt; - __u64 Reserved8[7]; - __u64 PSStat; - __u64 PSStart; - __u64 PSInterval; - __u64 PSRcvDataCount; - __u64 PSRcvPktsCount; - __u64 PSXmitDataCount; - __u64 PSXmitPktsCount; - __u64 PSXmitWaitCount; -}; - -#define IPATH_KREG_OFFSET(field) (offsetof( \ - struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64)) -#define IPATH_CREG_OFFSET(field) (offsetof( \ - struct _infinipath_do_not_use_counters, field) / sizeof(u64)) - -static const struct ipath_kregs ipath_7220_kregs = { - .kr_control = IPATH_KREG_OFFSET(Control), - .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase), - .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect), - .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear), - .kr_errormask = IPATH_KREG_OFFSET(ErrorMask), - .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus), - .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl), - .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus), - .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear), - .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask), - .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut), - .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus), - .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl), - .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear), - .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask), - .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus), - .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl), - .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus), - .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked), - .kr_intclear = IPATH_KREG_OFFSET(IntClear), - .kr_intmask = IPATH_KREG_OFFSET(IntMask), - .kr_intstatus = IPATH_KREG_OFFSET(IntStatus), - .kr_mdio = IPATH_KREG_OFFSET(MDIO), - .kr_pagealign = IPATH_KREG_OFFSET(PageAlign), - .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey), - .kr_portcnt = IPATH_KREG_OFFSET(PortCnt), - .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP), - .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase), - .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize), - .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl), - .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase), - .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt), - .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt), - .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize), - .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize), - .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase), - .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize), - .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase), - .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt), - .kr_revision = IPATH_KREG_OFFSET(Revision), - .kr_scratch = IPATH_KREG_OFFSET(Scratch), - .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError), - .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl), - .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr), - .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase), - .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt), - .kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize), - .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase), - .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase), - .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize), - .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase), - - 
.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig), - - /* send dma related regs */ - .kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase), - .kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen), - .kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail), - .kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead), - .kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr), - .kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0), - .kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1), - .kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2), - .kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus), - - /* SerDes related regs */ - .kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl), - .kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl), - .kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg), - .kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl), - .kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl), - .kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ), - - /* - * These should not be used directly via ipath_read_kreg64(), - * use them with ipath_read_kreg64_port() - */ - .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), - .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), - - /* - * The rcvpktled register controls one of the debug port signals, so - * a packet activity LED can be connected to it. - */ - .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt), - .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0), - .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1), - - .kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID), - .kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl), - .kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus), - .kr_jintreload = IPATH_KREG_OFFSET(JIntReload) -}; - -static const struct ipath_cregs ipath_7220_cregs = { - .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt), - .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt), - .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt), - .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt), - .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt), - .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt), - .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt), - .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt), - .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt), - .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt), - .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt), - .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt), - .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt), - .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt), - .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt), - .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt), - .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt), - .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt), - .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt), - .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt), - .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt), - .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt), - .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt), - .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt), - .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt), - .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt), - .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt), - .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt), - .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt), - .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt), - .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt), - .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt), - .cr_ibsymbolerrcnt = 
IPATH_CREG_OFFSET(IBSymbolErrCnt), - .cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt), - .cr_rxotherlocalphyerrcnt = - IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt), - .cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt), - .cr_locallinkintegrityerrcnt = - IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt), - .cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt), - .cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt), - .cr_psstat = IPATH_CREG_OFFSET(PSStat), - .cr_psstart = IPATH_CREG_OFFSET(PSStart), - .cr_psinterval = IPATH_CREG_OFFSET(PSInterval), - .cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount), - .cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount), - .cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount), - .cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount), - .cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount), -}; - -/* kr_control bits */ -#define INFINIPATH_C_RESET (1U<<7) - -/* kr_intstatus, kr_intclear, kr_intmask bits */ -#define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1) -#define INFINIPATH_I_RCVURG_SHIFT 32 -#define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1) -#define INFINIPATH_I_RCVAVAIL_SHIFT 0 -#define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27) - -/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ -#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL -#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0 -#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL -#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL -#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL -#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL -#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL -#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL -#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL -#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL -#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL -#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL -/* specific to this chip */ -#define INFINIPATH_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL -#define INFINIPATH_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL -#define INFINIPATH_HWE_SDMAMEMREADERR 0x0000000010000000ULL -#define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL -#define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL -#define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL -#define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL -#define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL -#define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL -#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL -#define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL -#define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL - -#define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F -#define IBA7220_IBCS_LINKSTATE_SHIFT 5 -#define IBA7220_IBCS_LINKSPEED_SHIFT 8 -#define IBA7220_IBCS_LINKWIDTH_SHIFT 9 - -#define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL -#define IBA7220_IBCC_LINKCMD_SHIFT 19 -#define IBA7220_IBCC_MAXPKTLEN_SHIFT 21 - -/* kr_ibcddrctrl bits */ -#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL -#define IBA7220_IBC_DLIDLMC_SHIFT 32 -#define IBA7220_IBC_HRTBT_MASK 3 -#define IBA7220_IBC_HRTBT_SHIFT 16 -#define IBA7220_IBC_HRTBT_ENB 0x10000UL -#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8) -#define IBA7220_IBC_LREV_MASK 1 -#define IBA7220_IBC_LREV_SHIFT 8 -#define IBA7220_IBC_RXPOL_MASK 1 -#define 
IBA7220_IBC_RXPOL_SHIFT 7 -#define IBA7220_IBC_WIDTH_SHIFT 5 -#define IBA7220_IBC_WIDTH_MASK 0x3 -#define IBA7220_IBC_WIDTH_1X_ONLY (0<<IBA7220_IBC_WIDTH_SHIFT) -#define IBA7220_IBC_WIDTH_4X_ONLY (1<<IBA7220_IBC_WIDTH_SHIFT) -#define IBA7220_IBC_WIDTH_AUTONEG (2<<IBA7220_IBC_WIDTH_SHIFT) -#define IBA7220_IBC_SPEED_AUTONEG (1<<1) -#define IBA7220_IBC_SPEED_SDR (1<<2) -#define IBA7220_IBC_SPEED_DDR (1<<3) -#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7<<1) -#define IBA7220_IBC_IBTA_1_2_MASK (1) - -/* kr_ibcddrstatus */ -/* link latency shift is 0, don't bother defining */ -#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff - -/* kr_extstatus bits */ -#define INFINIPATH_EXTS_FREQSEL 0x2 -#define INFINIPATH_EXTS_SERDESSEL 0x4 -#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 -#define INFINIPATH_EXTS_MEMBIST_DISABLED 0x0000000000008000 - -/* kr_xgxsconfig bits */ -#define INFINIPATH_XGXS_RESET 0x5ULL -#define INFINIPATH_XGXS_FC_SAFE (1ULL<<63) - -/* kr_rcvpktledcnt */ -#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */ -#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */ - -#define _IPATH_GPIO_SDA_NUM 1 -#define _IPATH_GPIO_SCL_NUM 0 - -#define IPATH_GPIO_SDA (1ULL << \ - (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) -#define IPATH_GPIO_SCL (1ULL << \ - (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) - -#define IBA7220_R_INTRAVAIL_SHIFT 17 -#define IBA7220_R_TAILUPD_SHIFT 35 -#define IBA7220_R_PORTCFG_SHIFT 36 - -#define INFINIPATH_JINT_PACKETSHIFT 16 -#define INFINIPATH_JINT_DEFAULT_IDLE_TICKS 0 -#define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0 - -#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */ - -/* - * the size bits give us 2^N, in KB units. 0 marks as invalid, - * and 7 is reserved. We currently use only 2KB and 4KB - */ -#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */ -#define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */ -#define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */ -#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ - -#define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */ - -static char int_type[16] = "auto"; -module_param_string(interrupt_type, int_type, sizeof(int_type), 0444); -MODULE_PARM_DESC(int_type, " interrupt_type=auto|force_msi|force_intx"); - -/* packet rate matching delay; chip has support */ -static u8 rate_to_delay[2][2] = { - /* 1x, 4x */ - { 8, 2 }, /* SDR */ - { 4, 1 } /* DDR */ -}; - -/* 7220 specific hardware errors... */ -static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = { - INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"), - INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"), - /* - * In practice, it's unlikely that we'll see PCIe PLL, or bus - * parity or memory parity error failures, because most likely we - * won't be able to talk to the core of the chip. Nonetheless, we - * might see them, if they are in parts of the PCIe core that aren't - * essential.
- */ - INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"), - INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"), - INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"), - INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"), - INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"), - INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"), - INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), - INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl header queue"), - INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl data queue"), - INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"), - INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"), - INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT, - "PCIe serdes Q0 no clock"), - INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT, - "PCIe serdes Q1 no clock"), - INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT, - "PCIe serdes Q2 no clock"), - INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT, - "PCIe serdes Q3 no clock"), - INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR, - "DDS RXEQ memory parity"), - INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"), - INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR, - "PCIe uC oct0 memory parity"), - INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR, - "PCIe uC oct1 memory parity"), -}; - -static void autoneg_work(struct work_struct *); - -/* - * the offset is different for different configured port numbers, since - * port0 is fixed in size, but others can vary. Make it a function to - * make the issue more obvious. -*/ -static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port) -{ - return port ? dd->ipath_p0_rcvegrcnt + - (port-1) * dd->ipath_rcvegrcnt : 0; -} - -static void ipath_7220_txe_recover(struct ipath_devdata *dd) -{ - ++ipath_stats.sps_txeparity; - - dev_info(&dd->pcidev->dev, - "Recovering from TXE PIO parity error\n"); - ipath_disarm_senderrbufs(dd); -} - - -/** - * ipath_7220_handle_hwerrors - display hardware errors. - * @dd: the infinipath device - * @msg: the output buffer - * @msgl: the size of the output buffer - * - * Use the same msg buffer as ipath_handle_errors() to avoid excessive - * stack use. Most hardware errors are catastrophic, but for right now, - * we'll print them and continue. - */ -static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg, - size_t msgl) -{ - ipath_err_t hwerrs; - u32 bits, ctrl; - int isfatal = 0; - char bitsmsg[64]; - int log_idx; - - hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); - if (!hwerrs) { - /* - * better than printing confusing messages. - * This seems to be related to clearing the crc error, or - * the pll error during init. - */ - ipath_cdbg(VERBOSE, "Called but no hardware errors set\n"); - goto bail; - } else if (hwerrs == ~0ULL) { - ipath_dev_err(dd, "Read of hardware error status failed " - "(all bits set); ignoring\n"); - goto bail; - } - ipath_stats.sps_hwerrs++; - - /* - * Always clear the error status register, except MEMBISTFAIL, - * regardless of whether we continue or stop using the chip. - * We want that set so we know it failed, even across driver reload. - * We'll still ignore it in the hwerrmask. We do this partly for - * diagnostics, but also for support. - */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, - hwerrs&~INFINIPATH_HWE_MEMBISTFAILED); - - hwerrs &= dd->ipath_hwerrmask; - - /* We log some errors to EEPROM, check if we have any of those.
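ipath_format_hwerrors(), called further down, walks a {bit, message} table like ipath_7220_hwerror_msgs above and appends the text for every set bit. A compact userspace sketch of that mechanism (the bit positions and the append strategy here are hypothetical, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hwerror_msg { uint64_t mask; const char *msg; };

static const struct hwerror_msg msgs[] = {
	{ 1ULL << 28, "PCIe Poisoned TLP" },		/* made-up bits */
	{ 1ULL << 29, "PCIe completion timeout" },
};

static void format_hwerrors(uint64_t hwerrs, char *buf, size_t len)
{
	size_t i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++) {
		if (!(hwerrs & msgs[i].mask))
			continue;
		strncat(buf, "[", len - strlen(buf) - 1);
		strncat(buf, msgs[i].msg, len - strlen(buf) - 1);
		strncat(buf, "]", len - strlen(buf) - 1);
	}
}

int main(void)
{
	char msg[128];

	format_hwerrors(3ULL << 28, msg, sizeof(msg));
	printf("%s\n", msg);	/* both table entries match */
	return 0;
}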
*/ - for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) - if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log) - ipath_inc_eeprom_err(dd, log_idx, 1); - /* - * Make sure we get this much out, unless told to be quiet, - * or it's occurred within the last 5 seconds. - */ - if ((hwerrs & ~(dd->ipath_lasthwerror | - ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | - INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) - << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) || - (ipath_debug & __IPATH_VERBDBG)) - dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " - "(cleared)\n", (unsigned long long) hwerrs); - dd->ipath_lasthwerror |= hwerrs; - - if (hwerrs & ~dd->ipath_hwe_bitsextant) - ipath_dev_err(dd, "hwerror interrupt with unknown errors " - "%llx set\n", (unsigned long long) - (hwerrs & ~dd->ipath_hwe_bitsextant)); - - if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) - ipath_sd7220_clr_ibpar(dd); - - ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); - if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) { - /* - * Parity errors in send memory are recoverable by h/w - * just do housekeeping, exit freeze mode and continue. - */ - if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | - INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) - << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) { - ipath_7220_txe_recover(dd); - hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | - INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) - << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT); - } - if (hwerrs) { - /* - * If any set that we aren't ignoring only make the - * complaint once, in case it's stuck or recurring, - * and we get here multiple times - * Force link down, so switch knows, and - * LEDs are turned off. - */ - if (dd->ipath_flags & IPATH_INITTED) { - ipath_set_linkstate(dd, IPATH_IB_LINKDOWN); - ipath_setup_7220_setextled(dd, - INFINIPATH_IBCS_L_STATE_DOWN, - INFINIPATH_IBCS_LT_STATE_DISABLED); - ipath_dev_err(dd, "Fatal Hardware Error " - "(freeze mode), no longer" - " usable, SN %.16s\n", - dd->ipath_serial); - isfatal = 1; - } - /* - * Mark as having had an error for driver, and also - * for /sys and status word mapped to user programs. - * This marks unit as not usable, until reset. 
- */ - *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; - *dd->ipath_statusp |= IPATH_STATUS_HWERROR; - dd->ipath_flags &= ~IPATH_INITTED; - } else { - ipath_dbg("Clearing freezemode on ignored or " - "recovered hardware error\n"); - ipath_clear_freeze(dd); - } - } - - *msg = '\0'; - - if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { - strlcat(msg, "[Memory BIST test failed, " - "InfiniPath hardware unusable]", msgl); - /* ignore from now on, so disable until driver reloaded */ - *dd->ipath_statusp |= IPATH_STATUS_HWERROR; - dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, - dd->ipath_hwerrmask); - } - - ipath_format_hwerrors(hwerrs, - ipath_7220_hwerror_msgs, - ARRAY_SIZE(ipath_7220_hwerror_msgs), - msg, msgl); - - if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK - << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) { - bits = (u32) ((hwerrs >> - INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) & - INFINIPATH_HWE_PCIEMEMPARITYERR_MASK); - snprintf(bitsmsg, sizeof bitsmsg, - "[PCIe Mem Parity Errs %x] ", bits); - strlcat(msg, bitsmsg, msgl); - } - -#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \ - INFINIPATH_HWE_COREPLL_RFSLIP) - - if (hwerrs & _IPATH_PLL_FAIL) { - snprintf(bitsmsg, sizeof bitsmsg, - "[PLL failed (%llx), InfiniPath hardware unusable]", - (unsigned long long) hwerrs & _IPATH_PLL_FAIL); - strlcat(msg, bitsmsg, msgl); - /* ignore from now on, so disable until driver reloaded */ - dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, - dd->ipath_hwerrmask); - } - - if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) { - /* - * If it occurs, it is left masked since the external - * interface is unused. - */ - dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED; - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, - dd->ipath_hwerrmask); - } - - ipath_dev_err(dd, "%s hardware error\n", msg); - /* - * For /sys status file. If no trailing } is copied, we'll - * know it was truncated. - */ - if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) - snprintf(dd->ipath_freezemsg, dd->ipath_freezelen, - "{%s}", msg); -bail:; -} - -/** - * ipath_7220_boardname - fill in the board name - * @dd: the infinipath device - * @name: the output buffer - * @namelen: the size of the output buffer - * - * info is based on the board revision register - */ -static int ipath_7220_boardname(struct ipath_devdata *dd, char *name, - size_t namelen) -{ - char *n = NULL; - u8 boardrev = dd->ipath_boardrev; - int ret; - - if (boardrev == 15) { - /* - * Emulator sometimes comes up all-ones, rather than zero.
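The "{%s}" convention used for ipath_freezemsg a few lines above is a cheap truncation detector: the closing brace only survives if the whole message fit the buffer. A standalone sketch (buffer sizes and the harness are hypothetical):

#include <stdio.h>
#include <string.h>

static void write_freeze_msg(char *buf, size_t len, const char *msg)
{
	/* brace-wrap: a reader that sees no trailing '}' knows the
	 * message was cut off by the buffer size */
	snprintf(buf, len, "{%s}", msg);
}

int main(void)
{
	char big[64], small[8];

	write_freeze_msg(big, sizeof(big), "PLL failed");
	write_freeze_msg(small, sizeof(small), "PLL failed");
	printf("'%s' complete=%d\n", big, big[strlen(big) - 1] == '}');
	printf("'%s' complete=%d\n", small, small[strlen(small) - 1] == '}');
	return 0;
}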
- */ - boardrev = 0; - dd->ipath_boardrev = boardrev; - } - switch (boardrev) { - case 0: - n = "InfiniPath_7220_Emulation"; - break; - case 1: - n = "InfiniPath_QLE7240"; - break; - case 2: - n = "InfiniPath_QLE7280"; - break; - case 3: - n = "InfiniPath_QLE7242"; - break; - case 4: - n = "InfiniPath_QEM7240"; - break; - case 5: - n = "InfiniPath_QMI7240"; - break; - case 6: - n = "InfiniPath_QMI7264"; - break; - case 7: - n = "InfiniPath_QMH7240"; - break; - case 8: - n = "InfiniPath_QME7240"; - break; - case 9: - n = "InfiniPath_QLE7250"; - break; - case 10: - n = "InfiniPath_QLE7290"; - break; - case 11: - n = "InfiniPath_QEM7250"; - break; - case 12: - n = "InfiniPath_QLE-Bringup"; - break; - default: - ipath_dev_err(dd, - "Don't yet know about board with ID %u\n", - boardrev); - snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u", - boardrev); - break; - } - if (n) - snprintf(name, namelen, "%s", n); - - if (dd->ipath_majrev != 5 || !dd->ipath_minrev || - dd->ipath_minrev > 2) { - ipath_dev_err(dd, "Unsupported InfiniPath hardware " - "revision %u.%u!\n", - dd->ipath_majrev, dd->ipath_minrev); - ret = 1; - } else if (dd->ipath_minrev == 1 && - !(dd->ipath_flags & IPATH_INITTED)) { - /* Rev1 chips are prototype. Complain at init, but allow use */ - ipath_dev_err(dd, "Unsupported hardware " - "revision %u.%u, Contact support@qlogic.com\n", - dd->ipath_majrev, dd->ipath_minrev); - ret = 0; - } else - ret = 0; - - /* - * Set here not in ipath_init_*_funcs because we have to do - * it after we can read chip registers. - */ - dd->ipath_ureg_align = 0x10000; /* 64KB alignment */ - - return ret; -} - -/** - * ipath_7220_init_hwerrors - enable hardware errors - * @dd: the infinipath device - * - * now that we have finished initializing everything that might reasonably - * cause a hardware error, and cleared those error bits as they occur, - * we can enable hardware errors in the mask (potentially enabling - * freeze mode), and enable hardware errors as errors (along with - * everything else) in errormask - */ -static void ipath_7220_init_hwerrors(struct ipath_devdata *dd) -{ - ipath_err_t val; - u64 extsval; - - extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus); - - if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST | - INFINIPATH_EXTS_MEMBIST_DISABLED))) - ipath_dev_err(dd, "MemBIST did not complete!\n"); - if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED) - dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n"); - - val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */ - - if (!dd->ipath_boardrev) /* no PLL for Emulator */ - val &= ~INFINIPATH_HWE_SERDESPLLFAILED; - - if (dd->ipath_minrev == 1) - val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */ - - val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR; - dd->ipath_hwerrmask = val; - - /* - * special trigger "error" is for debugging purposes. It - * works around a processor/chipset problem. The error - * interrupt allows us to count occurrences, but we don't - * want to pay the overhead for normal use. Emulation only - */ - if (!dd->ipath_boardrev) - dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER; -} - -/* - * All detailed interaction with the SerDes has been moved to ipath_sd7220.c - * - * The portion of IBA7220-specific bringup_serdes() that actually deals with - * registers and memory within the SerDes itself is ipath_sd7220_init().
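ipath_7220_init_hwerrors() above builds its mask by starting from "everything is an error" and subtracting known exceptions. The shape of that logic as a standalone sketch (the bit values below are invented placeholders, not the chip's):

#include <stdint.h>
#include <stdio.h>

#define HWE_SERDESPLLFAILED (1ULL << 60)	/* placeholder bit values */
#define HWE_TXE_LAUNCHFIFO  (1ULL << 42)

static uint64_t build_hwerrmask(int is_emulator, int is_rev1)
{
	uint64_t val = ~0ULL;		/* default: every hwerror interrupts */

	if (is_emulator)
		val &= ~HWE_SERDESPLLFAILED;	/* emulator has no PLL */
	if (is_rev1)
		val &= ~HWE_TXE_LAUNCHFIFO;	/* rev1 parity erratum */
	return val;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)build_hwerrmask(0, 1));
	return 0;
}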
- */ - -/** - * ipath_7220_bringup_serdes - bring up the serdes - * @dd: the infinipath device - */ -static int ipath_7220_bringup_serdes(struct ipath_devdata *dd) -{ - int ret = 0; - u64 val, prev_val, guid; - int was_reset; /* Note whether uC was reset */ - - ipath_dbg("Trying to bringup serdes\n"); - - if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) & - INFINIPATH_HWE_SERDESPLLFAILED) { - ipath_dbg("At start, serdes PLL failed bit set " - "in hwerrstatus, clearing and continuing\n"); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, - INFINIPATH_HWE_SERDESPLLFAILED); - } - - dd->ibdeltainprog = 1; - dd->ibsymsnap = - ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = - ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); - - if (!dd->ipath_ibcddrctrl) { - /* not on re-init after reset */ - dd->ipath_ibcddrctrl = - ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl); - - if (dd->ipath_link_speed_enabled == - (IPATH_IB_SDR | IPATH_IB_DDR)) - dd->ipath_ibcddrctrl |= - IBA7220_IBC_SPEED_AUTONEG_MASK | - IBA7220_IBC_IBTA_1_2_MASK; - else - dd->ipath_ibcddrctrl |= - dd->ipath_link_speed_enabled == IPATH_IB_DDR - ? IBA7220_IBC_SPEED_DDR : - IBA7220_IBC_SPEED_SDR; - if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X | - IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)) - dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG; - else - dd->ipath_ibcddrctrl |= - dd->ipath_link_width_enabled == IB_WIDTH_4X - ? IBA7220_IBC_WIDTH_4X_ONLY : - IBA7220_IBC_WIDTH_1X_ONLY; - - /* always enable these on driver reload, not sticky */ - dd->ipath_ibcddrctrl |= - IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT; - dd->ipath_ibcddrctrl |= - IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; - /* - * automatic lane reversal detection for receive - * doesn't work correctly in rev 1, so disable it - * on that rev, otherwise enable (disabling not - * sticky across reload for >rev1) - */ - if (dd->ipath_minrev == 1) - dd->ipath_ibcddrctrl &= - ~IBA7220_IBC_LANE_REV_SUPPORTED; - else - dd->ipath_ibcddrctrl |= - IBA7220_IBC_LANE_REV_SUPPORTED; - } - - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl, - dd->ipath_ibcddrctrl); - - ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0Ull); - - /* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */ - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl); - /* remember if uC was in Reset or not, for dactrim */ - was_reset = (val & 1); - ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n", - was_reset ? "Asserted" : "Negated", (unsigned long long) - ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); - - if (dd->ipath_boardrev) { - /* - * Hardware is not emulator, and may have been reset. Init it. 
- * Below will release reset, but needs to know if chip was - * originally in reset, to only trim DACs on first time - * after chip reset or powercycle (not driver reload) - */ - ret = ipath_sd7220_init(dd, was_reset); - } - - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); - prev_val = val; - val |= INFINIPATH_XGXS_FC_SAFE; - if (val != prev_val) { - ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); - } - if (val & INFINIPATH_XGXS_RESET) - val &= ~INFINIPATH_XGXS_RESET; - if (val != prev_val) - ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); - - ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n", - (unsigned long long) - ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig), - (unsigned long long) prev_val); - - guid = be64_to_cpu(dd->ipath_guid); - - if (!guid) { - /* have to have something, so use likely unique tsc */ - guid = get_cycles(); - ipath_dbg("No GUID for heartbeat, faking %llx\n", - (unsigned long long)guid); - } else - ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", - (unsigned long long) guid); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid); - return ret; -} - -static void ipath_7220_config_jint(struct ipath_devdata *dd, - u16 idle_ticks, u16 max_packets) -{ - - /* - * We can request a receive interrupt for 1 or more packets - * from current offset. - */ - if (idle_ticks == 0 || max_packets == 0) - /* interrupt after one packet if no mitigation */ - dd->ipath_rhdrhead_intr_off = - 1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT; - else - /* Turn off RcvHdrHead interrupts if using mitigation */ - dd->ipath_rhdrhead_intr_off = 0ULL; - - /* refresh kernel RcvHdrHead registers... */ - ipath_write_ureg(dd, ur_rcvhdrhead, - dd->ipath_rhdrhead_intr_off | - dd->ipath_pd[0]->port_head, 0); - - dd->ipath_jint_max_packets = max_packets; - dd->ipath_jint_idle_ticks = idle_ticks; - ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload, - ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) | - idle_ticks); -} - -/** - * ipath_7220_quiet_serdes - set serdes to txidle - * @dd: the infinipath device - * Called when driver is being unloaded - */ -static void ipath_7220_quiet_serdes(struct ipath_devdata *dd) -{ - u64 val; - if (dd->ibsymdelta || dd->iblnkerrdelta || - dd->ibdeltainprog) { - u64 diagc; - /* enable counter writes */ - diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, - diagc | INFINIPATH_DC_COUNTERWREN); - - if (dd->ibsymdelta || dd->ibdeltainprog) { - val = ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - if (dd->ibdeltainprog) - val -= val - dd->ibsymsnap; - val -= dd->ibsymdelta; - ipath_write_creg(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt, val); - } - if (dd->iblnkerrdelta || dd->ibdeltainprog) { - val = ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - if (dd->ibdeltainprog) - val -= val - dd->iblnkerrsnap; - val -= dd->iblnkerrdelta; - ipath_write_creg(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt, val); - } - - /* and disable counter writes */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); - } - - dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG; - wake_up(&dd->ipath_autoneg_wait); - cancel_delayed_work(&dd->ipath_autoneg_work); - flush_scheduled_work(); - ipath_shutdown_relock_poll(dd); - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); - val |= INFINIPATH_XGXS_RESET; - ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); -} - -static int 
ipath_7220_intconfig(struct ipath_devdata *dd) -{ - ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks, - dd->ipath_jint_max_packets); - return 0; -} - -/** - * ipath_setup_7220_setextled - set the state of the two external LEDs - * @dd: the infinipath device - * @lst: the L state - * @ltst: the LT state - * - * These LEDs indicate the physical and logical state of IB link. - * For this chip (at least with recommended board pinouts), LED1 - * is Yellow (logical state) and LED2 is Green (physical state), - * - * Note: We try to match the Mellanox HCA LED behavior as best - * we can. Green indicates physical link state is OK (something is - * plugged in, and we can train). - * Amber indicates the link is logically up (ACTIVE). - * Mellanox further blinks the amber LED to indicate data packet - * activity, but we have no hardware support for that, so it would - * require waking up every 10-20 msecs and checking the counters - * on the chip, and then turning the LED off if appropriate. That's - * visible overhead, so not something we will do. - * - */ -static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst, - u64 ltst) -{ - u64 extctl, ledblink = 0; - unsigned long flags = 0; - - /* the diags use the LED to indicate diag info, so we leave - * the external LED alone when the diags are running */ - if (ipath_diag_inuse) - return; - - /* Allow override of LED display for, e.g. Locating system in rack */ - if (dd->ipath_led_override) { - ltst = (dd->ipath_led_override & IPATH_LED_PHYS) - ? INFINIPATH_IBCS_LT_STATE_LINKUP - : INFINIPATH_IBCS_LT_STATE_DISABLED; - lst = (dd->ipath_led_override & IPATH_LED_LOG) - ? INFINIPATH_IBCS_L_STATE_ACTIVE - : INFINIPATH_IBCS_L_STATE_DOWN; - } - - spin_lock_irqsave(&dd->ipath_gpio_lock, flags); - extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON | - INFINIPATH_EXTC_LED2PRIPORT_ON); - if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) { - extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON; - /* - * counts are in chip clock (4ns) periods. - * This is 1/16 sec (66.6ms) on, - * 3/16 sec (187.5 ms) off, with packets rcvd - */ - ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT) - | ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT); - } - if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE) - extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON; - dd->ipath_extctrl = extctl; - ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); - spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags); - - if (ledblink) /* blink the LED on packet receive */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt, - ledblink); -} - -/* - * Similar to pci_intx(pdev, 1), except that we make sure - * msi is off... - */ -static void ipath_enable_intx(struct pci_dev *pdev) -{ - u16 cw, new; - int pos; - - /* first, turn on INTx */ - pci_read_config_word(pdev, PCI_COMMAND, &cw); - new = cw & ~PCI_COMMAND_INTX_DISABLE; - if (new != cw) - pci_write_config_word(pdev, PCI_COMMAND, new); - - /* then turn off MSI */ - pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); - if (pos) { - pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw); - new = cw & ~PCI_MSI_FLAGS_ENABLE; - if (new != cw) - pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new); - } -} - -static int ipath_msi_enabled(struct pci_dev *pdev) -{ - int pos, ret = 0; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); - if (pos) { - u16 cw; - - pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw); - ret = !!(cw & PCI_MSI_FLAGS_ENABLE); - } - return ret; -} - -/* - * disable msi interrupt if enabled, and clear the flag. 
- * flag is used primarily for the fallback to INTx, but - * is also used in reinit after reset as a flag. - */ -static void ipath_7220_nomsi(struct ipath_devdata *dd) -{ - dd->ipath_msi_lo = 0; - - if (ipath_msi_enabled(dd->pcidev)) { - /* - * free, but don't zero; later kernels require - * it be freed before disable_msi, so the intx - * setup has to request it again. - */ - if (dd->ipath_irq) - free_irq(dd->ipath_irq, dd); - pci_disable_msi(dd->pcidev); - } -} - -/* - * ipath_setup_7220_cleanup - clean up any per-chip chip-specific stuff - * @dd: the infinipath device - * - * Nothing but msi interrupt cleanup for now. - * - * This is called during driver unload. - */ -static void ipath_setup_7220_cleanup(struct ipath_devdata *dd) -{ - ipath_7220_nomsi(dd); -} - - -static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev) -{ - u16 linkstat, minwidth, speed; - int pos; - - pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP); - if (!pos) { - ipath_dev_err(dd, "Can't find PCI Express capability!\n"); - goto bail; - } - - pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA, - &linkstat); - /* - * speed is bits 0-4, linkwidth is bits 4-8 - * no defines for them in headers - */ - speed = linkstat & 0xf; - linkstat >>= 4; - linkstat &= 0x1f; - dd->ipath_lbus_width = linkstat; - switch (boardrev) { - case 0: - case 2: - case 10: - case 12: - minwidth = 16; /* x16 capable boards */ - break; - default: - minwidth = 8; /* x8 capable boards */ - break; - } - - switch (speed) { - case 1: - dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */ - break; - case 2: - dd->ipath_lbus_speed = 5000; /* Gen1, 5GHz */ - break; - default: /* not defined, assume gen1 */ - dd->ipath_lbus_speed = 2500; - break; - } - - if (linkstat < minwidth) - ipath_dev_err(dd, - "PCIe width %u (x%u HCA), performance " - "reduced\n", linkstat, minwidth); - else - ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n", - dd->ipath_lbus_speed, linkstat, minwidth); - - if (speed != 1) - ipath_dev_err(dd, - "PCIe linkspeed %u is incorrect; " - "should be 1 (2500)!\n", speed); - -bail: - /* fill in string, even on errors */ - snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info), - "PCIe,%uMHz,x%u\n", - dd->ipath_lbus_speed, - dd->ipath_lbus_width); - return; -} - - -/** - * ipath_setup_7220_config - setup PCIe config related stuff - * @dd: the infinipath device - * @pdev: the PCI device - * - * The pci_enable_msi() call will fail on systems with MSI quirks - * such as those with AMD8131, even if the device of interest is not - * attached to that device, (in the 2.6.13 - 2.6.15 kernels, at least, fixed - * late in 2.6.16). - * All that can be done is to edit the kernel source to remove the quirk - * check until that is fixed. - * We do not need to call enable_msi() for our HyperTransport chip, - * even though it uses MSI, and we want to avoid the quirk warning, so - * So we call enable_msi only for PCIe. If we do end up needing - * pci_enable_msi at some point in the future for HT, we'll move the - * call back into the main init_one code. - * We save the msi lo and hi values, so we can restore them after - * chip reset (the kernel PCI infrastructure doesn't yet handle that - * correctly). 
- */ -static int ipath_setup_7220_config(struct ipath_devdata *dd, - struct pci_dev *pdev) -{ - int pos, ret = -1; - u32 boardrev; - - dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); - if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto")) - ret = pci_enable_msi(pdev); - if (ret) { - if (!strcmp(int_type, "force_msi")) { - ipath_dev_err(dd, "pci_enable_msi failed: %d, " - "force_msi is on, so not continuing.\n", - ret); - return ret; - } - - ipath_enable_intx(pdev); - if (!strcmp(int_type, "auto")) - ipath_dev_err(dd, "pci_enable_msi failed: %d, " - "falling back to INTx\n", ret); - } else if (pos) { - u16 control; - pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, - &dd->ipath_msi_lo); - pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, - &dd->ipath_msi_hi); - pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, - &control); - /* now save the data (vector) info */ - pci_read_config_word(pdev, - pos + ((control & PCI_MSI_FLAGS_64BIT) - ? PCI_MSI_DATA_64 : - PCI_MSI_DATA_32), - &dd->ipath_msi_data); - } else - ipath_dev_err(dd, "Can't find MSI capability, " - "can't save MSI settings for reset\n"); - - dd->ipath_irq = pdev->irq; - - /* - * We save the cachelinesize also, although it doesn't - * really matter. - */ - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, - &dd->ipath_pci_cacheline); - - /* - * this function called early, ipath_boardrev not set yet. Can't - * use ipath_read_kreg64() yet, too early in init, so use readq() - */ - boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision]) - >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK; - - ipath_7220_pcie_params(dd, boardrev); - - dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA | - IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE; - dd->ipath_pioupd_thresh = 4U; /* set default update threshold */ - return 0; -} - -static void ipath_init_7220_variables(struct ipath_devdata *dd) -{ - /* - * setup the register offsets, since they are different for each - * chip - */ - dd->ipath_kregs = &ipath_7220_kregs; - dd->ipath_cregs = &ipath_7220_cregs; - - /* - * bits for selecting i2c direction and values, - * used for I2C serial flash - */ - dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; - dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; - dd->ipath_gpio_sda = IPATH_GPIO_SDA; - dd->ipath_gpio_scl = IPATH_GPIO_SCL; - - /* - * Fill in data for field-values that change in IBA7220. - * We dynamically specify only the mask for LINKTRAININGSTATE - * and only the shift for LINKSTATE, as they are the only ones - * that change. Also precalculate the 3 link states of interest - * and the combined mask. - */ - dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT; - dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK; - dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK << - dd->ibcs_ls_shift) | dd->ibcs_lts_mask; - dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP << - INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | - (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift); - dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP << - INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | - (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift); - dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP << - INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | - (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift); - - /* - * Fill in data for ibcc field-values that change in IBA7220. 
- * We dynamically specify only the mask for LINKINITCMD - * and only the shift for LINKCMD and MAXPKTLEN, as they are - * the only ones that change. - */ - dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK; - dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT; - dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT; - - /* Fill in shifts for RcvCtrl. */ - dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT; - dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT; - dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT; - dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT; - - /* variables for sanity checking interrupt and errors */ - dd->ipath_hwe_bitsextant = - (INFINIPATH_HWE_RXEMEMPARITYERR_MASK << - INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) | - (INFINIPATH_HWE_TXEMEMPARITYERR_MASK << - INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) | - (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK << - INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) | - INFINIPATH_HWE_PCIE1PLLFAILED | - INFINIPATH_HWE_PCIE0PLLFAILED | - INFINIPATH_HWE_PCIEPOISONEDTLP | - INFINIPATH_HWE_PCIECPLTIMEOUT | - INFINIPATH_HWE_PCIEBUSPARITYXTLH | - INFINIPATH_HWE_PCIEBUSPARITYXADM | - INFINIPATH_HWE_PCIEBUSPARITYRADM | - INFINIPATH_HWE_MEMBISTFAILED | - INFINIPATH_HWE_COREPLL_FBSLIP | - INFINIPATH_HWE_COREPLL_RFSLIP | - INFINIPATH_HWE_SERDESPLLFAILED | - INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | - INFINIPATH_HWE_IBCBUSFRSPCPARITYERR | - INFINIPATH_HWE_PCIECPLDATAQUEUEERR | - INFINIPATH_HWE_PCIECPLHDRQUEUEERR | - INFINIPATH_HWE_SDMAMEMREADERR | - INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED | - INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT | - INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT | - INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT | - INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT | - INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR | - INFINIPATH_HWE_IB_UC_MEMORYPARITYERR | - INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR | - INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR; - dd->ipath_i_bitsextant = - INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED | - (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | - (INFINIPATH_I_RCVAVAIL_MASK << - INFINIPATH_I_RCVAVAIL_SHIFT) | - INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | - INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO | - INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE; - dd->ipath_e_bitsextant = - INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | - INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | - INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | - INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR | - INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP | - INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION | - INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL | - INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN | - INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK | - INFINIPATH_E_SENDSPECIALTRIGGER | - INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN | - INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN | - INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT | - INFINIPATH_E_SDROPPEDDATAPKT | - INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | - INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE | - INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | - INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | - INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | - INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | - INFINIPATH_E_SDMAUNEXPDATA | - INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR | - INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE | - INFINIPATH_E_SDMADESCADDRMISALIGN | - INFINIPATH_E_INVALIDEEPCMD; - - 
dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; - dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; - dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT; - dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT; - dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED - | IPATH_HAS_LINK_LATENCY; - - /* - * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. - * 2 is Some Misc, 3 is reserved for future. - */ - dd->ipath_eep_st_masks[0].hwerrs_to_log = - INFINIPATH_HWE_TXEMEMPARITYERR_MASK << - INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT; - - dd->ipath_eep_st_masks[1].hwerrs_to_log = - INFINIPATH_HWE_RXEMEMPARITYERR_MASK << - INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT; - - dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET; - - ipath_linkrecovery = 0; - - init_waitqueue_head(&dd->ipath_autoneg_wait); - INIT_DELAYED_WORK(&dd->ipath_autoneg_work, autoneg_work); - - dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; - dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR; - - dd->ipath_link_width_enabled = dd->ipath_link_width_supported; - dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported; - /* - * set the initial values to reasonable default, will be set - * for real when link is up. - */ - dd->ipath_link_width_active = IB_WIDTH_4X; - dd->ipath_link_speed_active = IPATH_IB_SDR; - dd->delay_mult = rate_to_delay[0][1]; -} - - -/* - * Setup the MSI stuff again after a reset. I'd like to just call - * pci_enable_msi() and request_irq() again, but when I do that, - * the MSI enable bit doesn't get set in the command word, and - * we switch to to a different interrupt vector, which is confusing, - * so I instead just do it all inline. Perhaps somehow can tie this - * into the PCIe hotplug support at some point - * Note, because I'm doing it all here, I don't call pci_disable_msi() - * or free_irq() at the start of ipath_setup_7220_reset(). - */ -static int ipath_reinit_msi(struct ipath_devdata *dd) -{ - int ret = 0; - - int pos; - u16 control; - if (!dd->ipath_msi_lo) /* Using intX, or init problem */ - goto bail; - - pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); - if (!pos) { - ipath_dev_err(dd, "Can't find MSI capability, " - "can't restore MSI settings\n"); - goto bail; - } - ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n", - dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO); - pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO, - dd->ipath_msi_lo); - ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n", - dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI); - pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI, - dd->ipath_msi_hi); - pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control); - if (!(control & PCI_MSI_FLAGS_ENABLE)) { - ipath_cdbg(VERBOSE, "MSI control at off %x was %x, " - "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS, - control, control | PCI_MSI_FLAGS_ENABLE); - control |= PCI_MSI_FLAGS_ENABLE; - pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, - control); - } - /* now rewrite the data (vector) info */ - pci_write_config_word(dd->pcidev, pos + - ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8), - dd->ipath_msi_data); - ret = 1; - -bail: - if (!ret) { - ipath_dbg("Using INTx, MSI disabled or not configured\n"); - ipath_enable_intx(dd->pcidev); - ret = 1; - } - /* - * We restore the cachelinesize also, although it doesn't really - * matter. 
- */ - pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, - dd->ipath_pci_cacheline); - /* and now set the pci master bit again */ - pci_set_master(dd->pcidev); - - return ret; -} - -/* - * This routine sleeps, so it can only be called from user context, not - * from interrupt context. If we need interrupt context, we can split - * it into two routines. - */ -static int ipath_setup_7220_reset(struct ipath_devdata *dd) -{ - u64 val; - int i; - int ret; - u16 cmdval; - - pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval); - - /* Use dev_err so it shows up in logs, etc. */ - ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit); - - /* keep chip from being accessed in a few places */ - dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT); - val = dd->ipath_control | INFINIPATH_C_RESET; - ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); - mb(); - - for (i = 1; i <= 5; i++) { - int r; - - /* - * Allow MBIST, etc. to complete; longer on each retry. - * We sometimes get machine checks from bus timeout if no - * response, so for now, make it *really* long. - */ - msleep(1000 + (1 + i) * 2000); - r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, - dd->ipath_pcibar0); - if (r) - ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r); - r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, - dd->ipath_pcibar1); - if (r) - ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r); - /* now re-enable memory access */ - pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval); - r = pci_enable_device(dd->pcidev); - if (r) - ipath_dev_err(dd, "pci_enable_device failed after " - "reset: %d\n", r); - /* - * whether it fully enabled or not, mark as present, - * again (but not INITTED) - */ - dd->ipath_flags |= IPATH_PRESENT; - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); - if (val == dd->ipath_revision) { - ipath_cdbg(VERBOSE, "Got matching revision " - "register %llx on try %d\n", - (unsigned long long) val, i); - ret = ipath_reinit_msi(dd); - goto bail; - } - /* Probably getting -1 back */ - ipath_dbg("Didn't get expected revision register, " - "got %llx, try %d\n", (unsigned long long) val, - i + 1); - } - ret = 0; /* failed */ - -bail: - if (ret) - ipath_7220_pcie_params(dd, dd->ipath_boardrev); - - return ret; -} - -/** - * ipath_7220_put_tid - write a TID to the chip - * @dd: the infinipath device - * @tidptr: pointer to the expected TID (in chip) to update - * @tidtype: 0 for eager, 1 for expected - * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing - * - * This exists as a separate routine to allow for selection of the - * appropriate "flavor". The static calls in cleanup just use the - * revision-agnostic form, as they are not performance critical. 
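Before the function body (next), a plain-C sketch of the address packing it validates. The shift of 11 follows from the 2 KB alignment the code enforces; the size bound here is a placeholder, since the value of IBA7220_TID_SZ_SHIFT is not shown in this hunk:

#include <stdint.h>

#define TID_PA_SHIFT 11	/* 2 KB alignment, per the checks below */
#define TID_SZ_SHIFT 37	/* placeholder for IBA7220_TID_SZ_SHIFT */

/* Returns 0 and the packed value on success, -1 if the physical
 * address is misaligned or too large for the TID field. */
static int pack_tid_pa(uint64_t pa, uint64_t *chippa_out)
{
	uint64_t chippa = pa >> TID_PA_SHIFT;

	if (pa != (chippa << TID_PA_SHIFT))
		return -1;	/* not 2 KB aligned */
	if (chippa >= (1ULL << TID_SZ_SHIFT))
		return -1;	/* larger than the field can hold */
	*chippa_out = chippa;
	return 0;
}

int main(void)
{
	uint64_t packed;

	return pack_tid_pa(0x2000, &packed);	/* aligned, in range: 0 */
}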
- */ -static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, - u32 type, unsigned long pa) -{ - if (pa != dd->ipath_tidinvalid) { - u64 chippa = pa >> IBA7220_TID_PA_SHIFT; - - /* paranoia checks */ - if (pa != (chippa << IBA7220_TID_PA_SHIFT)) { - dev_info(&dd->pcidev->dev, "BUG: physaddr %lx " - "not 2KB aligned!\n", pa); - return; - } - if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) { - ipath_dev_err(dd, - "BUG: Physical page address 0x%lx " - "larger than supported\n", pa); - return; - } - - if (type == RCVHQ_RCV_TYPE_EAGER) - chippa |= dd->ipath_tidtemplate; - else /* for now, always full 4KB page */ - chippa |= IBA7220_TID_SZ_4K; - writeq(chippa, tidptr); - } else - writeq(pa, tidptr); - mmiowb(); -} - -/** - * ipath_7220_clear_tid - clear all TID entries for a port, expected and eager - * @dd: the infinipath device - * @port: the port - * - * clear all TID entries for a port, expected and eager. - * Used from ipath_close(). On this chip, TIDs are only 32 bits, - * not 64, but they are still on 64 bit boundaries, so tidbase - * is declared as u64 * for the pointer math, even though we write 32 bits - */ -static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port) -{ - u64 __iomem *tidbase; - unsigned long tidinv; - int i; - - if (!dd->ipath_kregbase) - return; - - ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port); - - tidinv = dd->ipath_tidinvalid; - tidbase = (u64 __iomem *) - ((char __iomem *)(dd->ipath_kregbase) + - dd->ipath_rcvtidbase + - port * dd->ipath_rcvtidcnt * sizeof(*tidbase)); - - for (i = 0; i < dd->ipath_rcvtidcnt; i++) - ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, - tidinv); - - tidbase = (u64 __iomem *) - ((char __iomem *)(dd->ipath_kregbase) + - dd->ipath_rcvegrbase + port_egrtid_idx(dd, port) - * sizeof(*tidbase)); - - for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--) - ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER, - tidinv); -} - -/** - * ipath_7220_tidtemplate - setup constants for TID updates - * @dd: the infinipath device - * - * We setup stuff that we use a lot, to avoid calculating each time - */ -static void ipath_7220_tidtemplate(struct ipath_devdata *dd) -{ - /* For now, we always allocate 4KB buffers (at init) so we can - * receive max size packets. We may want a module parameter to - * specify 2KB or 4KB and/or make be per port instead of per device - * for those who want to reduce memory footprint. Note that the - * ipath_rcvhdrentsize size must be large enough to hold the largest - * IB header (currently 96 bytes) that we expect to handle (plus of - * course the 2 dwords of RHF). - */ - if (dd->ipath_rcvegrbufsize == 2048) - dd->ipath_tidtemplate = IBA7220_TID_SZ_2K; - else if (dd->ipath_rcvegrbufsize == 4096) - dd->ipath_tidtemplate = IBA7220_TID_SZ_4K; - else { - dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize " - "%u, using %u\n", dd->ipath_rcvegrbufsize, - 4096); - dd->ipath_tidtemplate = IBA7220_TID_SZ_4K; - } - dd->ipath_tidinvalid = 0; -} - -static int ipath_7220_early_init(struct ipath_devdata *dd) -{ - u32 i, s; - - if (strcmp(int_type, "auto") && - strcmp(int_type, "force_msi") && - strcmp(int_type, "force_intx")) { - ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting " - "auto, force_msi or force_intx\n", int_type); - return -EINVAL; - } - - /* - * Control[4] has been added to change the arbitration within - * the SDMA engine between favoring data fetches over descriptor - * fetches. 
ipath_sdma_fetch_arb==0 gives data fetches priority. - */ - if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1)) - dd->ipath_control |= 1<<4; - - dd->ipath_flags |= IPATH_4BYTE_TID; - - /* - * For openfabrics, we need to be able to handle an IB header of - * 24 dwords. HT chip has arbitrary sized receive buffers, so we - * made them the same size as the PIO buffers. This chip does not - * handle arbitrary size buffers, so we need the header large enough - * to handle largest IB header, but still have room for a 2KB MTU - * standard IB packet. - */ - dd->ipath_rcvhdrentsize = 24; - dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; - dd->ipath_rhf_offset = - dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32); - - dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048; - /* - * the min() check here is currently a nop, but it may not always - * be, depending on just how we do ipath_rcvegrbufsize - */ - dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k : - dd->ipath_piosize2k, - dd->ipath_rcvegrbufsize + - (dd->ipath_rcvhdrentsize << 2)); - dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; - - ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS, - INFINIPATH_JINT_DEFAULT_MAX_PACKETS); - - if (dd->ipath_boardrev) /* no eeprom on emulator */ - ipath_get_eeprom_info(dd); - - /* start of code to check and print procmon */ - s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon)); - s &= ~(1U<<31); /* clear done bit */ - s |= 1U<<14; /* clear counter (write 1 to clear) */ - ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s); - /* make sure clear_counter low long enough before start */ - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); - - s &= ~(1U<<14); /* allow counter to count (before starting) */ - ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); - s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon)); - - s |= 1U<<15; /* start the counter */ - s &= ~(1U<<31); /* clear done bit */ - s &= ~0x7ffU; /* clear frequency bits */ - s |= 0xe29; /* set frequency bits, in case cleared */ - ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s); - - s = 0; - for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) { - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); - s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon)); - } - if (!(s&(1U<<31))) - ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s); - else - ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff); - - return 0; -} - -/** - * ipath_init_7220_get_base_info - set chip-specific flags for user code - * @pd: the infinipath port - * @kbase: ipath_base_info pointer - * - * We set the PCIE flag because the lower bandwidth on PCIe vs - * HyperTransport can affect some user packet algorithims. 
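A sketch of the user-space side this kernel-doc describes: a consumer branching on the runtime flags that ipath_7220_get_base_info() (below) sets. The numeric values are stand-ins; the real constants come from the driver's user ABI headers.

#include <stdint.h>
#include <stdio.h>

#define RT_PCIE	0x1	/* stand-in for IPATH_RUNTIME_PCIE */
#define RT_SDMA	0x2	/* stand-in for IPATH_RUNTIME_SDMA */

static void tune_send_path(uint32_t spi_runtime_flags)
{
	if (spi_runtime_flags & RT_PCIE)
		printf("PCIe link: batch small sends\n");
	if (spi_runtime_flags & RT_SDMA)
		printf("send DMA present: offload large transfers\n");
}

int main(void)
{
	tune_send_path(RT_PCIE | RT_SDMA);
	return 0;
}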
- */ -static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase) -{ - struct ipath_base_info *kinfo = kbase; - - kinfo->spi_runtime_flags |= - IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL | - IPATH_RUNTIME_SDMA; - - return 0; -} - -static void ipath_7220_free_irq(struct ipath_devdata *dd) -{ - free_irq(dd->ipath_irq, dd); - dd->ipath_irq = 0; -} - -static struct ipath_message_header * -ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr) -{ - u32 offset = ipath_hdrget_offset(rhf_addr); - - return (struct ipath_message_header *) - (rhf_addr - dd->ipath_rhf_offset + offset); -} - -static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports) -{ - u32 nchipports; - - nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt); - if (!cfgports) { - int ncpus = num_online_cpus(); - - if (ncpus <= 4) - dd->ipath_portcnt = 5; - else if (ncpus <= 8) - dd->ipath_portcnt = 9; - if (dd->ipath_portcnt) - ipath_dbg("Auto-configured for %u ports, %d cpus " - "online\n", dd->ipath_portcnt, ncpus); - } else if (cfgports <= nchipports) - dd->ipath_portcnt = cfgports; - if (!dd->ipath_portcnt) /* none of the above, set to max */ - dd->ipath_portcnt = nchipports; - /* - * chip can be configured for 5, 9, or 17 ports, and choice - * affects number of eager TIDs per port (1K, 2K, 4K). - */ - if (dd->ipath_portcnt > 9) - dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT; - else if (dd->ipath_portcnt > 5) - dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT; - /* else configure for default 5 receive ports */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, - dd->ipath_rcvctrl); - dd->ipath_p0_rcvegrcnt = 2048; /* always */ - if (dd->ipath_flags & IPATH_HAS_SEND_DMA) - dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */ -} - - -static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which) -{ - int lsb, ret = 0; - u64 maskr; /* right-justified mask */ - - switch (which) { - case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ - lsb = IBA7220_IBC_HRTBT_SHIFT; - maskr = IBA7220_IBC_HRTBT_MASK; - break; - - case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */ - ret = dd->ipath_link_width_enabled; - goto done; - - case IPATH_IB_CFG_LWID: /* Get currently active Link-width */ - ret = dd->ipath_link_width_active; - goto done; - - case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ - ret = dd->ipath_link_speed_enabled; - goto done; - - case IPATH_IB_CFG_SPD: /* Get current Link spd */ - ret = dd->ipath_link_speed_active; - goto done; - - case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ - lsb = IBA7220_IBC_RXPOL_SHIFT; - maskr = IBA7220_IBC_RXPOL_MASK; - break; - - case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ - lsb = IBA7220_IBC_LREV_SHIFT; - maskr = IBA7220_IBC_LREV_MASK; - break; - - case IPATH_IB_CFG_LINKLATENCY: - ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus) - & IBA7220_DDRSTAT_LINKLAT_MASK; - goto done; - - default: - ret = -ENOTSUPP; - goto done; - } - ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr); -done: - return ret; -} - -static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val) -{ - int lsb, ret = 0, setforce = 0; - u64 maskr; /* right-justified mask */ - - switch (which) { - case IPATH_IB_CFG_LIDLMC: - /* - * Set LID and LMC. 
Combined to avoid possible hazard - * caller puts LMC in 16MSbits, DLID in 16LSbits of val - */ - lsb = IBA7220_IBC_DLIDLMC_SHIFT; - maskr = IBA7220_IBC_DLIDLMC_MASK; - break; - - case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */ - if (val & IPATH_IB_HRTBT_ON && - (dd->ipath_flags & IPATH_NO_HRTBT)) - goto bail; - lsb = IBA7220_IBC_HRTBT_SHIFT; - maskr = IBA7220_IBC_HRTBT_MASK; - break; - - case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */ - /* - * As with speed, only write the actual register if - * the link is currently down, otherwise takes effect - * on next link change. - */ - dd->ipath_link_width_enabled = val; - if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) != - IPATH_LINKDOWN) - goto bail; - /* - * We set the IPATH_IB_FORCE_NOTIFY bit so updown - * will get called because we want to update - * link_width_active, and the change may not take - * effect for some time (if we are in POLL), so this - * flag will force the updown routine to be called - * on the next ibstatuschange down interrupt, even - * if it's not a down->up transition. - */ - val--; /* convert from IB to chip */ - maskr = IBA7220_IBC_WIDTH_MASK; - lsb = IBA7220_IBC_WIDTH_SHIFT; - setforce = 1; - dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY; - break; - - case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */ - /* - * If we turn off IB1.2, need to preset SerDes defaults, - * but not right now. Set a flag for the next time - * we command the link down. As with width, only write the - * actual register if the link is currently down, otherwise - * takes effect on next link change. Since setting is being - * explicitly requested (via MAD or sysfs), clear autoneg - * failure status if speed autoneg is enabled. - */ - dd->ipath_link_speed_enabled = val; - if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK && - !(val & (val - 1))) - dd->ipath_presets_needed = 1; - if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) != - IPATH_LINKDOWN) - goto bail; - /* - * We set the IPATH_IB_FORCE_NOTIFY bit so updown - * will get called because we want to update - * link_speed_active, and the change may not take - * effect for some time (if we are in POLL), so this - * flag will force the updown routine to be called - * on the next ibstatuschange down interrupt, even - * if it's not a down->up transition. When setting - * speed autoneg, clear AUTONEG_FAILED. - */ - if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) { - val = IBA7220_IBC_SPEED_AUTONEG_MASK | - IBA7220_IBC_IBTA_1_2_MASK; - dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED; - } else - val = val == IPATH_IB_DDR ?
IBA7220_IBC_SPEED_DDR - : IBA7220_IBC_SPEED_SDR; - maskr = IBA7220_IBC_SPEED_AUTONEG_MASK | - IBA7220_IBC_IBTA_1_2_MASK; - lsb = 0; /* speed bits are low bits */ - setforce = 1; - break; - - case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */ - lsb = IBA7220_IBC_RXPOL_SHIFT; - maskr = IBA7220_IBC_RXPOL_MASK; - break; - - case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */ - lsb = IBA7220_IBC_LREV_SHIFT; - maskr = IBA7220_IBC_LREV_MASK; - break; - - default: - ret = -ENOTSUPP; - goto bail; - } - dd->ipath_ibcddrctrl &= ~(maskr << lsb); - dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb); - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl, - dd->ipath_ibcddrctrl); - if (setforce) - dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY; -bail: - return ret; -} - -static void ipath_7220_read_counters(struct ipath_devdata *dd, - struct infinipath_counters *cntrs) -{ - u64 *counters = (u64 *) cntrs; - int i; - - for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++) - counters[i] = ipath_snap_cntr(dd, i); -} - -/* if we are using MSI, try to fallback to INTx */ -static int ipath_7220_intr_fallback(struct ipath_devdata *dd) -{ - if (dd->ipath_msi_lo) { - dev_info(&dd->pcidev->dev, "MSI interrupt not detected," - " trying INTx interrupts\n"); - ipath_7220_nomsi(dd); - ipath_enable_intx(dd->pcidev); - /* - * some newer kernels require free_irq before disable_msi, - * and irq can be changed during disable and intx enable - * and we need to therefore use the pcidev->irq value, - * not our saved MSI value. - */ - dd->ipath_irq = dd->pcidev->irq; - if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED, - IPATH_DRV_NAME, dd)) - ipath_dev_err(dd, - "Could not re-request_irq for INTx\n"); - return 1; - } - return 0; -} - -/* - * reset the XGXS (between serdes and IBC). Slightly less intrusive - * than resetting the IBC or external link state, and useful in some - * cases to cause some retraining. To do this right, we reset IBC - * as well. 
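The function that follows realizes this as a set/flush/clear sequence on the XGXS config register. The bare pattern, with stand-in register accessors (a sketch, not the driver's code):

#include <stdint.h>

enum { XGXS_CFG, SCRATCH, NREGS };	/* stand-in register names */
static uint64_t regs[NREGS];

static uint64_t read_reg(int r) { return regs[r]; }
static void write_reg(int r, uint64_t v) { regs[r] = v; }

static void toggle_reset(uint64_t reset_bit)
{
	uint64_t prev = read_reg(XGXS_CFG) & ~reset_bit; /* be sure it's clear */

	write_reg(XGXS_CFG, prev | reset_bit);	/* assert reset */
	(void)read_reg(SCRATCH);		/* read back to flush */
	write_reg(XGXS_CFG, prev);		/* deassert */
}

int main(void)
{
	toggle_reset(1ULL << 63);
	return 0;
}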
- */ -static void ipath_7220_xgxs_reset(struct ipath_devdata *dd) -{ - u64 val, prev_val; - - prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); - val = prev_val | INFINIPATH_XGXS_RESET; - prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_control, - dd->ipath_control & ~INFINIPATH_C_LINKENABLE); - ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); - ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val); - ipath_write_kreg(dd, dd->ipath_kregs->kr_control, - dd->ipath_control); -} - - -/* Still needs cleanup, too much hardwired stuff */ -static void autoneg_send(struct ipath_devdata *dd, - u32 *hdr, u32 dcnt, u32 *data) -{ - int i; - u64 cnt; - u32 __iomem *piobuf; - u32 pnum; - - i = 0; - cnt = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */ - while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) { - if (i++ > 15) { - ipath_dbg("Couldn't get pio buffer for send\n"); - return; - } - udelay(2); - } - if (dd->ipath_flags&IPATH_HAS_PBC_CNT) - cnt |= 0x80000000UL<<32; /* mark as VL15 */ - writeq(cnt, piobuf); - ipath_flush_wc(); - __iowrite32_copy(piobuf + 2, hdr, 7); - __iowrite32_copy(piobuf + 9, data, dcnt); - ipath_flush_wc(); -} - -/* - * _start packet gets sent twice at start, _done gets sent twice at end - */ -static void ipath_autoneg_send(struct ipath_devdata *dd, int which) -{ - static u32 swapped; - u32 dw, i, hcnt, dcnt, *data; - static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba }; - static u32 madpayload_start[0x40] = { - 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, - 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */ - }; - static u32 madpayload_done[0x40] = { - 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, - 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x40000001, 0x1388, 0x15e, /* rest 0's */ - }; - dcnt = ARRAY_SIZE(madpayload_start); - hcnt = ARRAY_SIZE(hdr); - if (!swapped) { - /* for maintainability, do it at runtime */ - for (i = 0; i < hcnt; i++) { - dw = (__force u32) cpu_to_be32(hdr[i]); - hdr[i] = dw; - } - for (i = 0; i < dcnt; i++) { - dw = (__force u32) cpu_to_be32(madpayload_start[i]); - madpayload_start[i] = dw; - dw = (__force u32) cpu_to_be32(madpayload_done[i]); - madpayload_done[i] = dw; - } - swapped = 1; - } - - data = which ? madpayload_done : madpayload_start; - ipath_cdbg(PKT, "Sending %s special MADs\n", which?"done":"start"); - - autoneg_send(dd, hdr, dcnt, data); - ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); - udelay(2); - autoneg_send(dd, hdr, dcnt, data); - ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); - udelay(2); -} - - - -/* - * Do the absolute minimum to cause an IB speed change, and make it - * ready, but don't actually trigger the change. The caller will - * do that when ready (if link is in Polling training state, it will - * happen immediately, otherwise when link next goes down) - * - * This routine should only be used as part of the DDR autonegotation - * code for devices that are not compliant with IB 1.2 (or code that - * fixes things up for same). - * - * When link has gone down, and autoneg enabled, or autoneg has - * failed and we give up until next time we set both speeds, and - * then we want IBTA enabled as well as "use max enabled speed. 
- */ -static void set_speed_fast(struct ipath_devdata *dd, u32 speed) -{ - dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK | - IBA7220_IBC_IBTA_1_2_MASK | - (IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT)); - - if (speed == (IPATH_IB_SDR | IPATH_IB_DDR)) - dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK | - IBA7220_IBC_IBTA_1_2_MASK; - else - dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ? - IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; - - /* - * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto - * to chip-centric 0 = 1x, 1 = 4x, 2 = auto - */ - dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) << - IBA7220_IBC_WIDTH_SHIFT; - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl, - dd->ipath_ibcddrctrl); - ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed); -} - - -/* - * this routine is only used when we are not talking to another - * IB 1.2-compliant device that we think can do DDR. - * (This includes all existing switch chips as of Oct 2007.) - * 1.2-compliant devices go directly to DDR prior to reaching INIT - */ -static void try_auto_neg(struct ipath_devdata *dd) -{ - /* - * required for older non-IB1.2 DDR switches. Newer - * non-IB-compliant switches don't need it, but so far, - * aren't bothered by it either. "Magic constant" - */ - ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), - 0x3b9dc07); - dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG; - ipath_autoneg_send(dd, 0); - set_speed_fast(dd, IPATH_IB_DDR); - ipath_toggle_rclkrls(dd); - /* 2 msec is minimum length of a poll cycle */ - schedule_delayed_work(&dd->ipath_autoneg_work, - msecs_to_jiffies(2)); -} - - -static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) -{ - int ret = 0, symadj = 0; - u32 ltstate = ipath_ib_linkstate(dd, ibcs); - - dd->ipath_link_width_active = - ((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ? - IB_WIDTH_4X : IB_WIDTH_1X; - dd->ipath_link_speed_active = - ((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ? 
- IPATH_IB_DDR : IPATH_IB_SDR; - - if (!ibup) { - /* - * when link goes down we don't want aeq running, so it - * won't interfere with IBC training, etc., and we need - * to go back to the static SerDes preset values - */ - if (dd->ipath_x1_fix_tries && - ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET && - ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) - dd->ipath_x1_fix_tries = 0; - if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED | - IPATH_IB_AUTONEG_INPROG))) - set_speed_fast(dd, dd->ipath_link_speed_enabled); - if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) { - ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n"); - ipath_sd7220_presets(dd); - } - /* this might be better in ipath_sd7220_presets() */ - ipath_set_relock_poll(dd, ibup); - } else { - if (ipath_compat_ddr_negotiate && - !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED | - IPATH_IB_AUTONEG_INPROG)) && - dd->ipath_link_speed_active == IPATH_IB_SDR && - (dd->ipath_link_speed_enabled & - (IPATH_IB_DDR | IPATH_IB_SDR)) == - (IPATH_IB_DDR | IPATH_IB_SDR) && - dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) { - /* we are SDR, and DDR auto-negotiation enabled */ - ++dd->ipath_autoneg_tries; - ipath_dbg("DDR negotiation try, %u/%u\n", - dd->ipath_autoneg_tries, - IPATH_AUTONEG_TRIES); - if (!dd->ibdeltainprog) { - dd->ibdeltainprog = 1; - dd->ibsymsnap = ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - } - try_auto_neg(dd); - ret = 1; /* no other IB status change processing */ - } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) - && dd->ipath_link_speed_active == IPATH_IB_SDR) { - ipath_autoneg_send(dd, 1); - set_speed_fast(dd, IPATH_IB_DDR); - udelay(2); - ipath_toggle_rclkrls(dd); - ret = 1; /* no other IB status change processing */ - } else { - if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) && - (dd->ipath_link_speed_active & IPATH_IB_DDR)) { - ipath_dbg("Got to INIT with DDR autoneg\n"); - dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG - | IPATH_IB_AUTONEG_FAILED); - dd->ipath_autoneg_tries = 0; - /* re-enable SDR, for next link down */ - set_speed_fast(dd, - dd->ipath_link_speed_enabled); - wake_up(&dd->ipath_autoneg_wait); - symadj = 1; - } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) { - /* - * clear autoneg failure flag, and do setup - * so we'll try next time link goes down and - * back to INIT (possibly connected to different - * device). - */ - ipath_dbg("INIT %sDR after autoneg failure\n", - (dd->ipath_link_speed_active & - IPATH_IB_DDR) ? "D" : "S"); - dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED; - dd->ipath_ibcddrctrl |= - IBA7220_IBC_IBTA_1_2_MASK; - ipath_write_kreg(dd, - IPATH_KREG_OFFSET(IBNCModeCtrl), 0); - symadj = 1; - } - } - /* - * if we are in 1X on rev1 only, and are in autoneg width, - * it could be due to an xgxs problem, so if we haven't - * already tried, try twice to get to 4X; if we - * tried, and couldn't, report it, since it will - * probably not be what is desired.
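The branch that follows implements this retry with a small counter; shorn of hardware detail, the shape is (sketch, illustrative names):

#define MAX_X1_TRIES 3	/* per the comment above: try a couple of times */

/* Returns 1 if a retry was issued (caller skips further processing),
 * 0 once the budget is spent (caller reports the 1X link). */
static int retry_width_fix(int *tries, void (*xgxs_reset)(void))
{
	if (++(*tries) < MAX_X1_TRIES) {
		xgxs_reset();
		return 1;
	}
	return 0;
}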
- */ - if (dd->ipath_minrev == 1 && - (dd->ipath_link_width_enabled & (IB_WIDTH_1X | - IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X) - && dd->ipath_link_width_active == IB_WIDTH_1X - && dd->ipath_x1_fix_tries < 3) { - if (++dd->ipath_x1_fix_tries == 3) { - dev_info(&dd->pcidev->dev, - "IB link is in 1X mode\n"); - if (!(dd->ipath_flags & - IPATH_IB_AUTONEG_INPROG)) - symadj = 1; - } - else { - ipath_cdbg(VERBOSE, "IB 1X in " - "auto-width, try %u to be " - "sure it's really 1X; " - "ltstate %u\n", - dd->ipath_x1_fix_tries, - ltstate); - dd->ipath_f_xgxs_reset(dd); - ret = 1; /* skip other processing */ - } - } else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) - symadj = 1; - - if (!ret) { - dd->delay_mult = rate_to_delay - [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1] - [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1]; - - ipath_set_relock_poll(dd, ibup); - } - } - - if (symadj) { - if (dd->ibdeltainprog) { - dd->ibdeltainprog = 0; - dd->ibsymdelta += ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt) - - dd->ibsymsnap; - dd->iblnkerrdelta += ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt) - - dd->iblnkerrsnap; - } - } else if (!ibup && !dd->ibdeltainprog - && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) { - dd->ibdeltainprog = 1; - dd->ibsymsnap = ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - } - - if (!ret) - ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs), - ltstate); - return ret; -} - - -/* - * Handle the empirically determined mechanism for auto-negotiation - * of DDR speed with switches. - */ -static void autoneg_work(struct work_struct *work) -{ - struct ipath_devdata *dd; - u64 startms; - u32 lastlts, i; - - dd = container_of(work, struct ipath_devdata, - ipath_autoneg_work.work); - - startms = jiffies_to_msecs(jiffies); - - /* - * busy wait for this first part, it should be at most a - * few hundred usec, since we scheduled ourselves for 2msec. 
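The body of autoneg_work() (next) therefore waits in two phases: a short busy poll for the transition expected within a couple of milliseconds, then sleeping waits with generous timeouts. Schematically, with stand-in helpers for the driver's primitives:

#include <stdbool.h>

bool link_reached_pollquiet(void);	/* stand-in for the linktrstate check */
void udelay(unsigned int usecs);	/* stand-ins for the kernel helpers */
bool sleep_wait_ms(unsigned int msecs);

static void two_phase_wait(void)
{
	int i;

	for (i = 0; i < 25; i++) {	/* ~2.5 ms of polling, 100 us steps */
		if (link_reached_pollquiet())
			return;		/* fast path: already there */
		udelay(100);
	}
	(void)sleep_wait_ms(90);	/* slow path; expected to time out */
}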
- */ - for (i = 0; i < 25; i++) { - lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat); - if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) { - ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE); - break; - } - udelay(100); - } - - if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) - goto done; /* we got there early or told to stop */ - - /* we expect this to timeout */ - if (wait_event_timeout(dd->ipath_autoneg_wait, - !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG), - msecs_to_jiffies(90))) - goto done; - - ipath_toggle_rclkrls(dd); - - /* we expect this to timeout */ - if (wait_event_timeout(dd->ipath_autoneg_wait, - !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG), - msecs_to_jiffies(1700))) - goto done; - - set_speed_fast(dd, IPATH_IB_SDR); - ipath_toggle_rclkrls(dd); - - /* - * wait up to 250 msec for link to train and get to INIT at DDR; - * this should terminate early - */ - wait_event_timeout(dd->ipath_autoneg_wait, - !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG), - msecs_to_jiffies(250)); -done: - if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) { - ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n", - ipath_ib_state(dd, dd->ipath_lastibcstat), - (unsigned long long) jiffies_to_msecs(jiffies)-startms); - dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG; - if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) { - dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED; - ipath_dbg("Giving up on DDR until next IB " - "link Down\n"); - dd->ipath_autoneg_tries = 0; - } - set_speed_fast(dd, dd->ipath_link_speed_enabled); - } -} - - -/** - * ipath_init_iba7220_funcs - set up the chip-specific function pointers - * @dd: the infinipath device - * - * This is global, and is called directly at init to set up the - * chip-specific function pointers for later use. - */ -void ipath_init_iba7220_funcs(struct ipath_devdata *dd) -{ - dd->ipath_f_intrsetup = ipath_7220_intconfig; - dd->ipath_f_bus = ipath_setup_7220_config; - dd->ipath_f_reset = ipath_setup_7220_reset; - dd->ipath_f_get_boardname = ipath_7220_boardname; - dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors; - dd->ipath_f_early_init = ipath_7220_early_init; - dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors; - dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes; - dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes; - dd->ipath_f_clear_tids = ipath_7220_clear_tids; - dd->ipath_f_put_tid = ipath_7220_put_tid; - dd->ipath_f_cleanup = ipath_setup_7220_cleanup; - dd->ipath_f_setextled = ipath_setup_7220_setextled; - dd->ipath_f_get_base_info = ipath_7220_get_base_info; - dd->ipath_f_free_irq = ipath_7220_free_irq; - dd->ipath_f_tidtemplate = ipath_7220_tidtemplate; - dd->ipath_f_intr_fallback = ipath_7220_intr_fallback; - dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset; - dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg; - dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg; - dd->ipath_f_config_jint = ipath_7220_config_jint; - dd->ipath_f_config_ports = ipath_7220_config_ports; - dd->ipath_f_read_counters = ipath_7220_read_counters; - dd->ipath_f_get_msgheader = ipath_7220_get_msgheader; - dd->ipath_f_ib_updown = ipath_7220_ib_updown; - - /* initialize chip-specific variables */ - ipath_init_7220_variables(dd); -} diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index b3d7efcdf021..6559af60bffd 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h @@ -1030,8 +1030,6 @@ void ipath_free_data(struct ipath_portdata *dd); u32 __iomem 
*ipath_getpiobuf(struct ipath_devdata *, u32, u32 *); void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start, unsigned len, int avail); -void ipath_init_iba7220_funcs(struct ipath_devdata *); -void ipath_init_iba6120_funcs(struct ipath_devdata *); void ipath_init_iba6110_funcs(struct ipath_devdata *); void ipath_get_eeprom_info(struct ipath_devdata *); int ipath_update_eeprom_log(struct ipath_devdata *dd); diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 559f39be0dcc..dd7f26d04d46 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -2182,7 +2182,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) snprintf(dev->node_desc, sizeof(dev->node_desc), IPATH_IDSTR " %s", init_utsname()->nodename); - ret = ib_register_device(dev); + ret = ib_register_device(dev, NULL); if (ret) goto err_reg; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 39051417054c..4e94e360e43b 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -662,7 +662,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) spin_lock_init(&ibdev->sm_lock); mutex_init(&ibdev->cap_mask_mutex); - if (ib_register_device(&ibdev->ib_dev)) + if (ib_register_device(&ibdev->ib_dev, NULL)) goto err_map; if (mlx4_ib_mad_init(ibdev)) diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index f080a784bc79..1e0b4b6074ad 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1403,7 +1403,7 @@ int mthca_register_device(struct mthca_dev *dev) mutex_init(&dev->cap_mask_mutex); - ret = ib_register_device(&dev->ib_dev); + ret = ib_register_device(&dev->ib_dev, NULL); if (ret) return ret; diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 86acb7d57064..57874a165083 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2584,7 +2584,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) break; } } - spin_unlock_irqrestore(&nesadapter->phy_lock, flags); if (phy_data & 0x0004) { if (wide_ppm_offset && @@ -2639,6 +2638,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) } } + spin_unlock_irqrestore(&nesadapter->phy_lock, flags); + nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE; } @@ -3422,6 +3423,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_adapter *nesadapter = nesdev->nesadapter; u32 aeq_info; u32 next_iwarp_state = 0; + u32 aeqe_cq_id; u16 async_event_id; u8 tcp_state; u8 iwarp_state; @@ -3449,6 +3451,14 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]); + aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); + if (aeq_info & NES_AEQE_QP) { + if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps, + aeqe_cq_id)) || + (atomic_read(&nesqp->close_timer_started))) + return; + } + switch (async_event_id) { case NES_AEQE_AEID_LLP_FIN_RECEIVED: if (nesqp->term_flags) diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index e95e8d09ff38..5cc0a9ae5bb1 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1001,6 +1001,7 @@ static int 
nes_netdev_change_mtu(struct net_device *netdev, int new_mtu) return ret; } + static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { "Link Change Interrupts", "Linearized SKBs", @@ -1015,11 +1016,15 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { "Rx Jabber Errors", "Rx Oversized Frames", "Rx Short Frames", + "Rx Length Errors", + "Rx CRC Errors", + "Rx Port Discard", "Endnode Rx Discards", "Endnode Rx Octets", "Endnode Rx Frames", "Endnode Tx Octets", "Endnode Tx Frames", + "Tx Errors", "mh detected", "mh pauses", "Retransmission Count", @@ -1048,19 +1053,13 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { "CM Nodes Destroyed", "CM Accel Drops", "CM Resets Received", + "Free 4Kpbls", + "Free 256pbls", "Timer Inits", - "CQ Depth 1", - "CQ Depth 4", - "CQ Depth 16", - "CQ Depth 24", - "CQ Depth 32", - "CQ Depth 128", - "CQ Depth 256", "LRO aggregated", "LRO flushed", "LRO no_desc", }; - #define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset) /** @@ -1120,12 +1119,14 @@ static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset, /** * nes_netdev_get_ethtool_stats */ + static void nes_netdev_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values) { u64 u64temp; struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; + struct nes_adapter *nesadapter = nesdev->nesadapter; u32 nic_count; u32 u32temp; u32 index = 0; @@ -1154,6 +1155,46 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, nesvnic->nesdev->port_tx_discards += u32temp; nesvnic->netstats.tx_dropped += u32temp; + u32temp = nes_read_indexed(nesdev, + NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200)); + nesvnic->netstats.rx_dropped += u32temp; + nesvnic->nesdev->mac_rx_errors += u32temp; + nesvnic->nesdev->mac_rx_short_frames += u32temp; + + u32temp = nes_read_indexed(nesdev, + NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200)); + nesvnic->netstats.rx_dropped += u32temp; + nesvnic->nesdev->mac_rx_errors += u32temp; + nesvnic->nesdev->mac_rx_oversized_frames += u32temp; + + u32temp = nes_read_indexed(nesdev, + NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200)); + nesvnic->netstats.rx_dropped += u32temp; + nesvnic->nesdev->mac_rx_errors += u32temp; + nesvnic->nesdev->mac_rx_jabber_frames += u32temp; + + u32temp = nes_read_indexed(nesdev, + NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200)); + nesvnic->netstats.rx_dropped += u32temp; + nesvnic->nesdev->mac_rx_errors += u32temp; + nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp; + + u32temp = nes_read_indexed(nesdev, + NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200)); + nesvnic->netstats.rx_length_errors += u32temp; + nesvnic->nesdev->mac_rx_errors += u32temp; + + u32temp = nes_read_indexed(nesdev, + NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200)); + nesvnic->nesdev->mac_rx_errors += u32temp; + nesvnic->nesdev->mac_rx_crc_errors += u32temp; + nesvnic->netstats.rx_crc_errors += u32temp; + + u32temp = nes_read_indexed(nesdev, + NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200)); + nesvnic->nesdev->mac_tx_errors += u32temp; + nesvnic->netstats.tx_errors += u32temp; + for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) { if (nesvnic->qp_nic_index[nic_count] == 0xf) break; @@ -1218,11 +1259,15 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, 
target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames; target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames; target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames; + target_stat_values[++index] = nesvnic->netstats.rx_length_errors; + target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors; + target_stat_values[++index] = nesvnic->nesdev->port_rx_discards; target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard; target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets; target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames; target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets; target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames; + target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors; target_stat_values[++index] = mh_detected; target_stat_values[++index] = mh_pauses_sent; target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; @@ -1251,21 +1296,14 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts); target_stat_values[++index] = atomic_read(&cm_resets_recvd); + target_stat_values[++index] = nesadapter->free_4kpbl; + target_stat_values[++index] = nesadapter->free_256pbl; target_stat_values[++index] = int_mod_timer_init; - target_stat_values[++index] = int_mod_cq_depth_1; - target_stat_values[++index] = int_mod_cq_depth_4; - target_stat_values[++index] = int_mod_cq_depth_16; - target_stat_values[++index] = int_mod_cq_depth_24; - target_stat_values[++index] = int_mod_cq_depth_32; - target_stat_values[++index] = int_mod_cq_depth_128; - target_stat_values[++index] = int_mod_cq_depth_256; target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; - } - /** * nes_netdev_get_drvinfo */ diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 925e1f2d1d55..9bc2d744b2ea 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3962,7 +3962,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev) struct nes_adapter *nesadapter = nesdev->nesadapter; int i, ret; - ret = ib_register_device(&nesvnic->nesibdev->ibdev); + ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL); if (ret) { return ret; } diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig new file mode 100644 index 000000000000..7c03a70c55a2 --- /dev/null +++ b/drivers/infiniband/hw/qib/Kconfig @@ -0,0 +1,7 @@ +config INFINIBAND_QIB + tristate "QLogic PCIe HCA support" + depends on 64BIT && NET + ---help--- + This is a low-level driver for QLogic PCIe QLE InfiniBand host + channel adapters. This driver does not support the QLogic + HyperTransport card (model QHT7140). 
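Stepping back for a moment: several hunks in this patch (ipath, mlx4, mthca, and nes above) pass a new second argument, NULL, to ib_register_device(). As I read this series, the parameter is an optional per-port callback the core invokes during sysfs setup, and NULL preserves the old behavior. A hedged sketch of a driver supplying one, with the signature as I understand it; verify against this tree's include/rdma/ib_verbs.h:

/* Sketch only; not taken from this patch. */
static int my_create_port_files(struct ib_device *ibdev, u8 port_num,
				struct kobject *kobj)
{
	/* create driver-specific sysfs attributes under this port */
	return 0;
}

	/* ... at registration time, instead of passing NULL: */
	ret = ib_register_device(&dev->ib_dev, my_create_port_files);
	if (ret)
		goto err_reg;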
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile new file mode 100644 index 000000000000..c6515a1b9a6a --- /dev/null +++ b/drivers/infiniband/hw/qib/Makefile @@ -0,0 +1,15 @@ +obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o + +ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \ + qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \ + qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \ + qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \ + qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \ + qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \ + qib_sd7220.o qib_sd7220_img.o qib_iba7322.o qib_verbs.o + +# 6120 has no fallback if no MSI interrupts, others can do INTx +ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o + +ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o +ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h new file mode 100644 index 000000000000..32d9208efcff --- /dev/null +++ b/drivers/infiniband/hw/qib/qib.h @@ -0,0 +1,1439 @@ +#ifndef _QIB_KERNEL_H +#define _QIB_KERNEL_H +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * This header file is the base header file for qlogic_ib kernel code + * qib_user.h serves a similar purpose for user code. + */ + +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/scatterlist.h> +#include <linux/io.h> +#include <linux/fs.h> +#include <linux/completion.h> +#include <linux/kref.h> +#include <linux/sched.h> + +#include "qib_common.h" +#include "qib_verbs.h" + +/* only s/w major version of QLogic_IB we can handle */ +#define QIB_CHIP_VERS_MAJ 2U + +/* don't care about this except printing */ +#define QIB_CHIP_VERS_MIN 0U + +/* The Organization Unique Identifier (Mfg code), and its position in GUID */ +#define QIB_OUI 0x001175 +#define QIB_OUI_LSB 40 + +/* + * per driver stats, either not device nor port-specific, or + * summed over all of the devices and ports. 
+ * They are described by name via ipathfs filesystem, so layout + * and number of elements can change without breaking compatibility. + * If members are added or deleted qib_statnames[] in qib_fs.c must + * change to match. + */ +struct qlogic_ib_stats { + __u64 sps_ints; /* number of interrupts handled */ + __u64 sps_errints; /* number of error interrupts */ + __u64 sps_txerrs; /* tx-related packet errors */ + __u64 sps_rcverrs; /* non-crc rcv packet errors */ + __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */ + __u64 sps_nopiobufs; /* no pio bufs avail from kernel */ + __u64 sps_ctxts; /* number of contexts currently open */ + __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */ + __u64 sps_buffull; + __u64 sps_hdrfull; +}; + +extern struct qlogic_ib_stats qib_stats; +extern struct pci_error_handlers qib_pci_err_handler; +extern struct pci_driver qib_driver; + +#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ +/* + * First-cut critierion for "device is active" is + * two thousand dwords combined Tx, Rx traffic per + * 5-second interval. SMA packets are 64 dwords, + * and occur "a few per second", presumably each way. + */ +#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000) + +/* + * Struct used to indicate which errors are logged in each of the + * error-counters that are logged to EEPROM. A counter is incremented + * _once_ (saturating at 255) for each event with any bits set in + * the error or hwerror register masks below. + */ +#define QIB_EEP_LOG_CNT (4) +struct qib_eep_log_mask { + u64 errs_to_log; + u64 hwerrs_to_log; +}; + +/* + * Below contains all data related to a single context (formerly called port). + */ +struct qib_ctxtdata { + void **rcvegrbuf; + dma_addr_t *rcvegrbuf_phys; + /* rcvhdrq base, needs mmap before useful */ + void *rcvhdrq; + /* kernel virtual address where hdrqtail is updated */ + void *rcvhdrtail_kvaddr; + /* + * temp buffer for expected send setup, allocated at open, instead + * of each setup call + */ + void *tid_pg_list; + /* + * Shared page for kernel to signal user processes that send buffers + * need disarming. The process should call QIB_CMD_DISARM_BUFS + * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set. + */ + unsigned long *user_event_mask; + /* when waiting for rcv or pioavail */ + wait_queue_head_t wait; + /* + * rcvegr bufs base, physical, must fit + * in 44 bits so 32 bit programs mmap64 44 bit works) + */ + dma_addr_t rcvegr_phys; + /* mmap of hdrq, must fit in 44 bits */ + dma_addr_t rcvhdrq_phys; + dma_addr_t rcvhdrqtailaddr_phys; + + /* + * number of opens (including slave sub-contexts) on this instance + * (ignoring forks, dup, etc. for now) + */ + int cnt; + /* + * how much space to leave at start of eager TID entries for + * protocol use, on each TID + */ + /* instead of calculating it */ + unsigned ctxt; + /* non-zero if ctxt is being shared. */ + u16 subctxt_cnt; + /* non-zero if ctxt is being shared. */ + u16 subctxt_id; + /* number of eager TID entries. */ + u16 rcvegrcnt; + /* index of first eager TID entry. 
*/ + u16 rcvegr_tid_base; + /* number of pio bufs for this ctxt (all procs, if shared) */ + u32 piocnt; + /* first pio buffer for this ctxt */ + u32 pio_base; + /* chip offset of PIO buffers for this ctxt */ + u32 piobufs; + /* how many alloc_pages() chunks in rcvegrbuf_pages */ + u32 rcvegrbuf_chunks; + /* how many egrbufs per chunk */ + u32 rcvegrbufs_perchunk; + /* order for rcvegrbuf_pages */ + size_t rcvegrbuf_size; + /* rcvhdrq size (for freeing) */ + size_t rcvhdrq_size; + /* per-context flags for fileops/intr communication */ + unsigned long flag; + /* next expected TID to check when looking for free */ + u32 tidcursor; + /* WAIT_RCV that timed out, no interrupt */ + u32 rcvwait_to; + /* WAIT_PIO that timed out, no interrupt */ + u32 piowait_to; + /* WAIT_RCV already happened, no wait */ + u32 rcvnowait; + /* WAIT_PIO already happened, no wait */ + u32 pionowait; + /* total number of polled urgent packets */ + u32 urgent; + /* saved total number of polled urgent packets for poll edge trigger */ + u32 urgent_poll; + /* pid of process using this ctxt */ + pid_t pid; + pid_t subpid[QLOGIC_IB_MAX_SUBCTXT]; + /* same size as task_struct .comm[], command that opened context */ + char comm[16]; + /* pkeys set by this use of this ctxt */ + u16 pkeys[4]; + /* so file ops can get at unit */ + struct qib_devdata *dd; + /* so funcs that need physical port can get it easily */ + struct qib_pportdata *ppd; + /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */ + void *subctxt_uregbase; + /* An array of pages for the eager receive buffers * N */ + void *subctxt_rcvegrbuf; + /* An array of pages for the eager header queue entries * N */ + void *subctxt_rcvhdr_base; + /* The version of the library which opened this ctxt */ + u32 userversion; + /* Bitmask of active slaves */ + u32 active_slaves; + /* Type of packets or conditions we want to poll for */ + u16 poll_type; + /* receive packet sequence counter */ + u8 seq_cnt; + u8 redirect_seq_cnt; + /* ctxt rcvhdrq head offset */ + u32 head; + u32 pkt_count; + /* QPs waiting for context processing */ + struct list_head qp_wait_list; +}; + +struct qib_sge_state; + +struct qib_sdma_txreq { + int flags; + int sg_count; + dma_addr_t addr; + void (*callback)(struct qib_sdma_txreq *, int); + u16 start_idx; /* sdma private */ + u16 next_descq_idx; /* sdma private */ + struct list_head list; /* sdma private */ +}; + +struct qib_sdma_desc { + __le64 qw[2]; +}; + +struct qib_verbs_txreq { + struct qib_sdma_txreq txreq; + struct qib_qp *qp; + struct qib_swqe *wqe; + u32 dwords; + u16 hdr_dwords; + u16 hdr_inx; + struct qib_pio_header *align_buf; + struct qib_mregion *mr; + struct qib_sge_state *ss; +}; + +#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1 +#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2 +#define QIB_SDMA_TXREQ_F_INTREQ 0x4 +#define QIB_SDMA_TXREQ_F_FREEBUF 0x8 +#define QIB_SDMA_TXREQ_F_FREEDESC 0x10 + +#define QIB_SDMA_TXREQ_S_OK 0 +#define QIB_SDMA_TXREQ_S_SENDERROR 1 +#define QIB_SDMA_TXREQ_S_ABORTED 2 +#define QIB_SDMA_TXREQ_S_SHUTDOWN 3 + +/* + * Get/Set IB link-level config parameters for f_get/set_ib_cfg() + * Mostly for MADs that set or query link parameters, also ipath + * config interfaces + */ +#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */ +#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */ +#define QIB_IB_CFG_LWID 3 /* currently active Link-width */ +#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */ +#define QIB_IB_CFG_SPD 5 /* current Link spd */ +#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */ 
+#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */ +#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */ +#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */ +#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */ +#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */ +#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */ +#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */ +#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */ +#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */ +#define QIB_IB_CFG_PKEYS 16 /* update partition keys */ +#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */ +#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */ +#define QIB_IB_CFG_VL_HIGH_LIMIT 19 +#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */ +#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */ + +/* + * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16 + * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for + * QIB_IB_CFG_LINKDEFAULT cmd + */ +#define IB_LINKCMD_DOWN (0 << 16) +#define IB_LINKCMD_ARMED (1 << 16) +#define IB_LINKCMD_ACTIVE (2 << 16) +#define IB_LINKINITCMD_NOP 0 +#define IB_LINKINITCMD_POLL 1 +#define IB_LINKINITCMD_SLEEP 2 +#define IB_LINKINITCMD_DISABLE 3 + +/* + * valid states passed to qib_set_linkstate() user call + */ +#define QIB_IB_LINKDOWN 0 +#define QIB_IB_LINKARM 1 +#define QIB_IB_LINKACTIVE 2 +#define QIB_IB_LINKDOWN_ONLY 3 +#define QIB_IB_LINKDOWN_SLEEP 4 +#define QIB_IB_LINKDOWN_DISABLE 5 + +/* + * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed + * negotiation) are used for the 3rd argument to path_f_set_ib_cfg + * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They + * are also the the possible values for qib_link_speed_enabled and active + * The values were chosen to match values used within the IB spec. + */ +#define QIB_IB_SDR 1 +#define QIB_IB_DDR 2 +#define QIB_IB_QDR 4 + +#define QIB_DEFAULT_MTU 4096 + +/* + * Possible IB config parameters for f_get/set_ib_table() + */ +#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */ +#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */ + +/* + * Possible "operations" for f_rcvctrl(ppd, op, ctxt) + * these are bits so they can be combined, e.g. + * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB + */ +#define QIB_RCVCTRL_TAILUPD_ENB 0x01 +#define QIB_RCVCTRL_TAILUPD_DIS 0x02 +#define QIB_RCVCTRL_CTXT_ENB 0x04 +#define QIB_RCVCTRL_CTXT_DIS 0x08 +#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10 +#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20 +#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */ +#define QIB_RCVCTRL_PKEY_DIS 0x80 +#define QIB_RCVCTRL_BP_ENB 0x0100 +#define QIB_RCVCTRL_BP_DIS 0x0200 +#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400 +#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800 + +/* + * Possible "operations" for f_sendctrl(ppd, op, var) + * these are bits so they can be combined, e.g. + * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB + * Some operations (e.g. DISARM, ABORT) are known to + * be "one-shot", so do not modify shadow. 
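The distinction this comment draws, between sticky operations mirrored in a software shadow and one-shot operations that are written and deliberately forgotten, reduces to a pattern like the following sketch; names are illustrative, with the bit values taken from the list that follows:

#include <stdint.h>

#define OP_DISARM	0x1000	/* one-shot, per QIB_SENDCTRL_DISARM below */
#define OP_SEND_ENB	0x40000	/* sticky, per QIB_SENDCTRL_SEND_ENB below */
#define ONE_SHOT_MASK	OP_DISARM

struct sendctrl_state {
	uint64_t shadow;	/* rebuildable register image */
};

/* Compose the value to write to hardware, remembering only the
 * sticky bits so the register can be reconstructed later. */
static uint64_t sendctrl_compose(struct sendctrl_state *cs, uint64_t ops)
{
	uint64_t val = cs->shadow | ops;

	cs->shadow = val & ~ONE_SHOT_MASK;
	return val;			/* caller writes this to the chip */
}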
+ */ +#define QIB_SENDCTRL_DISARM (0x1000) +#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM) + /* available (0x2000) */ +#define QIB_SENDCTRL_AVAIL_DIS (0x4000) +#define QIB_SENDCTRL_AVAIL_ENB (0x8000) +#define QIB_SENDCTRL_AVAIL_BLIP (0x10000) +#define QIB_SENDCTRL_SEND_DIS (0x20000) +#define QIB_SENDCTRL_SEND_ENB (0x40000) +#define QIB_SENDCTRL_FLUSH (0x80000) +#define QIB_SENDCTRL_CLEAR (0x100000) +#define QIB_SENDCTRL_DISARM_ALL (0x200000) + +/* + * These are the generic indices for requesting per-port + * counter values via the f_portcntr function. They + * are always returned as 64 bit values, although most + * are 32 bit counters. + */ +/* send-related counters */ +#define QIBPORTCNTR_PKTSEND 0U +#define QIBPORTCNTR_WORDSEND 1U +#define QIBPORTCNTR_PSXMITDATA 2U +#define QIBPORTCNTR_PSXMITPKTS 3U +#define QIBPORTCNTR_PSXMITWAIT 4U +#define QIBPORTCNTR_SENDSTALL 5U +/* receive-related counters */ +#define QIBPORTCNTR_PKTRCV 6U +#define QIBPORTCNTR_PSRCVDATA 7U +#define QIBPORTCNTR_PSRCVPKTS 8U +#define QIBPORTCNTR_RCVEBP 9U +#define QIBPORTCNTR_RCVOVFL 10U +#define QIBPORTCNTR_WORDRCV 11U +/* IB link related error counters */ +#define QIBPORTCNTR_RXLOCALPHYERR 12U +#define QIBPORTCNTR_RXVLERR 13U +#define QIBPORTCNTR_ERRICRC 14U +#define QIBPORTCNTR_ERRVCRC 15U +#define QIBPORTCNTR_ERRLPCRC 16U +#define QIBPORTCNTR_BADFORMAT 17U +#define QIBPORTCNTR_ERR_RLEN 18U +#define QIBPORTCNTR_IBSYMBOLERR 19U +#define QIBPORTCNTR_INVALIDRLEN 20U +#define QIBPORTCNTR_UNSUPVL 21U +#define QIBPORTCNTR_EXCESSBUFOVFL 22U +#define QIBPORTCNTR_ERRLINK 23U +#define QIBPORTCNTR_IBLINKDOWN 24U +#define QIBPORTCNTR_IBLINKERRRECOV 25U +#define QIBPORTCNTR_LLI 26U +/* other error counters */ +#define QIBPORTCNTR_RXDROPPKT 27U +#define QIBPORTCNTR_VL15PKTDROP 28U +#define QIBPORTCNTR_ERRPKEY 29U +#define QIBPORTCNTR_KHDROVFL 30U +/* sampling counters (these are actually control registers) */ +#define QIBPORTCNTR_PSINTERVAL 31U +#define QIBPORTCNTR_PSSTART 32U +#define QIBPORTCNTR_PSSTAT 33U + +/* how often we check for packet activity for "power on hours" (in seconds) */ +#define ACTIVITY_TIMER 5 + +/* Below is an opaque struct. Each chip (device) can maintain + * private data needed for its operation, but not germane to the + * rest of the driver.
For convenience, we define another that + * is chip-specific, per-port + */ +struct qib_chip_specific; +struct qib_chippport_specific; + +enum qib_sdma_states { + qib_sdma_state_s00_hw_down, + qib_sdma_state_s10_hw_start_up_wait, + qib_sdma_state_s20_idle, + qib_sdma_state_s30_sw_clean_up_wait, + qib_sdma_state_s40_hw_clean_up_wait, + qib_sdma_state_s50_hw_halt_wait, + qib_sdma_state_s99_running, +}; + +enum qib_sdma_events { + qib_sdma_event_e00_go_hw_down, + qib_sdma_event_e10_go_hw_start, + qib_sdma_event_e20_hw_started, + qib_sdma_event_e30_go_running, + qib_sdma_event_e40_sw_cleaned, + qib_sdma_event_e50_hw_cleaned, + qib_sdma_event_e60_hw_halted, + qib_sdma_event_e70_go_idle, + qib_sdma_event_e7220_err_halted, + qib_sdma_event_e7322_err_halted, + qib_sdma_event_e90_timer_tick, +}; + +extern char *qib_sdma_state_names[]; +extern char *qib_sdma_event_names[]; + +struct sdma_set_state_action { + unsigned op_enable:1; + unsigned op_intenable:1; + unsigned op_halt:1; + unsigned op_drain:1; + unsigned go_s99_running_tofalse:1; + unsigned go_s99_running_totrue:1; +}; + +struct qib_sdma_state { + struct kref kref; + struct completion comp; + enum qib_sdma_states current_state; + struct sdma_set_state_action *set_state_action; + unsigned current_op; + unsigned go_s99_running; + unsigned first_sendbuf; + unsigned last_sendbuf; /* really last +1 */ + /* debugging/devel */ + enum qib_sdma_states previous_state; + unsigned previous_op; + enum qib_sdma_events last_event; +}; + +struct xmit_wait { + struct timer_list timer; + u64 counter; + u8 flags; + struct cache { + u64 psxmitdata; + u64 psrcvdata; + u64 psxmitpkts; + u64 psrcvpkts; + u64 psxmitwait; + } counter_cache; +}; + +/* + * The structure below encapsulates data relevant to a physical IB Port. + * Current chips support only one such port, but the separation + * clarifies things a bit. Note that to conform to IB conventions, + * port-numbers are one-based. The first or only port is port1. + */ +struct qib_pportdata { + struct qib_ibport ibport_data; + + struct qib_devdata *dd; + struct qib_chippport_specific *cpspec; /* chip-specific per-port */ + struct kobject pport_kobj; + struct kobject sl2vl_kobj; + struct kobject diagc_kobj; + + /* GUID for this interface, in network order */ + __be64 guid; + + /* QIB_POLL, etc. link-state specific flags, per port */ + u32 lflags; + /* qib_lflags driver is waiting for */ + u32 state_wanted; + spinlock_t lflags_lock; + /* number of (port-specific) interrupts for this port -- saturates... */ + u32 int_counter; + + /* ref count for each pkey */ + atomic_t pkeyrefs[4]; + + /* + * this address is mapped readonly into user processes so they can + * get status cheaply, whenever they want. One qword of status per port + */ + u64 *statusp; + + /* SendDMA related entries */ + spinlock_t sdma_lock; + struct qib_sdma_state sdma_state; + unsigned long sdma_buf_jiffies; + struct qib_sdma_desc *sdma_descq; + u64 sdma_descq_added; + u64 sdma_descq_removed; + u16 sdma_descq_cnt; + u16 sdma_descq_tail; + u16 sdma_descq_head; + u16 sdma_next_intr; + u16 sdma_reset_wait; + u8 sdma_generation; + struct tasklet_struct sdma_sw_clean_up_task; + struct list_head sdma_activelist; + + dma_addr_t sdma_descq_phys; + volatile __le64 *sdma_head_dma; /* DMA'ed by chip */ + dma_addr_t sdma_head_phys; + + wait_queue_head_t state_wait; /* for state_wanted */ + + /* HoL blocking for SMP replies */ + unsigned hol_state; + struct timer_list hol_timer; + + /* + * Shadow copies of registers; size indicates read access size.
+ * Most of them are readonly, but some are write-only registers, + * where we manipulate the bits in the shadow copy, and then write + * the shadow copy to qlogic_ib. + * + * We deliberately make most of these 32 bits, since they have + * restricted range. For any that we read, we want to generate 32 + * bit accesses, since Opteron will generate 2 separate 32 bit HT + * transactions for a 64 bit read, and we want to avoid unnecessary + * bus transactions. + */ + + /* This is the 64 bit group */ + /* last ibcstatus. opaque outside chip-specific code */ + u64 lastibcstat; + + /* these are the "32 bit" regs */ + + /* + * the following two are 32-bit bitmasks, but {test,clear,set}_bit + * all expect bit fields to be "unsigned long" + */ + unsigned long p_rcvctrl; /* shadow per-port rcvctrl */ + unsigned long p_sendctrl; /* shadow per-port sendctrl */ + + u32 ibmtu; /* The MTU programmed for this unit */ + /* + * Current max size IB packet (in bytes) including IB headers, that + * we can send. Changes when ibmtu changes. + */ + u32 ibmaxlen; + /* + * ibmaxlen at init time, limited by chip and by receive buffer + * size. Not changed after init. + */ + u32 init_ibmaxlen; + /* LID programmed for this instance */ + u16 lid; + /* list of pkeys programmed; 0 if not set */ + u16 pkeys[4]; + /* LID mask control */ + u8 lmc; + u8 link_width_supported; + u8 link_speed_supported; + u8 link_width_enabled; + u8 link_speed_enabled; + u8 link_width_active; + u8 link_speed_active; + u8 vls_supported; + u8 vls_operational; + /* Rx Polarity inversion (compensate for ~tx on partner) */ + u8 rx_pol_inv; + + u8 hw_pidx; /* physical port index */ + u8 port; /* IB port number and index into dd->pports - 1 */ + + u8 delay_mult; + + /* used to override LED behavior */ + u8 led_override; /* Substituted for normal value, if non-zero */ + u16 led_override_timeoff; /* delta to next timer event */ + u8 led_override_vals[2]; /* Alternates per blink-frame */ + u8 led_override_phase; /* Just counts, LSB picks from vals[] */ + atomic_t led_override_timer_active; + /* Used to flash LEDs in override mode */ + struct timer_list led_override_timer; + struct xmit_wait cong_stats; + struct timer_list symerr_clear_timer; +}; + +/* Observers. Not to be taken lightly, possibly not to ship. */ +/* + * If a diag read or write is to (bottom <= offset <= top), + * the "hook" is called, allowing, e.g. shadows to be + * updated in sync with the driver. struct diag_observer + * is the "visible" part. + */ +struct diag_observer; + +typedef int (*diag_hook) (struct qib_devdata *dd, + const struct diag_observer *op, + u32 offs, u64 *data, u64 mask, int only_32); + +struct diag_observer { + diag_hook hook; + u32 bottom; + u32 top; +}; + +extern int qib_register_observer(struct qib_devdata *dd, + const struct diag_observer *op); + +/* Only declared here, not defined. Private to diags */ +struct diag_observer_list_elt; + +/* device data struct now contains only "general per-device" info. + * fields related to a physical IB port are in a qib_pportdata struct, + * described above, while fields only used by a particular chip-type are in + * a qib_chipdata struct, whose contents are opaque to this file.
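+ * + * Navigation between the ppd/dd structs is via the inline helpers + * defined further down; e.g. (illustrative only, ppd being any + * valid port pointer): + *	struct qib_devdata *dd = dd_from_ppd(ppd);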
+ */ +struct qib_devdata { + struct qib_ibdev verbs_dev; /* must be first */ + struct list_head list; + /* pointers to related structs for this device */ + /* pci access data structure */ + struct pci_dev *pcidev; + struct cdev *user_cdev; + struct cdev *diag_cdev; + struct device *user_device; + struct device *diag_device; + + /* mem-mapped pointer to base of chip regs */ + u64 __iomem *kregbase; + /* end of mem-mapped chip space excluding sendbuf and user regs */ + u64 __iomem *kregend; + /* physical address of chip for io_remap, etc. */ + resource_size_t physaddr; + /* qib_cfgctxts pointers */ + struct qib_ctxtdata **rcd; /* Receive Context Data */ + + /* qib_pportdata, points to array of (physical) port-specific + * data structs, indexed by pidx (0..n-1) + */ + struct qib_pportdata *pport; + struct qib_chip_specific *cspec; /* chip-specific */ + + /* kvirt address of 1st 2k pio buffer */ + void __iomem *pio2kbase; + /* kvirt address of 1st 4k pio buffer */ + void __iomem *pio4kbase; + /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */ + void __iomem *piobase; + /* mem-mapped pointer to base of user chip regs (if using WC PAT) */ + u64 __iomem *userbase; + /* + * points to area where PIOavail registers will be DMA'ed. + * Has to be on a page of its own, because the page will be + * mapped into user program space. This copy is *ONLY* ever + * written by DMA, not by the driver! Need a copy per device + * when we get to multiple devices + */ + volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */ + /* physical address where updates occur */ + dma_addr_t pioavailregs_phys; + + /* device-specific implementations of functions needed by + * common code. Contrary to previous consensus, we can't + * really just point to a device-specific table, because we + * may need to "bend", e.g.
*_f_put_tid + */ + /* fallback to alternate interrupt type if possible */ + int (*f_intr_fallback)(struct qib_devdata *); + /* hard reset chip */ + int (*f_reset)(struct qib_devdata *); + void (*f_quiet_serdes)(struct qib_pportdata *); + int (*f_bringup_serdes)(struct qib_pportdata *); + int (*f_early_init)(struct qib_devdata *); + void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *); + void (*f_put_tid)(struct qib_devdata *, u64 __iomem*, + u32, unsigned long); + void (*f_cleanup)(struct qib_devdata *); + void (*f_setextled)(struct qib_pportdata *, u32); + /* fill out chip-specific fields */ + int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *); + /* free irq */ + void (*f_free_irq)(struct qib_devdata *); + struct qib_message_header *(*f_get_msgheader) + (struct qib_devdata *, __le32 *); + void (*f_config_ctxts)(struct qib_devdata *); + int (*f_get_ib_cfg)(struct qib_pportdata *, int); + int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32); + int (*f_set_ib_loopback)(struct qib_pportdata *, const char *); + int (*f_get_ib_table)(struct qib_pportdata *, int, void *); + int (*f_set_ib_table)(struct qib_pportdata *, int, void *); + u32 (*f_iblink_state)(u64); + u8 (*f_ibphys_portstate)(u64); + void (*f_xgxs_reset)(struct qib_pportdata *); + /* per chip actions needed for IB Link up/down changes */ + int (*f_ib_updown)(struct qib_pportdata *, int, u64); + u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *); + /* Read/modify/write of GPIO pins (potentially chip-specific) */ + int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir, + u32 mask); + /* Enable writes to config EEPROM (if supported) */ + int (*f_eeprom_wen)(struct qib_devdata *dd, int wen); + /* + * modify rcvctrl shadow[s] and write to appropriate chip-regs. + * see above QIB_RCVCTRL_xxx_ENB/DIS for operations. + * (ctxt == -1) means "all contexts", only meaningful for + * clearing. Could remove if chip_spec shutdown properly done. + */ + void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op, + int ctxt); + /* Read/modify/write sendctrl appropriately for op and port.
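+ * An illustrative call through the device struct (sketch; ppd is + * the caller's port pointer, not defined here): + *	ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_ENB | QIB_SENDCTRL_SEND_ENB);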
*/ + void (*f_sendctrl)(struct qib_pportdata *, u32 op); + void (*f_set_intr_state)(struct qib_devdata *, u32); + void (*f_set_armlaunch)(struct qib_devdata *, u32); + void (*f_wantpiobuf_intr)(struct qib_devdata *, u32); + int (*f_late_initreg)(struct qib_devdata *); + int (*f_init_sdma_regs)(struct qib_pportdata *); + u16 (*f_sdma_gethead)(struct qib_pportdata *); + int (*f_sdma_busy)(struct qib_pportdata *); + void (*f_sdma_update_tail)(struct qib_pportdata *, u16); + void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned); + void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned); + void (*f_sdma_hw_clean_up)(struct qib_pportdata *); + void (*f_sdma_hw_start_up)(struct qib_pportdata *); + void (*f_sdma_init_early)(struct qib_pportdata *); + void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32); + void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32); + u32 (*f_hdrqempty)(struct qib_ctxtdata *); + u64 (*f_portcntr)(struct qib_pportdata *, u32); + u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **, + u64 **); + u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32, + char **, u64 **); + u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8); + void (*f_initvl15_bufs)(struct qib_devdata *); + void (*f_init_ctxt)(struct qib_ctxtdata *); + void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32, + struct qib_ctxtdata *); + void (*f_writescratch)(struct qib_devdata *, u32); + int (*f_tempsense_rd)(struct qib_devdata *, int regnum); + + char *boardname; /* human readable board info */ + + /* template for writing TIDs */ + u64 tidtemplate; + /* value to write to free TIDs */ + u64 tidinvalid; + + /* number of registers used for pioavail */ + u32 pioavregs; + /* device (not port) flags, basically device capabilities */ + u32 flags; + /* last buffer for user use */ + u32 lastctxt_piobuf; + + /* saturating counter of (non-port-specific) device interrupts */ + u32 int_counter; + + /* pio bufs allocated per ctxt */ + u32 pbufsctxt; + /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */ + u32 ctxts_extrabuf; + /* + * number of ctxts configured as max; zero is set to number chip + * supports, less gives more pio bufs/ctxt, etc. + */ + u32 cfgctxts; + + /* + * hint that we should update pioavailshadow before + * looking for a PIO buffer + */ + u32 upd_pio_shadow; + + /* internal debugging stats */ + u32 maxpkts_call; + u32 avgpkts_call; + u64 nopiobufs; + + /* PCI Vendor ID (here for NodeInfo) */ + u16 vendorid; + /* PCI Device ID (here for NodeInfo) */ + u16 deviceid; + /* for write combining settings */ + unsigned long wc_cookie; + unsigned long wc_base; + unsigned long wc_len; + + /* shadow copy of struct page *'s for exp tid pages */ + struct page **pageshadow; + /* shadow copy of dma handles for exp tid pages */ + dma_addr_t *physshadow; + u64 __iomem *egrtidbase; + spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */ + /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */ + spinlock_t uctxt_lock; /* rcd and user context changes */ + /* + * per unit status, see also portdata statusp + * mapped readonly into user processes so they can get unit and + * IB link status cheaply + */ + u64 *devstatusp; + char *freezemsg; /* freeze msg if hw error put chip in freeze */ + u32 freezelen; /* max length of freezemsg */ + /* timer used to prevent stats overflow, error throttling, etc. 
*/ + struct timer_list stats_timer; + + /* timer to verify interrupts work, and fallback if possible */ + struct timer_list intrchk_timer; + unsigned long ureg_align; /* user register alignment */ + + /* + * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and + * pio_writing. + */ + spinlock_t pioavail_lock; + + /* + * Shadow copies of registers; size indicates read access size. + * Most of them are readonly, but some are write-only registers, + * where we manipulate the bits in the shadow copy, and then write + * the shadow copy to qlogic_ib. + * + * We deliberately make most of these 32 bits, since they have + * restricted range. For any that we read, we want to generate 32 + * bit accesses, since Opteron will generate 2 separate 32 bit HT + * transactions for a 64 bit read, and we want to avoid unnecessary + * bus transactions. + */ + + /* This is the 64 bit group */ + + unsigned long pioavailshadow[6]; + /* bitmap of send buffers available for the kernel to use with PIO. */ + unsigned long pioavailkernel[6]; + /* bitmap of send buffers which need to be disarmed. */ + unsigned long pio_need_disarm[3]; + /* bitmap of send buffers which are being written to. */ + unsigned long pio_writing[3]; + /* kr_revision shadow */ + u64 revision; + /* Base GUID for device (from eeprom, network order) */ + __be64 base_guid; + + /* + * kr_sendpiobufbase value (chip offset of pio buffers), and the + * base of the 2KB buffers (user processes only use 2K) + */ + u64 piobufbase; + u32 pio2k_bufbase; + + /* these are the "32 bit" regs */ + + /* number of GUIDs in the flash for this interface */ + u32 nguid; + /* + * the following two are 32-bit bitmasks, but {test,clear,set}_bit + * all expect bit fields to be "unsigned long" + */ + unsigned long rcvctrl; /* shadow per device rcvctrl */ + unsigned long sendctrl; /* shadow per device sendctrl */ + + /* value we put in kr_rcvhdrcnt */ + u32 rcvhdrcnt; + /* value we put in kr_rcvhdrsize */ + u32 rcvhdrsize; + /* value we put in kr_rcvhdrentsize */ + u32 rcvhdrentsize; + /* kr_ctxtcnt value */ + u32 ctxtcnt; + /* kr_pagealign value */ + u32 palign; + /* number of "2KB" PIO buffers */ + u32 piobcnt2k; + /* size in bytes of "2KB" PIO buffers */ + u32 piosize2k; + /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */ + u32 piosize2kmax_dwords; + /* number of "4KB" PIO buffers */ + u32 piobcnt4k; + /* size in bytes of "4KB" PIO buffers */ + u32 piosize4k; + /* kr_rcvegrbase value */ + u32 rcvegrbase; + /* kr_rcvtidbase value */ + u32 rcvtidbase; + /* kr_rcvtidcnt value */ + u32 rcvtidcnt; + /* kr_userregbase */ + u32 uregbase; + /* shadow the control register contents */ + u32 control; + + /* chip address space used by 4k pio buffers */ + u32 align4k; + /* size of each rcvegrbuffer */ + u32 rcvegrbufsize; + /* localbus width (1, 2, 4, 8, 16, 32) from config space */ + u32 lbus_width; + /* localbus speed in MHz */ + u32 lbus_speed; + int unit; /* unit # of this chip */ + + /* start of CHIP_SPEC move to chipspec, but need code changes */ + /* low and high portions of MSI capability/vector */ + u32 msi_lo; + /* saved after PCIe init for restore after reset */ + u32 msi_hi; + /* MSI data (vector) saved for restore */ + u16 msi_data; + /* so we can rewrite it after a chip reset */ + u32 pcibar0; + /* so we can rewrite it after a chip reset */ + u32 pcibar1; + u64 rhdrhead_intr_off; + + /* + * ASCII serial number, from flash, large enough for original + * all digit strings, and longer QLogic serial number format + */ + u8 serial[16]; + /* human
readable board version */ + u8 boardversion[96]; + u8 lbus_info[32]; /* human readable localbus info */ + /* chip major rev, from qib_revision */ + u8 majrev; + /* chip minor rev, from qib_revision */ + u8 minrev; + + /* Misc small ints */ + /* Number of physical ports available */ + u8 num_pports; + /* Lowest context number which can be used by user processes */ + u8 first_user_ctxt; + u8 n_krcv_queues; + u8 qpn_mask; + u8 skip_kctxt_mask; + + u16 rhf_offset; /* offset of RHF within receive header entry */ + + /* + * GPIO pins for twsi-connected devices, and device code for eeprom + */ + u8 gpio_sda_num; + u8 gpio_scl_num; + u8 twsi_eeprom_dev; + u8 board_atten; + + /* Support (including locks) for EEPROM logging of errors and time */ + /* control access to actual counters, timer */ + spinlock_t eep_st_lock; + /* control high-level access to EEPROM */ + struct mutex eep_lock; + uint64_t traffic_wds; + /* active time is kept in seconds, but logged in hours */ + atomic_t active_time; + /* Below are nominal shadow of EEPROM, new since last EEPROM update */ + uint8_t eep_st_errs[QIB_EEP_LOG_CNT]; + uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT]; + uint16_t eep_hrs; + /* + * masks for which bits of errs, hwerrs that cause + * each of the counters to increment. + */ + struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT]; + struct qib_diag_client *diag_client; + spinlock_t qib_diag_trans_lock; /* protect diag observer ops */ + struct diag_observer_list_elt *diag_observer_list; + + u8 psxmitwait_supported; + /* cycle length of PS* counters in HW (in picoseconds) */ + u16 psxmitwait_check_rate; +}; + +/* hol_state values */ +#define QIB_HOL_UP 0 +#define QIB_HOL_INIT 1 + +#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0) +#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1) +#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2) +#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3) +#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4) + +/* operation types for f_txchk_change() */ +#define TXCHK_CHG_TYPE_DIS1 3 +#define TXCHK_CHG_TYPE_ENAB1 2 +#define TXCHK_CHG_TYPE_KERN 1 +#define TXCHK_CHG_TYPE_USER 0 + +#define QIB_CHASE_TIME msecs_to_jiffies(145) +#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160) + +/* Private data for file operations */ +struct qib_filedata { + struct qib_ctxtdata *rcd; + unsigned subctxt; + unsigned tidcursor; + struct qib_user_sdma_queue *pq; + int rec_cpu_num; /* for cpu affinity; -1 if none */ +}; + +extern struct list_head qib_dev_list; +extern spinlock_t qib_devs_lock; +extern struct qib_devdata *qib_lookup(int unit); +extern u32 qib_cpulist_count; +extern unsigned long *qib_cpulist; + +extern unsigned qib_wc_pat; +int qib_init(struct qib_devdata *, int); +int init_chip_wc_pat(struct qib_devdata *dd, u32); +int qib_enable_wc(struct qib_devdata *dd); +void qib_disable_wc(struct qib_devdata *dd); +int qib_count_units(int *npresentp, int *nupp); +int qib_count_active_units(void); + +int qib_cdev_init(int minor, const char *name, + const struct file_operations *fops, + struct cdev **cdevp, struct device **devp); +void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp); +int qib_dev_init(void); +void qib_dev_cleanup(void); + +int qib_diag_add(struct qib_devdata *); +void qib_diag_remove(struct qib_devdata *); +void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64); +void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */ + +int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err); +void qib_bad_intrstatus(struct qib_devdata *); +void qib_handle_urcv(struct 
qib_devdata *, u64); + +/* clean up any per-chip chip-specific stuff */ +void qib_chip_cleanup(struct qib_devdata *); +/* clean up any chip type-specific stuff */ +void qib_chip_done(void); + +/* check to see if we have to force ordering for write combining */ +int qib_unordered_wc(void); +void qib_pio_copy(void __iomem *to, const void *from, size_t count); + +void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned); +int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *); +void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned); +void qib_cancel_sends(struct qib_pportdata *); + +int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *); +int qib_setup_eagerbufs(struct qib_ctxtdata *); +void qib_set_ctxtcnt(struct qib_devdata *); +int qib_create_ctxts(struct qib_devdata *dd); +struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32); +void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8); +void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *); + +u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *); +int qib_reset_device(int); +int qib_wait_linkstate(struct qib_pportdata *, u32, int); +int qib_set_linkstate(struct qib_pportdata *, u8); +int qib_set_mtu(struct qib_pportdata *, u16); +int qib_set_lid(struct qib_pportdata *, u32, u8); +void qib_hol_down(struct qib_pportdata *); +void qib_hol_init(struct qib_pportdata *); +void qib_hol_up(struct qib_pportdata *); +void qib_hol_event(unsigned long); +void qib_disable_after_error(struct qib_devdata *); +int qib_set_uevent_bits(struct qib_pportdata *, const int); + +/* for use in system calls, where we want to know device type, etc. */ +#define ctxt_fp(fp) \ + (((struct qib_filedata *)(fp)->private_data)->rcd) +#define subctxt_fp(fp) \ + (((struct qib_filedata *)(fp)->private_data)->subctxt) +#define tidcursor_fp(fp) \ + (((struct qib_filedata *)(fp)->private_data)->tidcursor) +#define user_sdma_queue_fp(fp) \ + (((struct qib_filedata *)(fp)->private_data)->pq) + +static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd) +{ + return ppd->dd; +} + +static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev) +{ + return container_of(dev, struct qib_devdata, verbs_dev); +} + +static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev) +{ + return dd_from_dev(to_idev(ibdev)); +} + +static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp) +{ + return container_of(ibp, struct qib_pportdata, ibport_data); +} + +static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port) +{ + struct qib_devdata *dd = dd_from_ibdev(ibdev); + unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */ + + WARN_ON(pidx >= dd->num_pports); + return &dd->pport[pidx].ibport_data; +} + +/* + * values for dd->flags (_device_ related flags) + */ +#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */ +#define QIB_INITTED 0x2 /* chip and driver up and initted */ +#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */ +#define QIB_PRESENT 0x8 /* chip accesses can be done */ +#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */ +#define QIB_HAS_THRESH_UPDATE 0x40 +#define QIB_HAS_SDMA_TIMEOUT 0x80 +#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */ +#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register DMA not used */ +#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */ +#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */ +#define
QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */ +#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */ +#define QIB_BADINTR 0x8000 /* severe interrupt problems */ +#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */ +#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */ + +/* + * values for ppd->lflags (_ib_port_ related flags) + */ +#define QIBL_LINKV 0x1 /* IB link state valid */ +#define QIBL_LINKDOWN 0x8 /* IB link is down */ +#define QIBL_LINKINIT 0x10 /* IB link level is up */ +#define QIBL_LINKARMED 0x20 /* IB link is ARMED */ +#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */ +/* leave a gap for more IB-link state */ +#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */ +#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */ +#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced, + * Do not try to bring up */ +#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */ + +/* IB dword length mask in PBC (lower 11 bits); same for all chips */ +#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1) + + +/* ctxt_flag bit offsets */ + /* waiting for a packet to arrive */ +#define QIB_CTXT_WAITING_RCV 2 + /* master has not finished initializing */ +#define QIB_CTXT_MASTER_UNINIT 4 + /* waiting for an urgent packet to arrive */ +#define QIB_CTXT_WAITING_URG 5 + +/* free up any allocated data at close */ +void qib_free_data(struct qib_ctxtdata *dd); +void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned, + u32, struct qib_ctxtdata *); +struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *, + const struct pci_device_id *); +struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *, + const struct pci_device_id *); +struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *, + const struct pci_device_id *); +void qib_free_devdata(struct qib_devdata *); +struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra); + +#define QIB_TWSI_NO_DEV 0xFF +/* The qib_twsi_ functions below must be called with eep_lock held */ +int qib_twsi_reset(struct qib_devdata *dd); +int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer, + int len); +int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, + const void *buffer, int len); +void qib_get_eeprom_info(struct qib_devdata *); +int qib_update_eeprom_log(struct qib_devdata *dd); +void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr); +void qib_dump_lookup_output_queue(struct qib_devdata *); +void qib_force_pio_avail_update(struct qib_devdata *); +void qib_clear_symerror_on_linkup(unsigned long opaque); + +/* + * Set LED override; only the two LSBs have "public" meaning, but + * any non-zero value substitutes them for the Link and LinkTrain + * LED states. + */ +#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */ +#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */ +void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val); + +/* send dma routines */ +int qib_setup_sdma(struct qib_pportdata *); +void qib_teardown_sdma(struct qib_pportdata *); +void __qib_sdma_intr(struct qib_pportdata *); +void qib_sdma_intr(struct qib_pportdata *); +int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *, + u32, struct qib_verbs_txreq *); +/* ppd->sdma_lock should be locked before calling this.
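+ * E.g. (a sketch; ppd and flags are the caller's variables): + *	spin_lock_irqsave(&ppd->sdma_lock, flags); + *	qib_sdma_make_progress(ppd); + *	spin_unlock_irqrestore(&ppd->sdma_lock, flags);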
*/ +int qib_sdma_make_progress(struct qib_pportdata *dd); + +/* must be called under qib_sdma_lock */ +static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd) +{ + return ppd->sdma_descq_cnt - + (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1; +} + +static inline int __qib_sdma_running(struct qib_pportdata *ppd) +{ + return ppd->sdma_state.current_state == qib_sdma_state_s99_running; +} +int qib_sdma_running(struct qib_pportdata *); + +void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events); +void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events); + +/* + * number of words used for protocol header if not set by qib_userinit() + */ +#define QIB_DFLT_RCVHDRSIZE 9 + +/* + * We need to be able to handle an IB header of at least 24 dwords. + * We need the rcvhdrq large enough to handle largest IB header, but + * still have room for a 2KB MTU standard IB packet. + * Additionally, some processor/memory controller combinations + * benefit quite strongly from having the DMA'ed data be cacheline + * aligned and a cacheline multiple, so we set the size to 32 dwords + * (2 64-byte primary cachelines for pretty much all processors of + * interest). The alignment hurts nothing, other than using somewhat + * more memory. + */ +#define QIB_RCVHDR_ENTSIZE 32 + +int qib_get_user_pages(unsigned long, size_t, struct page **); +void qib_release_user_pages(struct page **, size_t); +int qib_eeprom_read(struct qib_devdata *, u8, void *, int); +int qib_eeprom_write(struct qib_devdata *, u8, const void *, int); +u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32); +void qib_sendbuf_done(struct qib_devdata *, unsigned); + +static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd) +{ + *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL; +} + +static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd) +{ + /* + * volatile because it's a DMA target from the chip, routine is + * inlined, and we don't want register caching or reordering. + */ + return (u32) le64_to_cpu( + *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */ +} + +static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd) +{ + const struct qib_devdata *dd = rcd->dd; + u32 hdrqtail; + + if (dd->flags & QIB_NODMA_RTAIL) { + __le32 *rhf_addr; + u32 seq; + + rhf_addr = (__le32 *) rcd->rcvhdrq + + rcd->head + dd->rhf_offset; + seq = qib_hdrget_seq(rhf_addr); + hdrqtail = rcd->head; + if (seq == rcd->seq_cnt) + hdrqtail++; + } else + hdrqtail = qib_get_rcvhdrtail(rcd); + + return hdrqtail; +} + +/* + * sysfs interface.
+ */ + +extern const char ib_qib_version[]; + +int qib_device_create(struct qib_devdata *); +void qib_device_remove(struct qib_devdata *); + +int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + struct kobject *kobj); +int qib_verbs_register_sysfs(struct qib_devdata *); +void qib_verbs_unregister_sysfs(struct qib_devdata *); +/* Hook for sysfs read of QSFP */ +extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len); + +int __init qib_init_qibfs(void); +int __exit qib_exit_qibfs(void); + +int qibfs_add(struct qib_devdata *); +int qibfs_remove(struct qib_devdata *); + +int qib_pcie_init(struct pci_dev *, const struct pci_device_id *); +int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *, + const struct pci_device_id *); +void qib_pcie_ddcleanup(struct qib_devdata *); +int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *); +int qib_reinit_intr(struct qib_devdata *); +void qib_enable_intx(struct pci_dev *); +void qib_nomsi(struct qib_devdata *); +void qib_nomsix(struct qib_devdata *); +void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *); +void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8); + +/* + * dma_addr wrappers - all 0's invalid for hw + */ +dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long, + size_t, int); +const char *qib_get_unit_name(int unit); + +/* + * Flush write combining store buffers (if present) and perform a write + * barrier. + */ +#if defined(CONFIG_X86_64) +#define qib_flush_wc() asm volatile("sfence" : : : "memory") +#else +#define qib_flush_wc() wmb() /* no reorder around wc flush */ +#endif + +/* global module parameter variables */ +extern unsigned qib_ibmtu; +extern ushort qib_cfgctxts; +extern ushort qib_num_cfg_vls; +extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */ +extern unsigned qib_n_krcv_queues; +extern unsigned qib_sdma_fetch_arb; +extern unsigned qib_compat_ddr_negotiate; +extern int qib_special_trigger; + +extern struct mutex qib_mutex; + +/* Number of seconds before our card status check... */ +#define STATUS_TIMEOUT 60 + +#define QIB_DRV_NAME "ib_qib" +#define QIB_USER_MINOR_BASE 0 +#define QIB_TRACE_MINOR 127 +#define QIB_DIAGPKT_MINOR 128 +#define QIB_DIAG_MINOR_BASE 129 +#define QIB_NMINORS 255 + +#define PCI_VENDOR_ID_PATHSCALE 0x1fc1 +#define PCI_VENDOR_ID_QLOGIC 0x1077 +#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10 +#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220 +#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322 + +/* + * qib_early_err is used (only!) to print early errors before devdata is + * allocated, or when dd->pcidev may not be valid, and at the tail end of + * cleanup when devdata may have been freed, etc. qib_dev_porterr is + * the same as qib_dev_err, but is used when the message really needs + * the IB port# to be definitive as to what's happening. + * All of these go to the trace log, and the trace log entry is done + * first to avoid possible serial port delays from printk. + */ +#define qib_early_err(dev, fmt, ...) \ + do { \ + dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \ + } while (0) + +#define qib_dev_err(dd, fmt, ...) \ + do { \ + dev_err(&(dd)->pcidev->dev, "%s: " fmt, \ + qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \ + } while (0) + +#define qib_dev_porterr(dd, port, fmt, ...) \ + do { \ + dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ + qib_get_unit_name((dd)->unit), (dd)->unit, (port), \ + ##__VA_ARGS__); \ + } while (0) + +#define qib_devinfo(pcidev, fmt, ...)
\ + do { \ + dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \ + } while (0) + +/* + * this is used for formatting hw error messages... + */ +struct qib_hwerror_msgs { + u64 mask; + const char *msg; +}; + +#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b } + +/* in qib_intr.c... */ +void qib_format_hwerrors(u64 hwerrs, + const struct qib_hwerror_msgs *hwerrmsgs, + size_t nhwerrmsgs, char *msg, size_t lmsg); +#endif /* _QIB_KERNEL_H */ diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h new file mode 100644 index 000000000000..e16cb6f7de2c --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_6120_regs.h @@ -0,0 +1,977 @@ +/* + * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* This file is mechanically generated from RTL. Any hand-edits will be lost! 
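+ * + * Each register below has an _OFFS (chip offset); each field within + * it has an _LSB (bit position) and an _RMASK (mask applied after + * shifting). A typical, purely illustrative, field extraction from + * a 64-bit register value is therefore + *	majrev = (rev >> QIB_6120_Revision_R_ChipRevMajor_LSB) & + *		QIB_6120_Revision_R_ChipRevMajor_RMASK;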
*/ + +#define QIB_6120_Revision_OFFS 0x0 +#define QIB_6120_Revision_R_Simulator_LSB 0x3F +#define QIB_6120_Revision_R_Simulator_RMASK 0x1 +#define QIB_6120_Revision_Reserved_LSB 0x28 +#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF +#define QIB_6120_Revision_BoardID_LSB 0x20 +#define QIB_6120_Revision_BoardID_RMASK 0xFF +#define QIB_6120_Revision_R_SW_LSB 0x18 +#define QIB_6120_Revision_R_SW_RMASK 0xFF +#define QIB_6120_Revision_R_Arch_LSB 0x10 +#define QIB_6120_Revision_R_Arch_RMASK 0xFF +#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8 +#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF +#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0 +#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF + +#define QIB_6120_Control_OFFS 0x8 +#define QIB_6120_Control_TxLatency_LSB 0x4 +#define QIB_6120_Control_TxLatency_RMASK 0x1 +#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3 +#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1 +#define QIB_6120_Control_LinkEn_LSB 0x2 +#define QIB_6120_Control_LinkEn_RMASK 0x1 +#define QIB_6120_Control_FreezeMode_LSB 0x1 +#define QIB_6120_Control_FreezeMode_RMASK 0x1 +#define QIB_6120_Control_SyncReset_LSB 0x0 +#define QIB_6120_Control_SyncReset_RMASK 0x1 + +#define QIB_6120_PageAlign_OFFS 0x10 + +#define QIB_6120_PortCnt_OFFS 0x18 + +#define QIB_6120_SendRegBase_OFFS 0x30 + +#define QIB_6120_UserRegBase_OFFS 0x38 + +#define QIB_6120_CntrRegBase_OFFS 0x40 + +#define QIB_6120_Scratch_OFFS 0x48 +#define QIB_6120_Scratch_TopHalf_LSB 0x20 +#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF +#define QIB_6120_Scratch_BottomHalf_LSB 0x0 +#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF + +#define QIB_6120_IntBlocked_OFFS 0x60 +#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F +#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E +#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D +#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C +#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_Reserved_LSB 0xF +#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF +#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10 +#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF +#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE +#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD +#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC +#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_Reserved1_LSB 0x5 +#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F +#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4 +#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3 +#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2 +#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1 +#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1 +#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0 +#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1 + +#define QIB_6120_IntMask_OFFS 0x68 +#define QIB_6120_IntMask_ErrorIntMask_LSB 
0x1F +#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1 +#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E +#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1 +#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D +#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1 +#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C +#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1 +#define QIB_6120_IntMask_Reserved_LSB 0x11 +#define QIB_6120_IntMask_Reserved_RMASK 0x7FF +#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10 +#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1 +#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF +#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1 +#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE +#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1 +#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD +#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1 +#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC +#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1 +#define QIB_6120_IntMask_Reserved1_LSB 0x5 +#define QIB_6120_IntMask_Reserved1_RMASK 0x7F +#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4 +#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1 +#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3 +#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1 +#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2 +#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1 +#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1 +#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1 +#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0 +#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1 + +#define QIB_6120_IntStatus_OFFS 0x70 +#define QIB_6120_IntStatus_Error_LSB 0x1F +#define QIB_6120_IntStatus_Error_RMASK 0x1 +#define QIB_6120_IntStatus_PioSent_LSB 0x1E +#define QIB_6120_IntStatus_PioSent_RMASK 0x1 +#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D +#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1 +#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C +#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1 +#define QIB_6120_IntStatus_Reserved_LSB 0xF +#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF +#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10 +#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1 +#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF +#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1 +#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE +#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1 +#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD +#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1 +#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC +#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1 +#define QIB_6120_IntStatus_Reserved1_LSB 0x5 +#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F +#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4 +#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1 +#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3 +#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1 +#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2 +#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1 +#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1 +#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1 +#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0 +#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1 + +#define QIB_6120_IntClear_OFFS 0x78 +#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F +#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1 +#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E +#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1 +#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D +#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1 +#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C +#define 
QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1 +#define QIB_6120_IntClear_Reserved_LSB 0xF +#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF +#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10 +#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1 +#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF +#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1 +#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE +#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1 +#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD +#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1 +#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC +#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1 +#define QIB_6120_IntClear_Reserved1_LSB 0x5 +#define QIB_6120_IntClear_Reserved1_RMASK 0x7F +#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4 +#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1 +#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3 +#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1 +#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2 +#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1 +#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1 +#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1 +#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0 +#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1 + +#define QIB_6120_ErrMask_OFFS 0x80 +#define QIB_6120_ErrMask_Reserved_LSB 0x34 +#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF +#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33 +#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32 +#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1 +#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31 +#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30 +#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1 +#define QIB_6120_ErrMask_Reserved1_LSB 0x26 +#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF +#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25 +#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24 +#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23 +#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22 +#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21 +#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20 +#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F +#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E +#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D +#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_Reserved2_LSB 0x12 +#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF +#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11 +#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10 +#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF +#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE +#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1 +#define 
QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD +#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC +#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB +#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA +#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9 +#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8 +#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7 +#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6 +#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5 +#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4 +#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3 +#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2 +#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1 +#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1 +#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0 +#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1 + +#define QIB_6120_ErrStatus_OFFS 0x88 +#define QIB_6120_ErrStatus_Reserved_LSB 0x34 +#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF +#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33 +#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1 +#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32 +#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1 +#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31 +#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1 +#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30 +#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1 +#define QIB_6120_ErrStatus_Reserved1_LSB 0x26 +#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF +#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25 +#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1 +#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24 +#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1 +#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23 +#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1 +#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22 +#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1 +#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21 +#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1 +#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20 +#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1 +#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F +#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1 +#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E +#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1 +#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D +#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1 +#define QIB_6120_ErrStatus_Reserved2_LSB 0x12 +#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF +#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11 +#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10 +#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF +#define 
QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE +#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD +#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC +#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB +#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA +#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9 +#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8 +#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7 +#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6 +#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5 +#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4 +#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3 +#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2 +#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1 +#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1 +#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0 +#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1 + +#define QIB_6120_ErrClear_OFFS 0x90 +#define QIB_6120_ErrClear_Reserved_LSB 0x34 +#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF +#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33 +#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32 +#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1 +#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31 +#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30 +#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1 +#define QIB_6120_ErrClear_Reserved1_LSB 0x26 +#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF +#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25 +#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24 +#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23 +#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22 +#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21 +#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20 +#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F +#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E +#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D +#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_Reserved2_LSB 0x12 +#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF +#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11 +#define 
QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10 +#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF +#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE +#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD +#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC +#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB +#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA +#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9 +#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8 +#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7 +#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6 +#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5 +#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4 +#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3 +#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2 +#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1 +#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1 +#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0 +#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1 + +#define QIB_6120_HwErrMask_OFFS 0x98 +#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F +#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1 +#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E +#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1 +#define QIB_6120_HwErrMask_Reserved_LSB 0x3D +#define QIB_6120_HwErrMask_Reserved_RMASK 0x1 +#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C +#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1 +#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B +#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1 +#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A +#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1 +#define QIB_6120_HwErrMask_Reserved1_LSB 0x39 +#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1 +#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38 +#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1 +#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37 +#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1 +#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36 +#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1 +#define QIB_6120_HwErrMask_Reserved2_LSB 0x33 +#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7 +#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C +#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F +#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28 +#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF +#define QIB_6120_HwErrMask_Reserved3_LSB 0x22 +#define 
QIB_6120_HwErrMask_Reserved3_RMASK 0x3F +#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F +#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7 +#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E +#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1 +#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D +#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1 +#define QIB_6120_HwErrMask_Reserved4_LSB 0x6 +#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF +#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0 +#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F + +#define QIB_6120_HwErrStatus_OFFS 0xA0 +#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F +#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1 +#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E +#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1 +#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D +#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1 +#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C +#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1 +#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B +#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1 +#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A +#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1 +#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39 +#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1 +#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38 +#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1 +#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37 +#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1 +#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36 +#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1 +#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33 +#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7 +#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C +#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F +#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28 +#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF +#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22 +#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F +#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F +#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7 +#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E +#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1 +#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D +#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1 +#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6 +#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF +#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0 +#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F + +#define QIB_6120_HwErrClear_OFFS 0xA8 +#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F +#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1 +#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E +#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1 +#define QIB_6120_HwErrClear_Reserved_LSB 0x3D +#define QIB_6120_HwErrClear_Reserved_RMASK 0x1 +#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C +#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1 +#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B +#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1 +#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A +#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1 +#define 
QIB_6120_HwErrClear_Reserved1_LSB 0x39 +#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1 +#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38 +#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1 +#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37 +#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1 +#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36 +#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1 +#define QIB_6120_HwErrClear_Reserved2_LSB 0x33 +#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7 +#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C +#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F +#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28 +#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF +#define QIB_6120_HwErrClear_Reserved3_LSB 0x22 +#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F +#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F +#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7 +#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E +#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1 +#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D +#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1 +#define QIB_6120_HwErrClear_Reserved4_LSB 0x6 +#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF +#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0 +#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F + +#define QIB_6120_HwDiagCtrl_OFFS 0xB0 +#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F +#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1 +#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E +#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1 +#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D +#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1 +#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C +#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1 +#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33 +#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF +#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C +#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F +#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28 +#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF +#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23 +#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F +#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F +#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF +#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6 +#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF +#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0 +#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F + +#define QIB_6120_IBCStatus_OFFS 0xC0 +#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F +#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1 +#define QIB_6120_IBCStatus_TxReady_LSB 0x1E +#define QIB_6120_IBCStatus_TxReady_RMASK 0x1 +#define QIB_6120_IBCStatus_Reserved_LSB 0x7 +#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF +#define QIB_6120_IBCStatus_LinkState_LSB 0x4 +#define QIB_6120_IBCStatus_LinkState_RMASK 0x7 +#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0 +#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF + +#define QIB_6120_IBCCtrl_OFFS 0xC8 +#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F +#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1 +#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E +#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1 +#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B +#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF 
+#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28 +#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7 +#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24 +#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF +#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20 +#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF +#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F +#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1 +#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14 +#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF +#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12 +#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3 +#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10 +#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3 +#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8 +#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF +#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0 +#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF + +#define QIB_6120_EXTStatus_OFFS 0xD0 +#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30 +#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF +#define QIB_6120_EXTStatus_Reserved_LSB 0x20 +#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF +#define QIB_6120_EXTStatus_Reserved1_LSB 0x10 +#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF +#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF +#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1 +#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE +#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1 +#define QIB_6120_EXTStatus_Reserved2_LSB 0x0 +#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF + +#define QIB_6120_EXTCtrl_OFFS 0xD8 +#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30 +#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF +#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20 +#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF +#define QIB_6120_EXTCtrl_Reserved_LSB 0x4 +#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF +#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3 +#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1 +#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2 +#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1 +#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1 +#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1 +#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0 +#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1 + +#define QIB_6120_GPIOOut_OFFS 0xE0 + +#define QIB_6120_GPIOMask_OFFS 0xE8 + +#define QIB_6120_GPIOStatus_OFFS 0xF0 + +#define QIB_6120_GPIOClear_OFFS 0xF8 + +#define QIB_6120_RcvCtrl_OFFS 0x100 +#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F +#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1 +#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E +#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1 +#define QIB_6120_RcvCtrl_Reserved_LSB 0x15 +#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF +#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10 +#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F +#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9 +#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F +#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5 +#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF +#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0 +#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F + +#define QIB_6120_RcvBTHQP_OFFS 0x108 +#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E +#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3 +#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18 +#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F +#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0 +#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF + +#define QIB_6120_RcvHdrSize_OFFS 0x110 + +#define QIB_6120_RcvHdrCnt_OFFS 0x118 + +#define QIB_6120_RcvHdrEntSize_OFFS 0x120 + 
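A note on the convention used by these generated tables: every register has an *_OFFS byte offset into the chip's mapped register space, and every field within it is described by an *_LSB/*_RMASK pair, where LSB is the field's least-significant bit position and RMASK is the field's mask right-justified to bit 0. A minimal sketch of how such a pair is combined to extract a field from a raw 64-bit register value (the QIB_FIELD helper below is illustrative only, not part of the driver):

/*
 * Illustrative helper, not from the qib driver: isolate a field by
 * shifting the raw register value down to the field's LSB, then
 * masking with the right-justified RMASK.
 */
#define QIB_FIELD(regval, sym) \
	(((regval) >> sym##_LSB) & sym##_RMASK)

/*
 * Example, using the IBCStatus fields defined above: the 3-bit link
 * state lives at LSB 0x4 with RMASK 0x7, so for a raw value v,
 *	QIB_FIELD(v, QIB_6120_IBCStatus_LinkState)
 * yields 0..7.  The inverse, when composing a value to write back,
 * would be ((u64)x & sym##_RMASK) << sym##_LSB.
 */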
+#define QIB_6120_RcvTIDBase_OFFS 0x128 + +#define QIB_6120_RcvTIDCnt_OFFS 0x130 + +#define QIB_6120_RcvEgrBase_OFFS 0x138 + +#define QIB_6120_RcvEgrCnt_OFFS 0x140 + +#define QIB_6120_RcvBufBase_OFFS 0x148 + +#define QIB_6120_RcvBufSize_OFFS 0x150 + +#define QIB_6120_RxIntMemBase_OFFS 0x158 + +#define QIB_6120_RxIntMemSize_OFFS 0x160 + +#define QIB_6120_RcvPartitionKey_OFFS 0x168 + +#define QIB_6120_RcvPktLEDCnt_OFFS 0x178 +#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20 +#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF +#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0 +#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF + +#define QIB_6120_SendCtrl_OFFS 0x1C0 +#define QIB_6120_SendCtrl_Disarm_LSB 0x1F +#define QIB_6120_SendCtrl_Disarm_RMASK 0x1 +#define QIB_6120_SendCtrl_Reserved_LSB 0x17 +#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF +#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10 +#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F +#define QIB_6120_SendCtrl_Reserved1_LSB 0x4 +#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF +#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3 +#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1 +#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2 +#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1 +#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1 +#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1 +#define QIB_6120_SendCtrl_Abort_LSB 0x0 +#define QIB_6120_SendCtrl_Abort_RMASK 0x1 + +#define QIB_6120_SendPIOBufBase_OFFS 0x1C8 +#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35 +#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF +#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20 +#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF +#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15 +#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF +#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0 +#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF + +#define QIB_6120_SendPIOSize_OFFS 0x1D0 +#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D +#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF +#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20 +#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF +#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC +#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF +#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0 +#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF + +#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8 +#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24 +#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF +#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20 +#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF +#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9 +#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF +#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0 +#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF + +#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0 +#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6 +#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF +#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0 +#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F + +#define QIB_6120_SendBufErr0_OFFS 0x240 +#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0 +#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0 + +#define QIB_6120_RcvHdrAddr0_OFFS 0x280 +#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2 +#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF +#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0 +#define 
QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3 + +#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300 +#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2 +#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF +#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0 +#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3 + +#define QIB_6120_SerdesCfg0_OFFS 0x3C0 +#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F +#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1 +#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38 +#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F +#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36 +#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3 +#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34 +#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3 +#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32 +#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3 +#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31 +#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1 +#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30 +#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1 +#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F +#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1 +#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E +#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1 +#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D +#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1 +#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C +#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1 +#define QIB_6120_SerdesCfg0_PW_LSB 0x2B +#define QIB_6120_SerdesCfg0_PW_RMASK 0x1 +#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29 +#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3 +#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28 +#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1 +#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27 +#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1 +#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26 +#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1 +#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E +#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF +#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D +#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1 +#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C +#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1 +#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18 +#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF +#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14 +#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF +#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10 +#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF +#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC +#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF +#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8 +#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF +#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7 +#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1 +#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6 +#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1 +#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5 +#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1 +#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4 +#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1 +#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3 +#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1 +#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2 +#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1 +#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1 +#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1 +#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0 +#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1 + +#define QIB_6120_SerdesStat_OFFS 0x3D0 +#define QIB_6120_SerdesStat_Reserved_LSB 0xC +#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF +#define 
QIB_6120_SerdesStat_BeaconDetA_LSB 0xB +#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1 +#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA +#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1 +#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9 +#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1 +#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8 +#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1 +#define QIB_6120_SerdesStat_RxDetA_LSB 0x7 +#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1 +#define QIB_6120_SerdesStat_RxDetB_LSB 0x6 +#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1 +#define QIB_6120_SerdesStat_RxDetC_LSB 0x5 +#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1 +#define QIB_6120_SerdesStat_RxDetD_LSB 0x4 +#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1 +#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3 +#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1 +#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2 +#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1 +#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1 +#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1 +#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0 +#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1 + +#define QIB_6120_XGXSCfg_OFFS 0x3D8 +#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F +#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1 +#define QIB_6120_XGXSCfg_Reserved_LSB 0x17 +#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF +#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13 +#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF +#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9 +#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF +#define QIB_6120_XGXSCfg_port_addr_LSB 0x4 +#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F +#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3 +#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1 +#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2 +#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1 +#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1 +#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1 +#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0 +#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1 + +#define QIB_6120_LBIntCnt_OFFS 0x12000 + +#define QIB_6120_LBFlowStallCnt_OFFS 0x12008 + +#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018 + +#define QIB_6120_TxDataPktCnt_OFFS 0x12020 + +#define QIB_6120_TxFlowPktCnt_OFFS 0x12028 + +#define QIB_6120_TxDwordCnt_OFFS 0x12030 + +#define QIB_6120_TxLenErrCnt_OFFS 0x12038 + +#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040 + +#define QIB_6120_TxUnderrunCnt_OFFS 0x12048 + +#define QIB_6120_TxFlowStallCnt_OFFS 0x12050 + +#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058 + +#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060 + +#define QIB_6120_RxDataPktCnt_OFFS 0x12068 + +#define QIB_6120_RxFlowPktCnt_OFFS 0x12070 + +#define QIB_6120_RxDwordCnt_OFFS 0x12078 + +#define QIB_6120_RxLenErrCnt_OFFS 0x12080 + +#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088 + +#define QIB_6120_RxICRCErrCnt_OFFS 0x12090 + +#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098 + +#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0 + +#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8 + +#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0 + +#define QIB_6120_RxEBPCnt_OFFS 0x120B8 + +#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0 + +#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8 + +#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0 + +#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8 + +#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0 + +#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8 + +#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140 + +#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148 + +#define 
QIB_6120_IBLinkDownedCnt_OFFS 0x12150 + +#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158 + +#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170 + +#define QIB_6120_RcvEgrArray0_OFFS 0x14000 + +#define QIB_6120_RcvTIDArray0_OFFS 0x54000 + +#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000 + +#define QIB_6120_SendPIOpbcCache_OFFS 0x64800 + +#define QIB_6120_RcvBuf1_OFFS 0x72000 + +#define QIB_6120_RcvBuf2_OFFS 0x75000 + +#define QIB_6120_RcvFlags_OFFS 0x77000 + +#define QIB_6120_RcvLookupBuf1_OFFS 0x79000 + +#define QIB_6120_RcvDMABuf_OFFS 0x7B000 + +#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000 + +#define QIB_6120_PCIERcvBuf_OFFS 0x80000 + +#define QIB_6120_PCIERetryBuf_OFFS 0x82000 + +#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000 + +#define QIB_6120_PIOBuf0_MA_OFFS 0x100000 diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h new file mode 100644 index 000000000000..ea0bfd896f92 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_7220.h @@ -0,0 +1,156 @@ +#ifndef _QIB_7220_H +#define _QIB_7220_H +/* + * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* grab register-defs auto-generated by HW */ +#include "qib_7220_regs.h" + +/* The number of eager receive TIDs for context zero. */ +#define IBA7220_KRCVEGRCNT 2048U + +#define IB_7220_LT_STATE_CFGRCVFCFG 0x09 +#define IB_7220_LT_STATE_CFGWAITRMT 0x0a +#define IB_7220_LT_STATE_TXREVLANES 0x0d +#define IB_7220_LT_STATE_CFGENH 0x10 + +struct qib_chip_specific { + u64 __iomem *cregbase; + u64 *cntrs; + u64 *portcntrs; + spinlock_t sdepb_lock; /* serdes EPB bus */ + spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */ + spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */ + u64 hwerrmask; + u64 errormask; + u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */ + u64 gpio_mask; /* shadow the gpio mask register */ + u64 extctrl; /* shadow the gpio output enable, etc... 
*/ + u32 ncntrs; + u32 nportcntrs; + u32 cntrnamelen; + u32 portcntrnamelen; + u32 numctxts; + u32 rcvegrcnt; + u32 autoneg_tries; + u32 serdes_first_init_done; + u32 sdmabufcnt; + u32 lastbuf_for_pio; + u32 updthresh; /* current AvailUpdThld */ + u32 updthresh_dflt; /* default AvailUpdThld */ + int irq; + u8 presets_needed; + u8 relock_timer_active; + char emsgbuf[128]; + char sdmamsgbuf[192]; + char bitsmsgbuf[64]; + struct timer_list relock_timer; + unsigned int relock_interval; /* in jiffies */ +}; + +struct qib_chippport_specific { + struct qib_pportdata pportdata; + wait_queue_head_t autoneg_wait; + struct delayed_work autoneg_work; + struct timer_list chase_timer; + /* + * these 5 fields are used to establish deltas for IB symbol + * errors and link recovery errors. They can be reported on + * some chips during link negotiation prior to INIT, and with + * DDR when faking DDR negotiations with non-IBTA switches. + * The chip counters are adjusted at driver unload if there is + * a non-zero delta. + */ + u64 ibdeltainprog; + u64 ibsymdelta; + u64 ibsymsnap; + u64 iblnkerrdelta; + u64 iblnkerrsnap; + u64 ibcctrl; /* kr_ibcctrl shadow */ + u64 ibcddrctrl; /* kr_ibcddrctrl shadow */ + u64 chase_end; + u32 last_delay_mult; +}; + +/* + * This header file provides the declarations and common definitions + * for (mostly) manipulation of the SerDes blocks within the IBA7220. + * The functions declared should only be called from within other + * 7220-related files such as qib_iba7220.c or qib_sd7220.c. + */ +int qib_sd7220_presets(struct qib_devdata *dd); +int qib_sd7220_init(struct qib_devdata *dd); +int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, u8 *img, + int len, int offset); +int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, const u8 *img, + int len, int offset); +void qib_sd7220_clr_ibpar(struct qib_devdata *); +/* + * Below used for sdnum parameter, selecting one of the two sections + * used for PCIe, or the single SerDes used for IB, which is the + * only one currently used. + */ +#define IB_7220_SERDES 2 + +int qib_sd7220_ib_load(struct qib_devdata *dd); +int qib_sd7220_ib_vfy(struct qib_devdata *dd); + +static inline u32 qib_read_kreg32(const struct qib_devdata *dd, + const u16 regno) +{ + if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) + return -1; + return readl((u32 __iomem *)&dd->kregbase[regno]); +} + +static inline u64 qib_read_kreg64(const struct qib_devdata *dd, + const u16 regno) +{ + if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) + return -1; + + return readq(&dd->kregbase[regno]); +} + +static inline void qib_write_kreg(const struct qib_devdata *dd, + const u16 regno, u64 value) +{ + if (dd->kregbase) + writeq(value, &dd->kregbase[regno]); +} + +void set_7220_relock_poll(struct qib_devdata *, int); +void shutdown_7220_relock_poll(struct qib_devdata *); +void toggle_7220_rclkrls(struct qib_devdata *); + + +#endif /* _QIB_7220_H */ diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h new file mode 100644 index 000000000000..0da5bb750e52 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_7220_regs.h @@ -0,0 +1,1496 @@ +/* + * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. + * + * + * This software is available to you under a choice of one of two + * licenses.
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +/* This file is mechanically generated from RTL. Any hand-edits will be lost! */ + +#define QIB_7220_Revision_OFFS 0x0 +#define QIB_7220_Revision_R_Simulator_LSB 0x3F +#define QIB_7220_Revision_R_Simulator_RMASK 0x1 +#define QIB_7220_Revision_R_Emulation_LSB 0x3E +#define QIB_7220_Revision_R_Emulation_RMASK 0x1 +#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28 +#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF +#define QIB_7220_Revision_BoardID_LSB 0x20 +#define QIB_7220_Revision_BoardID_RMASK 0xFF +#define QIB_7220_Revision_R_SW_LSB 0x18 +#define QIB_7220_Revision_R_SW_RMASK 0xFF +#define QIB_7220_Revision_R_Arch_LSB 0x10 +#define QIB_7220_Revision_R_Arch_RMASK 0xFF +#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8 +#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF +#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0 +#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF + +#define QIB_7220_Control_OFFS 0x8 +#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7 +#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1 +#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6 +#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1 +#define QIB_7220_Control_Reserved_LSB 0x5 +#define QIB_7220_Control_Reserved_RMASK 0x1 +#define QIB_7220_Control_TxLatency_LSB 0x4 +#define QIB_7220_Control_TxLatency_RMASK 0x1 +#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3 +#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1 +#define QIB_7220_Control_LinkEn_LSB 0x2 +#define QIB_7220_Control_LinkEn_RMASK 0x1 +#define QIB_7220_Control_FreezeMode_LSB 0x1 +#define QIB_7220_Control_FreezeMode_RMASK 0x1 +#define QIB_7220_Control_SyncReset_LSB 0x0 +#define QIB_7220_Control_SyncReset_RMASK 0x1 + +#define QIB_7220_PageAlign_OFFS 0x10 + +#define QIB_7220_PortCnt_OFFS 0x18 + +#define QIB_7220_SendRegBase_OFFS 0x30 + +#define QIB_7220_UserRegBase_OFFS 0x38 + +#define QIB_7220_CntrRegBase_OFFS 0x40 + +#define QIB_7220_Scratch_OFFS 0x48 + +#define QIB_7220_IntMask_OFFS 0x68 +#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F +#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1 +#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E +#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1 +#define QIB_7220_IntMask_Reserved_LSB 0x31 +#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF +#define 
QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30 +#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F +#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E +#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D +#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C +#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B +#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A +#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29 +#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28 +#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27 +#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26 +#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25 +#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24 +#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23 +#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22 +#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21 +#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20 +#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1 +#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F +#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1 +#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E +#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1 +#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D +#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1 +#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C +#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1 +#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B +#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1 +#define QIB_7220_IntMask_JIntMask_LSB 0x1A +#define QIB_7220_IntMask_JIntMask_RMASK 0x1 +#define QIB_7220_IntMask_Reserved1_LSB 0x11 +#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF +#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10 +#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF +#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE +#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD +#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC +#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB +#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA +#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9 +#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8 +#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7 +#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6 +#define 
QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5 +#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4 +#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3 +#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2 +#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1 +#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1 +#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0 +#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1 + +#define QIB_7220_IntStatus_OFFS 0x70 +#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F +#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1 +#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E +#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1 +#define QIB_7220_IntStatus_Reserved_LSB 0x31 +#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF +#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30 +#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F +#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E +#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D +#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C +#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B +#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A +#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29 +#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28 +#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27 +#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26 +#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25 +#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24 +#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23 +#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22 +#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21 +#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1 +#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20 +#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1 +#define QIB_7220_IntStatus_Error_LSB 0x1F +#define QIB_7220_IntStatus_Error_RMASK 0x1 +#define QIB_7220_IntStatus_PioSent_LSB 0x1E +#define QIB_7220_IntStatus_PioSent_RMASK 0x1 +#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D +#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1 +#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C +#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1 +#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B +#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1 +#define QIB_7220_IntStatus_JInt_LSB 0x1A +#define QIB_7220_IntStatus_JInt_RMASK 0x1 +#define QIB_7220_IntStatus_Reserved1_LSB 0x11 +#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF +#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10 +#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF +#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE +#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail13_LSB 
0xD +#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC +#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB +#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA +#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9 +#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8 +#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7 +#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6 +#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5 +#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4 +#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3 +#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2 +#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1 +#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1 +#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0 +#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1 + +#define QIB_7220_IntClear_OFFS 0x78 +#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F +#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1 +#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E +#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1 +#define QIB_7220_IntClear_Reserved_LSB 0x31 +#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF +#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30 +#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F +#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E +#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D +#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C +#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B +#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A +#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29 +#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28 +#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27 +#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26 +#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25 +#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24 +#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23 +#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22 +#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21 +#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20 +#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1 +#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F +#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1 +#define 
QIB_7220_IntClear_PioSetIntClear_LSB 0x1E +#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1 +#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D +#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1 +#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C +#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1 +#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B +#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1 +#define QIB_7220_IntClear_JIntClear_LSB 0x1A +#define QIB_7220_IntClear_JIntClear_RMASK 0x1 +#define QIB_7220_IntClear_Reserved1_LSB 0x11 +#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF +#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10 +#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF +#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE +#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD +#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC +#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB +#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA +#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9 +#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8 +#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7 +#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6 +#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5 +#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4 +#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3 +#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2 +#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1 +#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1 +#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0 +#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1 + +#define QIB_7220_ErrMask_OFFS 0x80 +#define QIB_7220_ErrMask_Reserved_LSB 0x36 +#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF +#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35 +#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34 +#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33 +#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32 +#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1 +#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31 +#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30 +#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F +#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E +#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D +#define 
QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C +#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B +#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A +#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29 +#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28 +#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27 +#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26 +#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25 +#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24 +#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23 +#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22 +#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21 +#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20 +#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F +#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E +#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D +#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C +#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B +#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_Reserved1_LSB 0x12 +#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF +#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11 +#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10 +#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF +#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE +#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD +#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC +#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB +#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA +#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9 +#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8 +#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7 +#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6 +#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1 +#define 
QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5 +#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4 +#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3 +#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2 +#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1 +#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1 +#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0 +#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1 + +#define QIB_7220_ErrStatus_OFFS 0x88 +#define QIB_7220_ErrStatus_Reserved_LSB 0x36 +#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF +#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35 +#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34 +#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1 +#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33 +#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1 +#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32 +#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1 +#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31 +#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1 +#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30 +#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F +#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E +#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D +#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C +#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B +#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A +#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29 +#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28 +#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27 +#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26 +#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25 +#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24 +#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23 +#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22 +#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21 +#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20 +#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F +#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E +#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D +#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C 
+#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1 +#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B +#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1 +#define QIB_7220_ErrStatus_Reserved1_LSB 0x12 +#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF +#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11 +#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10 +#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF +#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE +#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD +#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC +#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB +#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA +#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9 +#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8 +#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7 +#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6 +#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5 +#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4 +#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3 +#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2 +#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1 +#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1 +#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0 +#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1 + +#define QIB_7220_ErrClear_OFFS 0x90 +#define QIB_7220_ErrClear_Reserved_LSB 0x36 +#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF +#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35 +#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34 +#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33 +#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32 +#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1 +#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31 +#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30 +#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F +#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E +#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D +#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C +#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B +#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1 +#define 
QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A +#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29 +#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28 +#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27 +#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26 +#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25 +#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24 +#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23 +#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22 +#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21 +#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20 +#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F +#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E +#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D +#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C +#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B +#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_Reserved1_LSB 0x12 +#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF +#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11 +#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10 +#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF +#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE +#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD +#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC +#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB +#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA +#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9 +#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8 +#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7 +#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6 +#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5 +#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4 +#define 
QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3 +#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2 +#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1 +#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1 +#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0 +#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1 + +#define QIB_7220_HwErrMask_OFFS 0x98 +#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F +#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1 +#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E +#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1 +#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D +#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1 +#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C +#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1 +#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B +#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1 +#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A +#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1 +#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39 +#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1 +#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38 +#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1 +#define QIB_7220_HwErrMask_Reserved_LSB 0x37 +#define QIB_7220_HwErrMask_Reserved_RMASK 0x1 +#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36 +#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1 +#define QIB_7220_HwErrMask_Reserved1_LSB 0x33 +#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7 +#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C +#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F +#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28 +#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF +#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27 +#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1 +#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26 +#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1 +#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25 +#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1 +#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24 +#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1 +#define QIB_7220_HwErrMask_Reserved2_LSB 0x22 +#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3 +#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F +#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7 +#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E +#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1 +#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D +#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1 +#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C +#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1 +#define QIB_7220_HwErrMask_Reserved3_LSB 0x8 +#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF +#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0 +#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF + +#define QIB_7220_HwErrStatus_OFFS 0xA0 +#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F +#define 
QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1 +#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E +#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1 +#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D +#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1 +#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C +#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1 +#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B +#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1 +#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A +#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1 +#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39 +#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1 +#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38 +#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1 +#define QIB_7220_HwErrStatus_Reserved_LSB 0x37 +#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1 +#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36 +#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1 +#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33 +#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7 +#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C +#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F +#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28 +#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF +#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27 +#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1 +#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26 +#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1 +#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25 +#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1 +#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24 +#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1 +#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22 +#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3 +#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F +#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7 +#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E +#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1 +#define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D +#define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1 +#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C +#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1 +#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8 +#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF +#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0 +#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF + +#define QIB_7220_HwErrClear_OFFS 0xA8 +#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F +#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1 +#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E +#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1 +#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D +#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1 +#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C +#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1 +#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B +#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1 +#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A +#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 
0x1 +#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39 +#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1 +#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38 +#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1 +#define QIB_7220_HwErrClear_Reserved_LSB 0x37 +#define QIB_7220_HwErrClear_Reserved_RMASK 0x1 +#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36 +#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1 +#define QIB_7220_HwErrClear_Reserved1_LSB 0x33 +#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7 +#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C +#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F +#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28 +#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF +#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27 +#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1 +#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26 +#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1 +#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25 +#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1 +#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24 +#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1 +#define QIB_7220_HwErrClear_Reserved2_LSB 0x22 +#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3 +#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F +#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7 +#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E +#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1 +#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D +#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1 +#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C +#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1 +#define QIB_7220_HwErrClear_Reserved3_LSB 0x8 +#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF +#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0 +#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF + +#define QIB_7220_HwDiagCtrl_OFFS 0xB0 +#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F +#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1 +#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E +#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1 +#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D +#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1 +#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C +#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1 +#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33 +#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF +#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C +#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F +#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28 +#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF +#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27 +#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1 +#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26 +#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1 +#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25 +#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1 +#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24 +#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1 +#define 
QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23 +#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1 +#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F +#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF +#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8 +#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF +#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0 +#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF + +#define QIB_7220_REG_0000B8_OFFS 0xB8 + +#define QIB_7220_IBCStatus_OFFS 0xC0 +#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F +#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1 +#define QIB_7220_IBCStatus_TxReady_LSB 0x1E +#define QIB_7220_IBCStatus_TxReady_RMASK 0x1 +#define QIB_7220_IBCStatus_Reserved_LSB 0xE +#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF +#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD +#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1 +#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC +#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1 +#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB +#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1 +#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA +#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1 +#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9 +#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1 +#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8 +#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1 +#define QIB_7220_IBCStatus_LinkState_LSB 0x5 +#define QIB_7220_IBCStatus_LinkState_RMASK 0x7 +#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0 +#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F + +#define QIB_7220_IBCCtrl_OFFS 0xC8 +#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F +#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1 +#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E +#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1 +#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B +#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF +#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28 +#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7 +#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24 +#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF +#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20 +#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF +#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15 +#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF +#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13 +#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3 +#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10 +#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7 +#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8 +#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF +#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0 +#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF + +#define QIB_7220_EXTStatus_OFFS 0xD0 +#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30 +#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF +#define QIB_7220_EXTStatus_Reserved_LSB 0x20 +#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF +#define QIB_7220_EXTStatus_Reserved1_LSB 0x10 +#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF +#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF +#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1 +#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE +#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1 +#define QIB_7220_EXTStatus_Reserved2_LSB 0x0 +#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF + +#define QIB_7220_EXTCtrl_OFFS 0xD8 +#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30 +#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF +#define QIB_7220_EXTCtrl_GPIOInvert_LSB 
0x20 +#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF +#define QIB_7220_EXTCtrl_Reserved_LSB 0x4 +#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF +#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3 +#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1 +#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2 +#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1 +#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1 +#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1 +#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0 +#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1 + +#define QIB_7220_GPIOOut_OFFS 0xE0 + +#define QIB_7220_GPIOMask_OFFS 0xE8 + +#define QIB_7220_GPIOStatus_OFFS 0xF0 + +#define QIB_7220_GPIOClear_OFFS 0xF8 + +#define QIB_7220_RcvCtrl_OFFS 0x100 +#define QIB_7220_RcvCtrl_Reserved_LSB 0x27 +#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF +#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26 +#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1 +#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24 +#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3 +#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23 +#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1 +#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22 +#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1 +#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11 +#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF +#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0 +#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF + +#define QIB_7220_RcvBTHQP_OFFS 0x108 +#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18 +#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF +#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0 +#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF + +#define QIB_7220_RcvHdrSize_OFFS 0x110 + +#define QIB_7220_RcvHdrCnt_OFFS 0x118 + +#define QIB_7220_RcvHdrEntSize_OFFS 0x120 + +#define QIB_7220_RcvTIDBase_OFFS 0x128 + +#define QIB_7220_RcvTIDCnt_OFFS 0x130 + +#define QIB_7220_RcvEgrBase_OFFS 0x138 + +#define QIB_7220_RcvEgrCnt_OFFS 0x140 + +#define QIB_7220_RcvBufBase_OFFS 0x148 + +#define QIB_7220_RcvBufSize_OFFS 0x150 + +#define QIB_7220_RxIntMemBase_OFFS 0x158 + +#define QIB_7220_RxIntMemSize_OFFS 0x160 + +#define QIB_7220_RcvPartitionKey_OFFS 0x168 + +#define QIB_7220_RcvQPMulticastPort_OFFS 0x170 +#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5 +#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF +#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0 +#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F + +#define QIB_7220_RcvPktLEDCnt_OFFS 0x178 +#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20 +#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF +#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0 +#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF + +#define QIB_7220_IBCDDRCtrl_OFFS 0x180 +#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30 +#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF +#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20 +#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF +#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B +#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F +#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A +#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12 +#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF +#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11 +#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10 +#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC +#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF 
+#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB +#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA +#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9 +#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8 +#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7 +#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5 +#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3 +#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4 +#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3 +#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2 +#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1 +#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1 +#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0 +#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1 + +#define QIB_7220_HRTBT_GUID_OFFS 0x188 + +#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0 +#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5 +#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F +#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0 +#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F + +#define QIB_7220_IBCDDRStatus_OFFS 0x1A8 +#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24 +#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1 +#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20 +#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF +#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E +#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3 +#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A +#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF +#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0 +#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF + +#define QIB_7220_JIntReload_OFFS 0x1B0 +#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10 +#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF +#define QIB_7220_JIntReload_J_reload_LSB 0x0 +#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF + +#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8 +#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A +#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF +#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11 +#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF +#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8 +#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF +#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3 +#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F +#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2 +#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1 +#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1 +#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1 +#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0 +#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1 + +#define QIB_7220_SendCtrl_OFFS 0x1C0 +#define QIB_7220_SendCtrl_Disarm_LSB 0x1F +#define QIB_7220_SendCtrl_Disarm_RMASK 0x1 +#define QIB_7220_SendCtrl_Reserved_LSB 0x1D +#define QIB_7220_SendCtrl_Reserved_RMASK 0x3 +#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18 +#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F +#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 
0x10 +#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF +#define QIB_7220_SendCtrl_Reserved1_LSB 0xD +#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7 +#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC +#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1 +#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB +#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1 +#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA +#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1 +#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9 +#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1 +#define QIB_7220_SendCtrl_Reserved2_LSB 0x5 +#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF +#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4 +#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1 +#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3 +#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1 +#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2 +#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1 +#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1 +#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1 +#define QIB_7220_SendCtrl_Abort_LSB 0x0 +#define QIB_7220_SendCtrl_Abort_RMASK 0x1 + +#define QIB_7220_SendBufBase_OFFS 0x1C8 +#define QIB_7220_SendBufBase_Reserved_LSB 0x35 +#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF +#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20 +#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF +#define QIB_7220_SendBufBase_Reserved1_LSB 0x15 +#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF +#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0 +#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF + +#define QIB_7220_SendBufSize_OFFS 0x1D0 +#define QIB_7220_SendBufSize_Reserved_LSB 0x2D +#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF +#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20 +#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF +#define QIB_7220_SendBufSize_Reserved1_LSB 0xC +#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF +#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0 +#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF + +#define QIB_7220_SendBufCnt_OFFS 0x1D8 +#define QIB_7220_SendBufCnt_Reserved_LSB 0x24 +#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF +#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20 +#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF +#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9 +#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF +#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0 +#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF + +#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0 +#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6 +#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF +#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0 +#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F + +#define QIB_7220_TxIntMemBase_OFFS 0x1E8 + +#define QIB_7220_TxIntMemSize_OFFS 0x1F0 + +#define QIB_7220_SendDmaBase_OFFS 0x1F8 +#define QIB_7220_SendDmaBase_Reserved_LSB 0x30 +#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF +#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0 +#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF + +#define QIB_7220_SendDmaLenGen_OFFS 0x200 +#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13 +#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF +#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10 +#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12 +#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7 +#define QIB_7220_SendDmaLenGen_Length_LSB 0x0 +#define 
QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF + +#define QIB_7220_SendDmaTail_OFFS 0x208 +#define QIB_7220_SendDmaTail_Reserved_LSB 0x10 +#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF +#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0 +#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF + +#define QIB_7220_SendDmaHead_OFFS 0x210 +#define QIB_7220_SendDmaHead_Reserved_LSB 0x30 +#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF +#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20 +#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF +#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10 +#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF +#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0 +#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF + +#define QIB_7220_SendDmaHeadAddr_OFFS 0x218 +#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30 +#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF +#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0 +#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF + +#define QIB_7220_SendDmaBufMask0_OFFS 0x220 +#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0 +#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0 + +#define QIB_7220_SendDmaStatus_OFFS 0x238 +#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F +#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1 +#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E +#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1 +#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D +#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1 +#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F +#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF +#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28 +#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F +#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20 +#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF +#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F +#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1 +#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E +#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1 +#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D +#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1 +#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C +#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1 +#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B +#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1 +#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A +#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1 +#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19 +#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1 +#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18 +#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1 +#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10 +#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF +#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0 +#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF + +#define QIB_7220_SendBufErr0_OFFS 0x240 +#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0 +#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0 + +#define QIB_7220_RcvHdrAddr0_OFFS 0x270 +#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2 +#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF +#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0 +#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3 + +#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300 +#define 
QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2 +#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF +#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0 +#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3 + +#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0 +#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8 +#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1 +#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1 +#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F +#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0 +#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1 + +#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8 +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1 +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1 +#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D +#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1 +#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C +#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1 +#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B +#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1 +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19 +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3 +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18 +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1 +#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17 +#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1 +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8 +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0 +#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF + +#define QIB_7220_XGXSCfg_OFFS 0x3D8 +#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F +#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1 +#define QIB_7220_XGXSCfg_Reserved_LSB 0x13 +#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF +#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9 +#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF +#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3 +#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F +#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2 +#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1 +#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1 +#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1 +#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0 +#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1 + +#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0 +#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D +#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF +#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C +#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1 +#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A +#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3 +#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28 +#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3 +#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25 +#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7 +#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24 +#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1 +#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23 +#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1 +#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22 +#define 
QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1 +#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21 +#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1 +#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20 +#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1 +#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12 +#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF +#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD +#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F +#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8 +#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F +#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1 +#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F +#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0 +#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1 + +#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400 +#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8 +#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1 +#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3 +#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F +#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1 +#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3 +#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0 +#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1 + +#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408 +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1 +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1 +#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D +#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1 +#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C +#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1 +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19 +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7 +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18 +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1 +#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17 +#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1 +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8 +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0 +#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF + +#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500 +#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4 +#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F +#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0 +#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF + +#define QIB_7220_LBIntCnt_OFFS 0x13000 + +#define QIB_7220_LBFlowStallCnt_OFFS 0x13008 + +#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010 + +#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018 + +#define QIB_7220_TxDataPktCnt_OFFS 0x13020 + +#define QIB_7220_TxFlowPktCnt_OFFS 0x13028 + +#define QIB_7220_TxDwordCnt_OFFS 0x13030 + +#define QIB_7220_TxLenErrCnt_OFFS 0x13038 + +#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040 + +#define QIB_7220_TxUnderrunCnt_OFFS 0x13048 + +#define QIB_7220_TxFlowStallCnt_OFFS 0x13050 + +#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058 + +#define 
QIB_7220_RxDroppedPktCnt_OFFS 0x13060 + +#define QIB_7220_RxDataPktCnt_OFFS 0x13068 + +#define QIB_7220_RxFlowPktCnt_OFFS 0x13070 + +#define QIB_7220_RxDwordCnt_OFFS 0x13078 + +#define QIB_7220_RxLenErrCnt_OFFS 0x13080 + +#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088 + +#define QIB_7220_RxICRCErrCnt_OFFS 0x13090 + +#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098 + +#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0 + +#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8 + +#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0 + +#define QIB_7220_RxEBPCnt_OFFS 0x130B8 + +#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0 + +#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8 + +#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0 + +#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8 + +#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0 + +#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8 + +#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170 + +#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178 + +#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180 + +#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188 + +#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190 + +#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198 + +#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0 + +#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8 + +#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0 + +#define QIB_7220_RxVlErrCnt_OFFS 0x131B8 + +#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0 + +#define QIB_7220_CNT_0131C8_OFFS 0x131C8 + +#define QIB_7220_PSStat_OFFS 0x13200 + +#define QIB_7220_PSStart_OFFS 0x13208 + +#define QIB_7220_PSInterval_OFFS 0x13210 + +#define QIB_7220_PSRcvDataCount_OFFS 0x13218 + +#define QIB_7220_PSRcvPktsCount_OFFS 0x13220 + +#define QIB_7220_PSXmitDataCount_OFFS 0x13228 + +#define QIB_7220_PSXmitPktsCount_OFFS 0x13230 + +#define QIB_7220_PSXmitWaitCount_OFFS 0x13238 + +#define QIB_7220_CNT_013240_OFFS 0x13240 + +#define QIB_7220_RcvEgrArray_OFFS 0x14000 + +#define QIB_7220_MEM_038000_OFFS 0x38000 + +#define QIB_7220_RcvTIDArray0_OFFS 0x53000 + +#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000 + +#define QIB_7220_MEM_064480_OFFS 0x64480 + +#define QIB_7220_SendPIOpbcCache_OFFS 0x64800 + +#define QIB_7220_MEM_064C80_OFFS 0x64C80 + +#define QIB_7220_PreLaunchFIFO_OFFS 0x65000 + +#define QIB_7220_MEM_065080_OFFS 0x65080 + +#define QIB_7220_ScoreBoard_OFFS 0x65400 + +#define QIB_7220_MEM_065440_OFFS 0x65440 + +#define QIB_7220_DescriptorFIFO_OFFS 0x65800 + +#define QIB_7220_MEM_065880_OFFS 0x65880 + +#define QIB_7220_RcvBuf1_OFFS 0x72000 + +#define QIB_7220_MEM_074800_OFFS 0x74800 + +#define QIB_7220_RcvBuf2_OFFS 0x75000 + +#define QIB_7220_MEM_076400_OFFS 0x76400 + +#define QIB_7220_RcvFlags_OFFS 0x77000 + +#define QIB_7220_MEM_078400_OFFS 0x78400 + +#define QIB_7220_RcvLookupBuf1_OFFS 0x79000 + +#define QIB_7220_MEM_07A400_OFFS 0x7A400 + +#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000 + +#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800 + +#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000 + +#define QIB_7220_MEM_07D400_OFFS 0x7D400 + +#define QIB_7220_PCIERcvBuf_OFFS 0x80000 + +#define QIB_7220_PCIERetryBuf_OFFS 0x84000 + +#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000 + +#define QIB_7220_PCIECplBuf_OFFS 0x90000 + +#define QIB_7220_IBSerDesMappTable_OFFS 0x94000 + +#define QIB_7220_MEM_095000_OFFS 0x95000 + +#define QIB_7220_SendBuf0_MA_OFFS 0x100000 + +#define QIB_7220_MEM_1A0000_OFFS 0x1A0000 diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h new file mode 100644 index 000000000000..a97440ba924c --- /dev/null +++ 
b/drivers/infiniband/hw/qib/qib_7322_regs.h @@ -0,0 +1,3163 @@ +/* + * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* This file is mechanically generated from RTL. Any hand-edits will be lost! */ + +#define QIB_7322_Revision_OFFS 0x0 +#define QIB_7322_Revision_DEF 0x0000000002010601 +#define QIB_7322_Revision_R_Simulator_LSB 0x3F +#define QIB_7322_Revision_R_Simulator_MSB 0x3F +#define QIB_7322_Revision_R_Simulator_RMASK 0x1 +#define QIB_7322_Revision_R_Emulation_LSB 0x3E +#define QIB_7322_Revision_R_Emulation_MSB 0x3E +#define QIB_7322_Revision_R_Emulation_RMASK 0x1 +#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28 +#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D +#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF +#define QIB_7322_Revision_BoardID_LSB 0x20 +#define QIB_7322_Revision_BoardID_MSB 0x27 +#define QIB_7322_Revision_BoardID_RMASK 0xFF +#define QIB_7322_Revision_R_SW_LSB 0x18 +#define QIB_7322_Revision_R_SW_MSB 0x1F +#define QIB_7322_Revision_R_SW_RMASK 0xFF +#define QIB_7322_Revision_R_Arch_LSB 0x10 +#define QIB_7322_Revision_R_Arch_MSB 0x17 +#define QIB_7322_Revision_R_Arch_RMASK 0xFF +#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8 +#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF +#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF +#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0 +#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7 +#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF + +#define QIB_7322_Control_OFFS 0x8 +#define QIB_7322_Control_DEF 0x0000000000000000 +#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6 +#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6 +#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1 +#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5 +#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5 +#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1 +#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4 +#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4 +#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1 +#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3 +#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3 +#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1 +#define 
QIB_7322_Control_FreezeMode_LSB 0x1 +#define QIB_7322_Control_FreezeMode_MSB 0x1 +#define QIB_7322_Control_FreezeMode_RMASK 0x1 +#define QIB_7322_Control_SyncReset_LSB 0x0 +#define QIB_7322_Control_SyncReset_MSB 0x0 +#define QIB_7322_Control_SyncReset_RMASK 0x1 + +#define QIB_7322_PageAlign_OFFS 0x10 +#define QIB_7322_PageAlign_DEF 0x0000000000001000 + +#define QIB_7322_ContextCnt_OFFS 0x18 +#define QIB_7322_ContextCnt_DEF 0x0000000000000012 + +#define QIB_7322_Scratch_OFFS 0x20 +#define QIB_7322_Scratch_DEF 0x0000000000000000 + +#define QIB_7322_CntrRegBase_OFFS 0x28 +#define QIB_7322_CntrRegBase_DEF 0x0000000000011000 + +#define QIB_7322_SendRegBase_OFFS 0x30 +#define QIB_7322_SendRegBase_DEF 0x0000000000003000 + +#define QIB_7322_UserRegBase_OFFS 0x38 +#define QIB_7322_UserRegBase_DEF 0x0000000000200000 + +#define QIB_7322_IntMask_OFFS 0x68 +#define QIB_7322_IntMask_DEF 0x0000000000000000 +#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F +#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F +#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1 +#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E +#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E +#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1 +#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D +#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D +#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1 +#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C +#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C +#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1 +#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B +#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B +#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1 +#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A +#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A +#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1 +#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39 +#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39 +#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1 +#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38 +#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38 +#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31 +#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31 +#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30 +#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30 +#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F +#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F +#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E +#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E +#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D +#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D +#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C +#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C +#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B +#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B +#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A +#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A +#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29 +#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29 +#define 
QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28 +#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28 +#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27 +#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27 +#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26 +#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26 +#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25 +#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25 +#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24 +#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24 +#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23 +#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23 +#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22 +#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22 +#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21 +#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21 +#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20 +#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20 +#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1 +#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F +#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F +#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1 +#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E +#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E +#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1 +#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D +#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D +#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1 +#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C +#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C +#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1 +#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19 +#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19 +#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1 +#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18 +#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18 +#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1 +#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17 +#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17 +#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11 +#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11 +#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10 +#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10 +#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF +#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF +#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE +#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE +#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD +#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD +#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC +#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC +#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB +#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB +#define 
QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA +#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA +#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9 +#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9 +#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8 +#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8 +#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7 +#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7 +#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6 +#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6 +#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5 +#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5 +#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4 +#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4 +#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3 +#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3 +#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2 +#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2 +#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1 +#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1 +#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1 +#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0 +#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0 +#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1 + +#define QIB_7322_IntStatus_OFFS 0x70 +#define QIB_7322_IntStatus_DEF 0x0000000000000000 +#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F +#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F +#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1 +#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E +#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E +#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1 +#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D +#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D +#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1 +#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C +#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C +#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1 +#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B +#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B +#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1 +#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A +#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A +#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1 +#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39 +#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39 +#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1 +#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38 +#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38 +#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31 +#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31 +#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30 +#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30 +#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F +#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F +#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1 +#define 
QIB_7322_IntStatus_RcvUrg14_LSB 0x2E +#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E +#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D +#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D +#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C +#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C +#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B +#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B +#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A +#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A +#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29 +#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29 +#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28 +#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28 +#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27 +#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27 +#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26 +#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26 +#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25 +#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25 +#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24 +#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24 +#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23 +#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23 +#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22 +#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22 +#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21 +#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21 +#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1 +#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20 +#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20 +#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1 +#define QIB_7322_IntStatus_Err_1_LSB 0x1F +#define QIB_7322_IntStatus_Err_1_MSB 0x1F +#define QIB_7322_IntStatus_Err_1_RMASK 0x1 +#define QIB_7322_IntStatus_Err_0_LSB 0x1E +#define QIB_7322_IntStatus_Err_0_MSB 0x1E +#define QIB_7322_IntStatus_Err_0_RMASK 0x1 +#define QIB_7322_IntStatus_Err_LSB 0x1D +#define QIB_7322_IntStatus_Err_MSB 0x1D +#define QIB_7322_IntStatus_Err_RMASK 0x1 +#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C +#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C +#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1 +#define QIB_7322_IntStatus_SendDone_1_LSB 0x19 +#define QIB_7322_IntStatus_SendDone_1_MSB 0x19 +#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1 +#define QIB_7322_IntStatus_SendDone_0_LSB 0x18 +#define QIB_7322_IntStatus_SendDone_0_MSB 0x18 +#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1 +#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17 +#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17 +#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11 +#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11 +#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10 +#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10 +#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF +#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF +#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE +#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE +#define 
QIB_7322_IntStatus_RcvAvail14_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD +#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD +#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC +#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC +#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB +#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB +#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA +#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA +#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9 +#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9 +#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8 +#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8 +#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7 +#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7 +#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6 +#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6 +#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5 +#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5 +#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4 +#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4 +#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3 +#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3 +#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2 +#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2 +#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1 +#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1 +#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1 +#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0 +#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0 +#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1 + +#define QIB_7322_IntClear_OFFS 0x78 +#define QIB_7322_IntClear_DEF 0x0000000000000000 +#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F +#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F +#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1 +#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E +#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E +#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1 +#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D +#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D +#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1 +#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C +#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C +#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1 +#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B +#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B +#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1 +#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A +#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A +#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1 +#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39 +#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39 +#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1 +#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38 +#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38 +#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31 +#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 
0x31 +#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30 +#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30 +#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F +#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F +#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E +#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E +#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D +#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D +#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C +#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C +#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B +#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B +#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A +#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A +#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29 +#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29 +#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28 +#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28 +#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27 +#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27 +#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26 +#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26 +#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25 +#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25 +#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24 +#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24 +#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23 +#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23 +#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22 +#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22 +#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21 +#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21 +#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20 +#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20 +#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1 +#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F +#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F +#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1 +#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E +#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E +#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1 +#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D +#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D +#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1 +#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C +#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C +#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1 +#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19 +#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19 +#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1 +#define 
QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18 +#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18 +#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1 +#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17 +#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17 +#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11 +#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11 +#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10 +#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10 +#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF +#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF +#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE +#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE +#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD +#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD +#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC +#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC +#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB +#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB +#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA +#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA +#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9 +#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9 +#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8 +#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8 +#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7 +#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7 +#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6 +#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6 +#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5 +#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5 +#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4 +#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4 +#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3 +#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3 +#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2 +#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2 +#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1 +#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1 +#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1 +#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0 +#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0 +#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1 + +#define QIB_7322_ErrMask_OFFS 0x80 +#define QIB_7322_ErrMask_DEF 0x0000000000000000 +#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F +#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F +#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1 +#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E 
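+/*
+ * Illustrative aside, assumed rather than taken from the generated
+ * tables: each register below is described by an _OFFS (byte offset
+ * from the kernel register base) and _DEF (power-on default), and each
+ * field by an _LSB/_MSB/_RMASK triple, where _RMASK is the field mask
+ * already right-justified.  A minimal sketch of field extraction on a
+ * value already read from the chip; the MMIO read itself is left to
+ * the driver's accessor and is not shown here.
+ */
+static inline u64 qib_7322_get_field(u64 regval, unsigned int lsb, u64 rmask)
+{
+	/* shift the field down to bit 0, then mask off the field width */
+	return (regval >> lsb) & rmask;
+}
+/*
+ * e.g. hwerr = qib_7322_get_field(errstatus,
+ *				   QIB_7322_ErrStatus_HardwareErr_LSB,
+ *				   QIB_7322_ErrStatus_HardwareErr_RMASK);
+ */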
+#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E +#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D +#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D +#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38 +#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38 +#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37 +#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37 +#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35 +#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35 +#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1 +#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34 +#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34 +#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24 +#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24 +#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23 +#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23 +#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B +#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B +#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A +#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A +#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19 +#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19 +#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD +#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD +#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC +#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC +#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1 + +#define QIB_7322_ErrStatus_OFFS 0x88 +#define QIB_7322_ErrStatus_DEF 0x0000000000000000 +#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F +#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F +#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1 +#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E +#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E +#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1 +#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D +#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D +#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1 +#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38 +#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38 +#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1 +#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37 +#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37 +#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1 +#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35 +#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35 +#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1 +#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34 +#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34 +#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1 +#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24 +#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24 +#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1 +#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23 +#define 
QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23 +#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1 +#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B +#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B +#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1 +#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A +#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A +#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1 +#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19 +#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19 +#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1 +#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD +#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD +#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1 +#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC +#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC +#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1 + +#define QIB_7322_ErrClear_OFFS 0x90 +#define QIB_7322_ErrClear_DEF 0x0000000000000000 +#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F +#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F +#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1 +#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E +#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E +#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D +#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D +#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38 +#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38 +#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37 +#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37 +#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35 +#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35 +#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34 +#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34 +#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24 +#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24 +#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1 +#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23 +#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23 +#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B +#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B +#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A +#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A +#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19 +#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19 +#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD +#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD +#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1 +#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC +#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC +#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1 + +#define QIB_7322_HwErrMask_OFFS 0x98 +#define QIB_7322_HwErrMask_DEF 0x0000000000000000 +#define 
QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F +#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F +#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1 +#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E +#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E +#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1 +#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37 +#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37 +#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1 +#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36 +#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36 +#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1 +#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35 +#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35 +#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1 +#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30 +#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30 +#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1 +#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22 +#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22 +#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1 +#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F +#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21 +#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7 +#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E +#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E +#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1 +#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D +#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D +#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1 +#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C +#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C +#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1 +#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B +#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B +#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1 +#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF +#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF +#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1 +#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE +#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE +#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1 +#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD +#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD +#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1 +#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC +#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC +#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1 +#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB +#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB +#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1 + +#define QIB_7322_HwErrStatus_OFFS 0xA0 +#define QIB_7322_HwErrStatus_DEF 0x0000000000000000 +#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F +#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F +#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1 +#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E +#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E +#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1 +#define 
QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37 +#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37 +#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1 +#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36 +#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36 +#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1 +#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35 +#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35 +#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1 +#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30 +#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30 +#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1 +#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22 +#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22 +#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1 +#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F +#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21 +#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7 +#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E +#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E +#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1 +#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D +#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D +#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1 +#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C +#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C +#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1 +#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B +#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B +#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1 +#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF +#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF +#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1 +#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE +#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE +#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1 +#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD +#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD +#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1 +#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC +#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC +#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1 +#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB +#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB +#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1 + +#define QIB_7322_HwErrClear_OFFS 0xA8 +#define QIB_7322_HwErrClear_DEF 0x0000000000000000 +#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F +#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F +#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1 +#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E +#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E +#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1 +#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37 +#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37 +#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1 +#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36 +#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36 +#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1 +#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35 +#define 
QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35 +#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1 +#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30 +#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30 +#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1 +#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22 +#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22 +#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1 +#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F +#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21 +#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7 +#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E +#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E +#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1 +#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D +#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D +#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1 +#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C +#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C +#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1 +#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B +#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B +#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1 +#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF +#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF +#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1 +#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE +#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE +#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1 +#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD +#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD +#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1 +#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC +#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC +#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1 +#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB +#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB +#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1 + +#define QIB_7322_HwDiagCtrl_OFFS 0xB0 +#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000 +#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F +#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F +#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1 +#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D +#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D +#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1 +#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C +#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C +#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1 +#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F +#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22 +#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF +#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF +#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF +#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1 +#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE +#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE +#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1 +#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD +#define 
QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD +#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1 +#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC +#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC +#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1 + +#define QIB_7322_EXTStatus_OFFS 0xC0 +#define QIB_7322_EXTStatus_DEF 0x000000000000X000 +#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30 +#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F +#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF +#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF +#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF +#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1 +#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE +#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE +#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1 + +#define QIB_7322_EXTCtrl_OFFS 0xC8 +#define QIB_7322_EXTCtrl_DEF 0x0000000000000000 +#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30 +#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F +#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF +#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20 +#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F +#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF +#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3 +#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3 +#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1 +#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2 +#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2 +#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1 +#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1 +#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1 +#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1 +#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0 +#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0 +#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1 + +#define QIB_7322_GPIOOut_OFFS 0xE0 +#define QIB_7322_GPIOOut_DEF 0x0000000000000000 + +#define QIB_7322_GPIOMask_OFFS 0xE8 +#define QIB_7322_GPIOMask_DEF 0x0000000000000000 + +#define QIB_7322_GPIOStatus_OFFS 0xF0 +#define QIB_7322_GPIOStatus_DEF 0x0000000000000000 + +#define QIB_7322_GPIOClear_OFFS 0xF8 +#define QIB_7322_GPIOClear_DEF 0x0000000000000000 + +#define QIB_7322_RcvCtrl_OFFS 0x100 +#define QIB_7322_RcvCtrl_DEF 0x0000000000000000 +#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30 +#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F +#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF +#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F +#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F +#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1 +#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C +#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E +#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7 +#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B +#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B +#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1 +#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29 +#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A +#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3 +#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14 +#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25 +#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF +#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0 +#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11 +#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF + +#define QIB_7322_RcvHdrSize_OFFS 0x110 +#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000 + +#define QIB_7322_RcvHdrCnt_OFFS 0x118 +#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000 + +#define QIB_7322_RcvHdrEntSize_OFFS 0x120 +#define QIB_7322_RcvHdrEntSize_DEF 
0x0000000000000000 + +#define QIB_7322_RcvTIDBase_OFFS 0x128 +#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000 + +#define QIB_7322_RcvTIDCnt_OFFS 0x130 +#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200 + +#define QIB_7322_RcvEgrBase_OFFS 0x138 +#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000 + +#define QIB_7322_RcvEgrCnt_OFFS 0x140 +#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000 + +#define QIB_7322_RcvBufBase_OFFS 0x148 +#define QIB_7322_RcvBufBase_DEF 0x0000000000080000 + +#define QIB_7322_RcvBufSize_OFFS 0x150 +#define QIB_7322_RcvBufSize_DEF 0x0000000000005000 + +#define QIB_7322_RxIntMemBase_OFFS 0x158 +#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000 + +#define QIB_7322_RxIntMemSize_OFFS 0x160 +#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000 + +#define QIB_7322_feature_mask_OFFS 0x190 +#define QIB_7322_feature_mask_DEF 0x00000000000000XX + +#define QIB_7322_active_feature_mask_OFFS 0x198 +#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX +#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5 +#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5 +#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1 +#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4 +#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4 +#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1 +#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3 +#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3 +#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1 +#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2 +#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2 +#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1 +#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1 +#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1 +#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1 +#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0 +#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0 +#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1 + +#define QIB_7322_SendCtrl_OFFS 0x1C0 +#define QIB_7322_SendCtrl_DEF 0x0000000000000000 +#define QIB_7322_SendCtrl_Disarm_LSB 0x1F +#define QIB_7322_SendCtrl_Disarm_MSB 0x1F +#define QIB_7322_SendCtrl_Disarm_RMASK 0x1 +#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D +#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D +#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1 +#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18 +#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C +#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F +#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10 +#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17 +#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF +#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4 +#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4 +#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1 +#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2 +#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2 +#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1 +#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1 +#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1 +#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1 + +#define QIB_7322_SendBufBase_OFFS 0x1C8 +#define QIB_7322_SendBufBase_DEF 0x0018000000100000 +#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20 +#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34 +#define 
QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF +#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0 +#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14 +#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF + +#define QIB_7322_SendBufSize_OFFS 0x1D0 +#define QIB_7322_SendBufSize_DEF 0x0000108000000880 +#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20 +#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C +#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF +#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0 +#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB +#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF + +#define QIB_7322_SendBufCnt_OFFS 0x1D8 +#define QIB_7322_SendBufCnt_DEF 0x0000002000000080 +#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20 +#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25 +#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F +#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0 +#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8 +#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF + +#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0 +#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000 +#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6 +#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27 +#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF + +#define QIB_7322_SendBufErr0_OFFS 0x240 +#define QIB_7322_SendBufErr0_DEF 0x0000000000000000 +#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0 +#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F +#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0 + +#define QIB_7322_AvailUpdCount_OFFS 0x268 +#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000 +#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0 +#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4 +#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F + +#define QIB_7322_RcvHdrAddr0_OFFS 0x280 +#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000 +#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2 +#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27 +#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF + +#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340 +#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000 +#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2 +#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27 +#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF + +#define QIB_7322_ahb_access_ctrl_OFFS 0x460 +#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000 +#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1 +#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2 +#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3 +#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0 +#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0 +#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1 + +#define QIB_7322_ahb_transaction_reg_OFFS 0x468 +#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000 +#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20 +#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F +#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF +#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F +#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F +#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1 +#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E +#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E +#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1 +#define 
QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B +#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B +#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1 +#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10 +#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A +#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF + +#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470 +#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001 +#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA +#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA +#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1 +#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5 +#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9 +#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F +#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3 +#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4 +#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3 +#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2 +#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2 +#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1 +#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1 +#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1 +#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1 +#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0 +#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0 +#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1 + +#define QIB_7322_SendCheckMask0_OFFS 0x4C0 +#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000 +#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0 +#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F +#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0 + +#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0 +#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000 +#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0 +#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F +#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0 + +#define QIB_7322_SendIBPacketMask0_OFFS 0x500 +#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000 +#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0 +#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F +#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0 + +#define QIB_7322_IntRedirect0_OFFS 0x540 +#define QIB_7322_IntRedirect0_DEF 0x0000000000000000 +#define QIB_7322_IntRedirect0_vec11_LSB 0x37 +#define QIB_7322_IntRedirect0_vec11_MSB 0x3B +#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec10_LSB 0x32 +#define QIB_7322_IntRedirect0_vec10_MSB 0x36 +#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec9_LSB 0x2D +#define QIB_7322_IntRedirect0_vec9_MSB 0x31 +#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec8_LSB 0x28 +#define QIB_7322_IntRedirect0_vec8_MSB 0x2C +#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec7_LSB 0x23 +#define QIB_7322_IntRedirect0_vec7_MSB 0x27 +#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec6_LSB 0x1E +#define QIB_7322_IntRedirect0_vec6_MSB 0x22 +#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec5_LSB 0x19 +#define QIB_7322_IntRedirect0_vec5_MSB 0x1D +#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec4_LSB 0x14 +#define QIB_7322_IntRedirect0_vec4_MSB 0x18 +#define 
QIB_7322_IntRedirect0_vec4_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec3_LSB 0xF +#define QIB_7322_IntRedirect0_vec3_MSB 0x13 +#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec2_LSB 0xA +#define QIB_7322_IntRedirect0_vec2_MSB 0xE +#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec1_LSB 0x5 +#define QIB_7322_IntRedirect0_vec1_MSB 0x9 +#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F +#define QIB_7322_IntRedirect0_vec0_LSB 0x0 +#define QIB_7322_IntRedirect0_vec0_MSB 0x4 +#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F + +#define QIB_7322_Int_Granted_OFFS 0x570 +#define QIB_7322_Int_Granted_DEF 0x0000000000000000 + +#define QIB_7322_vec_clr_without_int_OFFS 0x578 +#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000 + +#define QIB_7322_DCACtrlA_OFFS 0x580 +#define QIB_7322_DCACtrlA_DEF 0x0000000000000000 +#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4 +#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4 +#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1 +#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3 +#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3 +#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1 +#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2 +#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2 +#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1 +#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1 +#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1 +#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1 +#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0 +#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0 +#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1 + +#define QIB_7322_DCACtrlB_OFFS 0x588 +#define QIB_7322_DCACtrlB_DEF 0x0000000000000000 +#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36 +#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B +#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E +#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35 +#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28 +#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D +#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20 +#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27 +#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16 +#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B +#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE +#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15 +#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8 +#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD +#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0 +#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7 +#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF + +#define QIB_7322_DCACtrlC_OFFS 0x590 +#define QIB_7322_DCACtrlC_DEF 0x0000000000000000 +#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36 +#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B +#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E +#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35 +#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28 +#define 
QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D +#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20 +#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27 +#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16 +#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B +#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE +#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15 +#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8 +#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD +#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0 +#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7 +#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF + +#define QIB_7322_DCACtrlD_OFFS 0x598 +#define QIB_7322_DCACtrlD_DEF 0x0000000000000000 +#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36 +#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B +#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E +#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35 +#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28 +#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D +#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20 +#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27 +#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16 +#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B +#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE +#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15 +#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8 +#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD +#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0 +#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7 +#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF + +#define QIB_7322_DCACtrlE_OFFS 0x5A0 +#define QIB_7322_DCACtrlE_DEF 0x0000000000000000 +#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36 +#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B +#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E +#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35 +#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28 +#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D +#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20 +#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27 +#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16 +#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B +#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE +#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15 +#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8 +#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD +#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0 +#define 
QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7 +#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF + +#define QIB_7322_DCACtrlF_OFFS 0x5A8 +#define QIB_7322_DCACtrlF_DEF 0x0000000000000000 +#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28 +#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F +#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20 +#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27 +#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16 +#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B +#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE +#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15 +#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF +#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8 +#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD +#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F +#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0 +#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7 +#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF + +#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00 +#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000 +#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10 +#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F +#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF +#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0 +#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF +#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF + +#define QIB_7322_CntrRegBase_0_OFFS 0x1028 +#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000 + +#define QIB_7322_ErrMask_0_OFFS 0x1080 +#define QIB_7322_ErrMask_0_DEF 0x0000000000000000 +#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A +#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A +#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39 +#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39 +#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36 +#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36 +#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31 +#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31 +#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30 +#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30 +#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F +#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F +#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E +#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E +#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D +#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D +#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C +#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C +#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B +#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B +#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1 +#define 
QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A +#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A +#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29 +#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29 +#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28 +#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28 +#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27 +#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27 +#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26 +#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26 +#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25 +#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25 +#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24 +#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24 +#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22 +#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22 +#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21 +#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21 +#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20 +#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20 +#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F +#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F +#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E +#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E +#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D +#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D +#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11 +#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11 +#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10 +#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10 +#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF +#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF +#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE +#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE +#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB +#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB +#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA +#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA +#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9 +#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9 +#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1 +#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8 +#define 
QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_7322_ErrStatus_0_OFFS 0x1088
+#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
+#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1
+
+#define QIB_7322_ErrClear_0_OFFS 0x1090
+#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_7322_TXEStatus_0_OFFS 0x10B8
+#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1
+
+#define QIB_7322_RcvCtrl_0_OFFS 0x1100
+#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1
+
+#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
+#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
+#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
+#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
+#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
+#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
+#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
+#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F
+
+#define QIB_7322_PSStat_0_OFFS 0x1140
+#define QIB_7322_PSStat_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSStart_0_OFFS 0x1148
+#define QIB_7322_PSStart_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSInterval_0_OFFS 0x1150
+#define QIB_7322_PSInterval_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvStatus_0_OFFS 0x1160
+#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
+#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1
+
+#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
+#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
+#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F
+
+#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
+#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
+#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
+#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
+#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendCtrl_0_OFFS 0x11C0
+#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1
+
+#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
+#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
+#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
+#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
+#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
+#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
+#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
+#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
+#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
+#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaTail_0_OFFS 0x1208
+#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
+#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
+#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHead_0_OFFS 0x1210
+#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
+#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
+#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
+#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0
+
+#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
+#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
+#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF
+
+#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
+#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1
+
+#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
+#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF
+
+#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
+#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0
+
+#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
+#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1
+
+#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
+#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF
+
+#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
+#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF
+
+#define QIB_7322_IBCStatusA_0_OFFS 0x1540
+#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
+#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
+#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F
+
+#define QIB_7322_IBCStatusB_0_OFFS 0x1548
+#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF
+
+#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
+#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
+#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
+#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
+#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
+#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
+#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
+#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
+#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
+#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1
+
+#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
+#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F
+
+#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
+#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000
+
+#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
+#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1
+
+#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
+#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1
+
+#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
+#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1
+
+#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
+#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
+#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1
+
+#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
+#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
+#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
+#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F
+
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
+#define
QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7 +#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF + +#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670 +#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000 + +#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0 +#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000 +#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0 +#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7 +#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF + +#define QIB_7322_LowPriority0_0_OFFS 0x1C00 +#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000 +#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10 +#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12 +#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7 +#define QIB_7322_LowPriority0_0_Weight_LSB 0x0 +#define QIB_7322_LowPriority0_0_Weight_MSB 0x7 +#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF + +#define QIB_7322_HighPriority0_0_OFFS 0x1E00 +#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000 +#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10 +#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12 +#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7 +#define QIB_7322_HighPriority0_0_Weight_LSB 0x0 +#define QIB_7322_HighPriority0_0_Weight_MSB 0x7 +#define 
QIB_7322_HighPriority0_0_Weight_RMASK 0xFF + +#define QIB_7322_CntrRegBase_1_OFFS 0x2028 +#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000 + +#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170 + +#define QIB_7322_SendCtrl_1_OFFS 0x21C0 + +#define QIB_7322_SendBufAvail0_OFFS 0x3000 +#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000 +#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0 +#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F +#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0 + +#define QIB_7322_MsixTable_OFFS 0x8000 +#define QIB_7322_MsixTable_DEF 0x0000000000000000 + +#define QIB_7322_MsixPba_OFFS 0x9000 +#define QIB_7322_MsixPba_DEF 0x0000000000000000 + +#define QIB_7322_LAMemory_OFFS 0xA000 +#define QIB_7322_LAMemory_DEF 0x0000000000000000 + +#define QIB_7322_LBIntCnt_OFFS 0x11000 +#define QIB_7322_LBIntCnt_DEF 0x0000000000000000 + +#define QIB_7322_LBFlowStallCnt_OFFS 0x11008 +#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000 + +#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0 +#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000 + +#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8 +#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000 + +#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8 +#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000 + +#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0 +#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000 + +#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0 +#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000 + +#define QIB_7322_LBIntCnt_0_OFFS 0x12000 +#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008 +#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000 + +#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010 +#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018 +#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020 +#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028 +#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxDwordCnt_0_OFFS 0x12030 +#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038 +#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040 +#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048 +#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050 +#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058 +#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060 +#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068 +#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070 +#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxDwordCnt_0_OFFS 0x12078 +#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080 +#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088 +#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090 +#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000 
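The _OFFS/_DEF pairs above give each register's byte offset and power-on default, while the _LSB/_MSB/_RMASK triplets describe individual bit fields: _LSB is the field's starting bit position, and _RMASK is the mask to apply after shifting right by _LSB. A minimal, self-contained sketch of how these generated names are typically combined — the QIB_FIELD_GET helper and the sample register value are illustrative assumptions, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Field descriptions copied from the defines above. */
#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF

/*
 * Hypothetical helper: extract a field from a raw 64-bit register value
 * by shifting right by the field's _LSB and masking with its _RMASK
 * (the mask is already right-justified, so it applies after the shift).
 */
#define QIB_FIELD_GET(val, field) \
	(((uint64_t)(val) >> field##_LSB) & field##_RMASK)

int main(void)
{
	/* Pretend this qword was read from the LowPriority0_0 register. */
	uint64_t regval = (3ULL << 16) | 0x40;	/* VL 3, weight 0x40 */

	printf("VirtualLane = %llu\n", (unsigned long long)
	       QIB_FIELD_GET(regval, QIB_7322_LowPriority0_0_VirtualLane));
	printf("Weight = 0x%llx\n", (unsigned long long)
	       QIB_FIELD_GET(regval, QIB_7322_LowPriority0_0_Weight));
	return 0;
}

The same pattern applies throughout this header; the counter registers that follow (RxVCRCErrCnt and friends) are plain 64-bit values with no sub-fields, so only their _OFFS and _DEF are generated.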
+ +#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098 +#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0 +#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8 +#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0 +#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8 +#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0 +#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8 +#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0 +#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0 +#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180 +#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188 +#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190 +#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198 +#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8 +#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0 +#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8 +#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0 +#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8 +#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0 +#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8 +#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8 +#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000 + +#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218 +#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000 + +#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220 +#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000 + +#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228 +#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000 + +#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230 +#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000 + +#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238 +#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000 + +#define QIB_7322_LBIntCnt_1_OFFS 0x13000 +#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008 +#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000 + +#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010 +#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018 +#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020 +#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028 +#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000 + +#define 
QIB_7322_TxDwordCnt_1_OFFS 0x13030 +#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038 +#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040 +#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048 +#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050 +#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058 +#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060 +#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068 +#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070 +#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxDwordCnt_1_OFFS 0x13078 +#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080 +#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088 +#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090 +#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098 +#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0 +#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8 +#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0 +#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8 +#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0 +#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8 +#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0 +#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0 +#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180 +#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188 +#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190 +#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198 +#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8 +#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0 +#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8 +#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0 +#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8 +#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0 +#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8 +#define 
QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8 +#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000 + +#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218 +#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000 + +#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220 +#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000 + +#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228 +#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000 + +#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230 +#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000 + +#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238 +#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000 + +#define QIB_7322_RcvEgrArray_OFFS 0x14000 +#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000 +#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25 +#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27 +#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7 +#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0 +#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24 +#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF + +#define QIB_7322_RcvTIDArray0_OFFS 0x50000 +#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000 +#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25 +#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27 +#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7 +#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0 +#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24 +#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF + +#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000 +#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000 + +#define QIB_7322_RcvHdrTail0_OFFS 0x200000 +#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000 + +#define QIB_7322_RcvHdrHead0_OFFS 0x200008 +#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000 +#define QIB_7322_RcvHdrHead0_counter_LSB 0x20 +#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F +#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF +#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0 +#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F +#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF + +#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010 +#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000 + +#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018 +#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000 + +#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000 +#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000 +#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C +#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C +#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1 +#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B +#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B +#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1 +#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16 +#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16 +#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1 +#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15 +#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15 +#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1 +#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14 +#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14 +#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1 +#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13 +#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13 +#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1 +#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB 
+#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
+#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
new file mode 100644
index 000000000000..b3955ed8f794
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -0,0 +1,758 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QIB_COMMON_H
+#define _QIB_COMMON_H
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
+#define QIB_SRC_OUI_1 0x00
+#define QIB_SRC_OUI_2 0x11
+#define QIB_SRC_OUI_3 0x75
+
+/* version of protocol header (known to chip also). In the long run,
+ * we should be able to generate and accept a range of version numbers;
+ * for now we only accept one, and it's compiled in.
+ */
+#define IPS_PROTO_VERSION 2
+
+/*
+ * These are compile time constants that you may want to enable or disable
+ * if you are trying to debug problems with code or performance.
+ * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
+ * fastpath code
+ * QIB_TRACE_REGWRITES define as 1 if you want register writes to be
+ * traced in fastpath code
+ * _QIB_TRACING define as 0 if you want to remove all tracing in a
+ * compilation unit
+ */
+
+/*
+ * The value in the BTH QP field that QLogic_IB uses to differentiate
+ * a qlogic_ib protocol IB packet vs a standard IB transport packet.
+ * It needs to be even (0x656b78), because the LSB is sometimes
+ * used for the MSB of context. The change may cause a problem
+ * interoperating with older software.
+ */
+#define QIB_KD_QP 0x656b78
+
+/*
+ * These are the status bits readable (in ascii form, 64bit value)
+ * from the "status" sysfs file. For binary compatibility, values
+ * must remain as is; removed states can be reused for different
+ * purposes.
+ */
+#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
+/* Chip has been found and initted */
+#define QIB_STATUS_CHIP_PRESENT 0x20
+/* IB link is at ACTIVE, usable for data traffic */
+#define QIB_STATUS_IB_READY 0x40
+/* link is configured, LID, MTU, etc. have been set */
+#define QIB_STATUS_IB_CONF 0x80
+/* A Fatal hardware error has occurred. */
+#define QIB_STATUS_HWERROR 0x200
+
+/*
+ * The list of usermode accessible registers. Also see Reg_* later in file.
+ */
+enum qib_ureg {
+	/* (RO) DMA RcvHdr to be used next. */
+	ur_rcvhdrtail = 0,
+	/* (RW) RcvHdr entry to be processed next by host. */
+	ur_rcvhdrhead = 1,
+	/* (RO) Index of next Eager index to use. */
+	ur_rcvegrindextail = 2,
+	/* (RW) Eager TID to be processed next */
+	ur_rcvegrindexhead = 3,
+	/* For internal use only; max register number. */
+	_QIB_UregMax
+};
+
+/* bit values for spi_runtime_flags */
+#define QIB_RUNTIME_PCIE 0x0002
+#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
+#define QIB_RUNTIME_RCVHDR_COPY 0x0008
+#define QIB_RUNTIME_MASTER 0x0010
+#define QIB_RUNTIME_RCHK 0x0020
+#define QIB_RUNTIME_NODMA_RTAIL 0x0080
+#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
+#define QIB_RUNTIME_SDMA 0x0200
+#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
+#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
+#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
+#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
+#define QIB_RUNTIME_HDRSUPP 0x4000
+
+/*
+ * This structure is returned by qib_userinit() immediately after
+ * open to get implementation-specific info, and info specific to this
+ * instance.
+ *
+ * This struct must have explicit pad fields where type sizes
+ * may result in different alignments between 32 and 64 bit
+ * programs, since the 64 bit kernel requires the user code
+ * to have matching offsets
+ */
+struct qib_base_info {
+	/* version of hardware, for feature checking. */
+	__u32 spi_hw_version;
+	/* version of software, for feature checking. */
+	__u32 spi_sw_version;
+	/* QLogic_IB context assigned, goes into sent packets */
+	__u16 spi_ctxt;
+	__u16 spi_subctxt;
+	/*
+	 * IB MTU; a packet's IB data must be less than this.
+	 * The MTU is in bytes, and will be a multiple of 4 bytes.
+	 */
+	__u32 spi_mtu;
+	/*
+	 * Size of a PIO buffer. Any given packet's total size must be less
+	 * than this (in words). Included is the starting control word, so
+	 * if 513 is returned, then total pkt size is 512 words or less.
+	 */
+	__u32 spi_piosize;
+	/* size of the TID cache in qlogic_ib, in entries */
+	__u32 spi_tidcnt;
+	/* size of the TID Eager list in qlogic_ib, in entries */
+	__u32 spi_tidegrcnt;
+	/* size of a single receive header queue entry in words. */
+	__u32 spi_rcvhdrent_size;
+	/*
+	 * Count of receive header queue entries allocated.
+	 * This may be less than the spu_rcvhdrcnt passed in!
+	 */
+	__u32 spi_rcvhdr_cnt;
+
+	/* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
+	__u32 spi_runtime_flags;
+
+	/* address where hardware receive header queue is mapped */
+	__u64 spi_rcvhdr_base;
+
+	/* user program. */
+
+	/* base address of eager TID receive buffers used by hardware. */
+	__u64 spi_rcv_egrbufs;
+
+	/* Allocated by initialization code, not by protocol. */
+
+	/*
+	 * Size of each TID buffer in host memory, starting at
+	 * spi_rcv_egrbufs. The buffers are virtually contiguous.
+	 */
+	__u32 spi_rcv_egrbufsize;
+	/*
+	 * The special QP (queue pair) value that distinguishes a qlogic_ib
+	 * protocol packet from standard IB packets. More, probably much
+	 * more, to be added.
+	 */
+	__u32 spi_qpair;
+
+	/*
+	 * User register base for init code, not to be used directly by
+	 * protocol or applications. Always points to chip registers,
+	 * for normal or shared context.
+	 */
+	__u64 spi_uregbase;
+	/*
+	 * Maximum buffer size in bytes that can be used in a single TID
+	 * entry (assuming the buffer is aligned to this boundary). This is
+	 * the minimum of what the hardware and software support.
+	 * Guaranteed to be a power of 2.
+	 */
+	__u32 spi_tid_maxsize;
+	/*
+	 * alignment of each pio send buffer (byte count
+	 * to add to spi_piobufbase to get to second buffer)
+	 */
+	__u32 spi_pioalign;
+	/*
+	 * The index of the first pio buffer available to this process;
+	 * needed to do lookup in spi_pioavailaddr; not added to
+	 * spi_piobufbase.
+	 */
+	__u32 spi_pioindex;
+	/* number of buffers mapped for this process */
+	__u32 spi_piocnt;
+
+	/*
+	 * Base address of writeonly pio buffers for this process.
+	 * Each buffer has spi_piosize words, and is aligned on spi_pioalign
+	 * boundaries. spi_piocnt buffers are mapped from this address
+	 */
+	__u64 spi_piobufbase;
+
+	/*
+	 * Base address of readonly memory copy of the pioavail registers.
+	 * There are 2 bits for each buffer.
+	 */
+	__u64 spi_pioavailaddr;
+
+	/*
+	 * Address where driver updates a copy of the interface and driver
+	 * status (QIB_STATUS_*) as a 64 bit value. It's followed by a
+	 * link status qword (formerly combined with driver status), then a
+	 * string indicating hardware error, if there was one.
+	 */
+	__u64 spi_status;
+
+	/* number of chip ctxts available to user processes */
+	__u32 spi_nctxts;
+	__u16 spi_unit; /* unit number of chip we are using */
+	__u16 spi_port; /* IB port number we are using */
+	/* num bufs in each contiguous set */
+	__u32 spi_rcv_egrperchunk;
+	/* size in bytes of each contiguous set */
+	__u32 spi_rcv_egrchunksize;
+	/* total size of mmap to cover full rcvegrbuffers */
+	__u32 spi_rcv_egrbuftotlen;
+	__u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
+	/* address of readonly memory copy of the rcvhdrq tail register. */
+	__u64 spi_rcvhdr_tailaddr;
+
+	/*
+	 * shared memory pages for subctxts if ctxt is shared; these cover
+	 * all the processes in the group sharing a single context.
+	 * All have enough space for the num_subcontexts value on this job.
+	 */
+	__u64 spi_subctxt_uregbase;
+	__u64 spi_subctxt_rcvegrbuf;
+	__u64 spi_subctxt_rcvhdr_base;
+
+	/* shared memory page for send buffer disarm status */
+	__u64 spi_sendbuf_status;
+} __attribute__ ((aligned(8)));
+
+/*
+ * This version number is given to the driver by the user code during
+ * initialization in the spu_userversion field of qib_user_info, so
+ * the driver can check for compatibility with user code.
+ *
+ * The major version changes when data structures
+ * change in an incompatible way. The driver must be the same or higher
+ * for initialization to succeed. In some cases, a higher version
+ * driver will not interoperate with older software, and initialization
+ * will return an error.
+ */
+#define QIB_USER_SWMAJOR 1
+
+/*
+ * Minor version differences are always compatible
+ * within a major version; however, if the user software is newer
+ * than the driver software, some new features and/or structure fields
+ * may not be implemented; the user code must deal with this if it
+ * cares, or it must abort after initialization reports the difference.
+ */
+#define QIB_USER_SWMINOR 10
+
+#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
+
+#ifndef QIB_KERN_TYPE
+#define QIB_KERN_TYPE 0
+#define QIB_IDSTR "QLogic kernel.org driver"
+#endif
+
+/*
+ * Similarly, this is the kernel version going back to the user. It's
+ * slightly different, in that we want to tell if the driver was built as
+ * part of a QLogic release, or is the driver from openfabrics.org,
+ * kernel.org, or a standard distribution, for support reasons.
+ * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
+ *
+ * It's returned by the driver to the user code during initialization in the
+ * spi_sw_version field of qib_base_info, so the user code can in turn
+ * check for compatibility with the kernel.
+ */
+#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
+
+/*
+ * This structure is passed to qib_userinit() to tell the driver where
+ * user code buffers are, sizes, etc. The offsets and sizes of the
+ * fields must remain unchanged, for binary compatibility. It can
+ * be extended if userversion is changed, so user code can tell, if needed.
+ */
+struct qib_user_info {
+	/*
+	 * version of user software, to detect compatibility issues.
+	 * Should be set to QIB_USER_SWVERSION.
+	 */
+	__u32 spu_userversion;
+
+	__u32 _spu_unused2;
+
+	/* size of struct base_info to write to */
+	__u32 spu_base_info_size;
+
+	__u32 _spu_unused3;
+
+	/*
+	 * If two or more processes wish to share a context, each process
+	 * must set the spu_subctxt_cnt and spu_subctxt_id to the same
+	 * values. The only restriction on the spu_subctxt_id is that
+	 * it be unique for a given node.
+	 */
+	__u16 spu_subctxt_cnt;
+	__u16 spu_subctxt_id;
+
+	__u32 spu_port; /* IB port requested by user if > 0 */
+
+	/*
+	 * address of struct base_info to write to
+	 */
+	__u64 spu_base_info;
+
+} __attribute__ ((aligned(8)));
+
+/* User commands. */
+
+/* 16 available, was: old set up userspace (for old user code) */
+#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
+#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
+#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
+#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
+#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
+/* 22 available, was: return info on slave processes (for old user code) */
+#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
+#define QIB_CMD_USER_INIT 24 /* set up userspace */
+#define QIB_CMD_UNUSED_1 25
+#define QIB_CMD_UNUSED_2 26
+#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
+#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
+#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
+/* 30 is unused */
+#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
+#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
+/* 33 available, was a testing feature */
+#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
+#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
+#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
+				* processes: qib_cpus_list */
+
+/*
+ * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
+ * compatibility with libraries from a previous release. The ACK_EVENT
+ * will take appropriate driver action (if any, just DISARM for now),
+ * then clear the bits passed in as part of the mask.
These bits are
+ * in the first 64bit word at spi_sendbuf_status, and are passed to
+ * the driver in the event_mask union as well.
+ */
+#define _QIB_EVENT_DISARM_BUFS_BIT 0
+#define _QIB_EVENT_LINKDOWN_BIT 1
+#define _QIB_EVENT_LID_CHANGE_BIT 2
+#define _QIB_EVENT_LMC_CHANGE_BIT 3
+#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
+#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
+
+#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
+#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
+#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
+#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
+#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
+
+
+/*
+ * Poll types
+ */
+#define QIB_POLL_TYPE_ANYRCV 0x0
+#define QIB_POLL_TYPE_URGENT 0x1
+
+struct qib_ctxt_info {
+	__u16 num_active;	/* number of active units */
+	__u16 unit;		/* unit (chip) assigned to caller */
+	__u16 port;		/* IB port assigned to caller (1-based) */
+	__u16 ctxt;		/* ctxt on unit assigned to caller */
+	__u16 subctxt;		/* subctxt on unit assigned to caller */
+	__u16 num_ctxts;	/* number of ctxts available on unit */
+	__u16 num_subctxts;	/* number of subctxts opened on ctxt */
+	__u16 rec_cpu;		/* cpu # for affinity (ffff if none) */
+};
+
+struct qib_tid_info {
+	__u32 tidcnt;
+	/* make structure same size in 32 and 64 bit */
+	__u32 tid__unused;
+	/* virtual address of first page in transfer */
+	__u64 tidvaddr;
+	/* pointer (same size 32/64 bit) to __u16 tid array */
+	__u64 tidlist;
+
+	/*
+	 * pointer (same size 32/64 bit) to bitmap of TIDs used
+	 * for this call; checked for being large enough at open
+	 */
+	__u64 tidmap;
+};
+
+struct qib_cmd {
+	__u32 type;	/* command type */
+	union {
+		struct qib_tid_info tid_info;
+		struct qib_user_info user_info;
+
+		/*
+		 * address in userspace where we should put the sdma
+		 * inflight counter
+		 */
+		__u64 sdma_inflight;
+		/*
+		 * address in userspace where we should put the sdma
+		 * completion counter
+		 */
+		__u64 sdma_complete;
+		/* address in userspace of struct qib_ctxt_info to
+		   write result to */
+		__u64 ctxt_info;
+		/* enable/disable receipt of packets */
+		__u32 recv_ctrl;
+		/* enable/disable armlaunch errors (non-zero to enable) */
+		__u32 armlaunch_ctrl;
+		/* partition key to set */
+		__u16 part_key;
+		/* user address of __u32 bitmask of active slaves */
+		__u64 slave_mask_addr;
+		/* type of polling we want */
+		__u16 poll_type;
+		/* back pressure enable bit for one particular context */
+		__u8 ctxt_bp;
+		/* qib_user_event_ack(), QIB_EVENT_* bits */
+		__u64 event_mask;
+	} cmd;
+};
+
+struct qib_iovec {
+	/* Pointer to data, but same size 32 and 64 bit */
+	__u64 iov_base;
+
+	/*
+	 * Length of data; don't need 64 bits, but want
+	 * qib_sendpkt to remain same size as before 32 bit changes, so...
+	 */
+	__u64 iov_len;
+};
+
+/*
+ * Describes a single packet for send. Each packet can have one or more
+ * buffers, but the total length (exclusive of IB headers) must be less
+ * than the MTU, and if using the PIO method, entire packet length,
+ * including IB headers, must be less than the qib_piosize value (words).
+ * Use of this necessitates including sys/uio.h
+ */
+struct __qib_sendpkt {
+	__u32 sps_flags;	/* flags for packet (TBD) */
+	__u32 sps_cnt;		/* number of entries to use in sps_iov */
+	/* array of iov's describing packet. TEMPORARY */
+	struct qib_iovec sps_iov[4];
+};
+
+/*
+ * Diagnostics can send a packet by "writing" the following
+ * structs to the diag data special file.
+ * This allows a custom
+ * pbc (+ static rate) qword, so that special modes and deliberate
+ * changes to CRCs can be used. The elements were also re-ordered
+ * for better alignment and to avoid padding issues.
+ */
+#define _DIAG_XPKT_VERS 3
+struct qib_diag_xpkt {
+	__u16 version;
+	__u16 unit;
+	__u16 port;
+	__u16 len;
+	__u64 data;
+	__u64 pbc_wd;
+};
+
+/*
+ * Data layout in I2C flash (for GUID, etc.)
+ * All fields are little-endian binary unless otherwise stated
+ */
+#define QIB_FLASH_VERSION 2
+struct qib_flash {
+	/* flash layout version (QIB_FLASH_VERSION) */
+	__u8 if_fversion;
+	/* checksum protecting if_length bytes */
+	__u8 if_csum;
+	/*
+	 * valid length (in use, protected by if_csum), including
+	 * if_fversion and if_csum themselves
+	 */
+	__u8 if_length;
+	/* the GUID, in network order */
+	__u8 if_guid[8];
+	/* number of GUIDs to use, starting from if_guid */
+	__u8 if_numguid;
+	/* the (last 10 characters of) board serial number, in ASCII */
+	char if_serial[12];
+	/* board mfg date (YYYYMMDD ASCII) */
+	char if_mfgdate[8];
+	/* last board rework/test date (YYYYMMDD ASCII) */
+	char if_testdate[8];
+	/* logging of error counts, TBD */
+	__u8 if_errcntp[4];
+	/* powered on hours, updated at driver unload */
+	__u8 if_powerhour[2];
+	/* ASCII free-form comment field */
+	char if_comment[32];
+	/* Backwards compatible prefix for longer QLogic Serial Numbers */
+	char if_sprefix[4];
+	/* 82 bytes used, min flash size is 128 bytes */
+	__u8 if_future[46];
+};
+
+/*
+ * These are the counters implemented in the chip, and are listed in order.
+ * The InterCaps naming is taken straight from the chip spec.
+ */
+struct qlogic_ib_counters {
+	__u64 LBIntCnt;
+	__u64 LBFlowStallCnt;
+	__u64 TxSDmaDescCnt;	/* was Reserved1 */
+	__u64 TxUnsupVLErrCnt;
+	__u64 TxDataPktCnt;
+	__u64 TxFlowPktCnt;
+	__u64 TxDwordCnt;
+	__u64 TxLenErrCnt;
+	__u64 TxMaxMinLenErrCnt;
+	__u64 TxUnderrunCnt;
+	__u64 TxFlowStallCnt;
+	__u64 TxDroppedPktCnt;
+	__u64 RxDroppedPktCnt;
+	__u64 RxDataPktCnt;
+	__u64 RxFlowPktCnt;
+	__u64 RxDwordCnt;
+	__u64 RxLenErrCnt;
+	__u64 RxMaxMinLenErrCnt;
+	__u64 RxICRCErrCnt;
+	__u64 RxVCRCErrCnt;
+	__u64 RxFlowCtrlErrCnt;
+	__u64 RxBadFormatCnt;
+	__u64 RxLinkProblemCnt;
+	__u64 RxEBPCnt;
+	__u64 RxLPCRCErrCnt;
+	__u64 RxBufOvflCnt;
+	__u64 RxTIDFullErrCnt;
+	__u64 RxTIDValidErrCnt;
+	__u64 RxPKeyMismatchCnt;
+	__u64 RxP0HdrEgrOvflCnt;
+	__u64 RxP1HdrEgrOvflCnt;
+	__u64 RxP2HdrEgrOvflCnt;
+	__u64 RxP3HdrEgrOvflCnt;
+	__u64 RxP4HdrEgrOvflCnt;
+	__u64 RxP5HdrEgrOvflCnt;
+	__u64 RxP6HdrEgrOvflCnt;
+	__u64 RxP7HdrEgrOvflCnt;
+	__u64 RxP8HdrEgrOvflCnt;
+	__u64 RxP9HdrEgrOvflCnt;
+	__u64 RxP10HdrEgrOvflCnt;
+	__u64 RxP11HdrEgrOvflCnt;
+	__u64 RxP12HdrEgrOvflCnt;
+	__u64 RxP13HdrEgrOvflCnt;
+	__u64 RxP14HdrEgrOvflCnt;
+	__u64 RxP15HdrEgrOvflCnt;
+	__u64 RxP16HdrEgrOvflCnt;
+	__u64 IBStatusChangeCnt;
+	__u64 IBLinkErrRecoveryCnt;
+	__u64 IBLinkDownedCnt;
+	__u64 IBSymbolErrCnt;
+	__u64 RxVL15DroppedPktCnt;
+	__u64 RxOtherLocalPhyErrCnt;
+	__u64 PcieRetryBufDiagQwordCnt;
+	__u64 ExcessBufferOvflCnt;
+	__u64 LocalLinkIntegrityErrCnt;
+	__u64 RxVlErrCnt;
+	__u64 RxDlidFltrCnt;
+};
+
+/*
+ * The next set of defines are for packet headers, and chip register
+ * and memory bits that are visible to and/or used by user-mode software.
+ */ + +/* RcvHdrFlags bits */ +#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF +#define QLOGIC_IB_RHF_LENGTH_SHIFT 0 +#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7 +#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11 +#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF +#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16 +#define QLOGIC_IB_RHF_SEQ_MASK 0xF +#define QLOGIC_IB_RHF_SEQ_SHIFT 0 +#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF +#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4 +#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000 +#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000 +#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000 +#define QLOGIC_IB_RHF_H_LENERR 0x10000000 +#define QLOGIC_IB_RHF_H_MTUERR 0x08000000 +#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000 +#define QLOGIC_IB_RHF_H_TIDERR 0x02000000 +#define QLOGIC_IB_RHF_H_MKERR 0x01000000 +#define QLOGIC_IB_RHF_H_IBERR 0x00800000 +#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000 +#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000 +#define QLOGIC_IB_RHF_L_SWA 0x00008000 +#define QLOGIC_IB_RHF_L_SWB 0x00004000 + +/* qlogic_ib header fields */ +#define QLOGIC_IB_I_VERS_MASK 0xF +#define QLOGIC_IB_I_VERS_SHIFT 28 +#define QLOGIC_IB_I_CTXT_MASK 0xF +#define QLOGIC_IB_I_CTXT_SHIFT 24 +#define QLOGIC_IB_I_TID_MASK 0x7FF +#define QLOGIC_IB_I_TID_SHIFT 13 +#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF +#define QLOGIC_IB_I_OFFSET_SHIFT 0 + +/* K_PktFlags bits */ +#define QLOGIC_IB_KPF_INTR 0x1 +#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3 +#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1 + +#define QLOGIC_IB_MAX_SUBCTXT 4 + +/* SendPIO per-buffer control */ +#define QLOGIC_IB_SP_TEST 0x40 +#define QLOGIC_IB_SP_TESTEBP 0x20 +#define QLOGIC_IB_SP_TRIGGER_SHIFT 15 + +/* SendPIOAvail bits */ +#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1 +#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0 + +/* qlogic_ib header format */ +struct qib_header { + /* + * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset - + * 14 bits before ECO change ~28 Dec 03. After that, Vers 4, + * Context 4, TID 11, offset 13. + */ + __le32 ver_ctxt_tid_offset; + __le16 chksum; + __le16 pkt_flags; +}; + +/* + * qlogic_ib user message header format. + * This structure contains the first 4 fields common to all protocols + * that employ qlogic_ib. + */ +struct qib_message_header { + __be16 lrh[4]; + __be32 bth[3]; + /* fields below this point are in host byte order */ + struct qib_header iph; + __u8 sub_opcode; +}; + +/* IB - LRH header consts */ +#define QIB_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */ +#define QIB_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */ + +/* misc. */ +#define SIZE_OF_CRC 1 + +#define QIB_DEFAULT_P_KEY 0xFFFF +#define QIB_PERMISSIVE_LID 0xFFFF +#define QIB_AETH_CREDIT_SHIFT 24 +#define QIB_AETH_CREDIT_MASK 0x1F +#define QIB_AETH_CREDIT_INVAL 0x1F +#define QIB_PSN_MASK 0xFFFFFF +#define QIB_MSN_MASK 0xFFFFFF +#define QIB_QPN_MASK 0xFFFFFF +#define QIB_MULTICAST_LID_BASE 0xC000 +#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK +#define QIB_MULTICAST_QPN 0xFFFFFF + +/* Receive Header Queue: receive type (from qlogic_ib) */ +#define RCVHQ_RCV_TYPE_EXPECTED 0 +#define RCVHQ_RCV_TYPE_EAGER 1 +#define RCVHQ_RCV_TYPE_NON_KD 2 +#define RCVHQ_RCV_TYPE_ERROR 3 + +#define QIB_HEADER_QUEUE_WORDS 9 + +/* functions for extracting fields from rcvhdrq entries for the driver. 
+ */
+static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
+{
+	return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
+}
+
+static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
+		QLOGIC_IB_RHF_RCVTYPE_MASK;
+}
+
+static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
+{
+	return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
+		QLOGIC_IB_RHF_LENGTH_MASK) << 2;
+}
+
+static inline __u32 qib_hdrget_index(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
+		QLOGIC_IB_RHF_EGRINDEX_MASK;
+}
+
+static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
+		QLOGIC_IB_RHF_SEQ_MASK;
+}
+
+static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
+{
+	return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
+		QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
+}
+
+static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
+{
+	return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
+}
+
+static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
+{
+	return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
+		QLOGIC_IB_I_VERS_MASK;
+}
+
+#endif /* _QIB_COMMON_H */
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
new file mode 100644
index 000000000000..a86cbf880f98
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_cq_enter - add a new entry to the completion queue
+ * @cq: completion queue
+ * @entry: work completion entry to add
+ * @solicited: true if @entry is a solicited entry
+ *
+ * This may be called with qp->s_lock held.
+ */ +void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited) +{ + struct qib_cq_wc *wc; + unsigned long flags; + u32 head; + u32 next; + + spin_lock_irqsave(&cq->lock, flags); + + /* + * Note that the head pointer might be writable by user processes. + * Take care to verify it is a sane value. + */ + wc = cq->queue; + head = wc->head; + if (head >= (unsigned) cq->ibcq.cqe) { + head = cq->ibcq.cqe; + next = 0; + } else + next = head + 1; + if (unlikely(next == wc->tail)) { + spin_unlock_irqrestore(&cq->lock, flags); + if (cq->ibcq.event_handler) { + struct ib_event ev; + + ev.device = cq->ibcq.device; + ev.element.cq = &cq->ibcq; + ev.event = IB_EVENT_CQ_ERR; + cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); + } + return; + } + if (cq->ip) { + wc->uqueue[head].wr_id = entry->wr_id; + wc->uqueue[head].status = entry->status; + wc->uqueue[head].opcode = entry->opcode; + wc->uqueue[head].vendor_err = entry->vendor_err; + wc->uqueue[head].byte_len = entry->byte_len; + wc->uqueue[head].ex.imm_data = + (__u32 __force)entry->ex.imm_data; + wc->uqueue[head].qp_num = entry->qp->qp_num; + wc->uqueue[head].src_qp = entry->src_qp; + wc->uqueue[head].wc_flags = entry->wc_flags; + wc->uqueue[head].pkey_index = entry->pkey_index; + wc->uqueue[head].slid = entry->slid; + wc->uqueue[head].sl = entry->sl; + wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; + wc->uqueue[head].port_num = entry->port_num; + /* Make sure entry is written before the head index. */ + smp_wmb(); + } else + wc->kqueue[head] = *entry; + wc->head = next; + + if (cq->notify == IB_CQ_NEXT_COMP || + (cq->notify == IB_CQ_SOLICITED && solicited)) { + cq->notify = IB_CQ_NONE; + cq->triggered++; + /* + * This will cause send_complete() to be called in + * another thread. + */ + queue_work(qib_cq_wq, &cq->comptask); + } + + spin_unlock_irqrestore(&cq->lock, flags); +} + +/** + * qib_poll_cq - poll for work completion entries + * @ibcq: the completion queue to poll + * @num_entries: the maximum number of entries to return + * @entry: pointer to array where work completions are placed + * + * Returns the number of completion entries polled. + * + * This may be called from interrupt context. Also called by ib_poll_cq() + * in the generic verbs code. + */ +int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) +{ + struct qib_cq *cq = to_icq(ibcq); + struct qib_cq_wc *wc; + unsigned long flags; + int npolled; + u32 tail; + + /* The kernel can only poll a kernel completion queue */ + if (cq->ip) { + npolled = -EINVAL; + goto bail; + } + + spin_lock_irqsave(&cq->lock, flags); + + wc = cq->queue; + tail = wc->tail; + if (tail > (u32) cq->ibcq.cqe) + tail = (u32) cq->ibcq.cqe; + for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { + if (tail == wc->head) + break; + /* The kernel doesn't need a RMB since it has the lock. */ + *entry = wc->kqueue[tail]; + if (tail >= cq->ibcq.cqe) + tail = 0; + else + tail++; + } + wc->tail = tail; + + spin_unlock_irqrestore(&cq->lock, flags); + +bail: + return npolled; +} + +static void send_complete(struct work_struct *work) +{ + struct qib_cq *cq = container_of(work, struct qib_cq, comptask); + + /* + * The completion handler will most likely rearm the notification + * and poll for all pending entries. If a new completion entry + * is added while we are in this routine, queue_work() + * won't call us again until we return so we check triggered to + * see if we need to call the handler again. 
+ */ + for (;;) { + u8 triggered = cq->triggered; + + /* + * IPoIB connected mode assumes the callback is from a + * soft IRQ. We simulate this by blocking "bottom halves". + * See the implementation for ipoib_cm_handle_tx_wc(), + * netif_tx_lock_bh() and netif_tx_lock(). + */ + local_bh_disable(); + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); + local_bh_enable(); + + if (cq->triggered == triggered) + return; + } +} + +/** + * qib_create_cq - create a completion queue + * @ibdev: the device this completion queue is attached to + * @entries: the minimum size of the completion queue + * @context: unused by the QLogic_IB driver + * @udata: user data for libibverbs.so + * + * Returns a pointer to the completion queue or negative errno values + * for failure. + * + * Called by ib_create_cq() in the generic verbs code. + */ +struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, + int comp_vector, struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct qib_ibdev *dev = to_idev(ibdev); + struct qib_cq *cq; + struct qib_cq_wc *wc; + struct ib_cq *ret; + u32 sz; + + if (entries < 1 || entries > ib_qib_max_cqes) { + ret = ERR_PTR(-EINVAL); + goto done; + } + + /* Allocate the completion queue structure. */ + cq = kmalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) { + ret = ERR_PTR(-ENOMEM); + goto done; + } + + /* + * Allocate the completion queue entries and head/tail pointers. + * This is allocated separately so that it can be resized and + * also mapped into user space. + * We need to use vmalloc() in order to support mmap and large + * numbers of entries. + */ + sz = sizeof(*wc); + if (udata && udata->outlen >= sizeof(__u64)) + sz += sizeof(struct ib_uverbs_wc) * (entries + 1); + else + sz += sizeof(struct ib_wc) * (entries + 1); + wc = vmalloc_user(sz); + if (!wc) { + ret = ERR_PTR(-ENOMEM); + goto bail_cq; + } + + /* + * Return the address of the WC as the offset to mmap. + * See qib_mmap() for details. + */ + if (udata && udata->outlen >= sizeof(__u64)) { + int err; + + cq->ip = qib_create_mmap_info(dev, sz, context, wc); + if (!cq->ip) { + ret = ERR_PTR(-ENOMEM); + goto bail_wc; + } + + err = ib_copy_to_udata(udata, &cq->ip->offset, + sizeof(cq->ip->offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; + } + } else + cq->ip = NULL; + + spin_lock(&dev->n_cqs_lock); + if (dev->n_cqs_allocated == ib_qib_max_cqs) { + spin_unlock(&dev->n_cqs_lock); + ret = ERR_PTR(-ENOMEM); + goto bail_ip; + } + + dev->n_cqs_allocated++; + spin_unlock(&dev->n_cqs_lock); + + if (cq->ip) { + spin_lock_irq(&dev->pending_lock); + list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + + /* + * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. + * The number of entries should be >= the number requested or return + * an error. + */ + cq->ibcq.cqe = entries; + cq->notify = IB_CQ_NONE; + cq->triggered = 0; + spin_lock_init(&cq->lock); + INIT_WORK(&cq->comptask, send_complete); + wc->head = 0; + wc->tail = 0; + cq->queue = wc; + + ret = &cq->ibcq; + + goto done; + +bail_ip: + kfree(cq->ip); +bail_wc: + vfree(wc); +bail_cq: + kfree(cq); +done: + return ret; +} + +/** + * qib_destroy_cq - destroy a completion queue + * @ibcq: the completion queue to destroy. + * + * Returns 0 for success. + * + * Called by ib_destroy_cq() in the generic verbs code. 
+ */ +int qib_destroy_cq(struct ib_cq *ibcq) +{ + struct qib_ibdev *dev = to_idev(ibcq->device); + struct qib_cq *cq = to_icq(ibcq); + + flush_work(&cq->comptask); + spin_lock(&dev->n_cqs_lock); + dev->n_cqs_allocated--; + spin_unlock(&dev->n_cqs_lock); + if (cq->ip) + kref_put(&cq->ip->ref, qib_release_mmap_info); + else + vfree(cq->queue); + kfree(cq); + + return 0; +} + +/** + * qib_req_notify_cq - change the notification type for a completion queue + * @ibcq: the completion queue + * @notify_flags: the type of notification to request + * + * Returns 0 for success. + * + * This may be called from interrupt context. Also called by + * ib_req_notify_cq() in the generic verbs code. + */ +int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) +{ + struct qib_cq *cq = to_icq(ibcq); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&cq->lock, flags); + /* + * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow + * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). + */ + if (cq->notify != IB_CQ_NEXT_COMP) + cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; + + if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && + cq->queue->head != cq->queue->tail) + ret = 1; + + spin_unlock_irqrestore(&cq->lock, flags); + + return ret; +} + +/** + * qib_resize_cq - change the size of the CQ + * @ibcq: the completion queue + * + * Returns 0 for success. + */ +int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) +{ + struct qib_cq *cq = to_icq(ibcq); + struct qib_cq_wc *old_wc; + struct qib_cq_wc *wc; + u32 head, tail, n; + int ret; + u32 sz; + + if (cqe < 1 || cqe > ib_qib_max_cqes) { + ret = -EINVAL; + goto bail; + } + + /* + * Need to use vmalloc() if we want to support large #s of entries. + */ + sz = sizeof(*wc); + if (udata && udata->outlen >= sizeof(__u64)) + sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); + else + sz += sizeof(struct ib_wc) * (cqe + 1); + wc = vmalloc_user(sz); + if (!wc) { + ret = -ENOMEM; + goto bail; + } + + /* Check that we can write the offset to mmap. */ + if (udata && udata->outlen >= sizeof(__u64)) { + __u64 offset = 0; + + ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); + if (ret) + goto bail_free; + } + + spin_lock_irq(&cq->lock); + /* + * Make sure head and tail are sane since they + * might be user writable. + */ + old_wc = cq->queue; + head = old_wc->head; + if (head > (u32) cq->ibcq.cqe) + head = (u32) cq->ibcq.cqe; + tail = old_wc->tail; + if (tail > (u32) cq->ibcq.cqe) + tail = (u32) cq->ibcq.cqe; + if (head < tail) + n = cq->ibcq.cqe + 1 + head - tail; + else + n = head - tail; + if (unlikely((u32)cqe < n)) { + ret = -EINVAL; + goto bail_unlock; + } + for (n = 0; tail != head; n++) { + if (cq->ip) + wc->uqueue[n] = old_wc->uqueue[tail]; + else + wc->kqueue[n] = old_wc->kqueue[tail]; + if (tail == (u32) cq->ibcq.cqe) + tail = 0; + else + tail++; + } + cq->ibcq.cqe = cqe; + wc->head = n; + wc->tail = 0; + cq->queue = wc; + spin_unlock_irq(&cq->lock); + + vfree(old_wc); + + if (cq->ip) { + struct qib_ibdev *dev = to_idev(ibcq->device); + struct qib_mmap_info *ip = cq->ip; + + qib_update_mmap_info(dev, ip, sz, wc); + + /* + * Return the offset to mmap. + * See qib_mmap() for details. 
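+ *
+ * A sketch of the (hypothetical) userspace counterpart, assuming the
+ * library copies the returned offset out of the response area and
+ * that cmd_fd is its open device file descriptor:
+ *
+ *	__u64 offset;	// filled in via udata by the code below
+ *	void *wc = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *			cmd_fd, offset);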
+ */ + if (udata && udata->outlen >= sizeof(__u64)) { + ret = ib_copy_to_udata(udata, &ip->offset, + sizeof(ip->offset)); + if (ret) + goto bail; + } + + spin_lock_irq(&dev->pending_lock); + if (list_empty(&ip->pending_mmaps)) + list_add(&ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + + ret = 0; + goto bail; + +bail_unlock: + spin_unlock_irq(&cq->lock); +bail_free: + vfree(wc); +bail: + return ret; +} diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c new file mode 100644 index 000000000000..ca98dd523752 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_diag.c @@ -0,0 +1,894 @@ +/* + * Copyright (c) 2010 QLogic Corporation. All rights reserved. + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * This file contains support for diagnostic functions. It is accessed by + * opening the qib_diag device, normally minor number 129. Diagnostic use + * of the QLogic_IB chip may render the chip or board unusable until the + * driver is unloaded, or in some cases, until the system is rebooted. + * + * Accesses to the chip through this interface are not similar to going + * through the /sys/bus/pci resource mmap interface. + */ + +#include <linux/io.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/uaccess.h> + +#include "qib.h" +#include "qib_common.h" + +/* + * Each client that opens the diag device must read then write + * offset 0, to prevent lossage from random cat or od. diag_state + * sequences this "handshake". + */ +enum diag_state { UNUSED = 0, OPENED, INIT, READY }; + +/* State for an individual client. PID so children cannot abuse handshake */ +static struct qib_diag_client { + struct qib_diag_client *next; + struct qib_devdata *dd; + pid_t pid; + enum diag_state state; +} *client_pool; + +/* + * Get a client struct. Recycled if possible, else kmalloc. 
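+ *
+ * The pool is a simple LIFO list threaded through dc->next; a sketch
+ * of the discipline shared by get_client() and return_client():
+ *
+ *	dc = client_pool;		// pop on open
+ *	if (dc)
+ *		client_pool = dc->next;
+ *	...
+ *	rdc->next = client_pool;	// push on final close
+ *	client_pool = rdc;
+ *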
+ * Must be called with qib_mutex held + */ +static struct qib_diag_client *get_client(struct qib_devdata *dd) +{ + struct qib_diag_client *dc; + + dc = client_pool; + if (dc) + /* got from pool remove it and use */ + client_pool = dc->next; + else + /* None in pool, alloc and init */ + dc = kmalloc(sizeof *dc, GFP_KERNEL); + + if (dc) { + dc->next = NULL; + dc->dd = dd; + dc->pid = current->pid; + dc->state = OPENED; + } + return dc; +} + +/* + * Return to pool. Must be called with qib_mutex held + */ +static void return_client(struct qib_diag_client *dc) +{ + struct qib_devdata *dd = dc->dd; + struct qib_diag_client *tdc, *rdc; + + rdc = NULL; + if (dc == dd->diag_client) { + dd->diag_client = dc->next; + rdc = dc; + } else { + tdc = dc->dd->diag_client; + while (tdc) { + if (dc == tdc->next) { + tdc->next = dc->next; + rdc = dc; + break; + } + tdc = tdc->next; + } + } + if (rdc) { + rdc->state = UNUSED; + rdc->dd = NULL; + rdc->pid = 0; + rdc->next = client_pool; + client_pool = rdc; + } +} + +static int qib_diag_open(struct inode *in, struct file *fp); +static int qib_diag_release(struct inode *in, struct file *fp); +static ssize_t qib_diag_read(struct file *fp, char __user *data, + size_t count, loff_t *off); +static ssize_t qib_diag_write(struct file *fp, const char __user *data, + size_t count, loff_t *off); + +static const struct file_operations diag_file_ops = { + .owner = THIS_MODULE, + .write = qib_diag_write, + .read = qib_diag_read, + .open = qib_diag_open, + .release = qib_diag_release +}; + +static atomic_t diagpkt_count = ATOMIC_INIT(0); +static struct cdev *diagpkt_cdev; +static struct device *diagpkt_device; + +static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data, + size_t count, loff_t *off); + +static const struct file_operations diagpkt_file_ops = { + .owner = THIS_MODULE, + .write = qib_diagpkt_write, +}; + +int qib_diag_add(struct qib_devdata *dd) +{ + char name[16]; + int ret = 0; + + if (atomic_inc_return(&diagpkt_count) == 1) { + ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt", + &diagpkt_file_ops, &diagpkt_cdev, + &diagpkt_device); + if (ret) + goto done; + } + + snprintf(name, sizeof(name), "ipath_diag%d", dd->unit); + ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name, + &diag_file_ops, &dd->diag_cdev, + &dd->diag_device); +done: + return ret; +} + +static void qib_unregister_observers(struct qib_devdata *dd); + +void qib_diag_remove(struct qib_devdata *dd) +{ + struct qib_diag_client *dc; + + if (atomic_dec_and_test(&diagpkt_count)) + qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device); + + qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device); + + /* + * Return all diag_clients of this device. There should be none, + * as we are "guaranteed" that no clients are still open + */ + while (dd->diag_client) + return_client(dd->diag_client); + + /* Now clean up all unused client structs */ + while (client_pool) { + dc = client_pool; + client_pool = dc->next; + kfree(dc); + } + /* Clean up observer list */ + qib_unregister_observers(dd); +} + +/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem * + * + * @dd: the qlogic_ib device + * @offs: the offset in chip-space + * @cntp: Pointer to max (byte) count for transfer starting at offset + * This returns a u32 __iomem * so it can be used for both 64 and 32-bit + * mapping. 
It is needed because with the use of PAT for control of
+ * write-combining, the logically contiguous address-space of the chip
+ * may be split into virtually non-contiguous spaces, with different
+ * attributes, which are then mapped to contiguous physical space
+ * starting from the first BAR.
+ *
+ * The code below makes the same assumptions as were made in
+ * init_chip_wc_pat() (qib_init.c), copied here:
+ * Assumes chip address space looks like:
+ * - kregs + sregs + cregs + uregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * or:
+ * - kregs + sregs + cregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * - uregs
+ *
+ * If cntp is non-NULL, returns how many bytes from offset can be accessed
+ * Returns 0 if the offset is not mapped.
+ */
+static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
+ u32 *cntp)
+{
+ u32 kreglen;
+ u32 snd_bottom, snd_lim = 0;
+ u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
+ u32 __iomem *map = NULL;
+ u32 cnt = 0;
+
+ /* First, simplest case, offset is within the first map. */
+ kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
+ if (offset < kreglen) {
+ map = krb32 + (offset / sizeof(u32));
+ cnt = kreglen - offset;
+ goto mapped;
+ }
+
+ /*
+ * Next check for user regs, the next most common case,
+ * and a cheap check because if they are not in the first map
+ * they are last in chip.
+ */
+ if (dd->userbase) {
+ /* If user regs mapped, they are after send, so set limit. */
+ u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+ snd_lim = dd->uregbase;
+ krb32 = (u32 __iomem *)dd->userbase;
+ if (offset >= dd->uregbase && offset < ulim) {
+ map = krb32 + (offset - dd->uregbase) / sizeof(u32);
+ cnt = ulim - offset;
+ goto mapped;
+ }
+ }
+
+ /*
+ * Lastly, check for offset within Send Buffers.
+ * This is gnarly because struct devdata is deliberately vague
+ * about things like 7322 VL15 buffers, and we are not in
+ * chip-specific code here, so should not make many assumptions.
+ * The one we _do_ make is that the only chip that has more sndbufs
+ * than we admit is the 7322, and it has userregs above that, so
+ * we know the snd_lim.
+ */
+ /* Assume 2K buffers are first. */
+ snd_bottom = dd->pio2k_bufbase;
+ if (snd_lim == 0) {
+ u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+ snd_lim = snd_bottom + tot2k;
+ }
+ /* If 4k buffers exist, account for them by bumping
+ * appropriate limit.
+ */
+ if (dd->piobcnt4k) {
+ u32 tot4k = dd->piobcnt4k * dd->align4k;
+ u32 offs4k = dd->piobufbase >> 32;
+ if (snd_bottom > offs4k)
+ snd_bottom = offs4k;
+ else {
+ /* 4k above 2k. Bump snd_lim, if needed */
+ if (!dd->userbase)
+ snd_lim = offs4k + tot4k;
+ }
+ }
+ /*
+ * Judgement call: can we ignore the space between SendBuffs and
+ * UserRegs, where we would like to see vl15 buffs, but not more?
+ */
+ if (offset >= snd_bottom && offset < snd_lim) {
+ offset -= snd_bottom;
+ map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
+ cnt = snd_lim - offset;
+ }
+
+mapped:
+ if (cntp)
+ *cntp = cnt;
+ return map;
+}
+
+/*
+ * qib_read_umem64 - read a 64-bit quantity from the chip into user space
+ * @dd: the qlogic_ib device
+ * @uaddr: the location to store the data in user memory
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @count: number of bytes to copy (multiple of 32 bits)
+ *
+ * This function also localizes all chip memory accesses.
+ * The copy should be written such that we read full cacheline packets
+ * from the chip.
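+ *
+ * All four qib_{read,write}_umem{32,64}() helpers follow the same
+ * shape against qib_remap_ioaddr32(); a sketch (error handling as in
+ * the real bodies below):
+ *
+ *	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
+ *	if (reg_addr == NULL || limit == 0)
+ *		return -EINVAL;		// offset not mapped
+ *	if (count >= limit)
+ *		count = limit;		// clamp to end of region
+ *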
This is usually used for a single qword + * + * NOTE: This assumes the chip address is 64-bit aligned. + */ +static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr, + u32 regoffs, size_t count) +{ + const u64 __iomem *reg_addr; + const u64 __iomem *reg_end; + u32 limit; + int ret; + + reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit); + if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) { + ret = -EINVAL; + goto bail; + } + if (count >= limit) + count = limit; + reg_end = reg_addr + (count / sizeof(u64)); + + /* not very efficient, but it works for now */ + while (reg_addr < reg_end) { + u64 data = readq(reg_addr); + + if (copy_to_user(uaddr, &data, sizeof(u64))) { + ret = -EFAULT; + goto bail; + } + reg_addr++; + uaddr += sizeof(u64); + } + ret = 0; +bail: + return ret; +} + +/* + * qib_write_umem64 - write a 64-bit quantity to the chip from user space + * @dd: the qlogic_ib device + * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore) + * @uaddr: the source of the data in user memory + * @count: the number of bytes to copy (multiple of 32 bits) + * + * This is usually used for a single qword + * NOTE: This assumes the chip address is 64-bit aligned. + */ + +static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs, + const void __user *uaddr, size_t count) +{ + u64 __iomem *reg_addr; + const u64 __iomem *reg_end; + u32 limit; + int ret; + + reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit); + if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) { + ret = -EINVAL; + goto bail; + } + if (count >= limit) + count = limit; + reg_end = reg_addr + (count / sizeof(u64)); + + /* not very efficient, but it works for now */ + while (reg_addr < reg_end) { + u64 data; + if (copy_from_user(&data, uaddr, sizeof(data))) { + ret = -EFAULT; + goto bail; + } + writeq(data, reg_addr); + + reg_addr++; + uaddr += sizeof(u64); + } + ret = 0; +bail: + return ret; +} + +/* + * qib_read_umem32 - read a 32-bit quantity from the chip into user space + * @dd: the qlogic_ib device + * @uaddr: the location to store the data in user memory + * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore) + * @count: number of bytes to copy + * + * read 32 bit values, not 64 bit; for memories that only + * support 32 bit reads; usually a single dword. + */ +static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr, + u32 regoffs, size_t count) +{ + const u32 __iomem *reg_addr; + const u32 __iomem *reg_end; + u32 limit; + int ret; + + reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit); + if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) { + ret = -EINVAL; + goto bail; + } + if (count >= limit) + count = limit; + reg_end = reg_addr + (count / sizeof(u32)); + + /* not very efficient, but it works for now */ + while (reg_addr < reg_end) { + u32 data = readl(reg_addr); + + if (copy_to_user(uaddr, &data, sizeof(data))) { + ret = -EFAULT; + goto bail; + } + + reg_addr++; + uaddr += sizeof(u32); + + } + ret = 0; +bail: + return ret; +} + +/* + * qib_write_umem32 - write a 32-bit quantity to the chip from user space + * @dd: the qlogic_ib device + * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore) + * @uaddr: the source of the data in user memory + * @count: number of bytes to copy + * + * write 32 bit values, not 64 bit; for memories that only + * support 32 bit write; usually a single dword. 
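+ *
+ * The diag read/write entry points choose between the 32- and 64-bit
+ * variants purely from alignment, e.g. (condensing the if/else used
+ * in qib_diag_write() further below):
+ *
+ *	int use_32 = (count % 8) || (*off % 8);
+ *
+ *	ret = use_32 ? qib_write_umem32(dd, (u32) *off, data, count)
+ *		     : qib_write_umem64(dd, (u32) *off, data, count);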
+ */ + +static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs, + const void __user *uaddr, size_t count) +{ + u32 __iomem *reg_addr; + const u32 __iomem *reg_end; + u32 limit; + int ret; + + reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit); + if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) { + ret = -EINVAL; + goto bail; + } + if (count >= limit) + count = limit; + reg_end = reg_addr + (count / sizeof(u32)); + + while (reg_addr < reg_end) { + u32 data; + + if (copy_from_user(&data, uaddr, sizeof(data))) { + ret = -EFAULT; + goto bail; + } + writel(data, reg_addr); + + reg_addr++; + uaddr += sizeof(u32); + } + ret = 0; +bail: + return ret; +} + +static int qib_diag_open(struct inode *in, struct file *fp) +{ + int unit = iminor(in) - QIB_DIAG_MINOR_BASE; + struct qib_devdata *dd; + struct qib_diag_client *dc; + int ret; + + mutex_lock(&qib_mutex); + + dd = qib_lookup(unit); + + if (dd == NULL || !(dd->flags & QIB_PRESENT) || + !dd->kregbase) { + ret = -ENODEV; + goto bail; + } + + dc = get_client(dd); + if (!dc) { + ret = -ENOMEM; + goto bail; + } + dc->next = dd->diag_client; + dd->diag_client = dc; + fp->private_data = dc; + ret = 0; +bail: + mutex_unlock(&qib_mutex); + + return ret; +} + +/** + * qib_diagpkt_write - write an IB packet + * @fp: the diag data device file pointer + * @data: qib_diag_pkt structure saying where to get the packet + * @count: size of data to write + * @off: unused by this code + */ +static ssize_t qib_diagpkt_write(struct file *fp, + const char __user *data, + size_t count, loff_t *off) +{ + u32 __iomem *piobuf; + u32 plen, clen, pbufn; + struct qib_diag_xpkt dp; + u32 *tmpbuf = NULL; + struct qib_devdata *dd; + struct qib_pportdata *ppd; + ssize_t ret = 0; + + if (count != sizeof(dp)) { + ret = -EINVAL; + goto bail; + } + if (copy_from_user(&dp, data, sizeof(dp))) { + ret = -EFAULT; + goto bail; + } + + dd = qib_lookup(dp.unit); + if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) { + ret = -ENODEV; + goto bail; + } + if (!(dd->flags & QIB_INITTED)) { + /* no hardware, freeze, etc. 
*/ + ret = -ENODEV; + goto bail; + } + + if (dp.version != _DIAG_XPKT_VERS) { + qib_dev_err(dd, "Invalid version %u for diagpkt_write\n", + dp.version); + ret = -EINVAL; + goto bail; + } + /* send count must be an exact number of dwords */ + if (dp.len & 3) { + ret = -EINVAL; + goto bail; + } + if (!dp.port || dp.port > dd->num_pports) { + ret = -EINVAL; + goto bail; + } + ppd = &dd->pport[dp.port - 1]; + + /* need total length before first word written */ + /* +1 word is for the qword padding */ + plen = sizeof(u32) + dp.len; + clen = dp.len >> 2; + + if ((plen + 4) > ppd->ibmaxlen) { + ret = -EINVAL; + goto bail; /* before writing pbc */ + } + tmpbuf = vmalloc(plen); + if (!tmpbuf) { + qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, " + "failing\n"); + ret = -ENOMEM; + goto bail; + } + + if (copy_from_user(tmpbuf, + (const void __user *) (unsigned long) dp.data, + dp.len)) { + ret = -EFAULT; + goto bail; + } + + plen >>= 2; /* in dwords */ + + if (dp.pbc_wd == 0) + dp.pbc_wd = plen; + + piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn); + if (!piobuf) { + ret = -EBUSY; + goto bail; + } + /* disarm it just to be extra sure */ + dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn)); + + /* disable header check on pbufn for this packet */ + dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL); + + writeq(dp.pbc_wd, piobuf); + /* + * Copy all but the trigger word, then flush, so it's written + * to chip before trigger word, then write trigger word, then + * flush again, so packet is sent. + */ + if (dd->flags & QIB_PIO_FLUSH_WC) { + qib_flush_wc(); + qib_pio_copy(piobuf + 2, tmpbuf, clen - 1); + qib_flush_wc(); + __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1); + } else + qib_pio_copy(piobuf + 2, tmpbuf, clen); + + if (dd->flags & QIB_USE_SPCL_TRIG) { + u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; + + qib_flush_wc(); + __raw_writel(0xaebecede, piobuf + spcl_off); + } + + /* + * Ensure buffer is written to the chip, then re-enable + * header checks (if supported by chip). The txchk + * code will ensure seen by chip before returning. + */ + qib_flush_wc(); + qib_sendbuf_done(dd, pbufn); + dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL); + + ret = sizeof(dp); + +bail: + vfree(tmpbuf); + return ret; +} + +static int qib_diag_release(struct inode *in, struct file *fp) +{ + mutex_lock(&qib_mutex); + return_client(fp->private_data); + fp->private_data = NULL; + mutex_unlock(&qib_mutex); + return 0; +} + +/* + * Chip-specific code calls to register its interest in + * a specific range. 
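+ *
+ * A minimal sketch of such a registration (my_obs, my_hook and the
+ * observed range are hypothetical; the fields are the ones consumed
+ * by diag_get_observer() and the hook calls below):
+ *
+ *	static const struct diag_observer my_obs = {
+ *		.hook	= my_hook,	// called under qib_diag_trans_lock
+ *		.bottom	= 0x1000,	// first byte offset observed
+ *		.top	= 0x103f,	// last byte offset observed
+ *	};
+ *
+ *	ret = qib_register_observer(dd, &my_obs);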
+ */ +struct diag_observer_list_elt { + struct diag_observer_list_elt *next; + const struct diag_observer *op; +}; + +int qib_register_observer(struct qib_devdata *dd, + const struct diag_observer *op) +{ + struct diag_observer_list_elt *olp; + int ret = -EINVAL; + + if (!dd || !op) + goto bail; + ret = -ENOMEM; + olp = vmalloc(sizeof *olp); + if (!olp) { + printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n"); + goto bail; + } + if (olp) { + unsigned long flags; + + spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); + olp->op = op; + olp->next = dd->diag_observer_list; + dd->diag_observer_list = olp; + spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); + ret = 0; + } +bail: + return ret; +} + +/* Remove all registered observers when device is closed */ +static void qib_unregister_observers(struct qib_devdata *dd) +{ + struct diag_observer_list_elt *olp; + unsigned long flags; + + spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); + olp = dd->diag_observer_list; + while (olp) { + /* Pop one observer, let go of lock */ + dd->diag_observer_list = olp->next; + spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); + vfree(olp); + /* try again. */ + spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); + olp = dd->diag_observer_list; + } + spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); +} + +/* + * Find the observer, if any, for the specified address. Initial implementation + * is simple stack of observers. This must be called with diag transaction + * lock held. + */ +static const struct diag_observer *diag_get_observer(struct qib_devdata *dd, + u32 addr) +{ + struct diag_observer_list_elt *olp; + const struct diag_observer *op = NULL; + + olp = dd->diag_observer_list; + while (olp) { + op = olp->op; + if (addr >= op->bottom && addr <= op->top) + break; + olp = olp->next; + } + if (!olp) + op = NULL; + + return op; +} + +static ssize_t qib_diag_read(struct file *fp, char __user *data, + size_t count, loff_t *off) +{ + struct qib_diag_client *dc = fp->private_data; + struct qib_devdata *dd = dc->dd; + void __iomem *kreg_base; + ssize_t ret; + + if (dc->pid != current->pid) { + ret = -EPERM; + goto bail; + } + + kreg_base = dd->kregbase; + + if (count == 0) + ret = 0; + else if ((count % 4) || (*off % 4)) + /* address or length is not 32-bit aligned, hence invalid */ + ret = -EINVAL; + else if (dc->state < READY && (*off || count != 8)) + ret = -EINVAL; /* prevent cat /dev/qib_diag* */ + else { + unsigned long flags; + u64 data64 = 0; + int use_32; + const struct diag_observer *op; + + use_32 = (count % 8) || (*off % 8); + ret = -1; + spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); + /* + * Check for observer on this address range. + * we only support a single 32 or 64-bit read + * via observer, currently. + */ + op = diag_get_observer(dd, *off); + if (op) { + u32 offset = *off; + ret = op->hook(dd, op, offset, &data64, 0, use_32); + } + /* + * We need to release lock before any copy_to_user(), + * whether implicit in qib_read_umem* or explicit below. + */ + spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); + if (!op) { + if (use_32) + /* + * Address or length is not 64-bit aligned; + * do 32-bit rd + */ + ret = qib_read_umem32(dd, data, (u32) *off, + count); + else + ret = qib_read_umem64(dd, data, (u32) *off, + count); + } else if (ret == count) { + /* Below finishes case where observer existed */ + ret = copy_to_user(data, &data64, use_32 ? 
+ sizeof(u32) : sizeof(u64)); + if (ret) + ret = -EFAULT; + } + } + + if (ret >= 0) { + *off += count; + ret = count; + if (dc->state == OPENED) + dc->state = INIT; + } +bail: + return ret; +} + +static ssize_t qib_diag_write(struct file *fp, const char __user *data, + size_t count, loff_t *off) +{ + struct qib_diag_client *dc = fp->private_data; + struct qib_devdata *dd = dc->dd; + void __iomem *kreg_base; + ssize_t ret; + + if (dc->pid != current->pid) { + ret = -EPERM; + goto bail; + } + + kreg_base = dd->kregbase; + + if (count == 0) + ret = 0; + else if ((count % 4) || (*off % 4)) + /* address or length is not 32-bit aligned, hence invalid */ + ret = -EINVAL; + else if (dc->state < READY && + ((*off || count != 8) || dc->state != INIT)) + /* No writes except second-step of init seq */ + ret = -EINVAL; /* before any other write allowed */ + else { + unsigned long flags; + const struct diag_observer *op = NULL; + int use_32 = (count % 8) || (*off % 8); + + /* + * Check for observer on this address range. + * We only support a single 32 or 64-bit write + * via observer, currently. This helps, because + * we would otherwise have to jump through hoops + * to make "diag transaction" meaningful when we + * cannot do a copy_from_user while holding the lock. + */ + if (count == 4 || count == 8) { + u64 data64; + u32 offset = *off; + ret = copy_from_user(&data64, data, count); + if (ret) { + ret = -EFAULT; + goto bail; + } + spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); + op = diag_get_observer(dd, *off); + if (op) + ret = op->hook(dd, op, offset, &data64, ~0Ull, + use_32); + spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); + } + + if (!op) { + if (use_32) + /* + * Address or length is not 64-bit aligned; + * do 32-bit write + */ + ret = qib_write_umem32(dd, (u32) *off, data, + count); + else + ret = qib_write_umem64(dd, (u32) *off, data, + count); + } + } + + if (ret >= 0) { + *off += count; + ret = count; + if (dc->state == INIT) + dc->state = READY; /* all read/write OK now */ + } +bail: + return ret; +} diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c new file mode 100644 index 000000000000..2920bb39a65b --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_dma.c @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/types.h> +#include <linux/scatterlist.h> + +#include "qib_verbs.h" + +#define BAD_DMA_ADDRESS ((u64) 0) + +/* + * The following functions implement driver specific replacements + * for the ib_dma_*() functions. + * + * These functions return kernel virtual addresses instead of + * device bus addresses since the driver uses the CPU to copy + * data instead of using hardware DMA. + */ + +static int qib_mapping_error(struct ib_device *dev, u64 dma_addr) +{ + return dma_addr == BAD_DMA_ADDRESS; +} + +static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr, + size_t size, enum dma_data_direction direction) +{ + BUG_ON(!valid_dma_direction(direction)); + return (u64) cpu_addr; +} + +static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(!valid_dma_direction(direction)); +} + +static u64 qib_dma_map_page(struct ib_device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + u64 addr; + + BUG_ON(!valid_dma_direction(direction)); + + if (offset + size > PAGE_SIZE) { + addr = BAD_DMA_ADDRESS; + goto done; + } + + addr = (u64) page_address(page); + if (addr) + addr += offset; + /* TODO: handle highmem pages */ + +done: + return addr; +} + +static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(!valid_dma_direction(direction)); +} + +static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction direction) +{ + struct scatterlist *sg; + u64 addr; + int i; + int ret = nents; + + BUG_ON(!valid_dma_direction(direction)); + + for_each_sg(sgl, sg, nents, i) { + addr = (u64) page_address(sg_page(sg)); + /* TODO: handle highmem pages */ + if (!addr) { + ret = 0; + break; + } + } + return ret; +} + +static void qib_unmap_sg(struct ib_device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + BUG_ON(!valid_dma_direction(direction)); +} + +static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg) +{ + u64 addr = (u64) page_address(sg_page(sg)); + + if (addr) + addr += sg->offset; + return addr; +} + +static unsigned int qib_sg_dma_len(struct ib_device *dev, + struct scatterlist *sg) +{ + return sg->length; +} + +static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, + size_t size, enum dma_data_direction dir) +{ +} + +static void qib_sync_single_for_device(struct ib_device *dev, u64 addr, + size_t size, + enum dma_data_direction dir) +{ +} + +static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size, + u64 *dma_handle, gfp_t flag) +{ + struct page *p; + void *addr = NULL; + + p = alloc_pages(flag, get_order(size)); + if (p) + addr = page_address(p); + if (dma_handle) + *dma_handle = (u64) addr; + return addr; +} + +static void qib_dma_free_coherent(struct ib_device *dev, size_t size, + void *cpu_addr, u64 dma_handle) +{ + free_pages((unsigned long) cpu_addr, get_order(size)); +} + +struct ib_dma_mapping_ops qib_dma_mapping_ops = { + .mapping_error = qib_mapping_error, + .map_single = qib_dma_map_single, + .unmap_single = qib_dma_unmap_single, + .map_page = qib_dma_map_page, + .unmap_page = 
qib_dma_unmap_page,
+ .map_sg = qib_map_sg,
+ .unmap_sg = qib_unmap_sg,
+ .dma_address = qib_sg_dma_address,
+ .dma_len = qib_sg_dma_len,
+ .sync_single_for_cpu = qib_sync_single_for_cpu,
+ .sync_single_for_device = qib_sync_single_for_device,
+ .alloc_coherent = qib_dma_alloc_coherent,
+ .free_coherent = qib_dma_free_coherent
+};
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
new file mode 100644
index 000000000000..f15ce076ac49
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * The size has to be longer than this string, so we can append
+ * board/chip information to it in the init code.
+ */
+const char ib_qib_version[] = QIB_IDSTR "\n";
+
+DEFINE_SPINLOCK(qib_devs_lock);
+LIST_HEAD(qib_dev_list);
+DEFINE_MUTEX(qib_mutex); /* general driver use */
+
+unsigned qib_ibmtu;
+module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
+MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
+
+unsigned qib_compat_ddr_negotiate = 1;
+module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(compat_ddr_negotiate,
+ "Attempt pre-IBTA 1.2 DDR speed negotiation");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("QLogic <support@qlogic.com>");
+MODULE_DESCRIPTION("QLogic IB driver");
+
+/*
+ * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
+ * PIO send buffers. This is well beyond anything currently
+ * defined in the InfiniBand spec.
+ */
+#define QIB_PIO_MAXIBHDR 128
+
+struct qlogic_ib_stats qib_stats;
+
+const char *qib_get_unit_name(int unit)
+{
+ static char iname[16];
+
+ snprintf(iname, sizeof iname, "infinipath%u", unit);
+ return iname;
+}
+
+/*
+ * Return count of units with at least one port ACTIVE.
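+ *
+ * Both unit-counting routines below use the same per-port test: a
+ * port counts as up only once it has a LID and has reached at least
+ * INIT, i.e.
+ *
+ *	if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+ *	    QIBL_LINKARMED | QIBL_LINKACTIVE)))
+ *		nunits_active++;	// or nup++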
+ */
+int qib_count_active_units(void)
+{
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ int pidx, nunits_active = 0;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ list_for_each_entry(dd, &qib_dev_list, list) {
+ if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
+ continue;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE))) {
+ nunits_active++;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+ return nunits_active;
+}
+
+/*
+ * Return count of all units, optionally return in arguments
+ * the number of usable (present) units, and the number of
+ * ports that are up.
+ */
+int qib_count_units(int *npresentp, int *nupp)
+{
+ int nunits = 0, npresent = 0, nup = 0;
+ struct qib_devdata *dd;
+ unsigned long flags;
+ int pidx;
+ struct qib_pportdata *ppd;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+
+ list_for_each_entry(dd, &qib_dev_list, list) {
+ nunits++;
+ if ((dd->flags & QIB_PRESENT) && dd->kregbase)
+ npresent++;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE)))
+ nup++;
+ }
+ }
+
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ if (npresentp)
+ *npresentp = npresent;
+ if (nupp)
+ *nupp = nup;
+
+ return nunits;
+}
+
+/**
+ * qib_wait_linkstate - wait for an IB link state change to occur
+ * @ppd: the qlogic_ib per-port data
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for an IB link state change to occur.
+ * For now, take the easy polling route. Currently used only by
+ * qib_set_linkstate. Returns 0 if the state is reached, otherwise
+ * -ETIMEDOUT. The state can have multiple state bits set, to match
+ * any of several transitions.
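+ *
+ * Typical use, as in qib_set_linkstate() below: kick off a transition,
+ * then wait briefly for the corresponding lflags bit, e.g.
+ *
+ *	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ *			 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
+ *	ret = qib_wait_linkstate(ppd, QIBL_LINKACTIVE, 10);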
+ */
+int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ if (ppd->state_wanted) {
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ret = -EBUSY;
+ goto bail;
+ }
+ ppd->state_wanted = state;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wait_event_interruptible_timeout(ppd->state_wait,
+ (ppd->lflags & state),
+ msecs_to_jiffies(msecs));
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->state_wanted = 0;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (!(ppd->lflags & state))
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+bail:
+ return ret;
+}
+
+int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
+{
+ u32 lstate;
+ int ret;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ switch (newstate) {
+ case QIB_IB_LINKDOWN_ONLY:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN_SLEEP:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN_DISABLE:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKARM:
+ if (ppd->lflags & QIBL_LINKARMED) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ /*
+ * Since the port can be ACTIVE when we ask for ARMED,
+ * clear QIBL_LINKV so we can wait for a transition.
+ * If the link isn't ARMED, then something else happened
+ * and there is no point waiting for ARMED.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
+ lstate = QIBL_LINKV;
+ break;
+
+ case QIB_IB_LINKACTIVE:
+ if (ppd->lflags & QIBL_LINKACTIVE) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(ppd->lflags & QIBL_LINKARMED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
+ lstate = QIBL_LINKACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+ ret = qib_wait_linkstate(ppd, lstate, 10);
+
+bail:
+ return ret;
+}
+
+/*
+ * Get address of eager buffer from its index (allocated in chunks, not
+ * contiguous).
+ */
+static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
+{
+ const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
+ const u32 idx = etail % rcd->rcvegrbufs_perchunk;
+
+ return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
+}
+
+/*
+ * Returns 1 if error was a CRC, else 0.
+ * Needed for some chips' synthesized error counters.
+ */
+static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
+ u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
+ struct qib_message_header *hdr)
+{
+ u32 ret = 0;
+
+ if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
+ ret = 1;
+ return ret;
+}
+
+/*
+ * qib_kreceive - receive a packet
+ * @rcd: the qlogic_ib context
+ * @llic: gets count of good packets needed to clear lli,
+ * (used with chips that need to track crcs for lli)
+ *
+ * called from interrupt handler for errors or receive interrupt
+ * Returns number of CRC error packets, needed by some chips for
+ * local link integrity tracking. crcs are adjusted down by following
+ * good packets, if any, and count of good packets is also tracked.
+ */
+u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
+{
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ __le32 *rhf_addr;
+ void *ebuf;
+ const u32 rsize = dd->rcvhdrentsize; /* words */
+ const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */
+ u32 etail = -1, l, hdrqtail;
+ struct qib_message_header *hdr;
+ u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
+ int last;
+ u64 lval;
+ struct qib_qp *qp, *nqp;
+
+ l = rcd->head;
+ rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ u32 seq = qib_hdrget_seq(rhf_addr);
+ if (seq != rcd->seq_cnt)
+ goto bail;
+ hdrqtail = 0;
+ } else {
+ hdrqtail = qib_get_rcvhdrtail(rcd);
+ if (l == hdrqtail)
+ goto bail;
+ smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
+ }
+
+ for (last = 0, i = 1; !last; i += !last) {
+ hdr = dd->f_get_msgheader(dd, rhf_addr);
+ eflags = qib_hdrget_err_flags(rhf_addr);
+ etype = qib_hdrget_rcv_type(rhf_addr);
+ /* total length */
+ tlen = qib_hdrget_length_in_bytes(rhf_addr);
+ ebuf = NULL;
+ if ((dd->flags & QIB_NODMA_RTAIL) ?
+ qib_hdrget_use_egr_buf(rhf_addr) :
+ (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
+ etail = qib_hdrget_index(rhf_addr);
+ updegr = 1;
+ if (tlen > sizeof(*hdr) ||
+ etype >= RCVHQ_RCV_TYPE_NON_KD)
+ ebuf = qib_get_egrbuf(rcd, etail);
+ }
+ if (!eflags) {
+ u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
+
+ if (lrh_len != tlen) {
+ qib_stats.sps_lenerrs++;
+ goto move_along;
+ }
+ }
+ if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
+ ebuf == NULL &&
+ tlen > (dd->rcvhdrentsize - 2 + 1 -
+ qib_hdrget_offset(rhf_addr)) << 2) {
+ goto move_along;
+ }
+
+ /*
+ * Both tiderr and qibhdrerr are set for all plain IB
+ * packets; only qibhdrerr should be set.
+ */
+ if (unlikely(eflags))
+ crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+ etail, rhf_addr, hdr);
+ else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
+ qib_ib_rcv(rcd, hdr, ebuf, tlen);
+ if (crcs)
+ crcs--;
+ else if (llic && *llic)
+ --*llic;
+ }
+move_along:
+ l += rsize;
+ if (l >= maxcnt)
+ l = 0;
+ rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ u32 seq = qib_hdrget_seq(rhf_addr);
+
+ if (++rcd->seq_cnt > 13)
+ rcd->seq_cnt = 1;
+ if (seq != rcd->seq_cnt)
+ last = 1;
+ } else if (l == hdrqtail)
+ last = 1;
+ /*
+ * Update head regs etc., every 16 packets, if not last pkt,
+ * to help prevent rcvhdrq overflows, when many packets
+ * are processed and queue is nearly full.
+ * Don't request an interrupt for intermediate updates.
+ */
+ lval = l;
+ if (!last && !(i & 0xf)) {
+ dd->f_update_usrhead(rcd, lval, updegr, etail);
+ updegr = 0;
+ }
+ }
+
+ rcd->head = l;
+ rcd->pkt_count += i;
+
+ /*
+ * Iterate over all QPs waiting to respond.
+ * The list won't change since the IRQ is only run on one CPU.
+ */
+ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
+ list_del_init(&qp->rspwait);
+ if (qp->r_flags & QIB_R_RSP_NAK) {
+ qp->r_flags &= ~QIB_R_RSP_NAK;
+ qib_send_rc_ack(qp);
+ }
+ if (qp->r_flags & QIB_R_RSP_SEND) {
+ unsigned long flags;
+
+ qp->r_flags &= ~QIB_R_RSP_SEND;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] &
+ QIB_PROCESS_OR_FLUSH_SEND)
+ qib_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+
+bail:
+ /* Report number of packets consumed */
+ if (npkts)
+ *npkts = i;
+
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ lval = (u64)rcd->head | dd->rhdrhead_intr_off;
+ dd->f_update_usrhead(rcd, lval, updegr, etail);
+ return crcs;
+}
+
+/**
+ * qib_set_mtu - set the MTU
+ * @ppd: the perport data
+ * @arg: the new MTU
+ *
+ * We can handle "any" incoming size, the issue here is whether we
+ * need to restrict our outgoing size. For now, we don't do any
+ * sanity checking on this, and we don't deal with what happens to
+ * programs that are already running when the size changes.
+ * NOTE: changing the MTU will usually cause the IBC to go back to
+ * link INIT state...
+ */
+int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
+{
+ u32 piosize;
+ int ret, chk;
+
+ if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
+ arg != 4096) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ chk = ib_mtu_enum_to_int(qib_ibmtu);
+ if (chk > 0 && arg > chk) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ piosize = ppd->ibmaxlen;
+ ppd->ibmtu = arg;
+
+ if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
+ /* Only if it's not the initial value (or reset to it) */
+ if (piosize != ppd->init_ibmaxlen) {
+ if (arg > piosize && arg <= ppd->init_ibmaxlen)
+ piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
+ ppd->ibmaxlen = piosize;
+ }
+ } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
+ piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
+ ppd->ibmaxlen = piosize;
+ }
+
+ ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
+{
+ struct qib_devdata *dd = ppd->dd;
+ ppd->lid = lid;
+ ppd->lmc = lmc;
+
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
+ lid | (~((1U << lmc) - 1)) << 16);
+
+ qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
+ dd->unit, ppd->port, lid);
+
+ return 0;
+}
+
+/*
+ * The following deals with the "obviously simple" task of overriding the
+ * state of the LEDs, which normally indicate link physical and logical
+ * status. The complications arise in dealing with different hardware
+ * mappings and the board-dependent routine being called from interrupts,
+ * and then there's the requirement to _flash_ them.
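+ *
+ * The override value packs two 4-bit blink phases plus a frequency;
+ * a sketch of the decode done in qib_set_led_override() below:
+ *
+ *	phase0 =  val       & 0xF;	// LEDs during phase 0
+ *	phase1 = (val >> 4) & 0xF;	// LEDs during phase 1
+ *	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
+ *	timeoff = freq ? (HZ << 4) / freq : HZ;	// jiffies per phase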
+ */ +#define LED_OVER_FREQ_SHIFT 8 +#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT) +/* Below is "non-zero" to force override, but both actual LEDs are off */ +#define LED_OVER_BOTH_OFF (8) + +static void qib_run_led_override(unsigned long opaque) +{ + struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; + struct qib_devdata *dd = ppd->dd; + int timeoff; + int ph_idx; + + if (!(dd->flags & QIB_INITTED)) + return; + + ph_idx = ppd->led_override_phase++ & 1; + ppd->led_override = ppd->led_override_vals[ph_idx]; + timeoff = ppd->led_override_timeoff; + + dd->f_setextled(ppd, 1); + /* + * don't re-fire the timer if user asked for it to be off; we let + * it fire one more time after they turn it off to simplify + */ + if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) + mod_timer(&ppd->led_override_timer, jiffies + timeoff); +} + +void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val) +{ + struct qib_devdata *dd = ppd->dd; + int timeoff, freq; + + if (!(dd->flags & QIB_INITTED)) + return; + + /* First check if we are blinking. If not, use 1HZ polling */ + timeoff = HZ; + freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT; + + if (freq) { + /* For blink, set each phase from one nybble of val */ + ppd->led_override_vals[0] = val & 0xF; + ppd->led_override_vals[1] = (val >> 4) & 0xF; + timeoff = (HZ << 4)/freq; + } else { + /* Non-blink set both phases the same. */ + ppd->led_override_vals[0] = val & 0xF; + ppd->led_override_vals[1] = val & 0xF; + } + ppd->led_override_timeoff = timeoff; + + /* + * If the timer has not already been started, do so. Use a "quick" + * timeout so the function will be called soon, to look at our request. + */ + if (atomic_inc_return(&ppd->led_override_timer_active) == 1) { + /* Need to start timer */ + init_timer(&ppd->led_override_timer); + ppd->led_override_timer.function = qib_run_led_override; + ppd->led_override_timer.data = (unsigned long) ppd; + ppd->led_override_timer.expires = jiffies + 1; + add_timer(&ppd->led_override_timer); + } else { + if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) + mod_timer(&ppd->led_override_timer, jiffies + 1); + atomic_dec(&ppd->led_override_timer_active); + } +} + +/** + * qib_reset_device - reset the chip if possible + * @unit: the device to reset + * + * Whether or not reset is successful, we attempt to re-initialize the chip + * (that is, much like a driver unload/reload). We clear the INITTED flag + * so that the various entry points will fail until we reinitialize. 
For + * now, we only allow this if no user contexts are open that use chip resources + */ +int qib_reset_device(int unit) +{ + int ret, i; + struct qib_devdata *dd = qib_lookup(unit); + struct qib_pportdata *ppd; + unsigned long flags; + int pidx; + + if (!dd) { + ret = -ENODEV; + goto bail; + } + + qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit); + + if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) { + qib_devinfo(dd->pcidev, "Invalid unit number %u or " + "not initialized or not present\n", unit); + ret = -ENXIO; + goto bail; + } + + spin_lock_irqsave(&dd->uctxt_lock, flags); + if (dd->rcd) + for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) { + if (!dd->rcd[i] || !dd->rcd[i]->cnt) + continue; + spin_unlock_irqrestore(&dd->uctxt_lock, flags); + ret = -EBUSY; + goto bail; + } + spin_unlock_irqrestore(&dd->uctxt_lock, flags); + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + if (atomic_read(&ppd->led_override_timer_active)) { + /* Need to stop LED timer, _then_ shut off LEDs */ + del_timer_sync(&ppd->led_override_timer); + atomic_set(&ppd->led_override_timer_active, 0); + } + + /* Shut off LEDs after we are sure timer is not running */ + ppd->led_override = LED_OVER_BOTH_OFF; + dd->f_setextled(ppd, 0); + if (dd->flags & QIB_HAS_SEND_DMA) + qib_teardown_sdma(ppd); + } + + ret = dd->f_reset(dd); + if (ret == 1) + ret = qib_init(dd, 1); + else + ret = -EAGAIN; + if (ret) + qib_dev_err(dd, "Reinitialize unit %u after " + "reset failed with %d\n", unit, ret); + else + qib_devinfo(dd->pcidev, "Reinitialized unit %u after " + "resetting\n", unit); + +bail: + return ret; +} diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c new file mode 100644 index 000000000000..92d9cfe98a68 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_eeprom.c @@ -0,0 +1,451 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> + +#include "qib.h" + +/* + * Functions specific to the serial EEPROM on cards handled by ib_qib. 
+ * The actual serial interface code is in qib_twsi.c. This file is a client of it.
+ */
+
+/**
+ * qib_eeprom_read - receives bytes from the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: address to read from
+ * @buff: where to store result
+ * @len: number of bytes to receive
+ */
+int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
+ void *buff, int len)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (!ret) {
+ ret = qib_twsi_reset(dd);
+ if (ret)
+ qib_dev_err(dd, "EEPROM Reset for read failed\n");
+ else
+ ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
+ eeprom_offset, buff, len);
+ mutex_unlock(&dd->eep_lock);
+ }
+
+ return ret;
+}
+
+/*
+ * Actually update the eeprom, first doing write enable if
+ * needed, then restoring write enable state.
+ * Must be called with eep_lock held
+ */
+static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
+ const void *buf, int len)
+{
+ int ret, pwen;
+
+ pwen = dd->f_eeprom_wen(dd, 1);
+ ret = qib_twsi_reset(dd);
+ if (ret)
+ qib_dev_err(dd, "EEPROM Reset for write failed\n");
+ else
+ ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
+ offset, buf, len);
+ dd->f_eeprom_wen(dd, pwen);
+ return ret;
+}
+
+/**
+ * qib_eeprom_write - writes data to the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: where to place data
+ * @buff: data to write
+ * @len: number of bytes to write
+ */
+int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
+ const void *buff, int len)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (!ret) {
+ ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
+ mutex_unlock(&dd->eep_lock);
+ }
+
+ return ret;
+}
+
+static u8 flash_csum(struct qib_flash *ifp, int adjust)
+{
+ u8 *ip = (u8 *) ifp;
+ u8 csum = 0, len;
+
+ /*
+ * Limit length checksummed to max length of actual data.
+ * Checksum of erased eeprom will still be bad, but we avoid
+ * reading past the end of the buffer we were passed.
+ */
+ len = ifp->if_length;
+ if (len > sizeof(struct qib_flash))
+ len = sizeof(struct qib_flash);
+ while (len--)
+ csum += *ip++;
+ csum -= ifp->if_csum;
+ csum = ~csum;
+ if (adjust)
+ ifp->if_csum = csum;
+
+ return csum;
+}
+
+/**
+ * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
+ * @dd: the qlogic_ib device
+ *
+ * We have the capability to use the nguid field, and get
+ * the guid from the first chip's flash, to use for all of them.
+ */
+void qib_get_eeprom_info(struct qib_devdata *dd)
+{
+ void *buf;
+ struct qib_flash *ifp;
+ __be64 guid;
+ int len, eep_stat;
+ u8 csum, *bguid;
+ int t = dd->unit;
+ struct qib_devdata *dd0 = qib_lookup(0);
+
+ if (t && dd0->nguid > 1 && t <= dd0->nguid) {
+ u8 oguid;
+ dd->base_guid = dd0->base_guid;
+ bguid = (u8 *) &dd->base_guid;
+
+ oguid = bguid[7];
+ bguid[7] += t;
+ if (oguid > bguid[7]) {
+ if (bguid[6] == 0xff) {
+ if (bguid[5] == 0xff) {
+ qib_dev_err(dd, "Can't set %s GUID"
+ " from base, wraps to"
+ " OUI!\n",
+ qib_get_unit_name(t));
+ dd->base_guid = 0;
+ goto bail;
+ }
+ bguid[5]++;
+ }
+ bguid[6]++;
+ }
+ dd->nguid = 1;
+ goto bail;
+ }
+
+ /*
+ * Read full flash, not just currently used part, since it may have
+ * been written with a newer definition.
+ */
+ len = sizeof(struct qib_flash);
+ buf = vmalloc(len);
+ if (!buf) {
+ qib_dev_err(dd, "Couldn't allocate memory to read %u "
+ "bytes from eeprom for GUID\n", len);
+ goto bail;
+ }
+
+ /*
+ * Use "public" eeprom read function, which does locking and
+ * figures out device.
This will migrate to chip-specific. + */ + eep_stat = qib_eeprom_read(dd, 0, buf, len); + + if (eep_stat) { + qib_dev_err(dd, "Failed reading GUID from eeprom\n"); + goto done; + } + ifp = (struct qib_flash *)buf; + + csum = flash_csum(ifp, 0); + if (csum != ifp->if_csum) { + qib_devinfo(dd->pcidev, "Bad I2C flash checksum: " + "0x%x, not 0x%x\n", csum, ifp->if_csum); + goto done; + } + if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) || + *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) { + qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n", + *(unsigned long long *) ifp->if_guid); + /* don't allow GUID if all 0 or all 1's */ + goto done; + } + + /* complain, but allow it */ + if (*(u64 *) ifp->if_guid == 0x100007511000000ULL) + qib_devinfo(dd->pcidev, "Warning, GUID %llx is " + "default, probably not correct!\n", + *(unsigned long long *) ifp->if_guid); + + bguid = ifp->if_guid; + if (!bguid[0] && !bguid[1] && !bguid[2]) { + /* + * Original incorrect GUID format in flash; fix in + * core copy, by shifting up 2 octets; don't need to + * change top octet, since both it and shifted are 0. + */ + bguid[1] = bguid[3]; + bguid[2] = bguid[4]; + bguid[3] = 0; + bguid[4] = 0; + guid = *(__be64 *) ifp->if_guid; + } else + guid = *(__be64 *) ifp->if_guid; + dd->base_guid = guid; + dd->nguid = ifp->if_numguid; + /* + * Things are slightly complicated by the desire to transparently + * support both the Pathscale 10-digit serial number and the QLogic + * 13-character version. + */ + if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] && + ((u8 *) ifp->if_sprefix)[0] != 0xFF) { + char *snp = dd->serial; + + /* + * This board has a Serial-prefix, which is stored + * elsewhere for backward-compatibility. + */ + memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); + snp[sizeof ifp->if_sprefix] = '\0'; + len = strlen(snp); + snp += len; + len = (sizeof dd->serial) - len; + if (len > sizeof ifp->if_serial) + len = sizeof ifp->if_serial; + memcpy(snp, ifp->if_serial, len); + } else + memcpy(dd->serial, ifp->if_serial, + sizeof ifp->if_serial); + if (!strstr(ifp->if_comment, "Tested successfully")) + qib_dev_err(dd, "Board SN %s did not pass functional " + "test: %s\n", dd->serial, ifp->if_comment); + + memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); + /* + * Power-on (actually "active") hours are kept as little-endian value + * in EEPROM, but as seconds in a (possibly as small as 24-bit) + * atomic_t while running. + */ + atomic_set(&dd->active_time, 0); + dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8); + +done: + vfree(buf); + +bail:; +} + +/** + * qib_update_eeprom_log - copy active-time and error counters to eeprom + * @dd: the qlogic_ib device + * + * Although the time is kept as seconds in the qib_devdata struct, it is + * rounded to hours for re-write, as we have only 16 bits in EEPROM. + * First-cut code reads whole (expected) struct qib_flash, modifies, + * re-writes. Future direction: read/write only what we need, assuming + * that the EEPROM had to have been "good enough" for driver init, and + * if not, we aren't making it worse. + * + */ +int qib_update_eeprom_log(struct qib_devdata *dd) +{ + void *buf; + struct qib_flash *ifp; + int len, hi_water; + uint32_t new_time, new_hrs; + u8 csum; + int ret, idx; + unsigned long flags; + + /* first, check if we actually need to do anything. 
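+ *
+ * Background for the time handling further down, in sketch form:
+ * active_time counts seconds, but only whole hours are transferred
+ * into the 16-bit, saturating EEPROM value:
+ *
+ *	new_hrs = new_time / 3600;
+ *	atomic_sub(new_hrs * 3600, &dd->active_time);	// keep remainder
+ *	new_hrs += dd->eep_hrs;
+ *	if (new_hrs > 0xFFFF)
+ *		new_hrs = 0xFFFF;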
*/ + ret = 0; + for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { + if (dd->eep_st_new_errs[idx]) { + ret = 1; + break; + } + } + new_time = atomic_read(&dd->active_time); + + if (ret == 0 && new_time < 3600) + goto bail; + + /* + * The quick-check above determined that there is something worthy + * of logging, so get current contents and do a more detailed idea. + * read full flash, not just currently used part, since it may have + * been written with a newer definition + */ + len = sizeof(struct qib_flash); + buf = vmalloc(len); + ret = 1; + if (!buf) { + qib_dev_err(dd, "Couldn't allocate memory to read %u " + "bytes from eeprom for logging\n", len); + goto bail; + } + + /* Grab semaphore and read current EEPROM. If we get an + * error, let go, but if not, keep it until we finish write. + */ + ret = mutex_lock_interruptible(&dd->eep_lock); + if (ret) { + qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); + goto free_bail; + } + ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); + if (ret) { + mutex_unlock(&dd->eep_lock); + qib_dev_err(dd, "Unable read EEPROM for logging\n"); + goto free_bail; + } + ifp = (struct qib_flash *)buf; + + csum = flash_csum(ifp, 0); + if (csum != ifp->if_csum) { + mutex_unlock(&dd->eep_lock); + qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", + csum, ifp->if_csum); + ret = 1; + goto free_bail; + } + hi_water = 0; + spin_lock_irqsave(&dd->eep_st_lock, flags); + for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { + int new_val = dd->eep_st_new_errs[idx]; + if (new_val) { + /* + * If we have seen any errors, add to EEPROM values + * We need to saturate at 0xFF (255) and we also + * would need to adjust the checksum if we were + * trying to minimize EEPROM traffic + * Note that we add to actual current count in EEPROM, + * in case it was altered while we were running. + */ + new_val += ifp->if_errcntp[idx]; + if (new_val > 0xFF) + new_val = 0xFF; + if (ifp->if_errcntp[idx] != new_val) { + ifp->if_errcntp[idx] = new_val; + hi_water = offsetof(struct qib_flash, + if_errcntp) + idx; + } + /* + * update our shadow (used to minimize EEPROM + * traffic), to match what we are about to write. + */ + dd->eep_st_errs[idx] = new_val; + dd->eep_st_new_errs[idx] = 0; + } + } + /* + * Now update active-time. We would like to round to the nearest hour + * but unless atomic_t are sure to be proper signed ints we cannot, + * because we need to account for what we "transfer" to EEPROM and + * if we log an hour at 31 minutes, then we would need to set + * active_time to -29 to accurately count the _next_ hour. + */ + if (new_time >= 3600) { + new_hrs = new_time / 3600; + atomic_sub((new_hrs * 3600), &dd->active_time); + new_hrs += dd->eep_hrs; + if (new_hrs > 0xFFFF) + new_hrs = 0xFFFF; + dd->eep_hrs = new_hrs; + if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) { + ifp->if_powerhour[0] = new_hrs & 0xFF; + hi_water = offsetof(struct qib_flash, if_powerhour); + } + if ((new_hrs >> 8) != ifp->if_powerhour[1]) { + ifp->if_powerhour[1] = new_hrs >> 8; + hi_water = offsetof(struct qib_flash, if_powerhour) + 1; + } + } + /* + * There is a tiny possibility that we could somehow fail to write + * the EEPROM after updating our shadows, but problems from holding + * the spinlock too long are a much bigger issue. 
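+ *
+ * (For reference, the per-byte update above in condensed form,
+ * assuming an 8-bit saturating counter at byte offset "off" in the
+ * flash image:
+ *
+ *	new_val = min(shadow_delta + eeprom_val, 0xFF);
+ *	if (eeprom_val != new_val) {
+ *		eeprom_val = new_val;
+ *		hi_water = max(hi_water, off);	// widen write window
+ *	}
+ *
+ * Only bytes 0..hi_water are then rewritten, minimizing I2C traffic
+ * and EEPROM wear.)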
+	 */
+	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+	if (hi_water) {
+		/* we made some change to the data, update cksum and write */
+		csum = flash_csum(ifp, 1);
+		ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
+	}
+	mutex_unlock(&dd->eep_lock);
+	if (ret)
+		qib_dev_err(dd, "Failed updating EEPROM\n");
+
+free_bail:
+	vfree(buf);
+bail:
+	return ret;
+}
+
+/**
+ * qib_inc_eeprom_err - increment one of the four error counters
+ * that are logged to EEPROM.
+ * @dd: the qlogic_ib device
+ * @eidx: 0..3, the counter to increment
+ * @incr: how much to add
+ *
+ * Each counter is 8 bits, and saturates at 255 (0xFF). They
+ * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
+ * is called, but it can only be called in a context that allows sleep.
+ * This function can be called even at interrupt level.
+ */
+void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
+{
+	uint new_val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->eep_st_lock, flags);
+	new_val = dd->eep_st_new_errs[eidx] + incr;
+	if (new_val > 255)
+		new_val = 255;
+	dd->eep_st_new_errs[eidx] = new_val;
+	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
new file mode 100644
index 000000000000..a142a9eb5226
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -0,0 +1,2317 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/cdev.h> +#include <linux/swap.h> +#include <linux/vmalloc.h> +#include <linux/highmem.h> +#include <linux/io.h> +#include <linux/uio.h> +#include <linux/jiffies.h> +#include <asm/pgtable.h> +#include <linux/delay.h> + +#include "qib.h" +#include "qib_common.h" +#include "qib_user_sdma.h" + +static int qib_open(struct inode *, struct file *); +static int qib_close(struct inode *, struct file *); +static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *); +static ssize_t qib_aio_write(struct kiocb *, const struct iovec *, + unsigned long, loff_t); +static unsigned int qib_poll(struct file *, struct poll_table_struct *); +static int qib_mmapf(struct file *, struct vm_area_struct *); + +static const struct file_operations qib_file_ops = { + .owner = THIS_MODULE, + .write = qib_write, + .aio_write = qib_aio_write, + .open = qib_open, + .release = qib_close, + .poll = qib_poll, + .mmap = qib_mmapf +}; + +/* + * Convert kernel virtual addresses to physical addresses so they don't + * potentially conflict with the chip addresses used as mmap offsets. + * It doesn't really matter what mmap offset we use as long as we can + * interpret it correctly. + */ +static u64 cvt_kvaddr(void *p) +{ + struct page *page; + u64 paddr = 0; + + page = vmalloc_to_page(p); + if (page) + paddr = page_to_pfn(page) << PAGE_SHIFT; + + return paddr; +} + +static int qib_get_base_info(struct file *fp, void __user *ubase, + size_t ubase_size) +{ + struct qib_ctxtdata *rcd = ctxt_fp(fp); + int ret = 0; + struct qib_base_info *kinfo = NULL; + struct qib_devdata *dd = rcd->dd; + struct qib_pportdata *ppd = rcd->ppd; + unsigned subctxt_cnt; + int shared, master; + size_t sz; + + subctxt_cnt = rcd->subctxt_cnt; + if (!subctxt_cnt) { + shared = 0; + master = 0; + subctxt_cnt = 1; + } else { + shared = 1; + master = !subctxt_fp(fp); + } + + sz = sizeof(*kinfo); + /* If context sharing is not requested, allow the old size structure */ + if (!shared) + sz -= 7 * sizeof(u64); + if (ubase_size < sz) { + ret = -EINVAL; + goto bail; + } + + kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL); + if (kinfo == NULL) { + ret = -ENOMEM; + goto bail; + } + + ret = dd->f_get_base_info(rcd, kinfo); + if (ret < 0) + goto bail; + + kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt; + kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize; + kinfo->spi_tidegrcnt = rcd->rcvegrcnt; + kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize; + /* + * have to mmap whole thing + */ + kinfo->spi_rcv_egrbuftotlen = + rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size; + kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk; + kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen / + rcd->rcvegrbuf_chunks; + kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt; + if (master) + kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt; + /* + * for this use, may be cfgctxts summed over all chips that + * are are configured and present + */ + kinfo->spi_nctxts = dd->cfgctxts; + /* unit (chip/board) our context is on */ + kinfo->spi_unit = dd->unit; + kinfo->spi_port = ppd->port; + /* for now, only a single page */ + kinfo->spi_tid_maxsize = PAGE_SIZE; + + /* + * Doing this per context, and based on the skip value, etc. This has + * to be the actual buffer size, since the protocol code treats it + * as an array. + * + * These have to be set to user addresses in the user code via mmap. + * These values are used on return to user code for the mmap target + * addresses only. 
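+ * (Offset convention in brief, using cvt_kvaddr() defined above: the
+ * kernel publishes a page-aligned token, and qib_mmapf() recognizes
+ * it by recomputing the same token from vm_pgoff:
+ *
+ *	token = cvt_kvaddr(p);			// pfn << PAGE_SHIFT
+ *	// user:   mmap(NULL, size, prot, MAP_SHARED, fd, token);
+ *	// kernel: pgaddr = vma->vm_pgoff << PAGE_SHIFT, == token
+ * )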
For 32 bit, same 44 bit address problem, so use + * the physical address, not virtual. Before 2.6.11, using the + * page_address() macro worked, but in 2.6.11, even that returns the + * full 64 bit address (upper bits all 1's). So far, using the + * physical addresses (or chip offsets, for chip mapping) works, but + * no doubt some future kernel release will change that, and we'll be + * on to yet another method of dealing with this. + * Normally only one of rcvhdr_tailaddr or rhf_offset is useful + * since the chips with non-zero rhf_offset don't normally + * enable tail register updates to host memory, but for testing, + * both can be enabled and used. + */ + kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys; + kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys; + kinfo->spi_rhf_offset = dd->rhf_offset; + kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys; + kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys; + /* setup per-unit (not port) status area for user programs */ + kinfo->spi_status = (u64) kinfo->spi_pioavailaddr + + (char *) ppd->statusp - + (char *) dd->pioavailregs_dma; + kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt; + if (!shared) { + kinfo->spi_piocnt = rcd->piocnt; + kinfo->spi_piobufbase = (u64) rcd->piobufs; + kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask); + } else if (master) { + kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) + + (rcd->piocnt % subctxt_cnt); + /* Master's PIO buffers are after all the slave's */ + kinfo->spi_piobufbase = (u64) rcd->piobufs + + dd->palign * + (rcd->piocnt - kinfo->spi_piocnt); + } else { + unsigned slave = subctxt_fp(fp) - 1; + + kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt; + kinfo->spi_piobufbase = (u64) rcd->piobufs + + dd->palign * kinfo->spi_piocnt * slave; + } + + if (shared) { + kinfo->spi_sendbuf_status = + cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]); + /* only spi_subctxt_* fields should be set in this block! */ + kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase); + + kinfo->spi_subctxt_rcvegrbuf = + cvt_kvaddr(rcd->subctxt_rcvegrbuf); + kinfo->spi_subctxt_rcvhdr_base = + cvt_kvaddr(rcd->subctxt_rcvhdr_base); + } + + /* + * All user buffers are 2KB buffers. If we ever support + * giving 4KB buffers to user processes, this will need some + * work. Can't use piobufbase directly, because it has + * both 2K and 4K buffer base values. + */ + kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) / + dd->palign; + kinfo->spi_pioalign = dd->palign; + kinfo->spi_qpair = QIB_KD_QP; + /* + * user mode PIO buffers are always 2KB, even when 4KB can + * be received, and sent via the kernel; this is ibmaxlen + * for 2K MTU. + */ + kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32); + kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */ + kinfo->spi_ctxt = rcd->ctxt; + kinfo->spi_subctxt = subctxt_fp(fp); + kinfo->spi_sw_version = QIB_KERN_SWVERSION; + kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */ + kinfo->spi_hw_version = dd->revision; + + if (master) + kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER; + + sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo); + if (copy_to_user(ubase, kinfo, sz)) + ret = -EFAULT; +bail: + kfree(kinfo); + return ret; +} + +/** + * qib_tid_update - update a context TID + * @rcd: the context + * @fp: the qib device file + * @ti: the TID information + * + * The new implementation as of Oct 2004 is that the driver assigns + * the tid and returns it to the caller. 
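+ * A compact form of the search described next (illustrative; it
+ * matches the loop in the body):
+ *
+ *	for (; ntids--; tid++) {
+ *		if (tid == tidcnt)
+ *			tid = 0;	// wrap to the start
+ *		if (!dd->pageshadow[ctxttid + tid])
+ *			break;		// found a free TID slot
+ *	}
+ *	if (ntids < 0)
+ *		ret = -ENOMEM;		// wrapped fully; none free
+ *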
To reduce search time, we + * keep a cursor for each context, walking the shadow tid array to find + * one that's not in use. + * + * For now, if we can't allocate the full list, we fail, although + * in the long run, we'll allocate as many as we can, and the + * caller will deal with that by trying the remaining pages later. + * That means that when we fail, we have to mark the tids as not in + * use again, in our shadow copy. + * + * It's up to the caller to free the tids when they are done. + * We'll unlock the pages as they free them. + * + * Also, right now we are locking one page at a time, but since + * the intended use of this routine is for a single group of + * virtually contiguous pages, that should change to improve + * performance. + */ +static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, + const struct qib_tid_info *ti) +{ + int ret = 0, ntids; + u32 tid, ctxttid, cnt, i, tidcnt, tidoff; + u16 *tidlist; + struct qib_devdata *dd = rcd->dd; + u64 physaddr; + unsigned long vaddr; + u64 __iomem *tidbase; + unsigned long tidmap[8]; + struct page **pagep = NULL; + unsigned subctxt = subctxt_fp(fp); + + if (!dd->pageshadow) { + ret = -ENOMEM; + goto done; + } + + cnt = ti->tidcnt; + if (!cnt) { + ret = -EFAULT; + goto done; + } + ctxttid = rcd->ctxt * dd->rcvtidcnt; + if (!rcd->subctxt_cnt) { + tidcnt = dd->rcvtidcnt; + tid = rcd->tidcursor; + tidoff = 0; + } else if (!subctxt) { + tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) + + (dd->rcvtidcnt % rcd->subctxt_cnt); + tidoff = dd->rcvtidcnt - tidcnt; + ctxttid += tidoff; + tid = tidcursor_fp(fp); + } else { + tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt; + tidoff = tidcnt * (subctxt - 1); + ctxttid += tidoff; + tid = tidcursor_fp(fp); + } + if (cnt > tidcnt) { + /* make sure it all fits in tid_pg_list */ + qib_devinfo(dd->pcidev, "Process tried to allocate %u " + "TIDs, only trying max (%u)\n", cnt, tidcnt); + cnt = tidcnt; + } + pagep = (struct page **) rcd->tid_pg_list; + tidlist = (u16 *) &pagep[dd->rcvtidcnt]; + pagep += tidoff; + tidlist += tidoff; + + memset(tidmap, 0, sizeof(tidmap)); + /* before decrement; chip actual # */ + ntids = tidcnt; + tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) + + dd->rcvtidbase + + ctxttid * sizeof(*tidbase)); + + /* virtual address of first page in transfer */ + vaddr = ti->tidvaddr; + if (!access_ok(VERIFY_WRITE, (void __user *) vaddr, + cnt * PAGE_SIZE)) { + ret = -EFAULT; + goto done; + } + ret = qib_get_user_pages(vaddr, cnt, pagep); + if (ret) { + /* + * if (ret == -EBUSY) + * We can't continue because the pagep array won't be + * initialized. This should never happen, + * unless perhaps the user has mpin'ed the pages + * themselves. 
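+ *
+ * For reference, the pin/map pairing used per page, which the
+ * cleanup path below must undo in full:
+ *
+ *	phys = qib_map_page(dd->pcidev, page, 0, PAGE_SIZE,
+ *			    PCI_DMA_FROMDEVICE);
+ *	// teardown must mirror it:
+ *	pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ *	qib_release_user_pages(&page, 1);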
+ */ + qib_devinfo(dd->pcidev, + "Failed to lock addr %p, %u pages: " + "errno %d\n", (void *) vaddr, cnt, -ret); + goto done; + } + for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { + for (; ntids--; tid++) { + if (tid == tidcnt) + tid = 0; + if (!dd->pageshadow[ctxttid + tid]) + break; + } + if (ntids < 0) { + /* + * Oops, wrapped all the way through their TIDs, + * and didn't have enough free; see comments at + * start of routine + */ + i--; /* last tidlist[i] not filled in */ + ret = -ENOMEM; + break; + } + tidlist[i] = tid + tidoff; + /* we "know" system pages and TID pages are same size */ + dd->pageshadow[ctxttid + tid] = pagep[i]; + dd->physshadow[ctxttid + tid] = + qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + /* + * don't need atomic or it's overhead + */ + __set_bit(tid, tidmap); + physaddr = dd->physshadow[ctxttid + tid]; + /* PERFORMANCE: below should almost certainly be cached */ + dd->f_put_tid(dd, &tidbase[tid], + RCVHQ_RCV_TYPE_EXPECTED, physaddr); + /* + * don't check this tid in qib_ctxtshadow, since we + * just filled it in; start with the next one. + */ + tid++; + } + + if (ret) { + u32 limit; +cleanup: + /* jump here if copy out of updated info failed... */ + /* same code that's in qib_free_tid() */ + limit = sizeof(tidmap) * BITS_PER_BYTE; + if (limit > tidcnt) + /* just in case size changes in future */ + limit = tidcnt; + tid = find_first_bit((const unsigned long *)tidmap, limit); + for (; tid < limit; tid++) { + if (!test_bit(tid, tidmap)) + continue; + if (dd->pageshadow[ctxttid + tid]) { + dma_addr_t phys; + + phys = dd->physshadow[ctxttid + tid]; + dd->physshadow[ctxttid + tid] = dd->tidinvalid; + /* PERFORMANCE: below should almost certainly + * be cached + */ + dd->f_put_tid(dd, &tidbase[tid], + RCVHQ_RCV_TYPE_EXPECTED, + dd->tidinvalid); + pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + dd->pageshadow[ctxttid + tid] = NULL; + } + } + qib_release_user_pages(pagep, cnt); + } else { + /* + * Copy the updated array, with qib_tid's filled in, back + * to user. Since we did the copy in already, this "should + * never fail" If it does, we have to clean up... + */ + if (copy_to_user((void __user *) + (unsigned long) ti->tidlist, + tidlist, cnt * sizeof(*tidlist))) { + ret = -EFAULT; + goto cleanup; + } + if (copy_to_user((void __user *) (unsigned long) ti->tidmap, + tidmap, sizeof tidmap)) { + ret = -EFAULT; + goto cleanup; + } + if (tid == tidcnt) + tid = 0; + if (!rcd->subctxt_cnt) + rcd->tidcursor = tid; + else + tidcursor_fp(fp) = tid; + } + +done: + return ret; +} + +/** + * qib_tid_free - free a context TID + * @rcd: the context + * @subctxt: the subcontext + * @ti: the TID info + * + * right now we are unlocking one page at a time, but since + * the intended use of this routine is for a single group of + * virtually contiguous pages, that should change to improve + * performance. We check that the TID is in range for this context + * but otherwise don't check validity; if user has an error and + * frees the wrong tid, it's only their own data that can thereby + * be corrupted. We do check that the TID was in use, for sanity + * We always use our idea of the saved address, not the address that + * they pass in to us. 
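+ *
+ * Worked example of the TID partitioning below (shared with
+ * qib_tid_update()): with dd->rcvtidcnt == 100 and subctxt_cnt == 3,
+ * slave subcontexts 1 and 2 own TIDs 0..32 and 33..65, while the
+ * master owns the remainder-absorbing tail 66..99, i.e.
+ * 100 / 3 + 100 % 3 == 34 entries.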
+ */ +static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt, + const struct qib_tid_info *ti) +{ + int ret = 0; + u32 tid, ctxttid, cnt, limit, tidcnt; + struct qib_devdata *dd = rcd->dd; + u64 __iomem *tidbase; + unsigned long tidmap[8]; + + if (!dd->pageshadow) { + ret = -ENOMEM; + goto done; + } + + if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, + sizeof tidmap)) { + ret = -EFAULT; + goto done; + } + + ctxttid = rcd->ctxt * dd->rcvtidcnt; + if (!rcd->subctxt_cnt) + tidcnt = dd->rcvtidcnt; + else if (!subctxt) { + tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) + + (dd->rcvtidcnt % rcd->subctxt_cnt); + ctxttid += dd->rcvtidcnt - tidcnt; + } else { + tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt; + ctxttid += tidcnt * (subctxt - 1); + } + tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) + + dd->rcvtidbase + + ctxttid * sizeof(*tidbase)); + + limit = sizeof(tidmap) * BITS_PER_BYTE; + if (limit > tidcnt) + /* just in case size changes in future */ + limit = tidcnt; + tid = find_first_bit(tidmap, limit); + for (cnt = 0; tid < limit; tid++) { + /* + * small optimization; if we detect a run of 3 or so without + * any set, use find_first_bit again. That's mainly to + * accelerate the case where we wrapped, so we have some at + * the beginning, and some at the end, and a big gap + * in the middle. + */ + if (!test_bit(tid, tidmap)) + continue; + cnt++; + if (dd->pageshadow[ctxttid + tid]) { + struct page *p; + dma_addr_t phys; + + p = dd->pageshadow[ctxttid + tid]; + dd->pageshadow[ctxttid + tid] = NULL; + phys = dd->physshadow[ctxttid + tid]; + dd->physshadow[ctxttid + tid] = dd->tidinvalid; + /* PERFORMANCE: below should almost certainly be + * cached + */ + dd->f_put_tid(dd, &tidbase[tid], + RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid); + pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + qib_release_user_pages(&p, 1); + } + } +done: + return ret; +} + +/** + * qib_set_part_key - set a partition key + * @rcd: the context + * @key: the key + * + * We can have up to 4 active at a time (other than the default, which is + * always allowed). This is somewhat tricky, since multiple contexts may set + * the same key, so we reference count them, and clean up at exit. All 4 + * partition keys are packed into a single qlogic_ib register. It's an + * error for a process to set the same pkey multiple times. We provide no + * mechanism to de-allocate a pkey at this time, we may eventually need to + * do that. I've used the atomic operations, and no locking, and only make + * a single pass through what's available. This should be more than + * adequate for some time. I'll think about spinlocks or the like if and as + * it's necessary. + */ +static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key) +{ + struct qib_pportdata *ppd = rcd->ppd; + int i, any = 0, pidx = -1; + u16 lkey = key & 0x7FFF; + int ret; + + if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) { + /* nothing to do; this key always valid */ + ret = 0; + goto bail; + } + + if (!lkey) { + ret = -EINVAL; + goto bail; + } + + /* + * Set the full membership bit, because it has to be + * set in the register or the packet, and it seems + * cleaner to set in the register than to force all + * callers to set it. 
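+ *
+ * (PKey layout, for reference: bit 15 is the full-membership bit, so
+ * the limited and full variants of a key share the low 15 bits:
+ *
+ *	lkey = key & 0x7FFF;	// membership-agnostic value
+ *	key |= 0x8000;		// force full membership
+ *	// entries collide iff (a & 0x7FFF) == (b & 0x7FFF)
+ * )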
+ */ + key |= 0x8000; + + for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) { + if (!rcd->pkeys[i] && pidx == -1) + pidx = i; + if (rcd->pkeys[i] == key) { + ret = -EEXIST; + goto bail; + } + } + if (pidx == -1) { + ret = -EBUSY; + goto bail; + } + for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { + if (!ppd->pkeys[i]) { + any++; + continue; + } + if (ppd->pkeys[i] == key) { + atomic_t *pkrefs = &ppd->pkeyrefs[i]; + + if (atomic_inc_return(pkrefs) > 1) { + rcd->pkeys[pidx] = key; + ret = 0; + goto bail; + } else { + /* + * lost race, decrement count, catch below + */ + atomic_dec(pkrefs); + any++; + } + } + if ((ppd->pkeys[i] & 0x7FFF) == lkey) { + /* + * It makes no sense to have both the limited and + * full membership PKEY set at the same time since + * the unlimited one will disable the limited one. + */ + ret = -EEXIST; + goto bail; + } + } + if (!any) { + ret = -EBUSY; + goto bail; + } + for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { + if (!ppd->pkeys[i] && + atomic_inc_return(&ppd->pkeyrefs[i]) == 1) { + rcd->pkeys[pidx] = key; + ppd->pkeys[i] = key; + (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); + ret = 0; + goto bail; + } + } + ret = -EBUSY; + +bail: + return ret; +} + +/** + * qib_manage_rcvq - manage a context's receive queue + * @rcd: the context + * @subctxt: the subcontext + * @start_stop: action to carry out + * + * start_stop == 0 disables receive on the context, for use in queue + * overflow conditions. start_stop==1 re-enables, to be used to + * re-init the software copy of the head register + */ +static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt, + int start_stop) +{ + struct qib_devdata *dd = rcd->dd; + unsigned int rcvctrl_op; + + if (subctxt) + goto bail; + /* atomically clear receive enable ctxt. */ + if (start_stop) { + /* + * On enable, force in-memory copy of the tail register to + * 0, so that protocol code doesn't have to worry about + * whether or not the chip has yet updated the in-memory + * copy or not on return from the system call. The chip + * always resets it's tail register back to 0 on a + * transition from disabled to enabled. 
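+ *
+ * (Enable sequence in miniature, assuming a tail page is present:
+ *
+ *	qib_clear_rcvhdrtail(rcd);	// in-memory tail = 0
+ *	dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB, rcd->ctxt);
+ *	// chip head/tail are now both 0, matching the in-memory copy,
+ *	// so callers may trust the shared tail immediately
+ * )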
+ */ + if (rcd->rcvhdrtail_kvaddr) + qib_clear_rcvhdrtail(rcd); + rcvctrl_op = QIB_RCVCTRL_CTXT_ENB; + } else + rcvctrl_op = QIB_RCVCTRL_CTXT_DIS; + dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt); + /* always; new head should be equal to new tail; see above */ +bail: + return 0; +} + +static void qib_clean_part_key(struct qib_ctxtdata *rcd, + struct qib_devdata *dd) +{ + int i, j, pchanged = 0; + u64 oldpkey; + struct qib_pportdata *ppd = rcd->ppd; + + /* for debugging only */ + oldpkey = (u64) ppd->pkeys[0] | + ((u64) ppd->pkeys[1] << 16) | + ((u64) ppd->pkeys[2] << 32) | + ((u64) ppd->pkeys[3] << 48); + + for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) { + if (!rcd->pkeys[i]) + continue; + for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) { + /* check for match independent of the global bit */ + if ((ppd->pkeys[j] & 0x7fff) != + (rcd->pkeys[i] & 0x7fff)) + continue; + if (atomic_dec_and_test(&ppd->pkeyrefs[j])) { + ppd->pkeys[j] = 0; + pchanged++; + } + break; + } + rcd->pkeys[i] = 0; + } + if (pchanged) + (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); +} + +/* common code for the mappings on dma_alloc_coherent mem */ +static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd, + unsigned len, void *kvaddr, u32 write_ok, char *what) +{ + struct qib_devdata *dd = rcd->dd; + unsigned long pfn; + int ret; + + if ((vma->vm_end - vma->vm_start) > len) { + qib_devinfo(dd->pcidev, + "FAIL on %s: len %lx > %x\n", what, + vma->vm_end - vma->vm_start, len); + ret = -EFAULT; + goto bail; + } + + /* + * shared context user code requires rcvhdrq mapped r/w, others + * only allowed readonly mapping. + */ + if (!write_ok) { + if (vma->vm_flags & VM_WRITE) { + qib_devinfo(dd->pcidev, + "%s must be mapped readonly\n", what); + ret = -EPERM; + goto bail; + } + + /* don't allow them to later change with mprotect */ + vma->vm_flags &= ~VM_MAYWRITE; + } + + pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT; + ret = remap_pfn_range(vma, vma->vm_start, pfn, + len, vma->vm_page_prot); + if (ret) + qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x " + "bytes failed: %d\n", what, rcd->ctxt, + pfn, len, ret); +bail: + return ret; +} + +static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd, + u64 ureg) +{ + unsigned long phys; + unsigned long sz; + int ret; + + /* + * This is real hardware, so use io_remap. This is the mechanism + * for the user process to update the head registers for their ctxt + * in the chip. + */ + sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE; + if ((vma->vm_end - vma->vm_start) > sz) { + qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen " + "%lx > PAGE\n", vma->vm_end - vma->vm_start); + ret = -EFAULT; + } else { + phys = dd->physaddr + ureg; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; + ret = io_remap_pfn_range(vma, vma->vm_start, + phys >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + } + return ret; +} + +static int mmap_piobufs(struct vm_area_struct *vma, + struct qib_devdata *dd, + struct qib_ctxtdata *rcd, + unsigned piobufs, unsigned piocnt) +{ + unsigned long phys; + int ret; + + /* + * When we map the PIO buffers in the chip, we want to map them as + * writeonly, no read possible; unfortunately, x86 doesn't allow + * for this in hardware, but we still prevent users from asking + * for it. 
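+ *
+ * (The enforcement pattern, shared with the read-only mappings in
+ * this file: clearing a VM_MAY* bit removes the right to upgrade
+ * protections later, so mprotect(PROT_READ) on this VMA will fail:
+ *
+ *	vma->vm_flags &= ~VM_MAYREAD;
+ *	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ * )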
+ */ + if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) { + qib_devinfo(dd->pcidev, "FAIL mmap piobufs: " + "reqlen %lx > PAGE\n", + vma->vm_end - vma->vm_start); + ret = -EINVAL; + goto bail; + } + + phys = dd->physaddr + piobufs; + +#if defined(__powerpc__) + /* There isn't a generic way to specify writethrough mappings */ + pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; + pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU; + pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED; +#endif + + /* + * don't allow them to later change to readable with mprotect (for when + * not initially mapped readable, as is normally the case) + */ + vma->vm_flags &= ~VM_MAYREAD; + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; + + if (qib_wc_pat) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +bail: + return ret; +} + +static int mmap_rcvegrbufs(struct vm_area_struct *vma, + struct qib_ctxtdata *rcd) +{ + struct qib_devdata *dd = rcd->dd; + unsigned long start, size; + size_t total_size, i; + unsigned long pfn; + int ret; + + size = rcd->rcvegrbuf_size; + total_size = rcd->rcvegrbuf_chunks * size; + if ((vma->vm_end - vma->vm_start) > total_size) { + qib_devinfo(dd->pcidev, "FAIL on egr bufs: " + "reqlen %lx > actual %lx\n", + vma->vm_end - vma->vm_start, + (unsigned long) total_size); + ret = -EINVAL; + goto bail; + } + + if (vma->vm_flags & VM_WRITE) { + qib_devinfo(dd->pcidev, "Can't map eager buffers as " + "writable (flags=%lx)\n", vma->vm_flags); + ret = -EPERM; + goto bail; + } + /* don't allow them to later change to writeable with mprotect */ + vma->vm_flags &= ~VM_MAYWRITE; + + start = vma->vm_start; + + for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) { + pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT; + ret = remap_pfn_range(vma, start, pfn, size, + vma->vm_page_prot); + if (ret < 0) + goto bail; + } + ret = 0; + +bail: + return ret; +} + +/* + * qib_file_vma_fault - handle a VMA page fault. + */ +static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *page; + + page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT)); + if (!page) + return VM_FAULT_SIGBUS; + + get_page(page); + vmf->page = page; + + return 0; +} + +static struct vm_operations_struct qib_file_vm_ops = { + .fault = qib_file_vma_fault, +}; + +static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, + struct qib_ctxtdata *rcd, unsigned subctxt) +{ + struct qib_devdata *dd = rcd->dd; + unsigned subctxt_cnt; + unsigned long len; + void *addr; + size_t size; + int ret = 0; + + subctxt_cnt = rcd->subctxt_cnt; + size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size; + + /* + * Each process has all the subctxt uregbase, rcvhdrq, and + * rcvegrbufs mmapped - as an array for all the processes, + * and also separately for this process. 
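+ *
+ * (Mechanism sketch: for these vmalloc'ed areas the mmap offset is
+ * the kernel virtual address itself, and pages are materialized on
+ * demand by the fault handler below, roughly:
+ *
+ *	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
+ *	get_page(page);		// the VMA holds a reference
+ *	vmf->page = page;
+ * )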
+ */ + if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) { + addr = rcd->subctxt_uregbase; + size = PAGE_SIZE * subctxt_cnt; + } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) { + addr = rcd->subctxt_rcvhdr_base; + size = rcd->rcvhdrq_size * subctxt_cnt; + } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) { + addr = rcd->subctxt_rcvegrbuf; + size *= subctxt_cnt; + } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase + + PAGE_SIZE * subctxt)) { + addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt; + size = PAGE_SIZE; + } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base + + rcd->rcvhdrq_size * subctxt)) { + addr = rcd->subctxt_rcvhdr_base + + rcd->rcvhdrq_size * subctxt; + size = rcd->rcvhdrq_size; + } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) { + addr = rcd->user_event_mask; + size = PAGE_SIZE; + } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf + + size * subctxt)) { + addr = rcd->subctxt_rcvegrbuf + size * subctxt; + /* rcvegrbufs are read-only on the slave */ + if (vma->vm_flags & VM_WRITE) { + qib_devinfo(dd->pcidev, + "Can't map eager buffers as " + "writable (flags=%lx)\n", vma->vm_flags); + ret = -EPERM; + goto bail; + } + /* + * Don't allow permission to later change to writeable + * with mprotect. + */ + vma->vm_flags &= ~VM_MAYWRITE; + } else + goto bail; + len = vma->vm_end - vma->vm_start; + if (len > size) { + ret = -EINVAL; + goto bail; + } + + vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; + vma->vm_ops = &qib_file_vm_ops; + vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; + ret = 1; + +bail: + return ret; +} + +/** + * qib_mmapf - mmap various structures into user space + * @fp: the file pointer + * @vma: the VM area + * + * We use this to have a shared buffer between the kernel and the user code + * for the rcvhdr queue, egr buffers, and the per-context user regs and pio + * buffers in the chip. We have the open and close entries so we can bump + * the ref count and keep the driver from being unloaded while still mapped. + */ +static int qib_mmapf(struct file *fp, struct vm_area_struct *vma) +{ + struct qib_ctxtdata *rcd; + struct qib_devdata *dd; + u64 pgaddr, ureg; + unsigned piobufs, piocnt; + int ret, match = 1; + + rcd = ctxt_fp(fp); + if (!rcd || !(vma->vm_flags & VM_SHARED)) { + ret = -EINVAL; + goto bail; + } + dd = rcd->dd; + + /* + * This is the qib_do_user_init() code, mapping the shared buffers + * and per-context user registers into the user process. The address + * referred to by vm_pgoff is the file offset passed via mmap(). + * For shared contexts, this is the kernel vmalloc() address of the + * pages to share with the master. + * For non-shared or master ctxts, this is a physical address. + * We only do one mmap for each space mapped. + */ + pgaddr = vma->vm_pgoff << PAGE_SHIFT; + + /* + * Check for 0 in case one of the allocations failed, but user + * called mmap anyway. + */ + if (!pgaddr) { + ret = -EINVAL; + goto bail; + } + + /* + * Physical addresses must fit in 40 bits for our hardware. + * Check for kernel virtual addresses first, anything else must + * match a HW or memory address. 
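+ *
+ * (Dispatch order below: mmap_kvaddr() claims the kernel-virtual
+ * tokens first; any remaining offset must equal one of the known
+ * physical or chip addresses:
+ *
+ *	pgaddr == ureg			-> mmap_ureg()
+ *	pgaddr == piobufs		-> mmap_piobufs()
+ *	pgaddr == rcd->rcvegr_phys	-> mmap_rcvegrbufs()
+ *	(and so on; anything else is -EINVAL)
+ * )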
+ */ + ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp)); + if (ret) { + if (ret > 0) + ret = 0; + goto bail; + } + + ureg = dd->uregbase + dd->ureg_align * rcd->ctxt; + if (!rcd->subctxt_cnt) { + /* ctxt is not shared */ + piocnt = rcd->piocnt; + piobufs = rcd->piobufs; + } else if (!subctxt_fp(fp)) { + /* caller is the master */ + piocnt = (rcd->piocnt / rcd->subctxt_cnt) + + (rcd->piocnt % rcd->subctxt_cnt); + piobufs = rcd->piobufs + + dd->palign * (rcd->piocnt - piocnt); + } else { + unsigned slave = subctxt_fp(fp) - 1; + + /* caller is a slave */ + piocnt = rcd->piocnt / rcd->subctxt_cnt; + piobufs = rcd->piobufs + dd->palign * piocnt * slave; + } + + if (pgaddr == ureg) + ret = mmap_ureg(vma, dd, ureg); + else if (pgaddr == piobufs) + ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt); + else if (pgaddr == dd->pioavailregs_phys) + /* in-memory copy of pioavail registers */ + ret = qib_mmap_mem(vma, rcd, PAGE_SIZE, + (void *) dd->pioavailregs_dma, 0, + "pioavail registers"); + else if (pgaddr == rcd->rcvegr_phys) + ret = mmap_rcvegrbufs(vma, rcd); + else if (pgaddr == (u64) rcd->rcvhdrq_phys) + /* + * The rcvhdrq itself; multiple pages, contiguous + * from an i/o perspective. Shared contexts need + * to map r/w, so we allow writing. + */ + ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size, + rcd->rcvhdrq, 1, "rcvhdrq"); + else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys) + /* in-memory copy of rcvhdrq tail register */ + ret = qib_mmap_mem(vma, rcd, PAGE_SIZE, + rcd->rcvhdrtail_kvaddr, 0, + "rcvhdrq tail"); + else + match = 0; + if (!match) + ret = -EINVAL; + + vma->vm_private_data = NULL; + + if (ret < 0) + qib_devinfo(dd->pcidev, + "mmap Failure %d: off %llx len %lx\n", + -ret, (unsigned long long)pgaddr, + vma->vm_end - vma->vm_start); +bail: + return ret; +} + +static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd, + struct file *fp, + struct poll_table_struct *pt) +{ + struct qib_devdata *dd = rcd->dd; + unsigned pollflag; + + poll_wait(fp, &rcd->wait, pt); + + spin_lock_irq(&dd->uctxt_lock); + if (rcd->urgent != rcd->urgent_poll) { + pollflag = POLLIN | POLLRDNORM; + rcd->urgent_poll = rcd->urgent; + } else { + pollflag = 0; + set_bit(QIB_CTXT_WAITING_URG, &rcd->flag); + } + spin_unlock_irq(&dd->uctxt_lock); + + return pollflag; +} + +static unsigned int qib_poll_next(struct qib_ctxtdata *rcd, + struct file *fp, + struct poll_table_struct *pt) +{ + struct qib_devdata *dd = rcd->dd; + unsigned pollflag; + + poll_wait(fp, &rcd->wait, pt); + + spin_lock_irq(&dd->uctxt_lock); + if (dd->f_hdrqempty(rcd)) { + set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag); + dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt); + pollflag = 0; + } else + pollflag = POLLIN | POLLRDNORM; + spin_unlock_irq(&dd->uctxt_lock); + + return pollflag; +} + +static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt) +{ + struct qib_ctxtdata *rcd; + unsigned pollflag; + + rcd = ctxt_fp(fp); + if (!rcd) + pollflag = POLLERR; + else if (rcd->poll_type == QIB_POLL_TYPE_URGENT) + pollflag = qib_poll_urgent(rcd, fp, pt); + else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV) + pollflag = qib_poll_next(rcd, fp, pt); + else /* invalid */ + pollflag = POLLERR; + + return pollflag; +} + +/* + * Check that userland and driver are compatible for subcontexts. 
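+ *
+ * (The user's version is packed as (major << 16) | minor in
+ * spu_userversion; the caller splits it like this before asking:
+ *
+ *	swmajor = uinfo->spu_userversion >> 16;
+ *	swminor = uinfo->spu_userversion & 0xffff;
+ *	if (qib_compatible_subctxts(swmajor, swminor))
+ *		// context sharing may be attempted
+ * )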
+ */ +static int qib_compatible_subctxts(int user_swmajor, int user_swminor) +{ + /* this code is written long-hand for clarity */ + if (QIB_USER_SWMAJOR != user_swmajor) { + /* no promise of compatibility if major mismatch */ + return 0; + } + if (QIB_USER_SWMAJOR == 1) { + switch (QIB_USER_SWMINOR) { + case 0: + case 1: + case 2: + /* no subctxt implementation so cannot be compatible */ + return 0; + case 3: + /* 3 is only compatible with itself */ + return user_swminor == 3; + default: + /* >= 4 are compatible (or are expected to be) */ + return user_swminor >= 4; + } + } + /* make no promises yet for future major versions */ + return 0; +} + +static int init_subctxts(struct qib_devdata *dd, + struct qib_ctxtdata *rcd, + const struct qib_user_info *uinfo) +{ + int ret = 0; + unsigned num_subctxts; + size_t size; + + /* + * If the user is requesting zero subctxts, + * skip the subctxt allocation. + */ + if (uinfo->spu_subctxt_cnt <= 0) + goto bail; + num_subctxts = uinfo->spu_subctxt_cnt; + + /* Check for subctxt compatibility */ + if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16, + uinfo->spu_userversion & 0xffff)) { + qib_devinfo(dd->pcidev, + "Mismatched user version (%d.%d) and driver " + "version (%d.%d) while context sharing. Ensure " + "that driver and library are from the same " + "release.\n", + (int) (uinfo->spu_userversion >> 16), + (int) (uinfo->spu_userversion & 0xffff), + QIB_USER_SWMAJOR, QIB_USER_SWMINOR); + goto bail; + } + if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) { + ret = -EINVAL; + goto bail; + } + + rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts); + if (!rcd->subctxt_uregbase) { + ret = -ENOMEM; + goto bail; + } + /* Note: rcd->rcvhdrq_size isn't initialized yet. */ + size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize * + sizeof(u32), PAGE_SIZE) * num_subctxts; + rcd->subctxt_rcvhdr_base = vmalloc_user(size); + if (!rcd->subctxt_rcvhdr_base) { + ret = -ENOMEM; + goto bail_ureg; + } + + rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks * + rcd->rcvegrbuf_size * + num_subctxts); + if (!rcd->subctxt_rcvegrbuf) { + ret = -ENOMEM; + goto bail_rhdr; + } + + rcd->subctxt_cnt = uinfo->spu_subctxt_cnt; + rcd->subctxt_id = uinfo->spu_subctxt_id; + rcd->active_slaves = 1; + rcd->redirect_seq_cnt = 1; + set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag); + goto bail; + +bail_rhdr: + vfree(rcd->subctxt_rcvhdr_base); +bail_ureg: + vfree(rcd->subctxt_uregbase); + rcd->subctxt_uregbase = NULL; +bail: + return ret; +} + +static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, + struct file *fp, const struct qib_user_info *uinfo) +{ + struct qib_devdata *dd = ppd->dd; + struct qib_ctxtdata *rcd; + void *ptmp = NULL; + int ret; + + rcd = qib_create_ctxtdata(ppd, ctxt); + + /* + * Allocate memory for use in qib_tid_update() at open to + * reduce cost of expected send setup per message segment + */ + if (rcd) + ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) + + dd->rcvtidcnt * sizeof(struct page **), + GFP_KERNEL); + + if (!rcd || !ptmp) { + qib_dev_err(dd, "Unable to allocate ctxtdata " + "memory, failing open\n"); + ret = -ENOMEM; + goto bailerr; + } + rcd->userversion = uinfo->spu_userversion; + ret = init_subctxts(dd, rcd, uinfo); + if (ret) + goto bailerr; + rcd->tid_pg_list = ptmp; + rcd->pid = current->pid; + init_waitqueue_head(&dd->rcd[ctxt]->wait); + strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); + ctxt_fp(fp) = rcd; + qib_stats.sps_ctxts++; + ret = 0; + goto bail; + +bailerr: + dd->rcd[ctxt] = NULL; + kfree(rcd); + kfree(ptmp); +bail: + return 
ret; +} + +static inline int usable(struct qib_pportdata *ppd, int active_only) +{ + struct qib_devdata *dd = ppd->dd; + u32 linkok = active_only ? QIBL_LINKACTIVE : + (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE); + + return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid && + (ppd->lflags & linkok); +} + +static int find_free_ctxt(int unit, struct file *fp, + const struct qib_user_info *uinfo) +{ + struct qib_devdata *dd = qib_lookup(unit); + struct qib_pportdata *ppd = NULL; + int ret; + u32 ctxt; + + if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) { + ret = -ENODEV; + goto bail; + } + + /* + * If users requests specific port, only try that one port, else + * select "best" port below, based on context. + */ + if (uinfo->spu_port) { + ppd = dd->pport + uinfo->spu_port - 1; + if (!usable(ppd, 0)) { + ret = -ENETDOWN; + goto bail; + } + } + + for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { + if (dd->rcd[ctxt]) + continue; + /* + * The setting and clearing of user context rcd[x] protected + * by the qib_mutex + */ + if (!ppd) { + /* choose port based on ctxt, if up, else 1st up */ + ppd = dd->pport + (ctxt % dd->num_pports); + if (!usable(ppd, 0)) { + int i; + for (i = 0; i < dd->num_pports; i++) { + ppd = dd->pport + i; + if (usable(ppd, 0)) + break; + } + if (i == dd->num_pports) { + ret = -ENETDOWN; + goto bail; + } + } + } + ret = setup_ctxt(ppd, ctxt, fp, uinfo); + goto bail; + } + ret = -EBUSY; + +bail: + return ret; +} + +static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo) +{ + struct qib_pportdata *ppd; + int ret = 0, devmax; + int npresent, nup; + int ndev; + u32 port = uinfo->spu_port, ctxt; + + devmax = qib_count_units(&npresent, &nup); + + for (ndev = 0; ndev < devmax; ndev++) { + struct qib_devdata *dd = qib_lookup(ndev); + + /* device portion of usable() */ + if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase)) + continue; + for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { + if (dd->rcd[ctxt]) + continue; + if (port) { + if (port > dd->num_pports) + continue; + ppd = dd->pport + port - 1; + if (!usable(ppd, 0)) + continue; + } else { + /* + * choose port based on ctxt, if up, else + * first port that's up for multi-port HCA + */ + ppd = dd->pport + (ctxt % dd->num_pports); + if (!usable(ppd, 0)) { + int j; + + ppd = NULL; + for (j = 0; j < dd->num_pports && + !ppd; j++) + if (usable(dd->pport + j, 0)) + ppd = dd->pport + j; + if (!ppd) + continue; /* to next unit */ + } + } + ret = setup_ctxt(ppd, ctxt, fp, uinfo); + goto done; + } + } + + if (npresent) { + if (nup == 0) + ret = -ENETDOWN; + else + ret = -EBUSY; + } else + ret = -ENXIO; + +done: + return ret; +} + +static int find_shared_ctxt(struct file *fp, + const struct qib_user_info *uinfo) +{ + int devmax, ndev, i; + int ret = 0; + + devmax = qib_count_units(NULL, NULL); + + for (ndev = 0; ndev < devmax; ndev++) { + struct qib_devdata *dd = qib_lookup(ndev); + + /* device portion of usable() */ + if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase)) + continue; + for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) { + struct qib_ctxtdata *rcd = dd->rcd[i]; + + /* Skip ctxts which are not yet open */ + if (!rcd || !rcd->cnt) + continue; + /* Skip ctxt if it doesn't match the requested one */ + if (rcd->subctxt_id != uinfo->spu_subctxt_id) + continue; + /* Verify the sharing process matches the master */ + if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt || + rcd->userversion != uinfo->spu_userversion || + rcd->cnt >= 
rcd->subctxt_cnt) { + ret = -EINVAL; + goto done; + } + ctxt_fp(fp) = rcd; + subctxt_fp(fp) = rcd->cnt++; + rcd->subpid[subctxt_fp(fp)] = current->pid; + tidcursor_fp(fp) = 0; + rcd->active_slaves |= 1 << subctxt_fp(fp); + ret = 1; + goto done; + } + } + +done: + return ret; +} + +static int qib_open(struct inode *in, struct file *fp) +{ + /* The real work is performed later in qib_assign_ctxt() */ + fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL); + if (fp->private_data) /* no cpu affinity by default */ + ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1; + return fp->private_data ? 0 : -ENOMEM; +} + +/* + * Get ctxt early, so can set affinity prior to memory allocation. + */ +static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) +{ + int ret; + int i_minor; + unsigned swmajor, swminor; + + /* Check to be sure we haven't already initialized this file */ + if (ctxt_fp(fp)) { + ret = -EINVAL; + goto done; + } + + /* for now, if major version is different, bail */ + swmajor = uinfo->spu_userversion >> 16; + if (swmajor != QIB_USER_SWMAJOR) { + ret = -ENODEV; + goto done; + } + + swminor = uinfo->spu_userversion & 0xffff; + + mutex_lock(&qib_mutex); + + if (qib_compatible_subctxts(swmajor, swminor) && + uinfo->spu_subctxt_cnt) { + ret = find_shared_ctxt(fp, uinfo); + if (ret) { + if (ret > 0) + ret = 0; + goto done_chk_sdma; + } + } + + i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE; + if (i_minor) + ret = find_free_ctxt(i_minor - 1, fp, uinfo); + else + ret = get_a_ctxt(fp, uinfo); + +done_chk_sdma: + if (!ret) { + struct qib_filedata *fd = fp->private_data; + const struct qib_ctxtdata *rcd = fd->rcd; + const struct qib_devdata *dd = rcd->dd; + + if (dd->flags & QIB_HAS_SEND_DMA) { + fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, + dd->unit, + rcd->ctxt, + fd->subctxt); + if (!fd->pq) + ret = -ENOMEM; + } + + /* + * If process has NOT already set it's affinity, select and + * reserve a processor for it, as a rendevous for all + * users of the driver. If they don't actually later + * set affinity to this cpu, or set it to some other cpu, + * it just means that sooner or later we don't recommend + * a cpu, and let the scheduler do it's best. + */ + if (!ret && cpus_weight(current->cpus_allowed) >= + qib_cpulist_count) { + int cpu; + cpu = find_first_zero_bit(qib_cpulist, + qib_cpulist_count); + if (cpu != qib_cpulist_count) { + __set_bit(cpu, qib_cpulist); + fd->rec_cpu_num = cpu; + } + } else if (cpus_weight(current->cpus_allowed) == 1 && + test_bit(first_cpu(current->cpus_allowed), + qib_cpulist)) + qib_devinfo(dd->pcidev, "%s PID %u affinity " + "set to cpu %d; already allocated\n", + current->comm, current->pid, + first_cpu(current->cpus_allowed)); + } + + mutex_unlock(&qib_mutex); + +done: + return ret; +} + + +static int qib_do_user_init(struct file *fp, + const struct qib_user_info *uinfo) +{ + int ret; + struct qib_ctxtdata *rcd = ctxt_fp(fp); + struct qib_devdata *dd; + unsigned uctxt; + + /* Subctxts don't need to initialize anything since master did it. 
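+ *
+ * Worked example of the PIO buffer split a few lines below: with
+ * pbufsctxt == 10 and ctxts_extrabuf == 3, user contexts 0..2 get 11
+ * buffers starting at 0, 11 and 22, and later contexts get 10
+ * starting at 33 + 10 * (uctxt - 3).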
*/ + if (subctxt_fp(fp)) { + ret = wait_event_interruptible(rcd->wait, + !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag)); + goto bail; + } + + dd = rcd->dd; + + /* some ctxts may get extra buffers, calculate that here */ + uctxt = rcd->ctxt - dd->first_user_ctxt; + if (uctxt < dd->ctxts_extrabuf) { + rcd->piocnt = dd->pbufsctxt + 1; + rcd->pio_base = rcd->piocnt * uctxt; + } else { + rcd->piocnt = dd->pbufsctxt; + rcd->pio_base = rcd->piocnt * uctxt + + dd->ctxts_extrabuf; + } + + /* + * All user buffers are 2KB buffers. If we ever support + * giving 4KB buffers to user processes, this will need some + * work. Can't use piobufbase directly, because it has + * both 2K and 4K buffer base values. So check and handle. + */ + if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) { + if (rcd->pio_base >= dd->piobcnt2k) { + qib_dev_err(dd, + "%u:ctxt%u: no 2KB buffers available\n", + dd->unit, rcd->ctxt); + ret = -ENOBUFS; + goto bail; + } + rcd->piocnt = dd->piobcnt2k - rcd->pio_base; + qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n", + rcd->ctxt, rcd->piocnt); + } + + rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign; + qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt, + TXCHK_CHG_TYPE_USER, rcd); + /* + * try to ensure that processes start up with consistent avail update + * for their own range, at least. If system very quiet, it might + * have the in-memory copy out of date at startup for this range of + * buffers, when a context gets re-used. Do after the chg_pioavail + * and before the rest of setup, so it's "almost certain" the dma + * will have occurred (can't 100% guarantee, but should be many + * decimals of 9s, with this ordering), given how much else happens + * after this. + */ + dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); + + /* + * Now allocate the rcvhdr Q and eager TIDs; skip the TID + * array for time being. If rcd->ctxt > chip-supported, + * we need to do extra stuff here to handle by handling overflow + * through ctxt 0, someday + */ + ret = qib_create_rcvhdrq(dd, rcd); + if (!ret) + ret = qib_setup_eagerbufs(rcd); + if (ret) + goto bail_pio; + + rcd->tidcursor = 0; /* start at beginning after open */ + + /* initialize poll variables... */ + rcd->urgent = 0; + rcd->urgent_poll = 0; + + /* + * Now enable the ctxt for receive. + * For chips that are set to DMA the tail register to memory + * when they change (and when the update bit transitions from + * 0 to 1. So for those chips, we turn it off and then back on. + * This will (very briefly) affect any other open ctxts, but the + * duration is very short, and therefore isn't an issue. We + * explictly set the in-memory tail copy to 0 beforehand, so we + * don't have to wait to be sure the DMA update has happened + * (chip resets head/tail to 0 on transition to enable). + */ + if (rcd->rcvhdrtail_kvaddr) + qib_clear_rcvhdrtail(rcd); + + dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB, + rcd->ctxt); + + /* Notify any waiting slaves */ + if (rcd->subctxt_cnt) { + clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag); + wake_up(&rcd->wait); + } + return 0; + +bail_pio: + qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt, + TXCHK_CHG_TYPE_KERN, rcd); +bail: + return ret; +} + +/** + * unlock_exptid - unlock any expected TID entries context still had in use + * @rcd: ctxt + * + * We don't actually update the chip here, because we do a bulk update + * below, using f_clear_tids. 
+ */ +static void unlock_expected_tids(struct qib_ctxtdata *rcd) +{ + struct qib_devdata *dd = rcd->dd; + int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt; + int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt; + + for (i = ctxt_tidbase; i < maxtid; i++) { + struct page *p = dd->pageshadow[i]; + dma_addr_t phys; + + if (!p) + continue; + + phys = dd->physshadow[i]; + dd->physshadow[i] = dd->tidinvalid; + dd->pageshadow[i] = NULL; + pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + qib_release_user_pages(&p, 1); + cnt++; + } +} + +static int qib_close(struct inode *in, struct file *fp) +{ + int ret = 0; + struct qib_filedata *fd; + struct qib_ctxtdata *rcd; + struct qib_devdata *dd; + unsigned long flags; + unsigned ctxt; + pid_t pid; + + mutex_lock(&qib_mutex); + + fd = (struct qib_filedata *) fp->private_data; + fp->private_data = NULL; + rcd = fd->rcd; + if (!rcd) { + mutex_unlock(&qib_mutex); + goto bail; + } + + dd = rcd->dd; + + /* ensure all pio buffer writes in progress are flushed */ + qib_flush_wc(); + + /* drain user sdma queue */ + if (fd->pq) { + qib_user_sdma_queue_drain(rcd->ppd, fd->pq); + qib_user_sdma_queue_destroy(fd->pq); + } + + if (fd->rec_cpu_num != -1) + __clear_bit(fd->rec_cpu_num, qib_cpulist); + + if (--rcd->cnt) { + /* + * XXX If the master closes the context before the slave(s), + * revoke the mmap for the eager receive queue so + * the slave(s) don't wait for receive data forever. + */ + rcd->active_slaves &= ~(1 << fd->subctxt); + rcd->subpid[fd->subctxt] = 0; + mutex_unlock(&qib_mutex); + goto bail; + } + + /* early; no interrupt users after this */ + spin_lock_irqsave(&dd->uctxt_lock, flags); + ctxt = rcd->ctxt; + dd->rcd[ctxt] = NULL; + pid = rcd->pid; + rcd->pid = 0; + spin_unlock_irqrestore(&dd->uctxt_lock, flags); + + if (rcd->rcvwait_to || rcd->piowait_to || + rcd->rcvnowait || rcd->pionowait) { + rcd->rcvwait_to = 0; + rcd->piowait_to = 0; + rcd->rcvnowait = 0; + rcd->pionowait = 0; + } + if (rcd->flag) + rcd->flag = 0; + + if (dd->kregbase) { + /* atomically clear receive enable ctxt and intr avail. */ + dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS | + QIB_RCVCTRL_INTRAVAIL_DIS, ctxt); + + /* clean up the pkeys for this ctxt user */ + qib_clean_part_key(rcd, dd); + qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt); + qib_chg_pioavailkernel(dd, rcd->pio_base, + rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL); + + dd->f_clear_tids(dd, rcd); + + if (dd->pageshadow) + unlock_expected_tids(rcd); + qib_stats.sps_ctxts--; + } + + mutex_unlock(&qib_mutex); + qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */ + +bail: + kfree(fd); + return ret; +} + +static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo) +{ + struct qib_ctxt_info info; + int ret; + size_t sz; + struct qib_ctxtdata *rcd = ctxt_fp(fp); + struct qib_filedata *fd; + + fd = (struct qib_filedata *) fp->private_data; + + info.num_active = qib_count_active_units(); + info.unit = rcd->dd->unit; + info.port = rcd->ppd->port; + info.ctxt = rcd->ctxt; + info.subctxt = subctxt_fp(fp); + /* Number of user ctxts available for this device. 
*/ + info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt; + info.num_subctxts = rcd->subctxt_cnt; + info.rec_cpu = fd->rec_cpu_num; + sz = sizeof(info); + + if (copy_to_user(uinfo, &info, sz)) { + ret = -EFAULT; + goto bail; + } + ret = 0; + +bail: + return ret; +} + +static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq, + u32 __user *inflightp) +{ + const u32 val = qib_user_sdma_inflight_counter(pq); + + if (put_user(val, inflightp)) + return -EFAULT; + + return 0; +} + +static int qib_sdma_get_complete(struct qib_pportdata *ppd, + struct qib_user_sdma_queue *pq, + u32 __user *completep) +{ + u32 val; + int err; + + if (!pq) + return -EINVAL; + + err = qib_user_sdma_make_progress(ppd, pq); + if (err < 0) + return err; + + val = qib_user_sdma_complete_counter(pq); + if (put_user(val, completep)) + return -EFAULT; + + return 0; +} + +static int disarm_req_delay(struct qib_ctxtdata *rcd) +{ + int ret = 0; + + if (!usable(rcd->ppd, 1)) { + int i; + /* + * if link is down, or otherwise not usable, delay + * the caller up to 30 seconds, so we don't thrash + * in trying to get the chip back to ACTIVE, and + * set flag so they make the call again. + */ + if (rcd->user_event_mask) { + /* + * subctxt_cnt is 0 if not shared, so do base + * separately, first, then remaining subctxt, if any + */ + set_bit(_QIB_EVENT_DISARM_BUFS_BIT, + &rcd->user_event_mask[0]); + for (i = 1; i < rcd->subctxt_cnt; i++) + set_bit(_QIB_EVENT_DISARM_BUFS_BIT, + &rcd->user_event_mask[i]); + } + for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++) + msleep(100); + ret = -ENETDOWN; + } + return ret; +} + +/* + * Find all user contexts in use, and set the specified bit in their + * event mask. + * See also find_ctxt() for a similar use, that is specific to send buffers. + */ +int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit) +{ + struct qib_ctxtdata *rcd; + unsigned ctxt; + int ret = 0; + + spin_lock(&ppd->dd->uctxt_lock); + for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts; + ctxt++) { + rcd = ppd->dd->rcd[ctxt]; + if (!rcd) + continue; + if (rcd->user_event_mask) { + int i; + /* + * subctxt_cnt is 0 if not shared, so do base + * separately, first, then remaining subctxt, if any + */ + set_bit(evtbit, &rcd->user_event_mask[0]); + for (i = 1; i < rcd->subctxt_cnt; i++) + set_bit(evtbit, &rcd->user_event_mask[i]); + } + ret = 1; + break; + } + spin_unlock(&ppd->dd->uctxt_lock); + + return ret; +} + +/* + * clear the event notifier events for this context. + * For the DISARM_BUFS case, we also take action (this obsoletes + * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards + * compatibility. + * Other bits don't currently require actions, just atomically clear. + * User process then performs actions appropriate to bit having been + * set, if desired, and checks again in future. 
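+ *
+ * (Userspace side of this handshake, sketched against the command
+ * protocol in qib_write() below; field names as in struct qib_cmd:
+ *
+ *	struct qib_cmd c = { .type = QIB_CMD_ACK_EVENT };
+ *	c.cmd.event_mask = seen;	// bits read from the mmapped
+ *					// user_event_mask page
+ *	write(fd, &c, sizeof(c));
+ * )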
+ */ +static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt, + unsigned long events) +{ + int ret = 0, i; + + for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) { + if (!test_bit(i, &events)) + continue; + if (i == _QIB_EVENT_DISARM_BUFS_BIT) { + (void)qib_disarm_piobufs_ifneeded(rcd); + ret = disarm_req_delay(rcd); + } else + clear_bit(i, &rcd->user_event_mask[subctxt]); + } + return ret; +} + +static ssize_t qib_write(struct file *fp, const char __user *data, + size_t count, loff_t *off) +{ + const struct qib_cmd __user *ucmd; + struct qib_ctxtdata *rcd; + const void __user *src; + size_t consumed, copy = 0; + struct qib_cmd cmd; + ssize_t ret = 0; + void *dest; + + if (count < sizeof(cmd.type)) { + ret = -EINVAL; + goto bail; + } + + ucmd = (const struct qib_cmd __user *) data; + + if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) { + ret = -EFAULT; + goto bail; + } + + consumed = sizeof(cmd.type); + + switch (cmd.type) { + case QIB_CMD_ASSIGN_CTXT: + case QIB_CMD_USER_INIT: + copy = sizeof(cmd.cmd.user_info); + dest = &cmd.cmd.user_info; + src = &ucmd->cmd.user_info; + break; + + case QIB_CMD_RECV_CTRL: + copy = sizeof(cmd.cmd.recv_ctrl); + dest = &cmd.cmd.recv_ctrl; + src = &ucmd->cmd.recv_ctrl; + break; + + case QIB_CMD_CTXT_INFO: + copy = sizeof(cmd.cmd.ctxt_info); + dest = &cmd.cmd.ctxt_info; + src = &ucmd->cmd.ctxt_info; + break; + + case QIB_CMD_TID_UPDATE: + case QIB_CMD_TID_FREE: + copy = sizeof(cmd.cmd.tid_info); + dest = &cmd.cmd.tid_info; + src = &ucmd->cmd.tid_info; + break; + + case QIB_CMD_SET_PART_KEY: + copy = sizeof(cmd.cmd.part_key); + dest = &cmd.cmd.part_key; + src = &ucmd->cmd.part_key; + break; + + case QIB_CMD_DISARM_BUFS: + case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */ + copy = 0; + src = NULL; + dest = NULL; + break; + + case QIB_CMD_POLL_TYPE: + copy = sizeof(cmd.cmd.poll_type); + dest = &cmd.cmd.poll_type; + src = &ucmd->cmd.poll_type; + break; + + case QIB_CMD_ARMLAUNCH_CTRL: + copy = sizeof(cmd.cmd.armlaunch_ctrl); + dest = &cmd.cmd.armlaunch_ctrl; + src = &ucmd->cmd.armlaunch_ctrl; + break; + + case QIB_CMD_SDMA_INFLIGHT: + copy = sizeof(cmd.cmd.sdma_inflight); + dest = &cmd.cmd.sdma_inflight; + src = &ucmd->cmd.sdma_inflight; + break; + + case QIB_CMD_SDMA_COMPLETE: + copy = sizeof(cmd.cmd.sdma_complete); + dest = &cmd.cmd.sdma_complete; + src = &ucmd->cmd.sdma_complete; + break; + + case QIB_CMD_ACK_EVENT: + copy = sizeof(cmd.cmd.event_mask); + dest = &cmd.cmd.event_mask; + src = &ucmd->cmd.event_mask; + break; + + default: + ret = -EINVAL; + goto bail; + } + + if (copy) { + if ((count - consumed) < copy) { + ret = -EINVAL; + goto bail; + } + if (copy_from_user(dest, src, copy)) { + ret = -EFAULT; + goto bail; + } + consumed += copy; + } + + rcd = ctxt_fp(fp); + if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) { + ret = -EINVAL; + goto bail; + } + + switch (cmd.type) { + case QIB_CMD_ASSIGN_CTXT: + ret = qib_assign_ctxt(fp, &cmd.cmd.user_info); + if (ret) + goto bail; + break; + + case QIB_CMD_USER_INIT: + ret = qib_do_user_init(fp, &cmd.cmd.user_info); + if (ret) + goto bail; + ret = qib_get_base_info(fp, (void __user *) (unsigned long) + cmd.cmd.user_info.spu_base_info, + cmd.cmd.user_info.spu_base_info_size); + break; + + case QIB_CMD_RECV_CTRL: + ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl); + break; + + case QIB_CMD_CTXT_INFO: + ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *) + (unsigned long) cmd.cmd.ctxt_info); + break; + + case QIB_CMD_TID_UPDATE: + ret = qib_tid_update(rcd, fp, 
&cmd.cmd.tid_info); + break; + + case QIB_CMD_TID_FREE: + ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info); + break; + + case QIB_CMD_SET_PART_KEY: + ret = qib_set_part_key(rcd, cmd.cmd.part_key); + break; + + case QIB_CMD_DISARM_BUFS: + (void)qib_disarm_piobufs_ifneeded(rcd); + ret = disarm_req_delay(rcd); + break; + + case QIB_CMD_PIOAVAILUPD: + qib_force_pio_avail_update(rcd->dd); + break; + + case QIB_CMD_POLL_TYPE: + rcd->poll_type = cmd.cmd.poll_type; + break; + + case QIB_CMD_ARMLAUNCH_CTRL: + rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl); + break; + + case QIB_CMD_SDMA_INFLIGHT: + ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp), + (u32 __user *) (unsigned long) + cmd.cmd.sdma_inflight); + break; + + case QIB_CMD_SDMA_COMPLETE: + ret = qib_sdma_get_complete(rcd->ppd, + user_sdma_queue_fp(fp), + (u32 __user *) (unsigned long) + cmd.cmd.sdma_complete); + break; + + case QIB_CMD_ACK_EVENT: + ret = qib_user_event_ack(rcd, subctxt_fp(fp), + cmd.cmd.event_mask); + break; + } + + if (ret >= 0) + ret = consumed; + +bail: + return ret; +} + +static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov, + unsigned long dim, loff_t off) +{ + struct qib_filedata *fp = iocb->ki_filp->private_data; + struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp); + struct qib_user_sdma_queue *pq = fp->pq; + + if (!dim || !pq) + return -EINVAL; + + return qib_user_sdma_writev(rcd, pq, iov, dim); +} + +static struct class *qib_class; +static dev_t qib_dev; + +int qib_cdev_init(int minor, const char *name, + const struct file_operations *fops, + struct cdev **cdevp, struct device **devp) +{ + const dev_t dev = MKDEV(MAJOR(qib_dev), minor); + struct cdev *cdev; + struct device *device = NULL; + int ret; + + cdev = cdev_alloc(); + if (!cdev) { + printk(KERN_ERR QIB_DRV_NAME + ": Could not allocate cdev for minor %d, %s\n", + minor, name); + ret = -ENOMEM; + goto done; + } + + cdev->owner = THIS_MODULE; + cdev->ops = fops; + kobject_set_name(&cdev->kobj, name); + + ret = cdev_add(cdev, dev, 1); + if (ret < 0) { + printk(KERN_ERR QIB_DRV_NAME + ": Could not add cdev for minor %d, %s (err %d)\n", + minor, name, -ret); + goto err_cdev; + } + + device = device_create(qib_class, NULL, dev, NULL, name); + if (!IS_ERR(device)) + goto done; + ret = PTR_ERR(device); + device = NULL; + printk(KERN_ERR QIB_DRV_NAME ": Could not create " + "device for minor %d, %s (err %d)\n", + minor, name, -ret); +err_cdev: + cdev_del(cdev); + cdev = NULL; +done: + *cdevp = cdev; + *devp = device; + return ret; +} + +void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp) +{ + struct device *device = *devp; + + if (device) { + device_unregister(device); + *devp = NULL; + } + + if (*cdevp) { + cdev_del(*cdevp); + *cdevp = NULL; + } +} + +static struct cdev *wildcard_cdev; +static struct device *wildcard_device; + +int __init qib_dev_init(void) +{ + int ret; + + ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME); + if (ret < 0) { + printk(KERN_ERR QIB_DRV_NAME ": Could not allocate " + "chrdev region (err %d)\n", -ret); + goto done; + } + + qib_class = class_create(THIS_MODULE, "ipath"); + if (IS_ERR(qib_class)) { + ret = PTR_ERR(qib_class); + printk(KERN_ERR QIB_DRV_NAME ": Could not create " + "device class (err %d)\n", -ret); + unregister_chrdev_region(qib_dev, QIB_NMINORS); + } + +done: + return ret; +} + +void qib_dev_cleanup(void) +{ + if (qib_class) { + class_destroy(qib_class); + qib_class = NULL; + } + + unregister_chrdev_region(qib_dev, QIB_NMINORS); +} + +static atomic_t 
user_count = ATOMIC_INIT(0); + +static void qib_user_remove(struct qib_devdata *dd) +{ + if (atomic_dec_return(&user_count) == 0) + qib_cdev_cleanup(&wildcard_cdev, &wildcard_device); + + qib_cdev_cleanup(&dd->user_cdev, &dd->user_device); +} + +static int qib_user_add(struct qib_devdata *dd) +{ + char name[10]; + int ret; + + if (atomic_inc_return(&user_count) == 1) { + ret = qib_cdev_init(0, "ipath", &qib_file_ops, + &wildcard_cdev, &wildcard_device); + if (ret) + goto done; + } + + snprintf(name, sizeof(name), "ipath%d", dd->unit); + ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops, + &dd->user_cdev, &dd->user_device); + if (ret) + qib_user_remove(dd); +done: + return ret; +} + +/* + * Create per-unit files in /dev + */ +int qib_device_create(struct qib_devdata *dd) +{ + int r, ret; + + r = qib_user_add(dd); + ret = qib_diag_add(dd); + if (r && !ret) + ret = r; + return ret; +} + +/* + * Remove per-unit files in /dev + * void, core kernel returns no errors for this stuff + */ +void qib_device_remove(struct qib_devdata *dd) +{ + qib_user_remove(dd); + qib_diag_remove(dd); +} diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c new file mode 100644 index 000000000000..edef8527eb34 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -0,0 +1,618 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/mount.h> +#include <linux/pagemap.h> +#include <linux/init.h> +#include <linux/namei.h> + +#include "qib.h" + +#define QIBFS_MAGIC 0x726a77 + +static struct super_block *qib_super; + +#define private2dd(file) ((file)->f_dentry->d_inode->i_private) + +static int qibfs_mknod(struct inode *dir, struct dentry *dentry, + int mode, const struct file_operations *fops, + void *data) +{ + int error; + struct inode *inode = new_inode(dir->i_sb); + + if (!inode) { + error = -EPERM; + goto bail; + } + + inode->i_mode = mode; + inode->i_uid = 0; + inode->i_gid = 0; + inode->i_blocks = 0; + inode->i_atime = CURRENT_TIME; + inode->i_mtime = inode->i_atime; + inode->i_ctime = inode->i_atime; + inode->i_private = data; + if ((mode & S_IFMT) == S_IFDIR) { + inode->i_op = &simple_dir_inode_operations; + inc_nlink(inode); + inc_nlink(dir); + } + + inode->i_fop = fops; + + d_instantiate(dentry, inode); + error = 0; + +bail: + return error; +} + +static int create_file(const char *name, mode_t mode, + struct dentry *parent, struct dentry **dentry, + const struct file_operations *fops, void *data) +{ + int error; + + *dentry = NULL; + mutex_lock(&parent->d_inode->i_mutex); + *dentry = lookup_one_len(name, parent, strlen(name)); + if (!IS_ERR(*dentry)) + error = qibfs_mknod(parent->d_inode, *dentry, + mode, fops, data); + else + error = PTR_ERR(*dentry); + mutex_unlock(&parent->d_inode->i_mutex); + + return error; +} + +static ssize_t driver_stats_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(buf, count, ppos, &qib_stats, + sizeof qib_stats); +} + +/* + * driver stats field names, one line per stat, single string. Used by + * programs like ipathstats to print the stats in a way which works for + * different versions of drivers, without changing program source. + * if qlogic_ib_stats changes, this needs to change. Names need to be + * 12 chars or less (w/o newline), for proper display by ipathstats utility. 
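+ *
+ * A sketch of how a reader pairs the two files (hypothetical userspace
+ * code; nth_line() is a helper the reader would supply): line N of this
+ * string labels the Nth u64 returned by driver_stats_read(), e.g.
+ *
+ *	read(names_fd, names, sizeof(names));
+ *	read(stats_fd, vals, sizeof(vals));
+ *	printf("%s = %llu\n", nth_line(names, n),
+ *	       (unsigned long long) vals[n]);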
+ */ +static const char qib_statnames[] = + "KernIntr\n" + "ErrorIntr\n" + "Tx_Errs\n" + "Rcv_Errs\n" + "H/W_Errs\n" + "NoPIOBufs\n" + "CtxtsOpen\n" + "RcvLen_Errs\n" + "EgrBufFull\n" + "EgrHdrFull\n" + ; + +static ssize_t driver_names_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(buf, count, ppos, qib_statnames, + sizeof qib_statnames - 1); /* no null */ +} + +static const struct file_operations driver_ops[] = { + { .read = driver_stats_read, }, + { .read = driver_names_read, }, +}; + +/* read the per-device counters */ +static ssize_t dev_counters_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + u64 *counters; + size_t avail; + struct qib_devdata *dd = private2dd(file); + + avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters); + return simple_read_from_buffer(buf, count, ppos, counters, avail); +} + +/* read the per-device counters */ +static ssize_t dev_names_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + char *names; + size_t avail; + struct qib_devdata *dd = private2dd(file); + + avail = dd->f_read_cntrs(dd, *ppos, &names, NULL); + return simple_read_from_buffer(buf, count, ppos, names, avail); +} + +static const struct file_operations cntr_ops[] = { + { .read = dev_counters_read, }, + { .read = dev_names_read, }, +}; + +/* + * Could use file->f_dentry->d_inode->i_ino to figure out which file, + * instead of separate routine for each, but for now, this works... + */ + +/* read the per-port names (same for each port) */ +static ssize_t portnames_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + char *names; + size_t avail; + struct qib_devdata *dd = private2dd(file); + + avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL); + return simple_read_from_buffer(buf, count, ppos, names, avail); +} + +/* read the per-port counters for port 1 (pidx 0) */ +static ssize_t portcntrs_1_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + u64 *counters; + size_t avail; + struct qib_devdata *dd = private2dd(file); + + avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters); + return simple_read_from_buffer(buf, count, ppos, counters, avail); +} + +/* read the per-port counters for port 2 (pidx 1) */ +static ssize_t portcntrs_2_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + u64 *counters; + size_t avail; + struct qib_devdata *dd = private2dd(file); + + avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters); + return simple_read_from_buffer(buf, count, ppos, counters, avail); +} + +static const struct file_operations portcntr_ops[] = { + { .read = portnames_read, }, + { .read = portcntrs_1_read, }, + { .read = portcntrs_2_read, }, +}; + +/* + * read the per-port QSFP data for port 1 (pidx 0) + */ +static ssize_t qsfp_1_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct qib_devdata *dd = private2dd(file); + char *tmp; + int ret; + + tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE); + if (ret > 0) + ret = simple_read_from_buffer(buf, count, ppos, tmp, ret); + kfree(tmp); + return ret; +} + +/* + * read the per-port QSFP data for port 2 (pidx 1) + */ +static ssize_t qsfp_2_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct qib_devdata *dd = private2dd(file); + char *tmp; + int ret; + + if (dd->num_pports < 2) + return -ENODEV; + + tmp = kmalloc(PAGE_SIZE, 
GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE); + if (ret > 0) + ret = simple_read_from_buffer(buf, count, ppos, tmp, ret); + kfree(tmp); + return ret; +} + +static const struct file_operations qsfp_ops[] = { + { .read = qsfp_1_read, }, + { .read = qsfp_2_read, }, +}; + +static ssize_t flash_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct qib_devdata *dd; + ssize_t ret; + loff_t pos; + char *tmp; + + pos = *ppos; + + if (pos < 0) { + ret = -EINVAL; + goto bail; + } + + if (pos >= sizeof(struct qib_flash)) { + ret = 0; + goto bail; + } + + if (count > sizeof(struct qib_flash) - pos) + count = sizeof(struct qib_flash) - pos; + + tmp = kmalloc(count, GFP_KERNEL); + if (!tmp) { + ret = -ENOMEM; + goto bail; + } + + dd = private2dd(file); + if (qib_eeprom_read(dd, pos, tmp, count)) { + qib_dev_err(dd, "failed to read from flash\n"); + ret = -ENXIO; + goto bail_tmp; + } + + if (copy_to_user(buf, tmp, count)) { + ret = -EFAULT; + goto bail_tmp; + } + + *ppos = pos + count; + ret = count; + +bail_tmp: + kfree(tmp); + +bail: + return ret; +} + +static ssize_t flash_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct qib_devdata *dd; + ssize_t ret; + loff_t pos; + char *tmp; + + pos = *ppos; + + if (pos != 0) { + ret = -EINVAL; + goto bail; + } + + if (count != sizeof(struct qib_flash)) { + ret = -EINVAL; + goto bail; + } + + tmp = kmalloc(count, GFP_KERNEL); + if (!tmp) { + ret = -ENOMEM; + goto bail; + } + + if (copy_from_user(tmp, buf, count)) { + ret = -EFAULT; + goto bail_tmp; + } + + dd = private2dd(file); + if (qib_eeprom_write(dd, pos, tmp, count)) { + ret = -ENXIO; + qib_dev_err(dd, "failed to write to flash\n"); + goto bail_tmp; + } + + *ppos = pos + count; + ret = count; + +bail_tmp: + kfree(tmp); + +bail: + return ret; +} + +static const struct file_operations flash_ops = { + .read = flash_read, + .write = flash_write, +}; + +static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) +{ + struct dentry *dir, *tmp; + char unit[10]; + int ret, i; + + /* create the per-unit directory */ + snprintf(unit, sizeof unit, "%u", dd->unit); + ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, + &simple_dir_operations, dd); + if (ret) { + printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret); + goto bail; + } + + /* create the files in the new directory */ + ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp, + &cntr_ops[0], dd); + if (ret) { + printk(KERN_ERR "create_file(%s/counters) failed: %d\n", + unit, ret); + goto bail; + } + ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp, + &cntr_ops[1], dd); + if (ret) { + printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n", + unit, ret); + goto bail; + } + ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp, + &portcntr_ops[0], dd); + if (ret) { + printk(KERN_ERR "create_file(%s/%s) failed: %d\n", + unit, "portcounter_names", ret); + goto bail; + } + for (i = 1; i <= dd->num_pports; i++) { + char fname[24]; + + sprintf(fname, "port%dcounters", i); + /* create the files in the new directory */ + ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, + &portcntr_ops[i], dd); + if (ret) { + printk(KERN_ERR "create_file(%s/%s) failed: %d\n", + unit, fname, ret); + goto bail; + } + if (!(dd->flags & QIB_HAS_QSFP)) + continue; + sprintf(fname, "qsfp%d", i); + ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, + &qsfp_ops[i - 1], dd); + if (ret) { + 
printk(KERN_ERR "create_file(%s/%s) failed: %d\n", + unit, fname, ret); + goto bail; + } + } + + ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp, + &flash_ops, dd); + if (ret) + printk(KERN_ERR "create_file(%s/flash) failed: %d\n", + unit, ret); +bail: + return ret; +} + +static int remove_file(struct dentry *parent, char *name) +{ + struct dentry *tmp; + int ret; + + tmp = lookup_one_len(name, parent, strlen(name)); + + if (IS_ERR(tmp)) { + ret = PTR_ERR(tmp); + goto bail; + } + + spin_lock(&dcache_lock); + spin_lock(&tmp->d_lock); + if (!(d_unhashed(tmp) && tmp->d_inode)) { + dget_locked(tmp); + __d_drop(tmp); + spin_unlock(&tmp->d_lock); + spin_unlock(&dcache_lock); + simple_unlink(parent->d_inode, tmp); + } else { + spin_unlock(&tmp->d_lock); + spin_unlock(&dcache_lock); + } + + ret = 0; +bail: + /* + * We don't expect clients to care about the return value, but + * it's there if they need it. + */ + return ret; +} + +static int remove_device_files(struct super_block *sb, + struct qib_devdata *dd) +{ + struct dentry *dir, *root; + char unit[10]; + int ret, i; + + root = dget(sb->s_root); + mutex_lock(&root->d_inode->i_mutex); + snprintf(unit, sizeof unit, "%u", dd->unit); + dir = lookup_one_len(unit, root, strlen(unit)); + + if (IS_ERR(dir)) { + ret = PTR_ERR(dir); + printk(KERN_ERR "Lookup of %s failed\n", unit); + goto bail; + } + + remove_file(dir, "counters"); + remove_file(dir, "counter_names"); + remove_file(dir, "portcounter_names"); + for (i = 0; i < dd->num_pports; i++) { + char fname[24]; + + sprintf(fname, "port%dcounters", i + 1); + remove_file(dir, fname); + if (dd->flags & QIB_HAS_QSFP) { + sprintf(fname, "qsfp%d", i + 1); + remove_file(dir, fname); + } + } + remove_file(dir, "flash"); + d_delete(dir); + ret = simple_rmdir(root->d_inode, dir); + +bail: + mutex_unlock(&root->d_inode->i_mutex); + dput(root); + return ret; +} + +/* + * This fills everything in when the fs is mounted, to handle umount/mount + * after device init. The direct add_cntr_files() call handles adding + * them from the init code, when the fs is already mounted. 
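+ *
+ * For reference (not part of this patch): the filesystem type is
+ * registered as "ipathfs" below, so a typical mount is
+ *
+ *	mount -t ipathfs none /ipathfs
+ *
+ * after which the per-unit counter directories appear at the root.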
+ */ +static int qibfs_fill_super(struct super_block *sb, void *data, int silent) +{ + struct qib_devdata *dd, *tmp; + unsigned long flags; + int ret; + + static struct tree_descr files[] = { + [2] = {"driver_stats", &driver_ops[0], S_IRUGO}, + [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO}, + {""}, + }; + + ret = simple_fill_super(sb, QIBFS_MAGIC, files); + if (ret) { + printk(KERN_ERR "simple_fill_super failed: %d\n", ret); + goto bail; + } + + spin_lock_irqsave(&qib_devs_lock, flags); + + list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) { + spin_unlock_irqrestore(&qib_devs_lock, flags); + ret = add_cntr_files(sb, dd); + if (ret) { + deactivate_super(sb); + goto bail; + } + spin_lock_irqsave(&qib_devs_lock, flags); + } + + spin_unlock_irqrestore(&qib_devs_lock, flags); + +bail: + return ret; +} + +static int qibfs_get_sb(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, struct vfsmount *mnt) +{ + int ret = get_sb_single(fs_type, flags, data, + qibfs_fill_super, mnt); + if (ret >= 0) + qib_super = mnt->mnt_sb; + return ret; +} + +static void qibfs_kill_super(struct super_block *s) +{ + kill_litter_super(s); + qib_super = NULL; +} + +int qibfs_add(struct qib_devdata *dd) +{ + int ret; + + /* + * On first unit initialized, qib_super will not yet exist + * because nobody has yet tried to mount the filesystem, so + * we can't consider that to be an error; if an error occurs + * during the mount, that will get a complaint, so this is OK. + * add_cntr_files() for all units is done at mount from + * qibfs_fill_super(), so one way or another, everything works. + */ + if (qib_super == NULL) + ret = 0; + else + ret = add_cntr_files(qib_super, dd); + return ret; +} + +int qibfs_remove(struct qib_devdata *dd) +{ + int ret = 0; + + if (qib_super) + ret = remove_device_files(qib_super, dd); + + return ret; +} + +static struct file_system_type qibfs_fs_type = { + .owner = THIS_MODULE, + .name = "ipathfs", + .get_sb = qibfs_get_sb, + .kill_sb = qibfs_kill_super, +}; + +int __init qib_init_qibfs(void) +{ + return register_filesystem(&qibfs_fs_type); +} + +int __exit qib_exit_qibfs(void) +{ + return unregister_filesystem(&qibfs_fs_type); +} diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c new file mode 100644 index 000000000000..1eadadc13da8 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -0,0 +1,3576 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +/* + * This file contains all of the code that is specific to the + * QLogic_IB 6120 PCIe chip. + */ + +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <rdma/ib_verbs.h> + +#include "qib.h" +#include "qib_6120_regs.h" + +static void qib_6120_setup_setextled(struct qib_pportdata *, u32); +static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op); +static u8 qib_6120_phys_portstate(u64); +static u32 qib_6120_iblink_state(u64); + +/* + * This file contains all the chip-specific register information and + * access functions for the QLogic QLogic_IB PCI-Express chip. + * + */ + +/* KREG_IDX uses machine-generated #defines */ +#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64)) + +/* Use defines to tie machine-generated names to lower-case names */ +#define kr_extctrl KREG_IDX(EXTCtrl) +#define kr_extstatus KREG_IDX(EXTStatus) +#define kr_gpio_clear KREG_IDX(GPIOClear) +#define kr_gpio_mask KREG_IDX(GPIOMask) +#define kr_gpio_out KREG_IDX(GPIOOut) +#define kr_gpio_status KREG_IDX(GPIOStatus) +#define kr_rcvctrl KREG_IDX(RcvCtrl) +#define kr_sendctrl KREG_IDX(SendCtrl) +#define kr_partitionkey KREG_IDX(RcvPartitionKey) +#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) +#define kr_ibcstatus KREG_IDX(IBCStatus) +#define kr_ibcctrl KREG_IDX(IBCCtrl) +#define kr_sendbuffererror KREG_IDX(SendBufErr0) +#define kr_rcvbthqp KREG_IDX(RcvBTHQP) +#define kr_counterregbase KREG_IDX(CntrRegBase) +#define kr_palign KREG_IDX(PageAlign) +#define kr_rcvegrbase KREG_IDX(RcvEgrBase) +#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) +#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) +#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) +#define kr_rcvhdrsize KREG_IDX(RcvHdrSize) +#define kr_rcvtidbase KREG_IDX(RcvTIDBase) +#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) +#define kr_scratch KREG_IDX(Scratch) +#define kr_sendctrl KREG_IDX(SendCtrl) +#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr) +#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase) +#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt) +#define kr_sendpiosize KREG_IDX(SendPIOSize) +#define kr_sendregbase KREG_IDX(SendRegBase) +#define kr_userregbase KREG_IDX(UserRegBase) +#define kr_control KREG_IDX(Control) +#define kr_intclear KREG_IDX(IntClear) +#define kr_intmask KREG_IDX(IntMask) +#define kr_intstatus KREG_IDX(IntStatus) +#define kr_errclear KREG_IDX(ErrClear) +#define kr_errmask KREG_IDX(ErrMask) +#define kr_errstatus KREG_IDX(ErrStatus) +#define kr_hwerrclear KREG_IDX(HwErrClear) +#define kr_hwerrmask KREG_IDX(HwErrMask) +#define kr_hwerrstatus KREG_IDX(HwErrStatus) +#define kr_revision KREG_IDX(Revision) +#define kr_portcnt KREG_IDX(PortCnt) +#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0) +#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1) +#define kr_serdes_stat KREG_IDX(SerdesStat) +#define kr_xgxs_cfg KREG_IDX(XGXSCfg) + +/* These must only be written via qib_write_kreg_ctxt() */ +#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0) +#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0) + +#define CREG_IDX(regname) 
((QIB_6120_##regname##_OFFS - \ + QIB_6120_LBIntCnt_OFFS) / sizeof(u64)) + +#define cr_badformat CREG_IDX(RxBadFormatCnt) +#define cr_erricrc CREG_IDX(RxICRCErrCnt) +#define cr_errlink CREG_IDX(RxLinkProblemCnt) +#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt) +#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt) +#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt) +#define cr_err_rlen CREG_IDX(RxLenErrCnt) +#define cr_errslen CREG_IDX(TxLenErrCnt) +#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt) +#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt) +#define cr_errvcrc CREG_IDX(RxVCRCErrCnt) +#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt) +#define cr_lbint CREG_IDX(LBIntCnt) +#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt) +#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt) +#define cr_lbflowstall CREG_IDX(LBFlowStallCnt) +#define cr_pktrcv CREG_IDX(RxDataPktCnt) +#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt) +#define cr_pktsend CREG_IDX(TxDataPktCnt) +#define cr_pktsendflow CREG_IDX(TxFlowPktCnt) +#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt) +#define cr_rcvebp CREG_IDX(RxEBPCnt) +#define cr_rcvovfl CREG_IDX(RxBufOvflCnt) +#define cr_senddropped CREG_IDX(TxDroppedPktCnt) +#define cr_sendstall CREG_IDX(TxFlowStallCnt) +#define cr_sendunderrun CREG_IDX(TxUnderrunCnt) +#define cr_wordrcv CREG_IDX(RxDwordCnt) +#define cr_wordsend CREG_IDX(TxDwordCnt) +#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt) +#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt) +#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt) +#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt) +#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt) + +#define SYM_RMASK(regname, fldname) ((u64) \ + QIB_6120_##regname##_##fldname##_RMASK) +#define SYM_MASK(regname, fldname) ((u64) \ + QIB_6120_##regname##_##fldname##_RMASK << \ + QIB_6120_##regname##_##fldname##_LSB) +#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB) + +#define SYM_FIELD(value, regname, fldname) ((u64) \ + (((value) >> SYM_LSB(regname, fldname)) & \ + SYM_RMASK(regname, fldname))) +#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) +#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) + +/* link training states, from IBC */ +#define IB_6120_LT_STATE_DISABLED 0x00 +#define IB_6120_LT_STATE_LINKUP 0x01 +#define IB_6120_LT_STATE_POLLACTIVE 0x02 +#define IB_6120_LT_STATE_POLLQUIET 0x03 +#define IB_6120_LT_STATE_SLEEPDELAY 0x04 +#define IB_6120_LT_STATE_SLEEPQUIET 0x05 +#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08 +#define IB_6120_LT_STATE_CFGRCVFCFG 0x09 +#define IB_6120_LT_STATE_CFGWAITRMT 0x0a +#define IB_6120_LT_STATE_CFGIDLE 0x0b +#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c +#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e +#define IB_6120_LT_STATE_RECOVERIDLE 0x0f + +/* link state machine states from IBC */ +#define IB_6120_L_STATE_DOWN 0x0 +#define IB_6120_L_STATE_INIT 0x1 +#define IB_6120_L_STATE_ARM 0x2 +#define IB_6120_L_STATE_ACTIVE 0x3 +#define IB_6120_L_STATE_ACT_DEFER 0x4 + +static const u8 qib_6120_physportstate[0x20] = { + [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, + [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, + [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, + [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, + [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, + [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, + [IB_6120_LT_STATE_CFGDEBOUNCE] = + IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_6120_LT_STATE_CFGRCVFCFG] = + IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_6120_LT_STATE_CFGWAITRMT] = + 
IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_6120_LT_STATE_RECOVERRETRAIN] = + IB_PHYSPORTSTATE_LINK_ERR_RECOVER, + [IB_6120_LT_STATE_RECOVERWAITRMT] = + IB_PHYSPORTSTATE_LINK_ERR_RECOVER, + [IB_6120_LT_STATE_RECOVERIDLE] = + IB_PHYSPORTSTATE_LINK_ERR_RECOVER, + [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN +}; + + +struct qib_chip_specific { + u64 __iomem *cregbase; + u64 *cntrs; + u64 *portcntrs; + void *dummy_hdrq; /* used after ctxt close */ + dma_addr_t dummy_hdrq_phys; + spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */ + spinlock_t user_tid_lock; /* no back to back user TID writes */ + spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */ + spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */ + u64 hwerrmask; + u64 errormask; + u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */ + u64 gpio_mask; /* shadow the gpio mask register */ + u64 extctrl; /* shadow the gpio output enable, etc... */ + /* + * these 5 fields are used to establish deltas for IB symbol + * errors and linkrecovery errors. They can be reported on + * some chips during link negotiation prior to INIT, and with + * DDR when faking DDR negotiations with non-IBTA switches. + * The chip counters are adjusted at driver unload if there is + * a non-zero delta. + */ + u64 ibdeltainprog; + u64 ibsymdelta; + u64 ibsymsnap; + u64 iblnkerrdelta; + u64 iblnkerrsnap; + u64 ibcctrl; /* shadow for kr_ibcctrl */ + u32 lastlinkrecov; /* link recovery issue */ + int irq; + u32 cntrnamelen; + u32 portcntrnamelen; + u32 ncntrs; + u32 nportcntrs; + /* used with gpio interrupts to implement IB counters */ + u32 rxfc_unsupvl_errs; + u32 overrun_thresh_errs; + /* + * these count only cases where _successive_ LocalLinkIntegrity + * errors were seen in the receive headers of IB standard packets + */ + u32 lli_errs; + u32 lli_counter; + u64 lli_thresh; + u64 sword; /* total dwords sent (sample result) */ + u64 rword; /* total dwords received (sample result) */ + u64 spkts; /* total packets sent (sample result) */ + u64 rpkts; /* total packets received (sample result) */ + u64 xmit_wait; /* # of ticks no data sent (sample result) */ + struct timer_list pma_timer; + char emsgbuf[128]; + char bitsmsgbuf[64]; + u8 pma_sample_status; +}; + +/* ibcctrl bits */ +#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 +/* cycle through TS1/TS2 till OK */ +#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2 +/* wait for TS1, then go on */ +#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3 +#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16 + +#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ +#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ +#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ +#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18 + +/* + * We could have a single register get/put routine, that takes a group type, + * but this is somewhat clearer and cleaner. It also gives us some error + * checking. 64 bit register reads should always work, but are inefficient + * on opteron (the northbridge always generates 2 separate HT 32 bit reads), + * so we use kreg32 wherever possible. User register and counter register + * reads are always 32 bit reads, so only one form of those routines. 
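+ *
+ * Illustrative use of the accessors defined below (register names as
+ * defined above):
+ *
+ *	u32 portcnt = qib_read_kreg32(dd, kr_portcnt);	(one 32-bit read)
+ *	u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);	(two 32-bit HT
+ *							 reads on opteron)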
+ */
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 if the chip is absent or not yet mapped (not distinguishable
+ * from a register that actually contains 0; we may add a separate error
+ * variable at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+				  enum qib_ureg regno, int ctxt)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+
+	if (dd->userbase)
+		return readl(regno + (u64 __iomem *)
+			     ((char __iomem *)dd->userbase +
+			      dd->ureg_align * ctxt));
+	else
+		return readl(regno + (u64 __iomem *)
+			     (dd->uregbase +
+			      (char __iomem *)dd->kregbase +
+			      dd->ureg_align * ctxt));
+}
+
+/**
+ * qib_write_ureg - write a 64-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+				  enum qib_ureg regno, u64 value, int ctxt)
+{
+	u64 __iomem *ubase;
+
+	if (dd->userbase)
+		ubase = (u64 __iomem *)
+			((char __iomem *) dd->userbase +
+			 dd->ureg_align * ctxt);
+	else
+		ubase = (u64 __iomem *)
+			(dd->uregbase +
+			 (char __iomem *) dd->kregbase +
+			 dd->ureg_align * ctxt);
+
+	if (dd->kregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &ubase[regno]);
+}
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+				  const u16 regno)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return -1;
+	return readl((u32 __iomem *)&dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+				  const u16 regno)
+{
+	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+		return -1;
+
+	return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+				  const u16 regno, u64 value)
+{
+	if (dd->kregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &dd->kregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+				       const u16 regno, unsigned ctxt,
+				       u64 value)
+{
+	qib_write_kreg(dd, regno + ctxt, value);
+}
+
+static inline void write_6120_creg(const struct qib_devdata *dd,
+				   u16 regno, u64 value)
+{
+	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
+		writeq(value, &dd->cspec->cregbase[regno]);
+}
+
+static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
+{
+	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
+{
+	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+		return 0;
+	return readl(&dd->cspec->cregbase[regno]);
+}
+
+/* kr_control bits */
+#define QLOGIC_IB_C_RESET 1U
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVURG_SHIFT 0
+#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
+
+#define QLOGIC_IB_C_FREEZEMODE 0x00000002
+#define QLOGIC_IB_C_LINKENABLE 0x00000004
+#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
+#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
+#define
QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL +#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL +#define QLOGIC_IB_I_BITSEXTANT \ + ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \ + (QLOGIC_IB_I_RCVAVAIL_MASK << \ + QLOGIC_IB_I_RCVAVAIL_SHIFT) | \ + QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \ + QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO) + +/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ +#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL +#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0 +#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL +#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL +#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL +#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL +#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL +#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL +#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL +#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL +#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL +#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL + + +/* kr_extstatus bits */ +#define QLOGIC_IB_EXTS_FREQSEL 0x2 +#define QLOGIC_IB_EXTS_SERDESSEL 0x4 +#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000 +#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000 + +/* kr_xgxsconfig bits */ +#define QLOGIC_IB_XGXS_RESET 0x5ULL + +#define _QIB_GPIO_SDA_NUM 1 +#define _QIB_GPIO_SCL_NUM 0 + +/* Bits in GPIO for the added IB link interrupts */ +#define GPIO_RXUVL_BIT 3 +#define GPIO_OVRUN_BIT 4 +#define GPIO_LLI_BIT 5 +#define GPIO_ERRINTR_MASK 0x38 + + +#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL +#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \ + ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1) +#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid)) +#define QLOGIC_IB_RT_IS_VALID(tid) \ + (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \ + ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK))) +#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */ +#define QLOGIC_IB_RT_ADDR_SHIFT 10 + +#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16 +#define QLOGIC_IB_R_TAILUPD_SHIFT 31 +#define IBA6120_R_PKEY_DIS_SHIFT 30 + +#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */ + +#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr) +#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr) + +#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \ + ((1ULL << (SYM_LSB(regname, fldname) + (bit))))) + +#define TXEMEMPARITYERR_PIOBUF \ + SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0) +#define TXEMEMPARITYERR_PIOPBC \ + SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1) +#define TXEMEMPARITYERR_PIOLAUNCHFIFO \ + SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2) + +#define RXEMEMPARITYERR_RCVBUF \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0) +#define RXEMEMPARITYERR_LOOKUPQ \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1) +#define RXEMEMPARITYERR_EXPTID \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2) +#define RXEMEMPARITYERR_EAGERTID \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3) +#define RXEMEMPARITYERR_FLAGBUF \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4) +#define RXEMEMPARITYERR_DATAINFO \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5) +#define RXEMEMPARITYERR_HDRINFO \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6) + +/* 6120 specific hardware errors... 
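+ * Each entry pairs one of the mask bits defined above with the string
+ * that qib_format_hwerrors() prints when that bit is seen.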
+ */
+static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
+	/* generic hardware errors */
+	QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
+	QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
+
+	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
+			  "TXE PIOBUF Memory Parity"),
+	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
+			  "TXE PIOPBC Memory Parity"),
+	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
+			  "TXE PIOLAUNCHFIFO Memory Parity"),
+
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
+			  "RXE RCVBUF Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
+			  "RXE LOOKUPQ Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
+			  "RXE EAGERTID Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
+			  "RXE EXPTID Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
+			  "RXE FLAGBUF Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
+			  "RXE DATAINFO Memory Parity"),
+	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
+			  "RXE HDRINFO Memory Parity"),
+
+	/* chip-specific hardware errors */
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
+			  "PCIe Poisoned TLP"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
+			  "PCIe completion timeout"),
+	/*
+	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
+	 * parity or memory parity error failures, because most likely we
+	 * won't be able to talk to the core of the chip. Nonetheless, we
+	 * might see them, if they are in parts of the PCIe core that aren't
+	 * essential.
+	 */
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
+			  "PCIePLL1"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
+			  "PCIePLL0"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
+			  "PCIe XTLH core parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
+			  "PCIe ADM TX core parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
+			  "PCIe ADM RX core parity"),
+	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
+			  "SerDes PLL"),
+};
+
+#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+		QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+	/* variables for sanity checking interrupt and errors */
+#define IB_HWE_BITSEXTANT \
+	(HWE_MASK(RXEMemParityErr) | \
+	 HWE_MASK(TXEMemParityErr) | \
+	 (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
+	  QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
+	 QLOGIC_IB_HWE_PCIE1PLLFAILED | \
+	 QLOGIC_IB_HWE_PCIE0PLLFAILED | \
+	 QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
+	 QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
+	 QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
+	 QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
+	 QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
+	 HWE_MASK(PowerOnBISTFailed) | \
+	 QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+	 QLOGIC_IB_HWE_COREPLL_RFSLIP | \
+	 QLOGIC_IB_HWE_SERDESPLLFAILED | \
+	 HWE_MASK(IBCBusToSPCParityErr) | \
+	 HWE_MASK(IBCBusFromSPCParityErr))
+
+#define IB_E_BITSEXTANT \
+	(ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
+	 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
+	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
+	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
+	 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
+	 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
+	 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
+	 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
+	 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
+	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
+	 ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
+	 ERR_MASK(SendDroppedSmpPktErr) | \
+	 ERR_MASK(SendDroppedDataPktErr) | \
+	 ERR_MASK(SendPioArmLaunchErr) | \
+	 ERR_MASK(SendUnexpectedPktNumErr) | \
+	 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
+	 ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
+	 ERR_MASK(HardwareErr))
+
+#define QLOGIC_IB_E_PKTERRS ( \
+		ERR_MASK(SendPktLenErr) | \
+		ERR_MASK(SendDroppedDataPktErr) | \
+		ERR_MASK(RcvVCRCErr) | \
+		ERR_MASK(RcvICRCErr) | \
+		ERR_MASK(RcvShortPktLenErr) | \
+		ERR_MASK(RcvEBPErr))
+
+/* These are all rcv-related errors which we want to count for stats */
+#define E_SUM_PKTERRS \
+	(ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
+	 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
+	 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
+	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+	 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
+	 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
+
+/* These are all send-related errors which we want to count for stats */
+#define E_SUM_ERRS \
+	(ERR_MASK(SendPioArmLaunchErr) | \
+	 ERR_MASK(SendUnexpectedPktNumErr) | \
+	 ERR_MASK(SendDroppedDataPktErr) | \
+	 ERR_MASK(SendDroppedSmpPktErr) | \
+	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
+	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+	 ERR_MASK(InvalidAddrErr))
+
+/*
+ * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors
+ * (more can arrive while we are still cleaning up, and each needs to
+ * be cancelled as it happens), and we don't ignore errors that are
+ * unrelated to freeze mode and cancelling buffers.
+ */
+#define E_SPKT_ERRS_IGNORE \
+	(ERR_MASK(SendDroppedDataPktErr) | \
+	 ERR_MASK(SendDroppedSmpPktErr) | \
+	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
+	 ERR_MASK(SendPktLenErr))
+
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received.  This doesn't cover things
+ * like EBP or VCRC that can be the result of a send racing with a
+ * link state change, so that we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+	(ERR_MASK(SendDroppedDataPktErr) | \
+	 ERR_MASK(SendDroppedSmpPktErr) | \
+	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+	 ERR_MASK(RcvUnexpectedCharErr))
+
+static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
+			       u32, unsigned long);
+
+/*
+ * On platforms using this chip, and not having ordered WC stores, we
+ * can get TXE parity errors due to speculative reads to the PIO buffers,
+ * and this, due to a chip issue, can result in (many) false parity error
+ * reports.  So it's a debug print on those, and an info print on systems
+ * where the speculative reads don't occur.
+ */
+static void qib_6120_txe_recover(struct qib_devdata *dd)
+{
+	if (!qib_unordered_wc())
+		qib_devinfo(dd->pcidev,
+			    "Recovering from TXE PIO parity error\n");
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+	if (enable) {
+		if (dd->flags & QIB_BADINTR)
+			return;
+		qib_write_kreg(dd, kr_intmask, ~0ULL);
+		/* force re-interrupt of any pending interrupts.
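+		 * (Writing 0 to a bit in IntClear whose IntStatus bit is
+		 * still set causes the chip to raise the interrupt again;
+		 * qib_6120_clear_freeze() below relies on the same trick.)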
+		 */
+		qib_write_kreg(dd, kr_intclear, 0ULL);
+	} else
+		qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_6120_clear_freeze(struct qib_devdata *dd)
+{
+	/* disable error interrupts, to avoid confusion */
+	qib_write_kreg(dd, kr_errmask, 0ULL);
+
+	/* also disable interrupts; errormask is sometimes overwritten */
+	qib_6120_set_intr_state(dd, 0);
+
+	qib_cancel_sends(dd->pport);
+
+	/* clear the freeze, and be sure chip saw it */
+	qib_write_kreg(dd, kr_control, dd->control);
+	qib_read_kreg32(dd, kr_scratch);
+
+	/* force in-memory update now we are out of freeze */
+	qib_force_pio_avail_update(dd);
+
+	/*
+	 * Force a new interrupt if any hwerr, error or interrupt bits are
+	 * still set, and clear "safe" send packet errors related to freeze
+	 * and cancelling sends.  Re-enable error interrupts before possible
+	 * force of re-interrupt on pending interrupts.
+	 */
+	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+	qib_6120_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_handle_6120_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Reuse the same message buffer as handle_6120_errors() to avoid
+ * excessive stack use.  Most hardware errors are catastrophic, but
+ * for right now we'll print them and continue.
+ */
+static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
+				     size_t msgl)
+{
+	u64 hwerrs;
+	u32 bits, ctrl;
+	int isfatal = 0;
+	char *bitsmsg;
+	int log_idx;
+
+	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+	if (!hwerrs)
+		return;
+	if (hwerrs == ~0ULL) {
+		qib_dev_err(dd, "Read of hardware error status failed "
+			    "(all bits set); ignoring\n");
+		return;
+	}
+	qib_stats.sps_hwerrs++;
+
+	/*
+	 * Always clear the error status register, except MEMBISTFAIL,
+	 * regardless of whether we continue or stop using the chip.
+	 * We want that set so we know it failed, even across driver reload.
+	 * We'll still ignore it in the hwerrmask.  We do this partly for
+	 * diagnostics, but also for support.
+	 */
+	qib_write_kreg(dd, kr_hwerrclear,
+		       hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+	hwerrs &= dd->cspec->hwerrmask;
+
+	/* We log some errors to EEPROM, check if we have any of those. */
+	for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+		if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+			qib_inc_eeprom_err(dd, log_idx, 1);
+
+	/*
+	 * Make sure we get this much out, unless told to be quiet,
+	 * or it's occurred within the last 5 seconds.
+	 */
+	if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
+		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+			    "(cleared)\n", (unsigned long long) hwerrs);
+
+	if (hwerrs & ~IB_HWE_BITSEXTANT)
+		qib_dev_err(dd, "hwerror interrupt with unknown errors "
+			    "%llx set\n", (unsigned long long)
+			    (hwerrs & ~IB_HWE_BITSEXTANT));
+
+	ctrl = qib_read_kreg32(dd, kr_control);
+	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+		/*
+		 * Parity errors in send memory are recoverable:
+		 * just cancel the send (if indicated in sendbuffererror),
+		 * count the occurrence, unfreeze (if no other handled
+		 * hardware error bits are set), and continue.  They can
+		 * occur if a processor speculative read is done to the PIO
+		 * buffer while we are sending a packet, for example.
+		 */
+		if (hwerrs & TXE_PIO_PARITY) {
+			qib_6120_txe_recover(dd);
+			hwerrs &= ~TXE_PIO_PARITY;
+		}
+
+		if (!hwerrs) {
+			static u32 freeze_cnt;
+
+			freeze_cnt++;
+			qib_6120_clear_freeze(dd);
+		} else
+			isfatal = 1;
+	}
+
+	*msg = '\0';
+
+	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+		isfatal = 1;
+		strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
+			" unusable]", msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
+			    ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
+
+	bitsmsg = dd->cspec->bitsmsgbuf;
+	if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+		      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+			 "[PCIe Mem Parity Errs %x] ", bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+
+	if (hwerrs & _QIB_PLL_FAIL) {
+		isfatal = 1;
+		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+			 "[PLL failed (%llx), InfiniPath hardware unusable]",
+			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+		strlcat(msg, bitsmsg, msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+		/*
+		 * If it occurs, it is left masked since the external
+		 * interface is unused.
+		 */
+		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	if (hwerrs)
+		/*
+		 * If any bits are set that we aren't ignoring, only
+		 * make the complaint once, in case it's stuck
+		 * or recurring, and we get here multiple
+		 * times.
+		 */
+		qib_dev_err(dd, "%s hardware error\n", msg);
+	else
+		*msg = 0; /* recovered from all of them */
+
+	if (isfatal && !dd->diag_client) {
+		qib_dev_err(dd, "Fatal Hardware Error, no longer"
+			    " usable, SN %.16s\n", dd->serial);
+		/*
+		 * For /sys status file and user programs to print; if no
+		 * trailing brace is copied, we'll know it was truncated.
+		 */
+		if (dd->freezemsg)
+			snprintf(dd->freezemsg, dd->freezelen,
+				 "{%s}", msg);
+		qib_disable_after_error(dd);
+	}
+}
+
+/*
+ * Decode the error status into strings, deciding whether to always
+ * print it or not depending on "normal packet errors" vs everything
+ * else.  Return 1 if "real" errors, otherwise 0 if only packet
+ * errors, so the caller can decide what to print with the string.
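+ *
+ * Example: for an err with RcvICRCErr and RcvHdrLenErr both set, buf
+ * comes back as "CRC rhdrlen " and the return is 1, since a non-packet
+ * error is present.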
+ */ +static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen, + u64 err) +{ + int iserr = 1; + + *buf = '\0'; + if (err & QLOGIC_IB_E_PKTERRS) { + if (!(err & ~QLOGIC_IB_E_PKTERRS)) + iserr = 0; + if ((err & ERR_MASK(RcvICRCErr)) && + !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr)))) + strlcat(buf, "CRC ", blen); + if (!iserr) + goto done; + } + if (err & ERR_MASK(RcvHdrLenErr)) + strlcat(buf, "rhdrlen ", blen); + if (err & ERR_MASK(RcvBadTidErr)) + strlcat(buf, "rbadtid ", blen); + if (err & ERR_MASK(RcvBadVersionErr)) + strlcat(buf, "rbadversion ", blen); + if (err & ERR_MASK(RcvHdrErr)) + strlcat(buf, "rhdr ", blen); + if (err & ERR_MASK(RcvLongPktLenErr)) + strlcat(buf, "rlongpktlen ", blen); + if (err & ERR_MASK(RcvMaxPktLenErr)) + strlcat(buf, "rmaxpktlen ", blen); + if (err & ERR_MASK(RcvMinPktLenErr)) + strlcat(buf, "rminpktlen ", blen); + if (err & ERR_MASK(SendMinPktLenErr)) + strlcat(buf, "sminpktlen ", blen); + if (err & ERR_MASK(RcvFormatErr)) + strlcat(buf, "rformaterr ", blen); + if (err & ERR_MASK(RcvUnsupportedVLErr)) + strlcat(buf, "runsupvl ", blen); + if (err & ERR_MASK(RcvUnexpectedCharErr)) + strlcat(buf, "runexpchar ", blen); + if (err & ERR_MASK(RcvIBFlowErr)) + strlcat(buf, "ribflow ", blen); + if (err & ERR_MASK(SendUnderRunErr)) + strlcat(buf, "sunderrun ", blen); + if (err & ERR_MASK(SendPioArmLaunchErr)) + strlcat(buf, "spioarmlaunch ", blen); + if (err & ERR_MASK(SendUnexpectedPktNumErr)) + strlcat(buf, "sunexperrpktnum ", blen); + if (err & ERR_MASK(SendDroppedSmpPktErr)) + strlcat(buf, "sdroppedsmppkt ", blen); + if (err & ERR_MASK(SendMaxPktLenErr)) + strlcat(buf, "smaxpktlen ", blen); + if (err & ERR_MASK(SendUnsupportedVLErr)) + strlcat(buf, "sunsupVL ", blen); + if (err & ERR_MASK(InvalidAddrErr)) + strlcat(buf, "invalidaddr ", blen); + if (err & ERR_MASK(RcvEgrFullErr)) + strlcat(buf, "rcvegrfull ", blen); + if (err & ERR_MASK(RcvHdrFullErr)) + strlcat(buf, "rcvhdrfull ", blen); + if (err & ERR_MASK(IBStatusChanged)) + strlcat(buf, "ibcstatuschg ", blen); + if (err & ERR_MASK(RcvIBLostLinkErr)) + strlcat(buf, "riblostlink ", blen); + if (err & ERR_MASK(HardwareErr)) + strlcat(buf, "hardware ", blen); + if (err & ERR_MASK(ResetNegated)) + strlcat(buf, "reset ", blen); +done: + return iserr; +} + +/* + * Called when we might have an error that is specific to a particular + * PIO buffer, and may need to cancel that buffer, so it can be re-used. + */ +static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd) +{ + unsigned long sbuf[2]; + struct qib_devdata *dd = ppd->dd; + + /* + * It's possible that sendbuffererror could have bits set; might + * have already done this as a result of hardware error handling. 
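+	 * Each set bit in sbuf[] names one PIO buffer (bit N of sbuf[0]
+	 * is buffer N, sbuf[1] carries buffers 64 and up), out of the
+	 * piobcnt2k + piobcnt4k buffers passed below.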
+ */ + sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror); + sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1); + + if (sbuf[0] || sbuf[1]) + qib_disarm_piobufs_set(dd, sbuf, + dd->piobcnt2k + dd->piobcnt4k); +} + +static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs) +{ + int ret = 1; + u32 ibstate = qib_6120_iblink_state(ibcs); + u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov); + + if (linkrecov != dd->cspec->lastlinkrecov) { + /* and no more until active again */ + dd->cspec->lastlinkrecov = 0; + qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN); + ret = 0; + } + if (ibstate == IB_PORT_ACTIVE) + dd->cspec->lastlinkrecov = + read_6120_creg32(dd, cr_iblinkerrrecov); + return ret; +} + +static void handle_6120_errors(struct qib_devdata *dd, u64 errs) +{ + char *msg; + u64 ignore_this_time = 0; + u64 iserr = 0; + int log_idx; + struct qib_pportdata *ppd = dd->pport; + u64 mask; + + /* don't report errors that are masked */ + errs &= dd->cspec->errormask; + msg = dd->cspec->emsgbuf; + + /* do these first, they are most important */ + if (errs & ERR_MASK(HardwareErr)) + qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); + else + for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) + if (errs & dd->eep_st_masks[log_idx].errs_to_log) + qib_inc_eeprom_err(dd, log_idx, 1); + + if (errs & ~IB_E_BITSEXTANT) + qib_dev_err(dd, "error interrupt with unknown errors " + "%llx set\n", + (unsigned long long) (errs & ~IB_E_BITSEXTANT)); + + if (errs & E_SUM_ERRS) { + qib_disarm_6120_senderrbufs(ppd); + if ((errs & E_SUM_LINK_PKTERRS) && + !(ppd->lflags & QIBL_LINKACTIVE)) { + /* + * This can happen when trying to bring the link + * up, but the IB link changes state at the "wrong" + * time. The IB logic then complains that the packet + * isn't valid. We don't want to confuse people, so + * we just don't print them, except at debug + */ + ignore_this_time = errs & E_SUM_LINK_PKTERRS; + } + } else if ((errs & E_SUM_LINK_PKTERRS) && + !(ppd->lflags & QIBL_LINKACTIVE)) { + /* + * This can happen when SMA is trying to bring the link + * up, but the IB link changes state at the "wrong" time. + * The IB logic then complains that the packet isn't + * valid. We don't want to confuse people, so we just + * don't print them, except at debug + */ + ignore_this_time = errs & E_SUM_LINK_PKTERRS; + } + + qib_write_kreg(dd, kr_errclear, errs); + + errs &= ~ignore_this_time; + if (!errs) + goto done; + + /* + * The ones we mask off are handled specially below + * or above. + */ + mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) | + ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr); + qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); + + if (errs & E_SUM_PKTERRS) + qib_stats.sps_rcverrs++; + if (errs & E_SUM_ERRS) + qib_stats.sps_txerrs++; + + iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS); + + if (errs & ERR_MASK(IBStatusChanged)) { + u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus); + u32 ibstate = qib_6120_iblink_state(ibcs); + int handle = 1; + + if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov) + handle = chk_6120_linkrecovery(dd, ibcs); + /* + * Since going into a recovery state causes the link state + * to go down and since recovery is transitory, it is better + * if we "miss" ever seeing the link training state go into + * recovery (i.e., ignore this transition for link state + * special handling purposes) without updating lastibcstat. 
+ */
+	if (handle && qib_6120_phys_portstate(ibcs) ==
+	    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+		handle = 0;
+	if (handle)
+		qib_handle_e_ibstatuschanged(ppd, ibcs);
+	}
+
+	if (errs & ERR_MASK(ResetNegated)) {
+		qib_dev_err(dd, "Got reset, requires re-init "
+			    "(unload and reload driver)\n");
+		dd->flags &= ~QIB_INITTED; /* needs re-init */
+		/* mark as having had error */
+		*dd->devstatusp |= QIB_STATUS_HWERROR;
+		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+	}
+
+	if (*msg && iserr)
+		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+	if (ppd->state_wanted & ppd->lflags)
+		wake_up_interruptible(&ppd->state_wait);
+
+	/*
+	 * If there were hdrq or egrfull errors, wake up any processes
+	 * waiting in poll. We used to try to check which contexts had
+	 * the overflow, but given the cost of that and the chip reads
+	 * to support it, it's better to just wake everybody up if we
+	 * get an overflow; waiters can poll again if it's not them.
+	 */
+	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+		qib_handle_urcv(dd, ~0U);
+		if (errs & ERR_MASK(RcvEgrFullErr))
+			qib_stats.sps_buffull++;
+		else
+			qib_stats.sps_hdrfull++;
+	}
+done:
+	return;
+}
+
+/**
+ * qib_6120_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_6120_init_hwerrors(struct qib_devdata *dd)
+{
+	u64 val;
+	u64 extsval;
+
+	extsval = qib_read_kreg64(dd, kr_extstatus);
+
+	if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
+		qib_dev_err(dd, "MemBIST did not complete!\n");
+
+	/* init so all hwerrors interrupt, and enter freeze, adjust below */
+	val = ~0ULL;
+	if (dd->minrev < 2) {
+		/*
+		 * Avoid problem with internal interface bus parity
+		 * checking. Fixed in Rev2.
+		 */
+		val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
+	}
+	/* avoid some intel cpu's speculative read freeze mode issue */
+	val &= ~TXEMEMPARITYERR_PIOBUF;
+
+	dd->cspec->hwerrmask = val;
+
+	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+	/* clear all */
+	qib_write_kreg(dd, kr_errclear, ~0ULL);
+	/* enable errors that are masked, at least this first time. */
+	qib_write_kreg(dd, kr_errmask, ~0ULL);
+	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+	/* clear any interrupts up to this point (ints still not enabled) */
+	qib_write_kreg(dd, kr_intclear, ~0ULL);
+
+	qib_write_kreg(dd, kr_rcvbthqp,
+		       dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
+		       QIB_KD_QP);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses + */ +static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable) +{ + if (enable) { + qib_write_kreg(dd, kr_errclear, + ERR_MASK(SendPioArmLaunchErr)); + dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr); + } else + dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr); + qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); +} + +/* + * Formerly took parameter <which> in pre-shifted, + * pre-merged form with LinkCmd and LinkInitCmd + * together, and assuming the zero was NOP. + */ +static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd, + u16 linitcmd) +{ + u64 mod_wd; + struct qib_devdata *dd = ppd->dd; + unsigned long flags; + + if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) { + /* + * If we are told to disable, note that so link-recovery + * code does not attempt to bring us back up. + */ + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags |= QIBL_IB_LINK_DISABLED; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) { + /* + * Any other linkinitcmd will lead to LINKDOWN and then + * to INIT (if all is well), so clear flag to let + * link-recovery code attempt to bring us back up. + */ + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_LINK_DISABLED; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + } + + mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) | + (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); + + qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd); + /* write to chip to prevent back-to-back writes of control reg */ + qib_write_kreg(dd, kr_scratch, 0); +} + +/** + * qib_6120_bringup_serdes - bring up the serdes + * @dd: the qlogic_ib device + */ +static int qib_6120_bringup_serdes(struct qib_pportdata *ppd) +{ + struct qib_devdata *dd = ppd->dd; + u64 val, config1, prev_val, hwstat, ibc; + + /* Put IBC in reset, sends disabled */ + dd->control &= ~QLOGIC_IB_C_LINKENABLE; + qib_write_kreg(dd, kr_control, 0ULL); + + dd->cspec->ibdeltainprog = 1; + dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr); + dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov); + + /* flowcontrolwatermark is in units of KBytes */ + ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark); + /* + * How often flowctrl sent. More or less in usecs; balance against + * watermark value, so that in theory senders always get a flow + * control update in time to not let the IB link go idle. + */ + ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod); + /* max error tolerance */ + dd->cspec->lli_thresh = 0xf; + ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold); + /* use "real" buffer space for */ + ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale); + /* IB credit flow control. */ + ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold); + /* + * set initial max size pkt IBC will send, including ICRC; it's the + * PIO buffer size in dwords, less 1; also see qib_set_mtu() + */ + ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen); + dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */ + + /* initially come up waiting for TS1, without sending anything. */ + val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << + QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); + qib_write_kreg(dd, kr_ibcctrl, val); + + val = qib_read_kreg64(dd, kr_serdes_cfg0); + config1 = qib_read_kreg64(dd, kr_serdes_cfg1); + + /* + * Force reset on, also set rxdetect enable. 
Must do before reading
+	 * serdesstatus at least for simulation, or some of the bits in
+	 * serdes status will come back as undefined and cause simulation
+	 * failures
+	 */
+	val |= SYM_MASK(SerdesCfg0, ResetPLL) |
+		SYM_MASK(SerdesCfg0, RxDetEnX) |
+		(SYM_MASK(SerdesCfg0, L1PwrDnA) |
+		 SYM_MASK(SerdesCfg0, L1PwrDnB) |
+		 SYM_MASK(SerdesCfg0, L1PwrDnC) |
+		 SYM_MASK(SerdesCfg0, L1PwrDnD));
+	qib_write_kreg(dd, kr_serdes_cfg0, val);
+	/* be sure chip saw it */
+	qib_read_kreg64(dd, kr_scratch);
+	udelay(5); /* need pll reset set at least for a bit */
+	/*
+	 * after PLL is reset, set the per-lane Resets and TxIdle and
+	 * clear the PLL reset and rxdetect (to get falling edge).
+	 * Leave L1PWR bits set (permanently)
+	 */
+	val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
+		 SYM_MASK(SerdesCfg0, ResetPLL) |
+		 (SYM_MASK(SerdesCfg0, L1PwrDnA) |
+		  SYM_MASK(SerdesCfg0, L1PwrDnB) |
+		  SYM_MASK(SerdesCfg0, L1PwrDnC) |
+		  SYM_MASK(SerdesCfg0, L1PwrDnD)));
+	val |= (SYM_MASK(SerdesCfg0, ResetA) |
+		SYM_MASK(SerdesCfg0, ResetB) |
+		SYM_MASK(SerdesCfg0, ResetC) |
+		SYM_MASK(SerdesCfg0, ResetD)) |
+		SYM_MASK(SerdesCfg0, TxIdeEnX);
+	qib_write_kreg(dd, kr_serdes_cfg0, val);
+	/* be sure chip saw it */
+	(void) qib_read_kreg64(dd, kr_scratch);
+	/* need PLL reset clear for at least 11 usec before lane
+	 * resets cleared; give it a few more to be sure */
+	udelay(15);
+	val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
+		  SYM_MASK(SerdesCfg0, ResetB) |
+		  SYM_MASK(SerdesCfg0, ResetC) |
+		  SYM_MASK(SerdesCfg0, ResetD)) |
+		 SYM_MASK(SerdesCfg0, TxIdeEnX));
+
+	qib_write_kreg(dd, kr_serdes_cfg0, val);
+	/* be sure chip saw it */
+	(void) qib_read_kreg64(dd, kr_scratch);
+
+	val = qib_read_kreg64(dd, kr_xgxs_cfg);
+	prev_val = val;
+	if (val & QLOGIC_IB_XGXS_RESET)
+		val &= ~QLOGIC_IB_XGXS_RESET;
+	if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
+		/* need to compensate for Tx inversion in partner */
+		val &= ~SYM_MASK(XGXSCfg, polarity_inv);
+		val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
+	}
+	if (val != prev_val)
+		qib_write_kreg(dd, kr_xgxs_cfg, val);
+
+	val = qib_read_kreg64(dd, kr_serdes_cfg0);
+
+	/* clear current and de-emphasis bits */
+	config1 &= ~0x0ffffffff00ULL;
+	/* set current to 20ma */
+	config1 |= 0x00000000000ULL;
+	/* set de-emphasis to -5.68dB */
+	config1 |= 0x0cccc000000ULL;
+	qib_write_kreg(dd, kr_serdes_cfg1, config1);
+
+	/* base and port guid same for single port */
+	ppd->guid = dd->base_guid;
+
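The bring-up sequence above leans on a strict write, flush-by-read, delay discipline so the serdes is guaranteed to see each phase for a minimum time (the driver's udelay(5) and udelay(15) above). A compressed, standalone model of that three-phase reset; the register indices, bit positions, and fake register file are placeholders, not the chip's real layout:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t regs[4];                /* fake register file */
    static void wr(unsigned r, uint64_t v) { regs[r] = v; }
    static uint64_t rd(unsigned r) { return regs[r]; }

    #define CFG0      0
    #define SCRATCH   1
    #define RESET_PLL (1ULL << 0)           /* placeholder bit positions */
    #define LANE_RST  (1ULL << 1)
    #define TX_IDLE   (1ULL << 2)

    static void serdes_reset_sketch(void)
    {
        uint64_t v = rd(CFG0);

        v |= RESET_PLL;                 /* phase 1: hold PLL in reset */
        wr(CFG0, v);
        (void) rd(SCRATCH);             /* flush the posted write */
        /* driver: udelay(5), PLL must see reset for a minimum time */

        v &= ~RESET_PLL;                /* phase 2: release PLL, hold lanes */
        v |= LANE_RST | TX_IDLE;
        wr(CFG0, v);
        (void) rd(SCRATCH);
        /* driver: udelay(15), reset clear ~11 usec before lane release */

        v &= ~(LANE_RST | TX_IDLE);     /* phase 3: release the lanes */
        wr(CFG0, v);
        (void) rd(SCRATCH);
    }

    int main(void)
    {
        serdes_reset_sketch();
        printf("final cfg0=0x%llx\n", (unsigned long long) rd(CFG0));
        return 0;
    }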
+	/*
+	 * The process of setting and un-resetting the serdes normally
+	 * causes a serdes PLL error, so check for that and clear it
+	 * here. Also clear the hwerr bit in errstatus, but not others.
+	 */
+	hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
+	if (hwstat) {
+		/* should just have PLL, clear all set, in any case */
+		if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
+			qib_write_kreg(dd, kr_hwerrclear, hwstat);
+		qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
+	}
+
+	dd->control |= QLOGIC_IB_C_LINKENABLE;
+	dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
+	qib_write_kreg(dd, kr_control, dd->control);
+
+	return 0;
+}
+
+/**
+ * qib_6120_quiet_serdes - set serdes to txidle
+ * @ppd: physical port of the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 val;
+
+	qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+	/* disable IBC */
+	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+	qib_write_kreg(dd, kr_control,
+		       dd->control | QLOGIC_IB_C_FREEZEMODE);
+
+	if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
+	    dd->cspec->ibdeltainprog) {
+		u64 diagc;
+
+		/* enable counter writes */
+		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+		qib_write_kreg(dd, kr_hwdiagctrl,
+			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+		if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
+			val = read_6120_creg32(dd, cr_ibsymbolerr);
+			if (dd->cspec->ibdeltainprog)
+				val -= val - dd->cspec->ibsymsnap;
+			val -= dd->cspec->ibsymdelta;
+			write_6120_creg(dd, cr_ibsymbolerr, val);
+		}
+		if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
+			val = read_6120_creg32(dd, cr_iblinkerrrecov);
+			if (dd->cspec->ibdeltainprog)
+				val -= val - dd->cspec->iblnkerrsnap;
+			val -= dd->cspec->iblnkerrdelta;
+			write_6120_creg(dd, cr_iblinkerrrecov, val);
+		}
+
+		/* and disable counter writes */
+		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+	}
+
+	val = qib_read_kreg64(dd, kr_serdes_cfg0);
+	val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
+	qib_write_kreg(dd, kr_serdes_cfg0, val);
+}
+
+/**
+ * qib_6120_setup_setextled - set the state of the two external LEDs
+ * @ppd: the qlogic_ib port
+ * @on: whether the link is up or not
+ *
+ * The exact combo of LEDs if on is true is determined by looking
+ * at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ */
+static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
+{
+	u64 extctl, val, lst, ltst;
+	unsigned long flags;
+	struct qib_devdata *dd = ppd->dd;
+
+	/*
+	 * The diags use the LED to indicate diag info, so we leave
+	 * the external LED alone when the diags are running.
+	 */
+	if (dd->diag_client)
+		return;
+
+	/* Allow override of LED display for, e.g. Locating system in rack */
+	if (ppd->led_override) {
+		ltst = (ppd->led_override & QIB_LED_PHYS) ?
+			IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,
+		lst = (ppd->led_override & QIB_LED_LOG) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN; + } else if (on) { + val = qib_read_kreg64(dd, kr_ibcstatus); + ltst = qib_6120_phys_portstate(val); + lst = qib_6120_iblink_state(val); + } else { + ltst = 0; + lst = 0; + } + + spin_lock_irqsave(&dd->cspec->gpio_lock, flags); + extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) | + SYM_MASK(EXTCtrl, LEDPriPortYellowOn)); + + if (ltst == IB_PHYSPORTSTATE_LINKUP) + extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn); + if (lst == IB_PORT_ACTIVE) + extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn); + dd->cspec->extctrl = extctl; + qib_write_kreg(dd, kr_extctrl, extctl); + spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); +} + +static void qib_6120_free_irq(struct qib_devdata *dd) +{ + if (dd->cspec->irq) { + free_irq(dd->cspec->irq, dd); + dd->cspec->irq = 0; + } + qib_nomsi(dd); +} + +/** + * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff + * @dd: the qlogic_ib device + * + * This is called during driver unload. +*/ +static void qib_6120_setup_cleanup(struct qib_devdata *dd) +{ + qib_6120_free_irq(dd); + kfree(dd->cspec->cntrs); + kfree(dd->cspec->portcntrs); + if (dd->cspec->dummy_hdrq) { + dma_free_coherent(&dd->pcidev->dev, + ALIGN(dd->rcvhdrcnt * + dd->rcvhdrentsize * + sizeof(u32), PAGE_SIZE), + dd->cspec->dummy_hdrq, + dd->cspec->dummy_hdrq_phys); + dd->cspec->dummy_hdrq = NULL; + } +} + +static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint) +{ + unsigned long flags; + + spin_lock_irqsave(&dd->sendctrl_lock, flags); + if (needint) + dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail); + else + dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail); + qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); + qib_write_kreg(dd, kr_scratch, 0ULL); + spin_unlock_irqrestore(&dd->sendctrl_lock, flags); +} + +/* + * handle errors and unusual events first, separate function + * to improve cache hits for fast path interrupt handling + */ +static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat) +{ + if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT)) + qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n", + istat & ~QLOGIC_IB_I_BITSEXTANT); + + if (istat & QLOGIC_IB_I_ERROR) { + u64 estat = 0; + + qib_stats.sps_errints++; + estat = qib_read_kreg64(dd, kr_errstatus); + if (!estat) + qib_devinfo(dd->pcidev, "error interrupt (%Lx), " + "but no error bits set!\n", istat); + handle_6120_errors(dd, estat); + } + + if (istat & QLOGIC_IB_I_GPIO) { + u32 gpiostatus; + u32 to_clear = 0; + + /* + * GPIO_3..5 on IBA6120 Rev2 chips indicate + * errors that we need to count. + */ + gpiostatus = qib_read_kreg32(dd, kr_gpio_status); + /* First the error-counter case. */ + if (gpiostatus & GPIO_ERRINTR_MASK) { + /* want to clear the bits we see asserted. */ + to_clear |= (gpiostatus & GPIO_ERRINTR_MASK); + + /* + * Count appropriately, clear bits out of our copy, + * as they have been "handled". + */ + if (gpiostatus & (1 << GPIO_RXUVL_BIT)) + dd->cspec->rxfc_unsupvl_errs++; + if (gpiostatus & (1 << GPIO_OVRUN_BIT)) + dd->cspec->overrun_thresh_errs++; + if (gpiostatus & (1 << GPIO_LLI_BIT)) + dd->cspec->lli_errs++; + gpiostatus &= ~GPIO_ERRINTR_MASK; + } + if (gpiostatus) { + /* + * Some unexpected bits remain. If they could have + * caused the interrupt, complain and clear. + * To avoid repetition of this condition, also clear + * the mask. It is almost certainly due to error. 
+			 */
+			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+
+			/*
+			 * Also check that the chip reflects our shadow,
+			 * and report issues. If they caused the interrupt,
+			 * we will suppress by refreshing from the shadow.
+			 */
+			if (mask & gpiostatus) {
+				to_clear |= (gpiostatus & mask);
+				dd->cspec->gpio_mask &= ~(gpiostatus & mask);
+				qib_write_kreg(dd, kr_gpio_mask,
+					       dd->cspec->gpio_mask);
+			}
+		}
+		if (to_clear)
+			qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
+	}
+}
+
+static irqreturn_t qib_6120intr(int irq, void *data)
+{
+	struct qib_devdata *dd = data;
+	irqreturn_t ret;
+	u32 istat, ctxtrbits, rmask, crcs = 0;
+	unsigned i;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		ret = IRQ_HANDLED;
+		goto bail;
+	}
+
+	istat = qib_read_kreg32(dd, kr_intstatus);
+
+	if (unlikely(!istat)) {
+		ret = IRQ_NONE; /* not our interrupt, or already handled */
+		goto bail;
+	}
+	if (unlikely(istat == -1)) {
+		qib_bad_intrstatus(dd);
+		/* don't know if it was our interrupt or not */
+		ret = IRQ_NONE;
+		goto bail;
+	}
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+	if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+			      QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+		unlikely_6120_intr(dd, istat);
+
+	/*
+	 * Clear the interrupt bits we found set, relatively early, so we
+	 * "know" the chip will have seen this by the time we process
+	 * the queue, and will re-interrupt if necessary. The processor
+	 * itself won't take the interrupt again until we return.
+	 */
+	qib_write_kreg(dd, kr_intclear, istat);
+
+	/*
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway.
+	 */
+	ctxtrbits = istat &
+		((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+		 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
+	if (ctxtrbits) {
+		rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+			(1U << QLOGIC_IB_I_RCVURG_SHIFT);
+		for (i = 0; i < dd->first_user_ctxt; i++) {
+			if (ctxtrbits & rmask) {
+				ctxtrbits &= ~rmask;
+				crcs += qib_kreceive(dd->rcd[i],
+						     &dd->cspec->lli_counter,
+						     NULL);
+			}
+			rmask <<= 1;
+		}
+		if (crcs) {
+			u32 cntr = dd->cspec->lli_counter;
+
+			cntr += crcs;
+			if (cntr) {
+				if (cntr > dd->cspec->lli_thresh) {
+					dd->cspec->lli_counter = 0;
+					dd->cspec->lli_errs++;
+				} else
+					dd->cspec->lli_counter += cntr;
+			}
+		}
+
+		if (ctxtrbits) {
+			ctxtrbits =
+				(ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+				(ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
+			qib_handle_urcv(dd, ctxtrbits);
+		}
+	}
+
+	if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+		qib_ib_piobufavail(dd);
+
+	ret = IRQ_HANDLED;
+bail:
+	return ret;
+}
+
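The setup routine that follows enables the extra GPIO error interrupts only when the chip's minor revision is above 1 (Rev2+ routes additional error sources through GPIO pins). A standalone sketch of that revision gate; the field offsets and mask value are invented for illustration, the real ones come from the chip headers:

    #include <stdint.h>
    #include <stdio.h>

    /* invented field layout, stand-in for SYM_FIELD(revision, ...) */
    #define CHIP_REV_MINOR_LSB  8
    #define CHIP_REV_MINOR_MASK 0xffULL
    #define GPIO_ERRINTR_BITS   0x38U   /* placeholder for GPIO_ERRINTR_MASK */

    static uint32_t gpio_mask_for(uint64_t revision, uint32_t cur_mask)
    {
        unsigned minor = (revision >> CHIP_REV_MINOR_LSB) & CHIP_REV_MINOR_MASK;

        /* Rev2 and later report extra errors via internal GPIO pins */
        if (minor > 1)
            cur_mask |= GPIO_ERRINTR_BITS;
        return cur_mask;
    }

    int main(void)
    {
        printf("rev1 mask=0x%x\n", gpio_mask_for(1ULL << 8, 0)); /* unchanged */
        printf("rev2 mask=0x%x\n", gpio_mask_for(2ULL << 8, 0)); /* 0x38 */
        return 0;
    }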
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ */
+static void qib_setup_6120_interrupt(struct qib_devdata *dd)
+{
+	/*
+	 * If the chip supports added error indication via GPIO pins,
+	 * enable interrupts on those bits so the interrupt routine
+	 * can count the events. Also set flag so interrupt routine
+	 * can know they are expected.
+	 */
+	if (SYM_FIELD(dd->revision, Revision_R,
+		      ChipRevMinor) > 1) {
+		/* Rev2+ reports extra errors via internal GPIO pins */
+		dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
+		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+	}
+
+	if (!dd->cspec->irq)
+		qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
+			    "work\n");
+	else {
+		int ret;
+
+		ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
+				  QIB_DRV_NAME, dd);
+		if (ret)
+			qib_dev_err(dd, "Couldn't setup interrupt "
+				    "(irq=%d): %d\n", dd->cspec->irq,
+				    ret);
+	}
+}
+
+/**
+ * pe_boardname - fill in the board name
+ * @dd: the qlogic_ib device
+ *
+ * info is based on the board revision register
+ */
+static void pe_boardname(struct qib_devdata *dd)
+{
+	char *n;
+	u32 boardid, namelen;
+
+	boardid = SYM_FIELD(dd->revision, Revision,
+			    BoardID);
+
+	switch (boardid) {
+	case 2:
+		n = "InfiniPath_QLE7140";
+		break;
+	default:
+		qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
+		n = "Unknown_InfiniPath_6120";
+		break;
+	}
+	namelen = strlen(n) + 1;
+	dd->boardname = kmalloc(namelen, GFP_KERNEL);
+	if (!dd->boardname)
+		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+	else
+		snprintf(dd->boardname, namelen, "%s", n);
+
+	if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
+		qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
+			    "%u.%u!\n", dd->majrev, dd->minrev);
+
+	snprintf(dd->boardversion, sizeof(dd->boardversion),
+		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+		 dd->majrev, dd->minrev,
+		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+}
+
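The reset path that follows sleeps progressively longer on each attempt, re-enables PCIe config space, and polls the revision register until its readback matches the expected value. A standalone sketch of that poll-until-match shape; the timings and the fake device are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* fake device that "comes back" on the third probe */
    static uint64_t fake_revision_reg(int attempt)
    {
        return attempt >= 3 ? 0x1234 : ~0ULL;
    }

    /* returns 1 once the readback matches the expected revision */
    static int wait_for_reset(uint64_t expected)
    {
        for (int i = 1; i <= 5; i++) {
            /* driver: msleep(1000 + (1 + i) * 2000) before each probe */
            if (fake_revision_reg(i) == expected) {
                printf("back after %d probes\n", i);
                return 1;
            }
        }
        return 0;   /* failed */
    }

    int main(void)
    {
        return wait_for_reset(0x1234) ? 0 : 1;
    }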
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context. If we need interrupt context, we can split
+ * it into two routines.
+ */
+static int qib_6120_setup_reset(struct qib_devdata *dd)
+{
+	u64 val;
+	int i;
+	int ret;
+	u16 cmdval;
+	u8 int_line, clinesz;
+
+	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+	/* Use ERROR so it shows up in logs, etc. */
+	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+	/* no interrupts till re-initted */
+	qib_6120_set_intr_state(dd, 0);
+
+	dd->cspec->ibdeltainprog = 0;
+	dd->cspec->ibsymdelta = 0;
+	dd->cspec->iblnkerrdelta = 0;
+
+	/*
+	 * Keep chip from being accessed until we are ready. Use
+	 * writeq() directly, to allow the write even though QIB_PRESENT
+	 * isn't set.
+	 */
+	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+	dd->int_counter = 0; /* so we check interrupts work again */
+	val = dd->control | QLOGIC_IB_C_RESET;
+	writeq(val, &dd->kregbase[kr_control]);
+	mb(); /* prevent compiler re-ordering around actual reset */
+
+	for (i = 1; i <= 5; i++) {
+		/*
+		 * Allow MBIST, etc. to complete; longer on each retry.
+		 * We sometimes get machine checks from bus timeout if no
+		 * response, so for now, make it *really* long.
+		 */
+		msleep(1000 + (1 + i) * 2000);
+
+		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+		/*
+		 * Use readq directly, so we don't need to mark it as PRESENT
+		 * until we get a successful indication that all is well.
+		 */
+		val = readq(&dd->kregbase[kr_revision]);
+		if (val == dd->revision) {
+			dd->flags |= QIB_PRESENT; /* it's back */
+			ret = qib_reinit_intr(dd);
+			goto bail;
+		}
+	}
+	ret = 0; /* failed */
+
+bail:
+	if (ret) {
+		if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+			qib_dev_err(dd, "Reset failed to setup PCIe or "
+				    "interrupts; continuing anyway\n");
+		/* clear the reset error, init error/hwerror mask */
+		qib_6120_init_hwerrors(dd);
+		/* for Rev2 error interrupts; nop for rev 1 */
+		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+	}
+	return ret;
+}
+
+/**
+ * qib_6120_put_tid - write a TID in chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for special locking etc.
+ * It's used for both the full cleanup on exit, as well as the normal
+ * setup and teardown.
+ */
+static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+			     u32 type, unsigned long pa)
+{
+	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+	unsigned long flags;
+	int tidx;
+	spinlock_t *tidlockp; /* select appropriate spinlock */
+
+	if (!dd->kregbase)
+		return;
+
+	if (pa != dd->tidinvalid) {
+		if (pa & ((1U << 11) - 1)) {
+			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+				    pa);
+			return;
+		}
+		pa >>= 11;
+		if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+			qib_dev_err(dd, "Physical page address 0x%lx "
+				    "larger than supported\n", pa);
+			return;
+		}
+
+		if (type == RCVHQ_RCV_TYPE_EAGER)
+			pa |= dd->tidtemplate;
+		else /* for now, always full 4KB page */
+			pa |= 2 << 29;
+	}
+
+	/*
+	 * Avoid chip issue by writing the scratch register
+	 * before and after the TID, and with an io write barrier.
+	 * We use a spinlock around the writes, so they can't intermix
+	 * with other TID (eager or expected) writes (the chip problem
+	 * is triggered by back to back TID writes). Unfortunately, this
+	 * call can be done from interrupt level for the ctxt 0 eager TIDs,
+	 * so we have to use irqsave locks.
+	 */
+	/*
+	 * Assumes tidptr always > egrtidbase
+	 * if type == RCVHQ_RCV_TYPE_EAGER.
+	 */
+	tidx = tidptr - dd->egrtidbase;
+
+	tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
+		? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
+	spin_lock_irqsave(tidlockp, flags);
+	qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
+	writel(pa, tidp32);
+	qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
+	mmiowb();
+	spin_unlock_irqrestore(tidlockp, flags);
+}
+
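Both TID writers (the locked variant above and the Rev2+ variant below) encode the physical address identically before the MMIO write: reject anything not 2KB aligned, shift out the low 11 bits, then OR in a buffer-size template. A standalone sketch of just that encoding; the template constants mirror the 1<<29 / 2<<29 pattern used here but should be read as illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define TID_SZ_2K (1U << 29)    /* same idea as dd->tidtemplate */
    #define TID_SZ_4K (2U << 29)

    /* returns the 32-bit TID word, or 0 on a bad address */
    static uint32_t encode_tid(unsigned long pa, int is_4k)
    {
        if (pa & ((1UL << 11) - 1))
            return 0;               /* not 2KB aligned */
        pa >>= 11;                  /* chip stores address in 2KB units */
        return (uint32_t)pa | (is_4k ? TID_SZ_4K : TID_SZ_2K);
    }

    int main(void)
    {
        printf("0x%x\n", encode_tid(0x10000, 1));   /* 4KB buffer */
        printf("0x%x\n", encode_tid(0x10001, 1));   /* 0: misaligned */
        return 0;
    }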
+/**
+ * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for selection of the
+ * appropriate "flavor". The static calls in cleanup just use the
+ * revision-agnostic form, as they are not performance critical.
+ */
+static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
+			       u32 type, unsigned long pa)
+{
+	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+	u32 tidx;
+
+	if (!dd->kregbase)
+		return;
+
+	if (pa != dd->tidinvalid) {
+		if (pa & ((1U << 11) - 1)) {
+			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+				    pa);
+			return;
+		}
+		pa >>= 11;
+		if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+			qib_dev_err(dd, "Physical page address 0x%lx "
+				    "larger than supported\n", pa);
+			return;
+		}
+
+		if (type == RCVHQ_RCV_TYPE_EAGER)
+			pa |= dd->tidtemplate;
+		else /* for now, always full 4KB page */
+			pa |= 2 << 29;
+	}
+	tidx = tidptr - dd->egrtidbase;
+	writel(pa, tidp32);
+	mmiowb();
+}
+
+/**
+ * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the context data
+ *
+ * clear all TID entries for a context, expected and eager.
+ * Used from qib_close(). On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void qib_6120_clear_tids(struct qib_devdata *dd,
+				struct qib_ctxtdata *rcd)
+{
+	u64 __iomem *tidbase;
+	unsigned long tidinv;
+	u32 ctxt;
+	int i;
+
+	if (!dd->kregbase || !rcd)
+		return;
+
+	ctxt = rcd->ctxt;
+
+	tidinv = dd->tidinvalid;
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->kregbase) +
+		 dd->rcvtidbase +
+		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+	for (i = 0; i < dd->rcvtidcnt; i++)
+		/* use func pointer because could be one of two funcs */
+		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+			      tidinv);
+
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->kregbase) +
+		 dd->rcvegrbase +
+		 rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+	for (i = 0; i < rcd->rcvegrcnt; i++)
+		/* use func pointer because could be one of two funcs */
+		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+			      tidinv);
+}
+
+/**
+ * qib_6120_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void qib_6120_tidtemplate(struct qib_devdata *dd)
+{
+	u32 egrsize = dd->rcvegrbufsize;
+
+	/*
+	 * For now, we always allocate 4KB buffers (at init) so we can
+	 * receive max size packets. We may want a module parameter to
+	 * specify 2KB or 4KB and/or make it per ctxt instead of per device
+	 * for those who want to reduce memory footprint. Note that the
+	 * rcvhdrentsize size must be large enough to hold the largest
+	 * IB header (currently 96 bytes) that we expect to handle (plus of
+	 * course the 2 dwords of RHF).
+	 */
+	if (egrsize == 2048)
+		dd->tidtemplate = 1U << 29;
+	else if (egrsize == 4096)
+		dd->tidtemplate = 2U << 29;
+	dd->tidinvalid = 0;
+}
+
+int __attribute__((weak)) qib_unordered_wc(void)
+{
+	return 0;
+}
+
+/**
+ * qib_6120_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */ +static int qib_6120_get_base_info(struct qib_ctxtdata *rcd, + struct qib_base_info *kinfo) +{ + if (qib_unordered_wc()) + kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER; + + kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE | + QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED; + return 0; +} + + +static struct qib_message_header * +qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) +{ + return (struct qib_message_header *) + &rhf_addr[sizeof(u64) / sizeof(u32)]; +} + +static void qib_6120_config_ctxts(struct qib_devdata *dd) +{ + dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt); + if (qib_n_krcv_queues > 1) { + dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports; + if (dd->first_user_ctxt > dd->ctxtcnt) + dd->first_user_ctxt = dd->ctxtcnt; + dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6; + } else + dd->first_user_ctxt = dd->num_pports; + dd->n_krcv_queues = dd->first_user_ctxt; +} + +static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, + u32 updegr, u32 egrhd) +{ + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + if (updegr) + qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); +} + +static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd) +{ + u32 head, tail; + + head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); + if (rcd->rcvhdrtail_kvaddr) + tail = qib_get_rcvhdrtail(rcd); + else + tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); + return head == tail; +} + +/* + * Used when we close any ctxt, for DMA already in flight + * at close. Can't be done until we know hdrq size, so not + * early in chip init. + */ +static void alloc_dummy_hdrq(struct qib_devdata *dd) +{ + dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev, + dd->rcd[0]->rcvhdrq_size, + &dd->cspec->dummy_hdrq_phys, + GFP_KERNEL | __GFP_COMP); + if (!dd->cspec->dummy_hdrq) { + qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n"); + /* fallback to just 0'ing */ + dd->cspec->dummy_hdrq_phys = 0UL; + } +} + +/* + * Modify the RCVCTRL register in chip-specific way. This + * is a function because bit positions and (future) register + * location is chip-specific, but the needed operations are + * generic. <op> is a bit-mask because we often want to + * do multiple modifications. + */ +static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op, + int ctxt) +{ + struct qib_devdata *dd = ppd->dd; + u64 mask, val; + unsigned long flags; + + spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); + + if (op & QIB_RCVCTRL_TAILUPD_ENB) + dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT); + if (op & QIB_RCVCTRL_TAILUPD_DIS) + dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT); + if (op & QIB_RCVCTRL_PKEY_ENB) + dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT); + if (op & QIB_RCVCTRL_PKEY_DIS) + dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT); + if (ctxt < 0) + mask = (1ULL << dd->ctxtcnt) - 1; + else + mask = (1ULL << ctxt); + if (op & QIB_RCVCTRL_CTXT_ENB) { + /* always done for specific ctxt */ + dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable)); + if (!(dd->flags & QIB_NODMA_RTAIL)) + dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT; + /* Write these registers before the context is enabled. 
*/
+			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+					    dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+					    dd->rcd[ctxt]->rcvhdrq_phys);
+
+			if (ctxt == 0 && !dd->cspec->dummy_hdrq)
+				alloc_dummy_hdrq(dd);
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS)
+		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+		dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+		dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+		/* arm rcv interrupt */
+		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+			dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	}
+	if (op & QIB_RCVCTRL_CTXT_ENB) {
+		/*
+		 * Init the context registers also; if we were
+		 * disabled, tail and head should both be zero
+		 * already from the enable, but since we don't
+		 * know, we have to do it explicitly.
+		 */
+		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+		dd->rcd[ctxt]->head = val;
+		/* If kctxt, interrupt on next receive. */
+		if (ctxt < dd->first_user_ctxt)
+			val |= dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS) {
+		/*
+		 * Be paranoid, and never write 0's to these, just use an
+		 * unused page. Of course,
+		 * rcvhdraddr points to a large chunk of memory, so this
+		 * could still trash things, but at least it won't trash
+		 * page 0, and by disabling the ctxt, it should stop "soon",
+		 * even if a packet or two is already in flight after we
+		 * disabled the ctxt. Only 6120 has this issue.
+		 */
+		if (ctxt >= 0) {
+			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+					    dd->cspec->dummy_hdrq_phys);
+			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+					    dd->cspec->dummy_hdrq_phys);
+		} else {
+			unsigned i;
+
+			for (i = 0; i < dd->cfgctxts; i++) {
+				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+						    i, dd->cspec->dummy_hdrq_phys);
+				qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
+						    i, dd->cspec->dummy_hdrq_phys);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in chip-specific way. This
+ * is a function because there may be multiple such registers with
+ * slightly different layouts. Only the operations actually used
+ * are implemented so far.
+ * The chip requires no back-to-back sendctrl writes, so we write
+ * the scratch register after writing sendctrl.
+ */
+static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 tmp_dd_sendctrl;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+	/* First the ones that are "sticky", saved in shadow */
+	if (op & QIB_SENDCTRL_CLEAR)
+		dd->sendctrl = 0;
+	if (op & QIB_SENDCTRL_SEND_DIS)
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
+	else if (op & QIB_SENDCTRL_SEND_ENB)
+		dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
+	if (op & QIB_SENDCTRL_AVAIL_DIS)
+		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+	else if (op & QIB_SENDCTRL_AVAIL_ENB)
+		dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+	if (op & QIB_SENDCTRL_DISARM_ALL) {
+		u32 i, last;
+
+		tmp_dd_sendctrl = dd->sendctrl;
+		/*
+		 * disarm any that are not yet launched, disabling sends
+		 * and updates until done.
+		 */
+		last = dd->piobcnt2k + dd->piobcnt4k;
+		tmp_dd_sendctrl &=
+			~(SYM_MASK(SendCtrl, PIOEnable) |
+			  SYM_MASK(SendCtrl, PIOBufAvailUpd));
+		for (i = 0; i < last; i++) {
+			qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
+				       SYM_MASK(SendCtrl, Disarm) | i);
+			qib_write_kreg(dd, kr_scratch, 0);
+		}
+	}
+
+	tmp_dd_sendctrl = dd->sendctrl;
+
+	if (op & QIB_SENDCTRL_FLUSH)
+		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
+	if (op & QIB_SENDCTRL_DISARM)
+		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+			((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
+			 SYM_LSB(SendCtrl, DisarmPIOBuf));
+	if (op & QIB_SENDCTRL_AVAIL_BLIP)
+		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+	qib_write_kreg(dd, kr_scratch, 0);
+
+	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+	}
+
+	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+	if (op & QIB_SENDCTRL_FLUSH) {
+		u32 v;
+
+		/*
+		 * ensure writes have hit chip, then do a few
+		 * more reads, to allow DMA of pioavail registers
+		 * to occur, so in-memory copy is in sync with
+		 * the chip. Not always safe to sleep.
+		 */
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		qib_read_kreg32(dd, kr_scratch);
+	}
+}
+
+/**
+ * qib_portcntr_6120 - read a per-port counter
+ * @ppd: the qlogic_ib port data
+ * @reg: the counter to snapshot
+ */
+static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
+{
+	u64 ret = 0ULL;
+	struct qib_devdata *dd = ppd->dd;
+	u16 creg;
+	/* 0xffff for unimplemented or synthesized counters */
+	static const u16 xlator[] = {
+		[QIBPORTCNTR_PKTSEND] = cr_pktsend,
+		[QIBPORTCNTR_WORDSEND] = cr_wordsend,
+		[QIBPORTCNTR_PSXMITDATA] = 0xffff,
+		[QIBPORTCNTR_PSXMITPKTS] = 0xffff,
+		[QIBPORTCNTR_PSXMITWAIT] = 0xffff,
+		[QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+		[QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+		[QIBPORTCNTR_PSRCVDATA] = 0xffff,
+		[QIBPORTCNTR_PSRCVPKTS] = 0xffff,
+		[QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+		[QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+		[QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+		[QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+		[QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
+		[QIBPORTCNTR_RXVLERR] = 0xffff,
+		[QIBPORTCNTR_ERRICRC] = cr_erricrc,
+		[QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+		[QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+		[QIBPORTCNTR_BADFORMAT] = cr_badformat,
+		[QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+		[QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+		[QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+		[QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+		[QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
+		[QIBPORTCNTR_ERRLINK] = cr_errlink,
+		[QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+		[QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+		[QIBPORTCNTR_LLI] = 0xffff,
+		[QIBPORTCNTR_PSINTERVAL] = 0xffff,
+		[QIBPORTCNTR_PSSTART] = 0xffff,
+		[QIBPORTCNTR_PSSTAT] = 0xffff,
+		[QIBPORTCNTR_VL15PKTDROP] = 0xffff,
+		[QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+		[QIBPORTCNTR_KHDROVFL] = 0xffff,
+	};
+
+	if (reg >= ARRAY_SIZE(xlator)) {
+		qib_devinfo(ppd->dd->pcidev,
+			    "Unimplemented portcounter %u\n", reg);
+		goto done;
+	}
+	creg = xlator[reg];
+
+	/* handle counters requests not implemented as chip counters */
+	if (reg == QIBPORTCNTR_LLI)
+		ret = dd->cspec->lli_errs;
+	else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
+		ret = dd->cspec->overrun_thresh_errs;
+	else if (reg == QIBPORTCNTR_KHDROVFL) {
+		int i;
+
+		/* sum over all kernel contexts */
+		for (i = 0; i < dd->first_user_ctxt; i++)
+			ret += read_6120_creg32(dd, cr_portovfl + i);
+	} else if (reg == QIBPORTCNTR_PSSTAT)
+		ret = dd->cspec->pma_sample_status;
+	if (creg == 0xffff)
+		goto done;
+
+	/*
+	 * only fast incrementing counters are 64bit; use 32 bit reads to
+	 * avoid two independent reads when on opteron
+	 */
+	if (creg == cr_wordsend || creg == cr_wordrcv ||
+	    creg == cr_pktsend || creg == cr_pktrcv)
+		ret = read_6120_creg(dd, creg);
+	else
+		ret = read_6120_creg32(dd, creg);
+	if (creg == cr_ibsymbolerr) {
+		if (dd->cspec->ibdeltainprog)
+			ret -= ret - dd->cspec->ibsymsnap;
+		ret -= dd->cspec->ibsymdelta;
+	} else if (creg == cr_iblinkerrrecov) {
+		if (dd->cspec->ibdeltainprog)
+			ret -= ret - dd->cspec->iblnkerrsnap;
+		ret -= dd->cspec->iblnkerrdelta;
+	}
+	if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
+		ret += dd->cspec->rxfc_unsupvl_errs;
+
+done:
+	return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" counters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr6120indices contains the corresponding register indices.
+ */
+static const char cntr6120names[] =
+	"Interrupts\n"
+	"HostBusStall\n"
+	"E RxTIDFull\n"
+	"RxTIDInvalid\n"
+	"Ctxt0EgrOvfl\n"
+	"Ctxt1EgrOvfl\n"
+	"Ctxt2EgrOvfl\n"
+	"Ctxt3EgrOvfl\n"
+	"Ctxt4EgrOvfl\n";
+
+static const size_t cntr6120indices[] = {
+	cr_lbint,
+	cr_lbflowstall,
+	cr_errtidfull,
+	cr_errtidvalid,
+	cr_portovfl + 0,
+	cr_portovfl + 1,
+	cr_portovfl + 2,
+	cr_portovfl + 3,
+	cr_portovfl + 4,
+};
+
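cntr6120names and cntr6120indices are parallel arrays: the Nth newline-separated label names the counter read through the Nth register index. A standalone sketch of consuming such a pair; the labels, indices, and register stub below are made up for illustration:

    #include <stdio.h>
    #include <string.h>

    static const char names[] = "Interrupts\nHostBusStall\nRxTIDFull\n";
    static const unsigned regs[] = { 10, 11, 12 };  /* made-up indices */

    static unsigned long read_creg(unsigned r) { return r * 100UL; } /* stub */

    int main(void)
    {
        const char *s = names;

        for (size_t i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
            const char *nl = strchr(s, '\n');

            printf("%.*s = %lu\n", (int)(nl - s), s, read_creg(regs[i]));
            s = nl + 1;         /* advance to the next label */
        }
        return 0;
    }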
+/*
+ * same as cntr6120names and cntr6120indices, but for port-specific counters.
+ * portcntr6120indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
+ */
+static const char portcntr6120names[] =
+	"TxPkt\n"
+	"TxFlowPkt\n"
+	"TxWords\n"
+	"RxPkt\n"
+	"RxFlowPkt\n"
+	"RxWords\n"
+	"TxFlowStall\n"
+	"E IBStatusChng\n"
+	"IBLinkDown\n"
+	"IBLnkRecov\n"
+	"IBRxLinkErr\n"
+	"IBSymbolErr\n"
+	"RxLLIErr\n"
+	"RxBadFormat\n"
+	"RxBadLen\n"
+	"RxBufOvrfl\n"
+	"RxEBP\n"
+	"RxFlowCtlErr\n"
+	"RxICRCerr\n"
+	"RxLPCRCerr\n"
+	"RxVCRCerr\n"
+	"RxInvalLen\n"
+	"RxInvalPKey\n"
+	"RxPktDropped\n"
+	"TxBadLength\n"
+	"TxDropped\n"
+	"TxInvalLen\n"
+	"TxUnderrun\n"
+	"TxUnsupVL\n"
+	;
+
+#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
+static const size_t portcntr6120indices[] = {
+	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+	cr_pktsendflow,
+	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+	cr_pktrcvflowctrl,
+	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+	cr_ibstatuschange,
+	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+	cr_rcvflowctrl_err,
+	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+	cr_invalidslen,
+	cr_senddropped,
+	cr_errslen,
+	cr_sendunderrun,
+	cr_txunsupvl,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_6120_cntrnames(struct qib_devdata *dd)
+{
+	int i, j = 0;
+	char *s;
+
+	for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
+	     i++) {
+		/* we always have at least one counter before the egrovfl */
+		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+			j = 1;
+		s = strchr(s + 1, '\n');
+		if (s && j)
+			j++;
+	}
+	dd->cspec->ncntrs = i;
+	if (!s)
+		/* full list; size is without terminating null */
+		dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
+	else
+		dd->cspec->cntrnamelen = 1 + s - cntr6120names;
+	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+				   * sizeof(u64), GFP_KERNEL);
+	if (!dd->cspec->cntrs)
+		qib_dev_err(dd, "Failed allocation for counters\n");
+
+	for (i = 0, s = (char *)portcntr6120names; s; i++)
+		s = strchr(s + 1, '\n');
+	dd->cspec->nportcntrs = i - 1;
+	dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
+	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+				       * sizeof(u64), GFP_KERNEL);
+	if (!dd->cspec->portcntrs)
+		qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
+
+static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+			      u64 **cntrp)
+{
+	u32 ret;
+
+	if (namep) {
+		ret = dd->cspec->cntrnamelen;
+		if (pos >= ret)
+			ret = 0; /* final read after getting everything */
+		else
+			*namep = (char *)cntr6120names;
+	} else {
+		u64 *cntr = dd->cspec->cntrs;
+		int i;
+
+		ret = dd->cspec->ncntrs * sizeof(u64);
+		if (!cntr || pos >= ret) {
+			/* everything read, or couldn't get memory */
+			ret = 0;
+			goto done;
+		}
+		*cntrp = cntr;
+		for (i = 0; i < dd->cspec->ncntrs; i++)
+			*cntr++ =
read_6120_creg32(dd, cntr6120indices[i]); + } +done: + return ret; +} + +static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, + char **namep, u64 **cntrp) +{ + u32 ret; + + if (namep) { + ret = dd->cspec->portcntrnamelen; + if (pos >= ret) + ret = 0; /* final read after getting everything */ + else + *namep = (char *)portcntr6120names; + } else { + u64 *cntr = dd->cspec->portcntrs; + struct qib_pportdata *ppd = &dd->pport[port]; + int i; + + ret = dd->cspec->nportcntrs * sizeof(u64); + if (!cntr || pos >= ret) { + /* everything read, or couldn't get memory */ + ret = 0; + goto done; + } + *cntrp = cntr; + for (i = 0; i < dd->cspec->nportcntrs; i++) { + if (portcntr6120indices[i] & _PORT_VIRT_FLAG) + *cntr++ = qib_portcntr_6120(ppd, + portcntr6120indices[i] & + ~_PORT_VIRT_FLAG); + else + *cntr++ = read_6120_creg32(dd, + portcntr6120indices[i]); + } + } +done: + return ret; +} + +static void qib_chk_6120_errormask(struct qib_devdata *dd) +{ + static u32 fixed; + u32 ctrl; + unsigned long errormask; + unsigned long hwerrs; + + if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED)) + return; + + errormask = qib_read_kreg64(dd, kr_errmask); + + if (errormask == dd->cspec->errormask) + return; + fixed++; + + hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); + ctrl = qib_read_kreg32(dd, kr_control); + + qib_write_kreg(dd, kr_errmask, + dd->cspec->errormask); + + if ((hwerrs & dd->cspec->hwerrmask) || + (ctrl & QLOGIC_IB_C_FREEZEMODE)) { + qib_write_kreg(dd, kr_hwerrclear, 0ULL); + qib_write_kreg(dd, kr_errclear, 0ULL); + /* force re-interrupt of pending events, just in case */ + qib_write_kreg(dd, kr_intclear, 0ULL); + qib_devinfo(dd->pcidev, + "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n", + fixed, errormask, (unsigned long)dd->cspec->errormask, + ctrl, hwerrs); + } +} + +/** + * qib_get_faststats - get word counters from chip before they overflow + * @opaque - contains a pointer to the qlogic_ib device qib_devdata + * + * This needs more work; in particular, decision on whether we really + * need traffic_wds done the way it is + * called from add_timer + */ +static void qib_get_6120_faststats(unsigned long opaque) +{ + struct qib_devdata *dd = (struct qib_devdata *) opaque; + struct qib_pportdata *ppd = dd->pport; + unsigned long flags; + u64 traffic_wds; + + /* + * don't access the chip while running diags, or memory diags can + * fail + */ + if (!(dd->flags & QIB_INITTED) || dd->diag_client) + /* but re-arm the timer, for diags case; won't hurt other */ + goto done; + + /* + * We now try to maintain an activity timer, based on traffic + * exceeding a threshold, so we need to check the word-counts + * even if they are 64-bit. + */ + traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) + + qib_portcntr_6120(ppd, cr_wordrcv); + spin_lock_irqsave(&dd->eep_st_lock, flags); + traffic_wds -= dd->traffic_wds; + dd->traffic_wds += traffic_wds; + if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) + atomic_add(5, &dd->active_time); /* S/B #define */ + spin_unlock_irqrestore(&dd->eep_st_lock, flags); + + qib_chk_6120_errormask(dd); +done: + mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); +} + +/* no interrupt fallback for these chips */ +static int qib_6120_nointr_fallback(struct qib_devdata *dd) +{ + return 0; +} + +/* + * reset the XGXS (between serdes and IBC). Slightly less intrusive + * than resetting the IBC or external link state, and useful in some + * cases to cause some retraining. To do this right, we reset IBC + * as well. 
+ */ +static void qib_6120_xgxs_reset(struct qib_pportdata *ppd) +{ + u64 val, prev_val; + struct qib_devdata *dd = ppd->dd; + + prev_val = qib_read_kreg64(dd, kr_xgxs_cfg); + val = prev_val | QLOGIC_IB_XGXS_RESET; + prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */ + qib_write_kreg(dd, kr_control, + dd->control & ~QLOGIC_IB_C_LINKENABLE); + qib_write_kreg(dd, kr_xgxs_cfg, val); + qib_read_kreg32(dd, kr_scratch); + qib_write_kreg(dd, kr_xgxs_cfg, prev_val); + qib_write_kreg(dd, kr_control, dd->control); +} + +static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which) +{ + int ret; + + switch (which) { + case QIB_IB_CFG_LWID: + ret = ppd->link_width_active; + break; + + case QIB_IB_CFG_SPD: + ret = ppd->link_speed_active; + break; + + case QIB_IB_CFG_LWID_ENB: + ret = ppd->link_width_enabled; + break; + + case QIB_IB_CFG_SPD_ENB: + ret = ppd->link_speed_enabled; + break; + + case QIB_IB_CFG_OP_VLS: + ret = ppd->vls_operational; + break; + + case QIB_IB_CFG_VL_HIGH_CAP: + ret = 0; + break; + + case QIB_IB_CFG_VL_LOW_CAP: + ret = 0; + break; + + case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ + ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl, + OverrunThreshold); + break; + + case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ + ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl, + PhyerrThreshold); + break; + + case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ + /* will only take effect when the link state changes */ + ret = (ppd->dd->cspec->ibcctrl & + SYM_MASK(IBCCtrl, LinkDownDefaultState)) ? + IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; + break; + + case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ + ret = 0; /* no heartbeat on this chip */ + break; + + case QIB_IB_CFG_PMA_TICKS: + ret = 250; /* 1 usec. */ + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + +/* + * We assume range checking is already done, if needed. 
+ */ +static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) +{ + struct qib_devdata *dd = ppd->dd; + int ret = 0; + u64 val64; + u16 lcmd, licmd; + + switch (which) { + case QIB_IB_CFG_LWID_ENB: + ppd->link_width_enabled = val; + break; + + case QIB_IB_CFG_SPD_ENB: + ppd->link_speed_enabled = val; + break; + + case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ + val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl, + OverrunThreshold); + if (val64 != val) { + dd->cspec->ibcctrl &= + ~SYM_MASK(IBCCtrl, OverrunThreshold); + dd->cspec->ibcctrl |= (u64) val << + SYM_LSB(IBCCtrl, OverrunThreshold); + qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + break; + + case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ + val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl, + PhyerrThreshold); + if (val64 != val) { + dd->cspec->ibcctrl &= + ~SYM_MASK(IBCCtrl, PhyerrThreshold); + dd->cspec->ibcctrl |= (u64) val << + SYM_LSB(IBCCtrl, PhyerrThreshold); + qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + break; + + case QIB_IB_CFG_PKEYS: /* update pkeys */ + val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) | + ((u64) ppd->pkeys[2] << 32) | + ((u64) ppd->pkeys[3] << 48); + qib_write_kreg(dd, kr_partitionkey, val64); + break; + + case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ + /* will only take effect when the link state changes */ + if (val == IB_LINKINITCMD_POLL) + dd->cspec->ibcctrl &= + ~SYM_MASK(IBCCtrl, LinkDownDefaultState); + else /* SLEEP */ + dd->cspec->ibcctrl |= + SYM_MASK(IBCCtrl, LinkDownDefaultState); + qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl); + qib_write_kreg(dd, kr_scratch, 0); + break; + + case QIB_IB_CFG_MTU: /* update the MTU in IBC */ + /* + * Update our housekeeping variables, and set IBC max + * size, same as init code; max IBC is max we allow in + * buffer, less the qword pbc, plus 1 for ICRC, in dwords + * Set even if it's unchanged, print debug message only + * on changes. 
+ */ + val = (ppd->ibmaxlen >> 2) + 1; + dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen); + dd->cspec->ibcctrl |= (u64)val << + SYM_LSB(IBCCtrl, MaxPktLen); + qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl); + qib_write_kreg(dd, kr_scratch, 0); + break; + + case QIB_IB_CFG_LSTATE: /* set the IB link state */ + switch (val & 0xffff0000) { + case IB_LINKCMD_DOWN: + lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN; + if (!dd->cspec->ibdeltainprog) { + dd->cspec->ibdeltainprog = 1; + dd->cspec->ibsymsnap = + read_6120_creg32(dd, cr_ibsymbolerr); + dd->cspec->iblnkerrsnap = + read_6120_creg32(dd, cr_iblinkerrrecov); + } + break; + + case IB_LINKCMD_ARMED: + lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED; + break; + + case IB_LINKCMD_ACTIVE: + lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE; + break; + + default: + ret = -EINVAL; + qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); + goto bail; + } + switch (val & 0xffff) { + case IB_LINKINITCMD_NOP: + licmd = 0; + break; + + case IB_LINKINITCMD_POLL: + licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL; + break; + + case IB_LINKINITCMD_SLEEP: + licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP; + break; + + case IB_LINKINITCMD_DISABLE: + licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE; + break; + + default: + ret = -EINVAL; + qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", + val & 0xffff); + goto bail; + } + qib_set_ib_6120_lstate(ppd, lcmd, licmd); + goto bail; + + case QIB_IB_CFG_HRTBT: + ret = -EINVAL; + break; + + default: + ret = -EINVAL; + } +bail: + return ret; +} + +static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) +{ + int ret = 0; + if (!strncmp(what, "ibc", 3)) { + ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); + qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", + ppd->dd->unit, ppd->port); + } else if (!strncmp(what, "off", 3)) { + ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); + qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " + "(normal)\n", ppd->dd->unit, ppd->port); + } else + ret = -EINVAL; + if (!ret) { + qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl); + qib_write_kreg(ppd->dd, kr_scratch, 0); + } + return ret; +} + +static void pma_6120_timer(unsigned long data) +{ + struct qib_pportdata *ppd = (struct qib_pportdata *)data; + struct qib_chip_specific *cs = ppd->dd->cspec; + struct qib_ibport *ibp = &ppd->ibport_data; + unsigned long flags; + + spin_lock_irqsave(&ibp->lock, flags); + if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) { + cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; + qib_snapshot_counters(ppd, &cs->sword, &cs->rword, + &cs->spkts, &cs->rpkts, &cs->xmit_wait); + mod_timer(&cs->pma_timer, + jiffies + usecs_to_jiffies(ibp->pma_sample_interval)); + } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) { + u64 ta, tb, tc, td, te; + + cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; + qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te); + + cs->sword = ta - cs->sword; + cs->rword = tb - cs->rword; + cs->spkts = tc - cs->spkts; + cs->rpkts = td - cs->rpkts; + cs->xmit_wait = te - cs->xmit_wait; + } + spin_unlock_irqrestore(&ibp->lock, flags); +} + +/* + * Note that the caller has the ibp->lock held. 
+ */ +static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv, + u32 start) +{ + struct qib_chip_specific *cs = ppd->dd->cspec; + + if (start && intv) { + cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED; + mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start)); + } else if (intv) { + cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; + qib_snapshot_counters(ppd, &cs->sword, &cs->rword, + &cs->spkts, &cs->rpkts, &cs->xmit_wait); + mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv)); + } else { + cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; + cs->sword = 0; + cs->rword = 0; + cs->spkts = 0; + cs->rpkts = 0; + cs->xmit_wait = 0; + } +} + +static u32 qib_6120_iblink_state(u64 ibcs) +{ + u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState); + + switch (state) { + case IB_6120_L_STATE_INIT: + state = IB_PORT_INIT; + break; + case IB_6120_L_STATE_ARM: + state = IB_PORT_ARMED; + break; + case IB_6120_L_STATE_ACTIVE: + /* fall through */ + case IB_6120_L_STATE_ACT_DEFER: + state = IB_PORT_ACTIVE; + break; + default: /* fall through */ + case IB_6120_L_STATE_DOWN: + state = IB_PORT_DOWN; + break; + } + return state; +} + +/* returns the IBTA port state, rather than the IBC link training state */ +static u8 qib_6120_phys_portstate(u64 ibcs) +{ + u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState); + return qib_6120_physportstate[state]; +} + +static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) +{ + unsigned long flags; + + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + + if (ibup) { + if (ppd->dd->cspec->ibdeltainprog) { + ppd->dd->cspec->ibdeltainprog = 0; + ppd->dd->cspec->ibsymdelta += + read_6120_creg32(ppd->dd, cr_ibsymbolerr) - + ppd->dd->cspec->ibsymsnap; + ppd->dd->cspec->iblnkerrdelta += + read_6120_creg32(ppd->dd, cr_iblinkerrrecov) - + ppd->dd->cspec->iblnkerrsnap; + } + qib_hol_init(ppd); + } else { + ppd->dd->cspec->lli_counter = 0; + if (!ppd->dd->cspec->ibdeltainprog) { + ppd->dd->cspec->ibdeltainprog = 1; + ppd->dd->cspec->ibsymsnap = + read_6120_creg32(ppd->dd, cr_ibsymbolerr); + ppd->dd->cspec->iblnkerrsnap = + read_6120_creg32(ppd->dd, cr_iblinkerrrecov); + } + qib_hol_down(ppd); + } + + qib_6120_setup_setextled(ppd, ibup); + + return 0; +} + +/* Does read/modify/write to appropriate registers to + * set output and direction bits selected by mask. + * These are in their canonical positions (e.g. lsb of + * dir will end up in D48 of extctrl on existing chips). + * Returns contents of GP Inputs. + */ +static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) +{ + u64 read_val, new_out; + unsigned long flags; + + if (mask) { + /* some bits being written, lock access to GPIO */ + dir &= mask; + out &= mask; + spin_lock_irqsave(&dd->cspec->gpio_lock, flags); + dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); + dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); + new_out = (dd->cspec->gpio_out & ~mask) | out; + + qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); + qib_write_kreg(dd, kr_gpio_out, new_out); + dd->cspec->gpio_out = new_out; + spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); + } + /* + * It is unlikely that a read at this time would get valid + * data on a pin whose direction line was set in the same + * call to this function.
We include the read here because + * that allows us to potentially combine a change on one pin with + * a read on another, and because the old code did something like + * this. + */ + read_val = qib_read_kreg64(dd, kr_extstatus); + return SYM_FIELD(read_val, EXTStatus, GPIOIn); +} + +/* + * Read fundamental info we need to use the chip. These are + * the registers that describe chip capabilities, and are + * saved in shadow registers. + */ +static void get_6120_chip_params(struct qib_devdata *dd) +{ + u64 val; + u32 piobufs; + int mtu; + + dd->uregbase = qib_read_kreg32(dd, kr_userregbase); + + dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); + dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); + dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); + dd->palign = qib_read_kreg32(dd, kr_palign); + dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); + dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; + + dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); + + val = qib_read_kreg64(dd, kr_sendpiosize); + dd->piosize2k = val & ~0U; + dd->piosize4k = val >> 32; + + mtu = ib_mtu_enum_to_int(qib_ibmtu); + if (mtu == -1) + mtu = QIB_DEFAULT_MTU; + dd->pport->ibmtu = (u32)mtu; + + val = qib_read_kreg64(dd, kr_sendpiobufcnt); + dd->piobcnt2k = val & ~0U; + dd->piobcnt4k = val >> 32; + /* these may be adjusted in init_chip_wc_pat() */ + dd->pio2kbase = (u32 __iomem *) + (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase); + if (dd->piobcnt4k) { + dd->pio4kbase = (u32 __iomem *) + (((char __iomem *) dd->kregbase) + + (dd->piobufbase >> 32)); + /* + * 4K buffers take 2 pages; we use roundup just to be + * paranoid; we calculate it once here, rather than on + * every buf allocate + */ + dd->align4k = ALIGN(dd->piosize4k, dd->palign); + } + + piobufs = dd->piobcnt4k + dd->piobcnt2k; + + dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / + (sizeof(u64) * BITS_PER_BYTE / 2); +} + +/* + * The chip base addresses in cspec and cpspec have to be set + * after possible init_chip_wc_pat(), rather than in + * get_6120_chip_params(), so split out as a separate function + */ +static void set_6120_baseaddrs(struct qib_devdata *dd) +{ + u32 cregbase; + cregbase = qib_read_kreg32(dd, kr_counterregbase); + dd->cspec->cregbase = (u64 __iomem *) + ((char __iomem *) dd->kregbase + cregbase); + + dd->egrtidbase = (u64 __iomem *) + ((char __iomem *) dd->kregbase + dd->rcvegrbase); +} + +/* + * Write the final few registers that depend on some of the + * init setup. Done late in init, just before bringing up + * the serdes.
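+ * The SendPIOAvailAddr write below is read back and compared, so a + * bad DMA address is caught here rather than showing up later as + * PIO-avail corruption.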
+ */ +static int qib_late_6120_initreg(struct qib_devdata *dd) +{ + int ret = 0; + u64 val; + + qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); + qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); + qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); + qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); + val = qib_read_kreg64(dd, kr_sendpioavailaddr); + if (val != dd->pioavailregs_phys) { + qib_dev_err(dd, "Catastrophic software error, " + "SendPIOAvailAddr written as %lx, " + "read back as %llx\n", + (unsigned long) dd->pioavailregs_phys, + (unsigned long long) val); + ret = -EINVAL; + } + return ret; +} + +static int init_6120_variables(struct qib_devdata *dd) +{ + int ret = 0; + struct qib_pportdata *ppd; + u32 sbufs; + + ppd = (struct qib_pportdata *)(dd + 1); + dd->pport = ppd; + dd->num_pports = 1; + + dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports); + ppd->cpspec = NULL; /* not used in this chip */ + + spin_lock_init(&dd->cspec->kernel_tid_lock); + spin_lock_init(&dd->cspec->user_tid_lock); + spin_lock_init(&dd->cspec->rcvmod_lock); + spin_lock_init(&dd->cspec->gpio_lock); + + /* we haven't yet set QIB_PRESENT, so use read directly */ + dd->revision = readq(&dd->kregbase[kr_revision]); + + if ((dd->revision & 0xffffffffU) == 0xffffffffU) { + qib_dev_err(dd, "Revision register read failure, " + "giving up initialization\n"); + ret = -ENODEV; + goto bail; + } + dd->flags |= QIB_PRESENT; /* now register routines work */ + + dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, + ChipRevMajor); + dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, + ChipRevMinor); + + get_6120_chip_params(dd); + pe_boardname(dd); /* fill in boardname */ + + /* + * GPIO bits for TWSI data and clock, + * used for serial EEPROM. + */ + dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; + dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; + dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV; + + if (qib_unordered_wc()) + dd->flags |= QIB_PIO_FLUSH_WC; + + /* + * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. + * 2 is Some Misc, 3 is reserved for future. + */ + dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr); + + /* Ignore errors in PIO/PBC on systems with unordered write-combining */ + if (qib_unordered_wc()) + dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY; + + dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr); + + dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated); + + qib_init_pportdata(ppd, dd, 0, 1); + ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; + ppd->link_speed_supported = QIB_IB_SDR; + ppd->link_width_enabled = IB_WIDTH_4X; + ppd->link_speed_enabled = ppd->link_speed_supported; + /* these can't change for this chip, so set once */ + ppd->link_width_active = ppd->link_width_enabled; + ppd->link_speed_active = ppd->link_speed_enabled; + ppd->vls_supported = IB_VL_VL0; + ppd->vls_operational = ppd->vls_supported; + + dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; + dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; + dd->rhf_offset = 0; + + /* we always allocate at least 2048 bytes for eager buffers */ + ret = ib_mtu_enum_to_int(qib_ibmtu); + dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; + + qib_6120_tidtemplate(dd); + + /* + * We can request a receive interrupt for 1 or + * more packets from current offset. For now, we set this + * up for a single packet. 
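+ * (The packet count lives in the upper 32 bits of the head register, + * hence the 1ULL << 32 below.)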
+ */ + dd->rhdrhead_intr_off = 1ULL << 32; + + /* setup the stats timer; the add_timer is done at end of init */ + init_timer(&dd->stats_timer); + dd->stats_timer.function = qib_get_6120_faststats; + dd->stats_timer.data = (unsigned long) dd; + + init_timer(&dd->cspec->pma_timer); + dd->cspec->pma_timer.function = pma_6120_timer; + dd->cspec->pma_timer.data = (unsigned long) ppd; + + dd->ureg_align = qib_read_kreg32(dd, kr_palign); + + dd->piosize2kmax_dwords = dd->piosize2k >> 2; + qib_6120_config_ctxts(dd); + qib_set_ctxtcnt(dd); + + if (qib_wc_pat) { + ret = init_chip_wc_pat(dd, 0); + if (ret) + goto bail; + } + set_6120_baseaddrs(dd); /* set chip access pointers now */ + + ret = 0; + if (qib_mini_init) + goto bail; + + qib_num_cfg_vls = 1; /* if any 6120's, only one VL */ + + ret = qib_create_ctxts(dd); + init_6120_cntrnames(dd); + + /* use all of 4KB buffers for the kernel, otherwise 16 */ + sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16; + + dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs; + dd->pbufsctxt = dd->lastctxt_piobuf / + (dd->cfgctxts - dd->first_user_ctxt); + + if (ret) + goto bail; +bail: + return ret; +} + +/* + * For this chip, we want to use the same buffer every time + * when we are trying to bring the link up (they are always VL15 + * packets). At that link state the packet should always go out immediately + * (or at least be discarded at the tx interface if the link is down). + * If it doesn't, and the buffer isn't available, that means some other + * sender has gotten ahead of us, and is preventing our packet from going + * out. In that case, we flush all packets, and try again. If that still + * fails, we fail the request, and hope things work the next time around. + * + * We don't need very complicated heuristics on whether the packet had + * time to go out or not, since even at SDR 1X, it goes out in very short + * time periods, covered by the chip reads done here and as part of the + * flush. + */ +static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum) +{ + u32 __iomem *buf; + u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1; + + /* + * always blip to get avail list updated, since it's almost + * always needed, and is fairly cheap. 
+ */ + sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP); + qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */ + buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf); + if (buf) + goto done; + + sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH | + QIB_SENDCTRL_AVAIL_BLIP); + ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */ + qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */ + buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf); +done: + return buf; +} + +static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc, + u32 *pbufnum) +{ + u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK; + struct qib_devdata *dd = ppd->dd; + u32 __iomem *buf; + + if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) && + !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE))) + buf = get_6120_link_buf(ppd, pbufnum); + else { + + if ((plen + 1) > dd->piosize2kmax_dwords) + first = dd->piobcnt2k; + else + first = 0; + /* try 4k if all 2k busy, so same last for both sizes */ + last = dd->piobcnt2k + dd->piobcnt4k - 1; + buf = qib_getsendbuf_range(dd, pbufnum, first, last); + } + return buf; +} + +static int init_sdma_6120_regs(struct qib_pportdata *ppd) +{ + return -ENODEV; +} + +static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd) +{ + return 0; +} + +static int qib_sdma_6120_busy(struct qib_pportdata *ppd) +{ + return 0; +} + +static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail) +{ +} + +static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) +{ +} + +static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) +{ +} + +/* + * the pbc doesn't need a VL15 indicator, but we need it for link_buf. + * The chip ignores the bit if set. + */ +static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen, + u8 srate, u8 vl) +{ + return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0; +} + +static void qib_6120_initvl15_bufs(struct qib_devdata *dd) +{ +} + +static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd) +{ + rcd->rcvegrcnt = rcd->dd->rcvhdrcnt; + rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt; +} + +static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start, + u32 len, u32 avail, struct qib_ctxtdata *rcd) +{ +} + +static void writescratch(struct qib_devdata *dd, u32 val) +{ + (void) qib_write_kreg(dd, kr_scratch, val); +} + +static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum) +{ + return -ENXIO; +} + +/* Dummy function, as 6120 boards never disable EEPROM Write */ +static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen) +{ + return 1; +} + +/** + * qib_init_iba6120_funcs - set up the chip-specific function pointers + * @pdev: pci_dev of the qlogic_ib device + * @ent: pci_device_id matching this chip + * + * This is global, and is called directly at init to set up the + * chip-specific function pointers for later use. + * + * It also allocates/partially-inits the qib_devdata struct for + * this device. 
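+ * The extra space requested from qib_alloc_devdata() holds the single + * qib_pportdata followed by the qib_chip_specific struct; + * init_6120_variables() points dd->pport and dd->cspec into that tail.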
+ */ +struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct qib_devdata *dd; + int ret; + + dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) + + sizeof(struct qib_chip_specific)); + if (IS_ERR(dd)) + goto bail; + + dd->f_bringup_serdes = qib_6120_bringup_serdes; + dd->f_cleanup = qib_6120_setup_cleanup; + dd->f_clear_tids = qib_6120_clear_tids; + dd->f_free_irq = qib_6120_free_irq; + dd->f_get_base_info = qib_6120_get_base_info; + dd->f_get_msgheader = qib_6120_get_msgheader; + dd->f_getsendbuf = qib_6120_getsendbuf; + dd->f_gpio_mod = gpio_6120_mod; + dd->f_eeprom_wen = qib_6120_eeprom_wen; + dd->f_hdrqempty = qib_6120_hdrqempty; + dd->f_ib_updown = qib_6120_ib_updown; + dd->f_init_ctxt = qib_6120_init_ctxt; + dd->f_initvl15_bufs = qib_6120_initvl15_bufs; + dd->f_intr_fallback = qib_6120_nointr_fallback; + dd->f_late_initreg = qib_late_6120_initreg; + dd->f_setpbc_control = qib_6120_setpbc_control; + dd->f_portcntr = qib_portcntr_6120; + dd->f_put_tid = (dd->minrev >= 2) ? + qib_6120_put_tid_2 : + qib_6120_put_tid; + dd->f_quiet_serdes = qib_6120_quiet_serdes; + dd->f_rcvctrl = rcvctrl_6120_mod; + dd->f_read_cntrs = qib_read_6120cntrs; + dd->f_read_portcntrs = qib_read_6120portcntrs; + dd->f_reset = qib_6120_setup_reset; + dd->f_init_sdma_regs = init_sdma_6120_regs; + dd->f_sdma_busy = qib_sdma_6120_busy; + dd->f_sdma_gethead = qib_sdma_6120_gethead; + dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl; + dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt; + dd->f_sdma_update_tail = qib_sdma_update_6120_tail; + dd->f_sendctrl = sendctrl_6120_mod; + dd->f_set_armlaunch = qib_set_6120_armlaunch; + dd->f_set_cntr_sample = qib_set_cntr_6120_sample; + dd->f_iblink_state = qib_6120_iblink_state; + dd->f_ibphys_portstate = qib_6120_phys_portstate; + dd->f_get_ib_cfg = qib_6120_get_ib_cfg; + dd->f_set_ib_cfg = qib_6120_set_ib_cfg; + dd->f_set_ib_loopback = qib_6120_set_loopback; + dd->f_set_intr_state = qib_6120_set_intr_state; + dd->f_setextled = qib_6120_setup_setextled; + dd->f_txchk_change = qib_6120_txchk_change; + dd->f_update_usrhead = qib_update_6120_usrhead; + dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr; + dd->f_xgxs_reset = qib_6120_xgxs_reset; + dd->f_writescratch = writescratch; + dd->f_tempsense_rd = qib_6120_tempsense_rd; + /* + * Do remaining pcie setup and save pcie values in dd. + * Any error printing is already done by the init code. + * On return, we have the chip mapped and accessible, + * but chip registers are not set up until start of + * init_6120_variables. 
+ */ + ret = qib_pcie_ddinit(dd, pdev, ent); + if (ret < 0) + goto bail_free; + + /* initialize chip-specific variables */ + ret = init_6120_variables(dd); + if (ret) + goto bail_cleanup; + + if (qib_mini_init) + goto bail; + + if (qib_pcie_params(dd, 8, NULL, NULL)) + qib_dev_err(dd, "Failed to setup PCIe or interrupts; " + "continuing anyway\n"); + dd->cspec->irq = pdev->irq; /* save IRQ */ + + /* clear diagctrl register, in case diags were running and crashed */ + qib_write_kreg(dd, kr_hwdiagctrl, 0); + + if (qib_read_kreg64(dd, kr_hwerrstatus) & + QLOGIC_IB_HWE_SERDESPLLFAILED) + qib_write_kreg(dd, kr_hwerrclear, + QLOGIC_IB_HWE_SERDESPLLFAILED); + + /* setup interrupt handler (interrupt type handled above) */ + qib_setup_6120_interrupt(dd); + /* Note that qpn_mask is set by qib_6120_config_ctxts() first */ + qib_6120_init_hwerrors(dd); + + goto bail; + +bail_cleanup: + qib_pcie_ddcleanup(dd); +bail_free: + qib_free_devdata(dd); + dd = ERR_PTR(ret); +bail: + return dd; +} diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c new file mode 100644 index 000000000000..6fd8d74e7392 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -0,0 +1,4618 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +/* + * This file contains all of the code that is specific to the + * QLogic_IB 7220 chip (except that specific to the SerDes) + */ + +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <rdma/ib_verbs.h> + +#include "qib.h" +#include "qib_7220.h" + +static void qib_setup_7220_setextled(struct qib_pportdata *, u32); +static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t); +static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op); +static u32 qib_7220_iblink_state(u64); +static u8 qib_7220_phys_portstate(u64); +static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16); +static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16); + +/* + * This file contains almost all the chip-specific register information and + * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the + * exception of SerDes support, which is in qib_sd7220.c. + */ + +/* Below uses machine-generated qib_chipnum_regs.h file */ +#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64)) + +/* Use defines to tie machine-generated names to lower-case names */ +#define kr_control KREG_IDX(Control) +#define kr_counterregbase KREG_IDX(CntrRegBase) +#define kr_errclear KREG_IDX(ErrClear) +#define kr_errmask KREG_IDX(ErrMask) +#define kr_errstatus KREG_IDX(ErrStatus) +#define kr_extctrl KREG_IDX(EXTCtrl) +#define kr_extstatus KREG_IDX(EXTStatus) +#define kr_gpio_clear KREG_IDX(GPIOClear) +#define kr_gpio_mask KREG_IDX(GPIOMask) +#define kr_gpio_out KREG_IDX(GPIOOut) +#define kr_gpio_status KREG_IDX(GPIOStatus) +#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID) +#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) +#define kr_hwerrclear KREG_IDX(HwErrClear) +#define kr_hwerrmask KREG_IDX(HwErrMask) +#define kr_hwerrstatus KREG_IDX(HwErrStatus) +#define kr_ibcctrl KREG_IDX(IBCCtrl) +#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl) +#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus) +#define kr_ibcstatus KREG_IDX(IBCStatus) +#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl) +#define kr_intclear KREG_IDX(IntClear) +#define kr_intmask KREG_IDX(IntMask) +#define kr_intstatus KREG_IDX(IntStatus) +#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl) +#define kr_palign KREG_IDX(PageAlign) +#define kr_partitionkey KREG_IDX(RcvPartitionKey) +#define kr_portcnt KREG_IDX(PortCnt) +#define kr_rcvbthqp KREG_IDX(RcvBTHQP) +#define kr_rcvctrl KREG_IDX(RcvCtrl) +#define kr_rcvegrbase KREG_IDX(RcvEgrBase) +#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) +#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) +#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) +#define kr_rcvhdrsize KREG_IDX(RcvHdrSize) +#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt) +#define kr_rcvtidbase KREG_IDX(RcvTIDBase) +#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) +#define kr_revision KREG_IDX(Revision) +#define kr_scratch KREG_IDX(Scratch) +#define kr_sendbuffererror KREG_IDX(SendBufErr0) +#define kr_sendctrl KREG_IDX(SendCtrl) +#define kr_senddmabase KREG_IDX(SendDmaBase) +#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0) +#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1) +#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2) +#define kr_senddmahead KREG_IDX(SendDmaHead) +#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr) +#define kr_senddmalengen KREG_IDX(SendDmaLenGen) +#define kr_senddmastatus KREG_IDX(SendDmaStatus) +#define kr_senddmatail KREG_IDX(SendDmaTail) +#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr) +#define kr_sendpiobufbase KREG_IDX(SendBufBase) +#define
kr_sendpiobufcnt KREG_IDX(SendBufCnt) +#define kr_sendpiosize KREG_IDX(SendBufSize) +#define kr_sendregbase KREG_IDX(SendRegBase) +#define kr_userregbase KREG_IDX(UserRegBase) +#define kr_xgxs_cfg KREG_IDX(XGXSCfg) + +/* These must only be written via qib_write_kreg_ctxt() */ +#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0) +#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0) + + +#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \ + QIB_7220_LBIntCnt_OFFS) / sizeof(u64)) + +#define cr_badformat CREG_IDX(RxVersionErrCnt) +#define cr_erricrc CREG_IDX(RxICRCErrCnt) +#define cr_errlink CREG_IDX(RxLinkMalformCnt) +#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt) +#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt) +#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt) +#define cr_err_rlen CREG_IDX(RxLenErrCnt) +#define cr_errslen CREG_IDX(TxLenErrCnt) +#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt) +#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt) +#define cr_errvcrc CREG_IDX(RxVCRCErrCnt) +#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt) +#define cr_lbint CREG_IDX(LBIntCnt) +#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt) +#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt) +#define cr_lbflowstall CREG_IDX(LBFlowStallCnt) +#define cr_pktrcv CREG_IDX(RxDataPktCnt) +#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt) +#define cr_pktsend CREG_IDX(TxDataPktCnt) +#define cr_pktsendflow CREG_IDX(TxFlowPktCnt) +#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt) +#define cr_rcvebp CREG_IDX(RxEBPCnt) +#define cr_rcvovfl CREG_IDX(RxBufOvflCnt) +#define cr_senddropped CREG_IDX(TxDroppedPktCnt) +#define cr_sendstall CREG_IDX(TxFlowStallCnt) +#define cr_sendunderrun CREG_IDX(TxUnderrunCnt) +#define cr_wordrcv CREG_IDX(RxDwordCnt) +#define cr_wordsend CREG_IDX(TxDwordCnt) +#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt) +#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt) +#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt) +#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt) +#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt) +#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt) +#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt) +#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt) +#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt) +#define cr_rxvlerr CREG_IDX(RxVlErrCnt) +#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt) +#define cr_psstat CREG_IDX(PSStat) +#define cr_psstart CREG_IDX(PSStart) +#define cr_psinterval CREG_IDX(PSInterval) +#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount) +#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount) +#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount) +#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount) +#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount) +#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt) +#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt) + +#define SYM_RMASK(regname, fldname) ((u64) \ + QIB_7220_##regname##_##fldname##_RMASK) +#define SYM_MASK(regname, fldname) ((u64) \ + QIB_7220_##regname##_##fldname##_RMASK << \ + QIB_7220_##regname##_##fldname##_LSB) +#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB) +#define SYM_FIELD(value, regname, fldname) ((u64) \ + (((value) >> SYM_LSB(regname, fldname)) & \ + SYM_RMASK(regname, fldname))) +#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) +#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) + +/* ibcctrl bits */ +#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 +/* cycle through TS1/TS2 till OK */ +#define 
QLOGIC_IB_IBCC_LINKINITCMD_POLL 2 +/* wait for TS1, then go on */ +#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3 +#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16 + +#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ +#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ +#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ + +#define BLOB_7220_IBCHG 0x81 + +/* + * We could have a single register get/put routine, that takes a group type, + * but this is somewhat clearer and cleaner. It also gives us some error + * checking. 64 bit register reads should always work, but are inefficient + * on opteron (the northbridge always generates 2 separate HT 32 bit reads), + * so we use kreg32 wherever possible. User register and counter register + * reads are always 32 bit reads, so only one form of those routines. + */ + +/** + * qib_read_ureg32 - read 32-bit virtualized per-context register + * @dd: device + * @regno: register number + * @ctxt: context number + * + * Return the contents of a register that is virtualized to be per context. + * Returns 0 on errors (not distinguishable from valid contents at + * runtime; we may add a separate error variable at some point). + */ +static inline u32 qib_read_ureg32(const struct qib_devdata *dd, + enum qib_ureg regno, int ctxt) +{ + if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) + return 0; + + if (dd->userbase) + return readl(regno + (u64 __iomem *) + ((char __iomem *)dd->userbase + + dd->ureg_align * ctxt)); + else + return readl(regno + (u64 __iomem *) + (dd->uregbase + + (char __iomem *)dd->kregbase + + dd->ureg_align * ctxt)); +} + +/** + * qib_write_ureg - write 32-bit virtualized per-context register + * @dd: device + * @regno: register number + * @value: value + * @ctxt: context + * + * Write the contents of a register that is virtualized to be per context.
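+ * As in qib_read_ureg32(), the per-context address is ureg_align bytes + * times the context number past either userbase (if the user registers + * are mapped separately) or uregbase within the kreg mapping.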
+ */ +static inline void qib_write_ureg(const struct qib_devdata *dd, + enum qib_ureg regno, u64 value, int ctxt) +{ + u64 __iomem *ubase; + + if (dd->userbase) + ubase = (u64 __iomem *) + ((char __iomem *) dd->userbase + + dd->ureg_align * ctxt); + else + ubase = (u64 __iomem *) + (dd->uregbase + + (char __iomem *) dd->kregbase + + dd->ureg_align * ctxt); + + if (dd->kregbase && (dd->flags & QIB_PRESENT)) + writeq(value, &ubase[regno]); +} + +/** + * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register + * @dd: the qlogic_ib device + * @regno: the register number to write + * @ctxt: the context containing the register + * @value: the value to write + */ +static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd, + const u16 regno, unsigned ctxt, + u64 value) +{ + qib_write_kreg(dd, regno + ctxt, value); +} + +static inline void write_7220_creg(const struct qib_devdata *dd, + u16 regno, u64 value) +{ + if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT)) + writeq(value, &dd->cspec->cregbase[regno]); +} + +static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno) +{ + if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) + return 0; + return readq(&dd->cspec->cregbase[regno]); +} + +static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno) +{ + if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) + return 0; + return readl(&dd->cspec->cregbase[regno]); +} + +/* kr_revision bits */ +#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1) +#define QLOGIC_IB_R_EMULATORREV_SHIFT 40 + +/* kr_control bits */ +#define QLOGIC_IB_C_RESET (1U << 7) + +/* kr_intstatus, kr_intclear, kr_intmask bits */ +#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1) +#define QLOGIC_IB_I_RCVURG_SHIFT 32 +#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1) +#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0 +#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27) + +#define QLOGIC_IB_C_FREEZEMODE 0x00000002 +#define QLOGIC_IB_C_LINKENABLE 0x00000004 + +#define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL +#define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL +#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL +#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL +#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL +#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL + +/* variables for sanity checking interrupt and errors */ +#define QLOGIC_IB_I_BITSEXTANT \ + (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \ + (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \ + (QLOGIC_IB_I_RCVAVAIL_MASK << \ + QLOGIC_IB_I_RCVAVAIL_SHIFT) | \ + QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \ + QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \ + QLOGIC_IB_I_SERDESTRIMDONE) + +#define IB_HWE_BITSEXTANT \ + (HWE_MASK(RXEMemParityErr) | \ + HWE_MASK(TXEMemParityErr) | \ + (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \ + QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \ + QLOGIC_IB_HWE_PCIE1PLLFAILED | \ + QLOGIC_IB_HWE_PCIE0PLLFAILED | \ + QLOGIC_IB_HWE_PCIEPOISONEDTLP | \ + QLOGIC_IB_HWE_PCIECPLTIMEOUT | \ + QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \ + QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \ + QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \ + HWE_MASK(PowerOnBISTFailed) | \ + QLOGIC_IB_HWE_COREPLL_FBSLIP | \ + QLOGIC_IB_HWE_COREPLL_RFSLIP | \ + QLOGIC_IB_HWE_SERDESPLLFAILED | \ + HWE_MASK(IBCBusToSPCParityErr) | \ + HWE_MASK(IBCBusFromSPCParityErr) | \ + QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \ + QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \ + QLOGIC_IB_HWE_SDMAMEMREADERR | \ + QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \ + 
QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \ + QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \ + QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \ + QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \ + QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \ + QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \ + QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \ + QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR) + +#define IB_E_BITSEXTANT \ + (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \ + ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \ + ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \ + ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \ + ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \ + ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \ + ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \ + ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \ + ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \ + ERR_MASK(SendSpecialTriggerErr) | \ + ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \ + ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \ + ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \ + ERR_MASK(SendDroppedDataPktErr) | \ + ERR_MASK(SendPioArmLaunchErr) | \ + ERR_MASK(SendUnexpectedPktNumErr) | \ + ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \ + ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \ + ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \ + ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \ + ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \ + ERR_MASK(SDmaUnexpDataErr) | \ + ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \ + ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \ + ERR_MASK(SDmaDescAddrMisalignErr) | \ + ERR_MASK(InvalidEEPCmd)) + +/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ +#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL +#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0 +#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL +#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL +#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL +#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL +#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL +#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL +#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL +#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL +#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL +#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL +/* specific to this chip */ +#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL +#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL +#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL +#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL +#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL +#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL +#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL +#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL +#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL +#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL +#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL +#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL + +#define IBA7220_IBCC_LINKCMD_SHIFT 19 + +/* kr_ibcddrctrl bits */ +#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL +#define IBA7220_IBC_DLIDLMC_SHIFT 32 + 
+#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \ + SYM_RMASK(IBCDDRCtrl, HRTBT_ENB)) +#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB) + +#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8) +#define IBA7220_IBC_LREV_MASK 1 +#define IBA7220_IBC_LREV_SHIFT 8 +#define IBA7220_IBC_RXPOL_MASK 1 +#define IBA7220_IBC_RXPOL_SHIFT 7 +#define IBA7220_IBC_WIDTH_SHIFT 5 +#define IBA7220_IBC_WIDTH_MASK 0x3 +#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT) +#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT) +#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT) +#define IBA7220_IBC_SPEED_AUTONEG (1 << 1) +#define IBA7220_IBC_SPEED_SDR (1 << 2) +#define IBA7220_IBC_SPEED_DDR (1 << 3) +#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1) +#define IBA7220_IBC_IBTA_1_2_MASK (1) + +/* kr_ibcddrstatus */ +/* link latency shift is 0, don't bother defining */ +#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff + +/* kr_extstatus bits */ +#define QLOGIC_IB_EXTS_FREQSEL 0x2 +#define QLOGIC_IB_EXTS_SERDESSEL 0x4 +#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000 +#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000 + +/* kr_xgxsconfig bits */ +#define QLOGIC_IB_XGXS_RESET 0x5ULL +#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63) + +/* kr_rcvpktledcnt */ +#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */ +#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */ + +#define _QIB_GPIO_SDA_NUM 1 +#define _QIB_GPIO_SCL_NUM 0 +#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */ +#define QIB_TWSI_TEMP_DEV 0x98 + +/* HW counter clock is at 4nsec */ +#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000 + +#define IBA7220_R_INTRAVAIL_SHIFT 17 +#define IBA7220_R_PKEY_DIS_SHIFT 34 +#define IBA7220_R_TAILUPD_SHIFT 35 +#define IBA7220_R_CTXTCFG_SHIFT 36 + +#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */ + +/* + * the size bits give us 2^N, in KB units. 0 marks as invalid, + * and 7 is reserved. 
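+ * The 3-bit selector is the exponent: 1 encodes 2KB, 2 encodes 4KB.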
We currently use only 2KB and 4KB + */ +#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */ +#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */ +#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */ +#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ +#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */ +#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */ + +#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */ + +/* packet rate matching delay multiplier */ +static u8 rate_to_delay[2][2] = { + /* 1x, 4x */ + { 8, 2 }, /* SDR */ + { 4, 1 } /* DDR */ +}; + +static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = { + [IB_RATE_2_5_GBPS] = 8, + [IB_RATE_5_GBPS] = 4, + [IB_RATE_10_GBPS] = 2, + [IB_RATE_20_GBPS] = 1 +}; + +#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive) +#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive) + +/* link training states, from IBC */ +#define IB_7220_LT_STATE_DISABLED 0x00 +#define IB_7220_LT_STATE_LINKUP 0x01 +#define IB_7220_LT_STATE_POLLACTIVE 0x02 +#define IB_7220_LT_STATE_POLLQUIET 0x03 +#define IB_7220_LT_STATE_SLEEPDELAY 0x04 +#define IB_7220_LT_STATE_SLEEPQUIET 0x05 +#define IB_7220_LT_STATE_CFGDEBOUNCE 0x08 +#define IB_7220_LT_STATE_CFGRCVFCFG 0x09 +#define IB_7220_LT_STATE_CFGWAITRMT 0x0a +#define IB_7220_LT_STATE_CFGIDLE 0x0b +#define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c +#define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e +#define IB_7220_LT_STATE_RECOVERIDLE 0x0f + +/* link state machine states from IBC */ +#define IB_7220_L_STATE_DOWN 0x0 +#define IB_7220_L_STATE_INIT 0x1 +#define IB_7220_L_STATE_ARM 0x2 +#define IB_7220_L_STATE_ACTIVE 0x3 +#define IB_7220_L_STATE_ACT_DEFER 0x4 + +static const u8 qib_7220_physportstate[0x20] = { + [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, + [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, + [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, + [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, + [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, + [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, + [IB_7220_LT_STATE_CFGDEBOUNCE] = + IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_7220_LT_STATE_CFGRCVFCFG] = + IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_7220_LT_STATE_CFGWAITRMT] = + IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_7220_LT_STATE_RECOVERRETRAIN] = + IB_PHYSPORTSTATE_LINK_ERR_RECOVER, + [IB_7220_LT_STATE_RECOVERWAITRMT] = + IB_PHYSPORTSTATE_LINK_ERR_RECOVER, + [IB_7220_LT_STATE_RECOVERIDLE] = + IB_PHYSPORTSTATE_LINK_ERR_RECOVER, + [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN +}; + +int qib_special_trigger; +module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO); +MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch"); + +#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr) +#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr) + +#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \ + (1ULL << (SYM_LSB(regname, fldname) + (bit)))) + +#define TXEMEMPARITYERR_PIOBUF \ + SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0) +#define TXEMEMPARITYERR_PIOPBC \ + SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1) +#define 
TXEMEMPARITYERR_PIOLAUNCHFIFO \ + SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2) + +#define RXEMEMPARITYERR_RCVBUF \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0) +#define RXEMEMPARITYERR_LOOKUPQ \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1) +#define RXEMEMPARITYERR_EXPTID \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2) +#define RXEMEMPARITYERR_EAGERTID \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3) +#define RXEMEMPARITYERR_FLAGBUF \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4) +#define RXEMEMPARITYERR_DATAINFO \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5) +#define RXEMEMPARITYERR_HDRINFO \ + SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6) + +/* 7220 specific hardware errors... */ +static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = { + /* generic hardware errors */ + QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"), + QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"), + + QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF, + "TXE PIOBUF Memory Parity"), + QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC, + "TXE PIOPBC Memory Parity"), + QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO, + "TXE PIOLAUNCHFIFO Memory Parity"), + + QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF, + "RXE RCVBUF Memory Parity"), + QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ, + "RXE LOOKUPQ Memory Parity"), + QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID, + "RXE EAGERTID Memory Parity"), + QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID, + "RXE EXPTID Memory Parity"), + QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF, + "RXE FLAGBUF Memory Parity"), + QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO, + "RXE DATAINFO Memory Parity"), + QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO, + "RXE HDRINFO Memory Parity"), + + /* chip-specific hardware errors */ + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP, + "PCIe Poisoned TLP"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT, + "PCIe completion timeout"), + /* + * In practice, it's unlikely that we'll see PCIe PLL, or bus + * parity or memory parity error failures, because most likely we + * won't be able to talk to the core of the chip. Nonetheless, we + * might see them, if they are in parts of the PCIe core that aren't + * essential.
+ */ + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED, + "PCIePLL1"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED, + "PCIePLL0"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH, + "PCIe XTLH core parity"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM, + "PCIe ADM TX core parity"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM, + "PCIe ADM RX core parity"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED, + "SerDes PLL"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR, + "PCIe cpl header queue"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR, + "PCIe cpl data queue"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR, + "Send DMA memory read"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED, + "uC PLL clock not locked"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT, + "PCIe serdes Q0 no clock"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT, + "PCIe serdes Q1 no clock"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT, + "PCIe serdes Q2 no clock"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT, + "PCIe serdes Q3 no clock"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR, + "DDS RXEQ memory parity"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR, + "IB uC memory parity"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR, + "PCIe uC oct0 memory parity"), + QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR, + "PCIe uC oct1 memory parity"), +}; + +#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID) + +#define QLOGIC_IB_E_PKTERRS (\ + ERR_MASK(SendPktLenErr) | \ + ERR_MASK(SendDroppedDataPktErr) | \ + ERR_MASK(RcvVCRCErr) | \ + ERR_MASK(RcvICRCErr) | \ + ERR_MASK(RcvShortPktLenErr) | \ + ERR_MASK(RcvEBPErr)) + +/* Convenience for decoding Send DMA errors */ +#define QLOGIC_IB_E_SDMAERRS ( \ + ERR_MASK(SDmaGenMismatchErr) | \ + ERR_MASK(SDmaOutOfBoundErr) | \ + ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \ + ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \ + ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \ + ERR_MASK(SDmaUnexpDataErr) | \ + ERR_MASK(SDmaDescAddrMisalignErr) | \ + ERR_MASK(SDmaDisabledErr) | \ + ERR_MASK(SendBufMisuseErr)) + +/* These are all rcv-related errors which we want to count for stats */ +#define E_SUM_PKTERRS \ + (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \ + ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \ + ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \ + ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ + ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \ + ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr)) + +/* These are all send-related errors which we want to count for stats */ +#define E_SUM_ERRS \ + (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \ + ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ + ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \ + ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ + ERR_MASK(InvalidAddrErr)) + +/* + * this is similar to E_SUM_ERRS, but can't ignore armlaunch, don't ignore + * errors not related to freeze and cancelling buffers. Can't ignore + * armlaunch because could get more while still cleaning up, and need + * to cancel those as they happen. 
+ */ +#define E_SPKT_ERRS_IGNORE \ + (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ + ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \ + ERR_MASK(SendPktLenErr)) + +/* + * These are errors that can occur when the link changes state while + * a packet is being sent or received. This doesn't cover things + * like EBP or VCRC that can be the result of a send occurring while + * the link changes state, so we receive a "known bad" packet. + */ +#define E_SUM_LINK_PKTERRS \ + (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ + ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ + ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ + ERR_MASK(RcvUnexpectedCharErr)) + +static void autoneg_7220_work(struct work_struct *); +static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *); + +/* + * Called when we might have an error that is specific to a particular + * PIO buffer, and may need to cancel that buffer, so it can be re-used; + * we don't need to force the update of pioavail here. + */ +static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd) +{ + unsigned long sbuf[3]; + struct qib_devdata *dd = ppd->dd; + + /* + * It's possible that sendbuffererror could have bits set; might + * have already done this as a result of hardware error handling. + */ + /* read these before writing errorclear */ + sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror); + sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1); + sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2); + + if (sbuf[0] || sbuf[1] || sbuf[2]) + qib_disarm_piobufs_set(dd, sbuf, + dd->piobcnt2k + dd->piobcnt4k); +} + +static void qib_7220_txe_recover(struct qib_devdata *dd) +{ + qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n"); + qib_disarm_7220_senderrbufs(dd->pport); +} + +/* + * This is called with interrupts disabled and sdma_lock held.
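+ * Each of the three SDma bits in SendCtrl is forced one way or the + * other on every call; e.g. op == QIB_SDMA_SENDCTRL_OP_ENABLE alone + * sets SDmaEnable while clearing SDmaIntEnable and SDmaHalt.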
+ */ +static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) +{ + struct qib_devdata *dd = ppd->dd; + u64 set_sendctrl = 0; + u64 clr_sendctrl = 0; + + if (op & QIB_SDMA_SENDCTRL_OP_ENABLE) + set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable); + else + clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable); + + if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE) + set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable); + else + clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable); + + if (op & QIB_SDMA_SENDCTRL_OP_HALT) + set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt); + else + clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt); + + spin_lock(&dd->sendctrl_lock); + + dd->sendctrl |= set_sendctrl; + dd->sendctrl &= ~clr_sendctrl; + + qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + + spin_unlock(&dd->sendctrl_lock); +} + +static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd, + u64 err, char *buf, size_t blen) +{ + static const struct { + u64 err; + const char *msg; + } errs[] = { + { ERR_MASK(SDmaGenMismatchErr), + "SDmaGenMismatch" }, + { ERR_MASK(SDmaOutOfBoundErr), + "SDmaOutOfBound" }, + { ERR_MASK(SDmaTailOutOfBoundErr), + "SDmaTailOutOfBound" }, + { ERR_MASK(SDmaBaseErr), + "SDmaBase" }, + { ERR_MASK(SDma1stDescErr), + "SDma1stDesc" }, + { ERR_MASK(SDmaRpyTagErr), + "SDmaRpyTag" }, + { ERR_MASK(SDmaDwEnErr), + "SDmaDwEn" }, + { ERR_MASK(SDmaMissingDwErr), + "SDmaMissingDw" }, + { ERR_MASK(SDmaUnexpDataErr), + "SDmaUnexpData" }, + { ERR_MASK(SDmaDescAddrMisalignErr), + "SDmaDescAddrMisalign" }, + { ERR_MASK(SendBufMisuseErr), + "SendBufMisuse" }, + { ERR_MASK(SDmaDisabledErr), + "SDmaDisabled" }, + }; + int i; + size_t bidx = 0; + + for (i = 0; i < ARRAY_SIZE(errs); i++) { + if (err & errs[i].err) + bidx += scnprintf(buf + bidx, blen - bidx, + "%s ", errs[i].msg); + } +} + +/* + * This is called as part of link down clean up so disarm and flush + * all send buffers so that SMP packets can be sent. + */ +static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd) +{ + /* This will trigger the Abort interrupt */ + sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH | + QIB_SENDCTRL_AVAIL_BLIP); + ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */ +} + +static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd) +{ + /* + * Set SendDmaLenGen and clear and set + * the MSB of the generation count to enable generation checking + * and load the internal generation counter. 
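+ * (Hence the two writes below: first with the Generation MSB clear, + * then with it set.)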
+ */ + qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt); + qib_write_kreg(ppd->dd, kr_senddmalengen, + ppd->sdma_descq_cnt | + (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB)); +} + +static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd) +{ + qib_sdma_7220_setlengen(ppd); + qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */ + ppd->sdma_head_dma[0] = 0; +} + +#define DISABLES_SDMA ( \ + ERR_MASK(SDmaDisabledErr) | \ + ERR_MASK(SDmaBaseErr) | \ + ERR_MASK(SDmaTailOutOfBoundErr) | \ + ERR_MASK(SDmaOutOfBoundErr) | \ + ERR_MASK(SDma1stDescErr) | \ + ERR_MASK(SDmaRpyTagErr) | \ + ERR_MASK(SDmaGenMismatchErr) | \ + ERR_MASK(SDmaDescAddrMisalignErr) | \ + ERR_MASK(SDmaMissingDwErr) | \ + ERR_MASK(SDmaDwEnErr)) + +static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs) +{ + unsigned long flags; + struct qib_devdata *dd = ppd->dd; + char *msg; + + errs &= QLOGIC_IB_E_SDMAERRS; + + msg = dd->cspec->sdmamsgbuf; + qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf); + spin_lock_irqsave(&ppd->sdma_lock, flags); + + if (errs & ERR_MASK(SendBufMisuseErr)) { + unsigned long sbuf[3]; + + sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror); + sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1); + sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2); + + qib_dev_err(ppd->dd, + "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n", + ppd->dd->unit, ppd->port, sbuf[2], sbuf[1], + sbuf[0]); + } + + if (errs & ERR_MASK(SDmaUnexpDataErr)) + qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit, + ppd->port); + + switch (ppd->sdma_state.current_state) { + case qib_sdma_state_s00_hw_down: + /* not expecting any interrupts */ + break; + + case qib_sdma_state_s10_hw_start_up_wait: + /* handled in intr path */ + break; + + case qib_sdma_state_s20_idle: + /* not expecting any interrupts */ + break; + + case qib_sdma_state_s30_sw_clean_up_wait: + /* not expecting any interrupts */ + break; + + case qib_sdma_state_s40_hw_clean_up_wait: + if (errs & ERR_MASK(SDmaDisabledErr)) + __qib_sdma_process_event(ppd, + qib_sdma_event_e50_hw_cleaned); + break; + + case qib_sdma_state_s50_hw_halt_wait: + /* handled in intr path */ + break; + + case qib_sdma_state_s99_running: + if (errs & DISABLES_SDMA) + __qib_sdma_process_event(ppd, + qib_sdma_event_e7220_err_halted); + break; + } + + spin_unlock_irqrestore(&ppd->sdma_lock, flags); +} + +/* + * Decode the error status into strings, deciding whether to always + * print it or not depending on "normal packet errors" vs everything + * else. Return 1 if "real" errors, otherwise 0 if only packet + * errors, so caller can decide what to print with the string.
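+ * For example, an error word containing only RcvICRCErr yields a + * return of 0 with "CRC " in the buffer, while any bit outside + * QLOGIC_IB_E_PKTERRS forces a return of 1.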
+ */ +static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen, + u64 err) +{ + int iserr = 1; + + *buf = '\0'; + if (err & QLOGIC_IB_E_PKTERRS) { + if (!(err & ~QLOGIC_IB_E_PKTERRS)) + iserr = 0; + if ((err & ERR_MASK(RcvICRCErr)) && + !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr)))) + strlcat(buf, "CRC ", blen); + if (!iserr) + goto done; + } + if (err & ERR_MASK(RcvHdrLenErr)) + strlcat(buf, "rhdrlen ", blen); + if (err & ERR_MASK(RcvBadTidErr)) + strlcat(buf, "rbadtid ", blen); + if (err & ERR_MASK(RcvBadVersionErr)) + strlcat(buf, "rbadversion ", blen); + if (err & ERR_MASK(RcvHdrErr)) + strlcat(buf, "rhdr ", blen); + if (err & ERR_MASK(SendSpecialTriggerErr)) + strlcat(buf, "sendspecialtrigger ", blen); + if (err & ERR_MASK(RcvLongPktLenErr)) + strlcat(buf, "rlongpktlen ", blen); + if (err & ERR_MASK(RcvMaxPktLenErr)) + strlcat(buf, "rmaxpktlen ", blen); + if (err & ERR_MASK(RcvMinPktLenErr)) + strlcat(buf, "rminpktlen ", blen); + if (err & ERR_MASK(SendMinPktLenErr)) + strlcat(buf, "sminpktlen ", blen); + if (err & ERR_MASK(RcvFormatErr)) + strlcat(buf, "rformaterr ", blen); + if (err & ERR_MASK(RcvUnsupportedVLErr)) + strlcat(buf, "runsupvl ", blen); + if (err & ERR_MASK(RcvUnexpectedCharErr)) + strlcat(buf, "runexpchar ", blen); + if (err & ERR_MASK(RcvIBFlowErr)) + strlcat(buf, "ribflow ", blen); + if (err & ERR_MASK(SendUnderRunErr)) + strlcat(buf, "sunderrun ", blen); + if (err & ERR_MASK(SendPioArmLaunchErr)) + strlcat(buf, "spioarmlaunch ", blen); + if (err & ERR_MASK(SendUnexpectedPktNumErr)) + strlcat(buf, "sunexperrpktnum ", blen); + if (err & ERR_MASK(SendDroppedSmpPktErr)) + strlcat(buf, "sdroppedsmppkt ", blen); + if (err & ERR_MASK(SendMaxPktLenErr)) + strlcat(buf, "smaxpktlen ", blen); + if (err & ERR_MASK(SendUnsupportedVLErr)) + strlcat(buf, "sunsupVL ", blen); + if (err & ERR_MASK(InvalidAddrErr)) + strlcat(buf, "invalidaddr ", blen); + if (err & ERR_MASK(RcvEgrFullErr)) + strlcat(buf, "rcvegrfull ", blen); + if (err & ERR_MASK(RcvHdrFullErr)) + strlcat(buf, "rcvhdrfull ", blen); + if (err & ERR_MASK(IBStatusChanged)) + strlcat(buf, "ibcstatuschg ", blen); + if (err & ERR_MASK(RcvIBLostLinkErr)) + strlcat(buf, "riblostlink ", blen); + if (err & ERR_MASK(HardwareErr)) + strlcat(buf, "hardware ", blen); + if (err & ERR_MASK(ResetNegated)) + strlcat(buf, "reset ", blen); + if (err & QLOGIC_IB_E_SDMAERRS) + qib_decode_7220_sdma_errs(dd->pport, err, buf, blen); + if (err & ERR_MASK(InvalidEEPCmd)) + strlcat(buf, "invalideepromcmd ", blen); +done: + return iserr; +} + +static void reenable_7220_chase(unsigned long opaque) +{ + struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; + ppd->cpspec->chase_timer.expires = 0; + qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, + QLOGIC_IB_IBCC_LINKINITCMD_POLL); +} + +static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst) +{ + u8 ibclt; + u64 tnow; + + ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState); + + /* + * Detect and handle the state chase issue, where we can + * get stuck if we are unlucky on timing on both sides of + * the link. If we are, we disable, set a timer, and + * then re-enable. 
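+ * The re-enable happens in reenable_7220_chase() above, which forces + * LINKCMD_DOWN with LINKINITCMD_POLL once the QIB_CHASE_DIS_TIME timer + * fires.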
+ */ + switch (ibclt) { + case IB_7220_LT_STATE_CFGRCVFCFG: + case IB_7220_LT_STATE_CFGWAITRMT: + case IB_7220_LT_STATE_TXREVLANES: + case IB_7220_LT_STATE_CFGENH: + tnow = get_jiffies_64(); + if (ppd->cpspec->chase_end && + time_after64(tnow, ppd->cpspec->chase_end)) { + ppd->cpspec->chase_end = 0; + qib_set_ib_7220_lstate(ppd, + QLOGIC_IB_IBCC_LINKCMD_DOWN, + QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); + ppd->cpspec->chase_timer.expires = jiffies + + QIB_CHASE_DIS_TIME; + add_timer(&ppd->cpspec->chase_timer); + } else if (!ppd->cpspec->chase_end) + ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME; + break; + + default: + ppd->cpspec->chase_end = 0; + break; + } +} + +static void handle_7220_errors(struct qib_devdata *dd, u64 errs) +{ + char *msg; + u64 ignore_this_time = 0; + u64 iserr = 0; + int log_idx; + struct qib_pportdata *ppd = dd->pport; + u64 mask; + + /* don't report errors that are masked */ + errs &= dd->cspec->errormask; + msg = dd->cspec->emsgbuf; + + /* do these first, they are most important */ + if (errs & ERR_MASK(HardwareErr)) + qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); + else + for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) + if (errs & dd->eep_st_masks[log_idx].errs_to_log) + qib_inc_eeprom_err(dd, log_idx, 1); + + if (errs & QLOGIC_IB_E_SDMAERRS) + sdma_7220_errors(ppd, errs); + + if (errs & ~IB_E_BITSEXTANT) + qib_dev_err(dd, "error interrupt with unknown errors " + "%llx set\n", (unsigned long long) + (errs & ~IB_E_BITSEXTANT)); + + if (errs & E_SUM_ERRS) { + qib_disarm_7220_senderrbufs(ppd); + if ((errs & E_SUM_LINK_PKTERRS) && + !(ppd->lflags & QIBL_LINKACTIVE)) { + /* + * This can happen when trying to bring the link + * up, but the IB link changes state at the "wrong" + * time. The IB logic then complains that the packet + * isn't valid. We don't want to confuse people, so + * we just don't print them, except at debug + */ + ignore_this_time = errs & E_SUM_LINK_PKTERRS; + } + } else if ((errs & E_SUM_LINK_PKTERRS) && + !(ppd->lflags & QIBL_LINKACTIVE)) { + /* + * This can happen when SMA is trying to bring the link + * up, but the IB link changes state at the "wrong" time. + * The IB logic then complains that the packet isn't + * valid. We don't want to confuse people, so we just + * don't print them, except at debug + */ + ignore_this_time = errs & E_SUM_LINK_PKTERRS; + } + + qib_write_kreg(dd, kr_errclear, errs); + + errs &= ~ignore_this_time; + if (!errs) + goto done; + + /* + * The ones we mask off are handled specially below + * or above. Also mask SDMADISABLED by default as it + * is too chatty. + */ + mask = ERR_MASK(IBStatusChanged) | + ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | + ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr); + + qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); + + if (errs & E_SUM_PKTERRS) + qib_stats.sps_rcverrs++; + if (errs & E_SUM_ERRS) + qib_stats.sps_txerrs++; + iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS | + ERR_MASK(SDmaDisabledErr)); + + if (errs & ERR_MASK(IBStatusChanged)) { + u64 ibcs; + + ibcs = qib_read_kreg64(dd, kr_ibcstatus); + if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) + handle_7220_chase(ppd, ibcs); + + /* Update our picture of width and speed from chip */ + ppd->link_width_active = + ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ? + IB_WIDTH_4X : IB_WIDTH_1X; + ppd->link_speed_active = + ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ? 
+			QIB_IB_DDR : QIB_IB_SDR;
+
+		/*
+		 * Since going into a recovery state causes the link state
+		 * to go down and since recovery is transitory, it is better
+		 * if we "miss" ever seeing the link training state go into
+		 * recovery (i.e., ignore this transition for link state
+		 * special handling purposes) without updating lastibcstat.
+		 */
+		if (qib_7220_phys_portstate(ibcs) !=
+		    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+			qib_handle_e_ibstatuschanged(ppd, ibcs);
+	}
+
+	if (errs & ERR_MASK(ResetNegated)) {
+		qib_dev_err(dd, "Got reset, requires re-init "
+			    "(unload and reload driver)\n");
+		dd->flags &= ~QIB_INITTED;  /* needs re-init */
+		/* mark as having had error */
+		*dd->devstatusp |= QIB_STATUS_HWERROR;
+		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+	}
+
+	if (*msg && iserr)
+		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+	if (ppd->state_wanted & ppd->lflags)
+		wake_up_interruptible(&ppd->state_wait);
+
+	/*
+	 * If there were hdrq or egrfull errors, wake up any processes
+	 * waiting in poll.  We used to try to check which contexts had
+	 * the overflow, but given the cost of that and the chip reads
+	 * to support it, it's better to just wake everybody up if we
+	 * get an overflow; waiters can poll again if it's not them.
+	 */
+	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+		qib_handle_urcv(dd, ~0U);
+		if (errs & ERR_MASK(RcvEgrFullErr))
+			qib_stats.sps_buffull++;
+		else
+			qib_stats.sps_hdrfull++;
+	}
+done:
+	return;
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+	if (enable) {
+		if (dd->flags & QIB_BADINTR)
+			return;
+		qib_write_kreg(dd, kr_intmask, ~0ULL);
+		/* force re-interrupt of any pending interrupts. */
+		qib_write_kreg(dd, kr_intclear, 0ULL);
+	} else
+		qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_7220_clear_freeze(struct qib_devdata *dd)
+{
+	/* disable error interrupts, to avoid confusion */
+	qib_write_kreg(dd, kr_errmask, 0ULL);
+
+	/* also disable interrupts; errormask is sometimes overwritten */
+	qib_7220_set_intr_state(dd, 0);
+
+	qib_cancel_sends(dd->pport);
+
+	/* clear the freeze, and be sure chip saw it */
+	qib_write_kreg(dd, kr_control, dd->control);
+	qib_read_kreg32(dd, kr_scratch);
+
+	/* force in-memory update now we are out of freeze */
+	qib_force_pio_avail_update(dd);
+
+	/*
+	 * force new interrupt if any hwerr, error or interrupt bits are
+	 * still set, and clear "safe" send packet errors related to freeze
+	 * and cancelling sends.  Re-enable error interrupts before possible
+	 * force of re-interrupt on pending interrupts.
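+	 * (Hence the ordering below: clear hwerr and error status first,
+	 * restore errormask, and only then re-enable the interrupt mask.)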
+	 */
+	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+	qib_7220_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_7220_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now, we'll print
+ * them and continue.  We reuse the same message buffer as
+ * handle_7220_errors() to avoid excessive stack usage.
+ */
+static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
+				     size_t msgl)
+{
+	u64 hwerrs;
+	u32 bits, ctrl;
+	int isfatal = 0;
+	char *bitsmsg;
+	int log_idx;
+
+	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+	if (!hwerrs)
+		goto bail;
+	if (hwerrs == ~0ULL) {
+		qib_dev_err(dd, "Read of hardware error status failed "
+			    "(all bits set); ignoring\n");
+		goto bail;
+	}
+	qib_stats.sps_hwerrs++;
+
+	/*
+	 * Always clear the error status register, except MEMBISTFAIL,
+	 * regardless of whether we continue or stop using the chip.
+	 * We want that set so we know it failed, even across driver reload.
+	 * We'll still ignore it in the hwerrmask.  We do this partly for
+	 * diagnostics, but also for support.
+	 */
+	qib_write_kreg(dd, kr_hwerrclear,
+		       hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+	hwerrs &= dd->cspec->hwerrmask;
+
+	/* We log some errors to EEPROM, check if we have any of those. */
+	for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+		if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+			qib_inc_eeprom_err(dd, log_idx, 1);
+	if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
+		       RXE_PARITY))
+		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+			    "(cleared)\n", (unsigned long long) hwerrs);
+
+	if (hwerrs & ~IB_HWE_BITSEXTANT)
+		qib_dev_err(dd, "hwerror interrupt with unknown errors "
+			    "%llx set\n", (unsigned long long)
+			    (hwerrs & ~IB_HWE_BITSEXTANT));
+
+	if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
+		qib_sd7220_clr_ibpar(dd);
+
+	ctrl = qib_read_kreg32(dd, kr_control);
+	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+		/*
+		 * Parity errors in send memory are recoverable by h/w;
+		 * just do housekeeping, exit freeze mode and continue.
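+		 * (qib_7220_txe_recover() below handles those PIO buffer /
+		 * PBC parity bits; anything still set afterwards is treated
+		 * as fatal.)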
+		 */
+		if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
+			      TXEMEMPARITYERR_PIOPBC)) {
+			qib_7220_txe_recover(dd);
+			hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
+				    TXEMEMPARITYERR_PIOPBC);
+		}
+		if (hwerrs)
+			isfatal = 1;
+		else
+			qib_7220_clear_freeze(dd);
+	}
+
+	*msg = '\0';
+
+	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+		isfatal = 1;
+		strlcat(msg, "[Memory BIST test failed, "
+			"InfiniPath hardware unusable]", msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
+			    ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
+
+	bitsmsg = dd->cspec->bitsmsgbuf;
+	if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+		      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+			 "[PCIe Mem Parity Errs %x] ", bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+		       QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+	if (hwerrs & _QIB_PLL_FAIL) {
+		isfatal = 1;
+		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+			 "[PLL failed (%llx), InfiniPath hardware unusable]",
+			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+		strlcat(msg, bitsmsg, msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+		/*
+		 * If it occurs, it is left masked since the external
+		 * interface is unused.
+		 */
+		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+	}
+
+	qib_dev_err(dd, "%s hardware error\n", msg);
+
+	if (isfatal && !dd->diag_client) {
+		qib_dev_err(dd, "Fatal Hardware Error, no longer"
+			    " usable, SN %.16s\n", dd->serial);
+		/*
+		 * For /sys status file and user programs to print; if no
+		 * trailing brace is copied, we'll know it was truncated.
+		 */
+		if (dd->freezemsg)
+			snprintf(dd->freezemsg, dd->freezelen,
+				 "{%s}", msg);
+		qib_disable_after_error(dd);
+	}
+bail:;
+}
+
+/**
+ * qib_7220_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_7220_init_hwerrors(struct qib_devdata *dd)
+{
+	u64 val;
+	u64 extsval;
+
+	extsval = qib_read_kreg64(dd, kr_extstatus);
+
+	if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
+			 QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
+		qib_dev_err(dd, "MemBIST did not complete!\n");
+	if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
+		qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
+
+	val = ~0ULL;	/* default to all hwerrors become interrupts, */
+
+	val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
+	dd->cspec->hwerrmask = val;
+
+	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+	/* clear all */
+	qib_write_kreg(dd, kr_errclear, ~0ULL);
+	/* enable errors that are masked, at least this first time.
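+	 * Writing ~0 and then reading the register back also captures, in
+	 * the errormask shadow, exactly which error bits this chip
+	 * implements.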
*/ + qib_write_kreg(dd, kr_errmask, ~0ULL); + dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask); + /* clear any interrupts up to this point (ints still not enabled) */ + qib_write_kreg(dd, kr_intclear, ~0ULL); +} + +/* + * Disable and enable the armlaunch error. Used for PIO bandwidth testing + * on chips that are count-based, rather than trigger-based. There is no + * reference counting, but that's also fine, given the intended use. + * Only chip-specific because it's all register accesses + */ +static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable) +{ + if (enable) { + qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr)); + dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr); + } else + dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr); + qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); +} + +/* + * Formerly took parameter <which> in pre-shifted, + * pre-merged form with LinkCmd and LinkInitCmd + * together, and assuming the zero was NOP. + */ +static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd, + u16 linitcmd) +{ + u64 mod_wd; + struct qib_devdata *dd = ppd->dd; + unsigned long flags; + + if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) { + /* + * If we are told to disable, note that so link-recovery + * code does not attempt to bring us back up. + */ + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags |= QIBL_IB_LINK_DISABLED; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) { + /* + * Any other linkinitcmd will lead to LINKDOWN and then + * to INIT (if all is well), so clear flag to let + * link-recovery code attempt to bring us back up. + */ + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_LINK_DISABLED; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + } + + mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) | + (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); + + qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd); + /* write to chip to prevent back-to-back writes of ibc reg */ + qib_write_kreg(dd, kr_scratch, 0); +} + +/* + * All detailed interaction with the SerDes has been moved to qib_sd7220.c + * + * The portion of IBA7220-specific bringup_serdes() that actually deals with + * registers and memory within the SerDes itself is qib_sd7220_init(). + */ + +/** + * qib_7220_bringup_serdes - bring up the serdes + * @ppd: physical port on the qlogic_ib device + */ +static int qib_7220_bringup_serdes(struct qib_pportdata *ppd) +{ + struct qib_devdata *dd = ppd->dd; + u64 val, prev_val, guid, ibc; + int ret = 0; + + /* Put IBC in reset, sends disabled */ + dd->control &= ~QLOGIC_IB_C_LINKENABLE; + qib_write_kreg(dd, kr_control, 0ULL); + + if (qib_compat_ddr_negotiate) { + ppd->cpspec->ibdeltainprog = 1; + ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr); + ppd->cpspec->iblnkerrsnap = + read_7220_creg32(dd, cr_iblinkerrrecov); + } + + /* flowcontrolwatermark is in units of KBytes */ + ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark); + /* + * How often flowctrl sent. More or less in usecs; balance against + * watermark value, so that in theory senders always get a flow + * control update in time to not let the IB link go idle. + */ + ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod); + /* max error tolerance */ + ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold); + /* use "real" buffer space for */ + ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale); + /* IB credit flow control. 
*/ + ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold); + /* + * set initial max size pkt IBC will send, including ICRC; it's the + * PIO buffer size in dwords, less 1; also see qib_set_mtu() + */ + ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen); + ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */ + + /* initially come up waiting for TS1, without sending anything. */ + val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << + QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); + qib_write_kreg(dd, kr_ibcctrl, val); + + if (!ppd->cpspec->ibcddrctrl) { + /* not on re-init after reset */ + ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl); + + if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR)) + ppd->cpspec->ibcddrctrl |= + IBA7220_IBC_SPEED_AUTONEG_MASK | + IBA7220_IBC_IBTA_1_2_MASK; + else + ppd->cpspec->ibcddrctrl |= + ppd->link_speed_enabled == QIB_IB_DDR ? + IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; + if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) == + (IB_WIDTH_1X | IB_WIDTH_4X)) + ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG; + else + ppd->cpspec->ibcddrctrl |= + ppd->link_width_enabled == IB_WIDTH_4X ? + IBA7220_IBC_WIDTH_4X_ONLY : + IBA7220_IBC_WIDTH_1X_ONLY; + + /* always enable these on driver reload, not sticky */ + ppd->cpspec->ibcddrctrl |= + IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT; + ppd->cpspec->ibcddrctrl |= + IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; + + /* enable automatic lane reversal detection for receive */ + ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED; + } else + /* write to chip to prevent back-to-back writes of ibc reg */ + qib_write_kreg(dd, kr_scratch, 0); + + qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl); + qib_write_kreg(dd, kr_scratch, 0); + + qib_write_kreg(dd, kr_ncmodectrl, 0Ull); + qib_write_kreg(dd, kr_scratch, 0); + + ret = qib_sd7220_init(dd); + + val = qib_read_kreg64(dd, kr_xgxs_cfg); + prev_val = val; + val |= QLOGIC_IB_XGXS_FC_SAFE; + if (val != prev_val) { + qib_write_kreg(dd, kr_xgxs_cfg, val); + qib_read_kreg32(dd, kr_scratch); + } + if (val & QLOGIC_IB_XGXS_RESET) + val &= ~QLOGIC_IB_XGXS_RESET; + if (val != prev_val) + qib_write_kreg(dd, kr_xgxs_cfg, val); + + /* first time through, set port guid */ + if (!ppd->guid) + ppd->guid = dd->base_guid; + guid = be64_to_cpu(ppd->guid); + + qib_write_kreg(dd, kr_hrtbt_guid, guid); + if (!ret) { + dd->control |= QLOGIC_IB_C_LINKENABLE; + qib_write_kreg(dd, kr_control, dd->control); + } else + /* write to chip to prevent back-to-back writes of ibc reg */ + qib_write_kreg(dd, kr_scratch, 0); + return ret; +} + +/** + * qib_7220_quiet_serdes - set serdes to txidle + * @ppd: physical port of the qlogic_ib device + * Called when driver is being unloaded + */ +static void qib_7220_quiet_serdes(struct qib_pportdata *ppd) +{ + u64 val; + struct qib_devdata *dd = ppd->dd; + unsigned long flags; + + /* disable IBC */ + dd->control &= ~QLOGIC_IB_C_LINKENABLE; + qib_write_kreg(dd, kr_control, + dd->control | QLOGIC_IB_C_FREEZEMODE); + + ppd->cpspec->chase_end = 0; + if (ppd->cpspec->chase_timer.data) /* if initted */ + del_timer_sync(&ppd->cpspec->chase_timer); + + if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta || + ppd->cpspec->ibdeltainprog) { + u64 diagc; + + /* enable counter writes */ + diagc = qib_read_kreg64(dd, kr_hwdiagctrl); + qib_write_kreg(dd, kr_hwdiagctrl, + diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable)); + + if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) { 
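+			/*
+			 * Roll the counter back: "val -= val - snap" first
+			 * restores the snapshot taken when the delta began,
+			 * then the accumulated delta is subtracted.
+			 */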
+ val = read_7220_creg32(dd, cr_ibsymbolerr); + if (ppd->cpspec->ibdeltainprog) + val -= val - ppd->cpspec->ibsymsnap; + val -= ppd->cpspec->ibsymdelta; + write_7220_creg(dd, cr_ibsymbolerr, val); + } + if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) { + val = read_7220_creg32(dd, cr_iblinkerrrecov); + if (ppd->cpspec->ibdeltainprog) + val -= val - ppd->cpspec->iblnkerrsnap; + val -= ppd->cpspec->iblnkerrdelta; + write_7220_creg(dd, cr_iblinkerrrecov, val); + } + + /* and disable counter writes */ + qib_write_kreg(dd, kr_hwdiagctrl, diagc); + } + qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); + + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + wake_up(&ppd->cpspec->autoneg_wait); + cancel_delayed_work(&ppd->cpspec->autoneg_work); + flush_scheduled_work(); + + shutdown_7220_relock_poll(ppd->dd); + val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg); + val |= QLOGIC_IB_XGXS_RESET; + qib_write_kreg(ppd->dd, kr_xgxs_cfg, val); +} + +/** + * qib_setup_7220_setextled - set the state of the two external LEDs + * @dd: the qlogic_ib device + * @on: whether the link is up or not + * + * The exact combo of LEDs if on is true is determined by looking + * at the ibcstatus. + * + * These LEDs indicate the physical and logical state of IB link. + * For this chip (at least with recommended board pinouts), LED1 + * is Yellow (logical state) and LED2 is Green (physical state), + * + * Note: We try to match the Mellanox HCA LED behavior as best + * we can. Green indicates physical link state is OK (something is + * plugged in, and we can train). + * Amber indicates the link is logically up (ACTIVE). + * Mellanox further blinks the amber LED to indicate data packet + * activity, but we have no hardware support for that, so it would + * require waking up every 10-20 msecs and checking the counters + * on the chip, and then turning the LED off if appropriate. That's + * visible overhead, so not something we will do. + * + */ +static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on) +{ + struct qib_devdata *dd = ppd->dd; + u64 extctl, ledblink = 0, val, lst, ltst; + unsigned long flags; + + /* + * The diags use the LED to indicate diag info, so we leave + * the external LED alone when the diags are running. + */ + if (dd->diag_client) + return; + + if (ppd->led_override) { + ltst = (ppd->led_override & QIB_LED_PHYS) ? + IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED, + lst = (ppd->led_override & QIB_LED_LOG) ? + IB_PORT_ACTIVE : IB_PORT_DOWN; + } else if (on) { + val = qib_read_kreg64(dd, kr_ibcstatus); + ltst = qib_7220_phys_portstate(val); + lst = qib_7220_iblink_state(val); + } else { + ltst = 0; + lst = 0; + } + + spin_lock_irqsave(&dd->cspec->gpio_lock, flags); + extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) | + SYM_MASK(EXTCtrl, LEDPriPortYellowOn)); + if (ltst == IB_PHYSPORTSTATE_LINKUP) { + extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn); + /* + * counts are in chip clock (4ns) periods. 
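+		 * (1 ms = 250,000 counts, hence the "* 1000 / 4" scaling of
+		 * the on/off times below.)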
+ * This is 1/16 sec (66.6ms) on, + * 3/16 sec (187.5 ms) off, with packets rcvd + */ + ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT) + | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT); + } + if (lst == IB_PORT_ACTIVE) + extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn); + dd->cspec->extctrl = extctl; + qib_write_kreg(dd, kr_extctrl, extctl); + spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); + + if (ledblink) /* blink the LED on packet receive */ + qib_write_kreg(dd, kr_rcvpktledcnt, ledblink); +} + +static void qib_7220_free_irq(struct qib_devdata *dd) +{ + if (dd->cspec->irq) { + free_irq(dd->cspec->irq, dd); + dd->cspec->irq = 0; + } + qib_nomsi(dd); +} + +/* + * qib_setup_7220_cleanup - clean up any per-chip chip-specific stuff + * @dd: the qlogic_ib device + * + * This is called during driver unload. + * + */ +static void qib_setup_7220_cleanup(struct qib_devdata *dd) +{ + qib_7220_free_irq(dd); + kfree(dd->cspec->cntrs); + kfree(dd->cspec->portcntrs); +} + +/* + * This is only called for SDmaInt. + * SDmaDisabled is handled on the error path. + */ +static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat) +{ + unsigned long flags; + + spin_lock_irqsave(&ppd->sdma_lock, flags); + + switch (ppd->sdma_state.current_state) { + case qib_sdma_state_s00_hw_down: + break; + + case qib_sdma_state_s10_hw_start_up_wait: + __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started); + break; + + case qib_sdma_state_s20_idle: + break; + + case qib_sdma_state_s30_sw_clean_up_wait: + break; + + case qib_sdma_state_s40_hw_clean_up_wait: + break; + + case qib_sdma_state_s50_hw_halt_wait: + __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted); + break; + + case qib_sdma_state_s99_running: + /* too chatty to print here */ + __qib_sdma_intr(ppd); + break; + } + spin_unlock_irqrestore(&ppd->sdma_lock, flags); +} + +static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint) +{ + unsigned long flags; + + spin_lock_irqsave(&dd->sendctrl_lock, flags); + if (needint) { + if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) + goto done; + /* + * blip the availupd off, next write will be on, so + * we ensure an avail update, regardless of threshold or + * buffers becoming free, whenever we want an interrupt + */ + qib_write_kreg(dd, kr_sendctrl, dd->sendctrl & + ~SYM_MASK(SendCtrl, SendBufAvailUpd)); + qib_write_kreg(dd, kr_scratch, 0ULL); + dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); + } else + dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); + qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); + qib_write_kreg(dd, kr_scratch, 0ULL); +done: + spin_unlock_irqrestore(&dd->sendctrl_lock, flags); +} + +/* + * Handle errors and unusual events first, separate function + * to improve cache hits for fast path interrupt handling. + */ +static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat) +{ + if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT)) + qib_dev_err(dd, + "interrupt with unknown interrupts %Lx set\n", + istat & ~QLOGIC_IB_I_BITSEXTANT); + + if (istat & QLOGIC_IB_I_GPIO) { + u32 gpiostatus; + + /* + * Boards for this chip currently don't use GPIO interrupts, + * so clear by writing GPIOstatus to GPIOclear, and complain + * to alert developer. To avoid endless repeats, clear + * the bits in the mask, since there is some kind of + * programming error or chip problem. 
+		 */
+		gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+		/*
+		 * In theory, writing GPIOstatus to GPIOclear could
+		 * have a bad side-effect on some diagnostic that wanted
+		 * to poll for a status-change, but the various shadows
+		 * make that problematic at best.  Diags will just suppress
+		 * all GPIO interrupts during such tests.
+		 */
+		qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+
+		if (gpiostatus) {
+			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+			u32 gpio_irq = mask & gpiostatus;
+
+			/*
+			 * A bit set in status and (chip) Mask register
+			 * would cause an interrupt.  Since we are not
+			 * expecting any, report it.  Also check that the
+			 * chip reflects our shadow, report issues,
+			 * and refresh from the shadow.
+			 */
+			/*
+			 * Clear any troublemakers, and update chip
+			 * from shadow
+			 */
+			dd->cspec->gpio_mask &= ~gpio_irq;
+			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+		}
+	}
+
+	if (istat & QLOGIC_IB_I_ERROR) {
+		u64 estat;
+
+		qib_stats.sps_errints++;
+		estat = qib_read_kreg64(dd, kr_errstatus);
+		if (!estat)
+			qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
+				    "but no error bits set!\n", istat);
+		else
+			handle_7220_errors(dd, estat);
+	}
+}
+
+static irqreturn_t qib_7220intr(int irq, void *data)
+{
+	struct qib_devdata *dd = data;
+	irqreturn_t ret;
+	u64 istat;
+	u64 ctxtrbits;
+	u64 rmask;
+	unsigned i;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		ret = IRQ_HANDLED;
+		goto bail;
+	}
+
+	istat = qib_read_kreg64(dd, kr_intstatus);
+
+	if (unlikely(!istat)) {
+		ret = IRQ_NONE; /* not our interrupt, or already handled */
+		goto bail;
+	}
+	if (unlikely(istat == -1)) {
+		qib_bad_intrstatus(dd);
+		/* don't know if it was our interrupt or not */
+		ret = IRQ_NONE;
+		goto bail;
+	}
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+	if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+			      QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+		unlikely_7220_intr(dd, istat);
+
+	/*
+	 * Clear the interrupt bits we found set, relatively early, so we
+	 * "know" the chip will have seen this by the time we process
+	 * the queue, and will re-interrupt if necessary.  The processor
+	 * itself won't take the interrupt again until we return.
+	 */
+	qib_write_kreg(dd, kr_intclear, istat);
+
+	/*
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway.
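+	 * Context i's rcvavail/rcvurg bits sit at RCVAVAIL_SHIFT + i and
+	 * RCVURG_SHIFT + i respectively, so the loop below just slides
+	 * rmask up one bit per kernel context.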
+ */ + ctxtrbits = istat & + ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) | + (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT)); + if (ctxtrbits) { + rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) | + (1ULL << QLOGIC_IB_I_RCVURG_SHIFT); + for (i = 0; i < dd->first_user_ctxt; i++) { + if (ctxtrbits & rmask) { + ctxtrbits &= ~rmask; + qib_kreceive(dd->rcd[i], NULL, NULL); + } + rmask <<= 1; + } + if (ctxtrbits) { + ctxtrbits = + (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) | + (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT); + qib_handle_urcv(dd, ctxtrbits); + } + } + + /* only call for SDmaInt */ + if (istat & QLOGIC_IB_I_SDMAINT) + sdma_7220_intr(dd->pport, istat); + + if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) + qib_ib_piobufavail(dd); + + ret = IRQ_HANDLED; +bail: + return ret; +} + +/* + * Set up our chip-specific interrupt handler. + * The interrupt type has already been setup, so + * we just need to do the registration and error checking. + * If we are using MSI interrupts, we may fall back to + * INTx later, if the interrupt handler doesn't get called + * within 1/2 second (see verify_interrupt()). + */ +static void qib_setup_7220_interrupt(struct qib_devdata *dd) +{ + if (!dd->cspec->irq) + qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " + "work\n"); + else { + int ret = request_irq(dd->cspec->irq, qib_7220intr, + dd->msi_lo ? 0 : IRQF_SHARED, + QIB_DRV_NAME, dd); + + if (ret) + qib_dev_err(dd, "Couldn't setup %s interrupt " + "(irq=%d): %d\n", dd->msi_lo ? + "MSI" : "INTx", dd->cspec->irq, ret); + } +} + +/** + * qib_7220_boardname - fill in the board name + * @dd: the qlogic_ib device + * + * info is based on the board revision register + */ +static void qib_7220_boardname(struct qib_devdata *dd) +{ + char *n; + u32 boardid, namelen; + + boardid = SYM_FIELD(dd->revision, Revision, + BoardID); + + switch (boardid) { + case 1: + n = "InfiniPath_QLE7240"; + break; + case 2: + n = "InfiniPath_QLE7280"; + break; + default: + qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid); + n = "Unknown_InfiniPath_7220"; + break; + } + + namelen = strlen(n) + 1; + dd->boardname = kmalloc(namelen, GFP_KERNEL); + if (!dd->boardname) + qib_dev_err(dd, "Failed allocation for board name: %s\n", n); + else + snprintf(dd->boardname, namelen, "%s", n); + + if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2) + qib_dev_err(dd, "Unsupported InfiniPath hardware " + "revision %u.%u!\n", + dd->majrev, dd->minrev); + + snprintf(dd->boardversion, sizeof(dd->boardversion), + "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", + QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, + (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch), + dd->majrev, dd->minrev, + (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); +} + +/* + * This routine sleeps, so it can only be called from user context, not + * from interrupt context. + */ +static int qib_setup_7220_reset(struct qib_devdata *dd) +{ + u64 val; + int i; + int ret; + u16 cmdval; + u8 int_line, clinesz; + unsigned long flags; + + qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); + + /* Use dev_err so it shows up in logs, etc. */ + qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); + + /* no interrupts till re-initted */ + qib_7220_set_intr_state(dd, 0); + + dd->pport->cpspec->ibdeltainprog = 0; + dd->pport->cpspec->ibsymdelta = 0; + dd->pport->cpspec->iblnkerrdelta = 0; + + /* + * Keep chip from being accessed until we are ready. 
Use
+	 * writeq() directly, to allow the write even though QIB_PRESENT
+	 * isn't set.
+	 */
+	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+	dd->int_counter = 0; /* so we check interrupts work again */
+	val = dd->control | QLOGIC_IB_C_RESET;
+	writeq(val, &dd->kregbase[kr_control]);
+	mb(); /* prevent compiler reordering around actual reset */
+
+	for (i = 1; i <= 5; i++) {
+		/*
+		 * Allow MBIST, etc. to complete; longer on each retry.
+		 * We sometimes get machine checks from bus timeout if no
+		 * response, so for now, make it *really* long.
+		 */
+		msleep(1000 + (1 + i) * 2000);
+
+		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+		/*
+		 * Use readq directly, so we don't need to mark it as PRESENT
+		 * until we get a successful indication that all is well.
+		 */
+		val = readq(&dd->kregbase[kr_revision]);
+		if (val == dd->revision) {
+			dd->flags |= QIB_PRESENT; /* it's back */
+			ret = qib_reinit_intr(dd);
+			goto bail;
+		}
+	}
+	ret = 0; /* failed */
+
+bail:
+	if (ret) {
+		if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+			qib_dev_err(dd, "Reset failed to setup PCIe or "
+				    "interrupts; continuing anyway\n");
+
+		/* hold IBC in reset, no sends, etc till later */
+		qib_write_kreg(dd, kr_control, 0ULL);
+
+		/* clear the reset error, init error/hwerror mask */
+		qib_7220_init_hwerrors(dd);
+
+		/* do setup similar to speed or link-width changes */
+		if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
+			dd->cspec->presets_needed = 1;
+		spin_lock_irqsave(&dd->pport->lflags_lock, flags);
+		dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
+		dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+		spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
+	}
+
+	return ret;
+}
+
+/**
+ * qib_7220_put_tid - write a TID to the chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: 0 for eager, 1 for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ */
+static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+			     u32 type, unsigned long pa)
+{
+	if (pa != dd->tidinvalid) {
+		u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
+
+		/* paranoia checks */
+		if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
+			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+				    pa);
+			return;
+		}
+		if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
+			qib_dev_err(dd, "Physical page address 0x%lx "
+				    "larger than supported\n", pa);
+			return;
+		}
+
+		if (type == RCVHQ_RCV_TYPE_EAGER)
+			chippa |= dd->tidtemplate;
+		else /* for now, always full 4KB page */
+			chippa |= IBA7220_TID_SZ_4K;
+		pa = chippa;
+	}
+	writeq(pa, tidptr);
+	mmiowb();
+}
+
+/**
+ * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the ctxt
+ *
+ * clear all TID entries for a ctxt, expected and eager.
+ * Used from qib_close().  On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void qib_7220_clear_tids(struct qib_devdata *dd,
+				struct qib_ctxtdata *rcd)
+{
+	u64 __iomem *tidbase;
+	unsigned long tidinv;
+	u32 ctxt;
+	int i;
+
+	if (!dd->kregbase || !rcd)
+		return;
+
+	ctxt = rcd->ctxt;
+
+	tidinv = dd->tidinvalid;
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->kregbase) +
+		 dd->rcvtidbase +
+		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+	for (i = 0; i < dd->rcvtidcnt; i++)
+		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+				 tidinv);
+
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->kregbase) +
+		 dd->rcvegrbase +
+		 rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+	for (i = 0; i < rcd->rcvegrcnt; i++)
+		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+				 tidinv);
+}
+
+/**
+ * qib_7220_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void qib_7220_tidtemplate(struct qib_devdata *dd)
+{
+	if (dd->rcvegrbufsize == 2048)
+		dd->tidtemplate = IBA7220_TID_SZ_2K;
+	else if (dd->rcvegrbufsize == 4096)
+		dd->tidtemplate = IBA7220_TID_SZ_4K;
+	dd->tidinvalid = 0;
+}
+
+/**
+ * qib_7220_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
+				  struct qib_base_info *kinfo)
+{
+	kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
+		QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
+
+	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+	return 0;
+}
+
+static struct qib_message_header *
+qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+	u32 offset = qib_hdrget_offset(rhf_addr);
+
+	return (struct qib_message_header *)
+		(rhf_addr - dd->rhf_offset + offset);
+}
+
+static void qib_7220_config_ctxts(struct qib_devdata *dd)
+{
+	unsigned long flags;
+	u32 nchipctxts;
+
+	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
+	dd->cspec->numctxts = nchipctxts;
+	if (qib_n_krcv_queues > 1) {
+		dd->qpn_mask = 0x3f;
+		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
+		if (dd->first_user_ctxt > nchipctxts)
+			dd->first_user_ctxt = nchipctxts;
+	} else
+		dd->first_user_ctxt = dd->num_pports;
+	dd->n_krcv_queues = dd->first_user_ctxt;
+
+	if (!qib_cfgctxts) {
+		int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+		if (nctxts <= 5)
+			dd->ctxtcnt = 5;
+		else if (nctxts <= 9)
+			dd->ctxtcnt = 9;
+		else if (nctxts <= nchipctxts)
+			dd->ctxtcnt = nchipctxts;
+	} else if (qib_cfgctxts <= nchipctxts)
+		dd->ctxtcnt = qib_cfgctxts;
+	if (!dd->ctxtcnt) /* none of the above, set to max */
+		dd->ctxtcnt = nchipctxts;
+
+	/*
+	 * Chip can be configured for 5, 9, or 17 ctxts, and choice
+	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+	 * Lock to be paranoid about later motion, etc.
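+	 * (ctxtcnt > 9 selects the 17-context encoding below, ctxtcnt > 5
+	 * the 9-context one; otherwise the chip default of 5 is kept.)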
+ */ + spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); + if (dd->ctxtcnt > 9) + dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT; + else if (dd->ctxtcnt > 5) + dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT; + /* else configure for default 5 receive ctxts */ + if (dd->qpn_mask) + dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB; + qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); + spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); + + /* kr_rcvegrcnt changes based on the number of contexts enabled */ + dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); + dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT); +} + +static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which) +{ + int lsb, ret = 0; + u64 maskr; /* right-justified mask */ + + switch (which) { + case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */ + ret = ppd->link_width_enabled; + goto done; + + case QIB_IB_CFG_LWID: /* Get currently active Link-width */ + ret = ppd->link_width_active; + goto done; + + case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ + ret = ppd->link_speed_enabled; + goto done; + + case QIB_IB_CFG_SPD: /* Get current Link spd */ + ret = ppd->link_speed_active; + goto done; + + case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ + lsb = IBA7220_IBC_RXPOL_SHIFT; + maskr = IBA7220_IBC_RXPOL_MASK; + break; + + case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ + lsb = IBA7220_IBC_LREV_SHIFT; + maskr = IBA7220_IBC_LREV_MASK; + break; + + case QIB_IB_CFG_LINKLATENCY: + ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus) + & IBA7220_DDRSTAT_LINKLAT_MASK; + goto done; + + case QIB_IB_CFG_OP_VLS: + ret = ppd->vls_operational; + goto done; + + case QIB_IB_CFG_VL_HIGH_CAP: + ret = 0; + goto done; + + case QIB_IB_CFG_VL_LOW_CAP: + ret = 0; + goto done; + + case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ + ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, + OverrunThreshold); + goto done; + + case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ + ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, + PhyerrThreshold); + goto done; + + case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ + /* will only take effect when the link state changes */ + ret = (ppd->cpspec->ibcctrl & + SYM_MASK(IBCCtrl, LinkDownDefaultState)) ? + IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; + goto done; + + case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ + lsb = IBA7220_IBC_HRTBT_SHIFT; + maskr = IBA7220_IBC_HRTBT_MASK; + break; + + case QIB_IB_CFG_PMA_TICKS: + /* + * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs + * Since the clock is always 250MHz, the value is 1 or 0. + */ + ret = (ppd->link_speed_active == QIB_IB_DDR); + goto done; + + default: + ret = -EINVAL; + goto done; + } + ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr); +done: + return ret; +} + +static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) +{ + struct qib_devdata *dd = ppd->dd; + u64 maskr; /* right-justified mask */ + int lsb, ret = 0, setforce = 0; + u16 lcmd, licmd; + unsigned long flags; + + switch (which) { + case QIB_IB_CFG_LIDLMC: + /* + * Set LID and LMC. Combined to avoid possible hazard + * caller puts LMC in 16MSbits, DLID in 16LSbits of val + */ + lsb = IBA7220_IBC_DLIDLMC_SHIFT; + maskr = IBA7220_IBC_DLIDLMC_MASK; + break; + + case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */ + /* + * As with speed, only write the actual register if + * the link is currently down, otherwise takes effect + * on next link change. 
+		 */
+		ppd->link_width_enabled = val;
+		if (!(ppd->lflags & QIBL_LINKDOWN))
+			goto bail;
+		/*
+		 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+		 * will get called, because we want to update
+		 * link_width_active, and the change may not take
+		 * effect for some time (if we are in POLL), so this
+		 * flag will force the updown routine to be called
+		 * on the next ibstatuschange down interrupt, even
+		 * if it's not a down->up transition.
+		 */
+		val--; /* convert from IB to chip */
+		maskr = IBA7220_IBC_WIDTH_MASK;
+		lsb = IBA7220_IBC_WIDTH_SHIFT;
+		setforce = 1;
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		break;
+
+	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+		/*
+		 * If we turn off IB1.2, need to preset SerDes defaults,
+		 * but not right now.  Set a flag for the next time
+		 * we command the link down.  As with width, only write the
+		 * actual register if the link is currently down, otherwise
+		 * takes effect on next link change.  Since setting is being
+		 * explicitly requested (via MAD or sysfs), clear autoneg
+		 * failure status if speed autoneg is enabled.
+		 */
+		ppd->link_speed_enabled = val;
+		if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
+		    !(val & (val - 1)))
+			dd->cspec->presets_needed = 1;
+		if (!(ppd->lflags & QIBL_LINKDOWN))
+			goto bail;
+		/*
+		 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+		 * will get called, because we want to update
+		 * link_speed_active, and the change may not take
+		 * effect for some time (if we are in POLL), so this
+		 * flag will force the updown routine to be called
+		 * on the next ibstatuschange down interrupt, even
+		 * if it's not a down->up transition.
+		 */
+		if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
+			val = IBA7220_IBC_SPEED_AUTONEG_MASK |
+				IBA7220_IBC_IBTA_1_2_MASK;
+			spin_lock_irqsave(&ppd->lflags_lock, flags);
+			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		} else
+			val = val == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; + maskr = IBA7220_IBC_SPEED_AUTONEG_MASK | + IBA7220_IBC_IBTA_1_2_MASK; + /* IBTA 1.2 mode + speed bits are contiguous */ + lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE); + setforce = 1; + break; + + case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */ + lsb = IBA7220_IBC_RXPOL_SHIFT; + maskr = IBA7220_IBC_RXPOL_MASK; + break; + + case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */ + lsb = IBA7220_IBC_LREV_SHIFT; + maskr = IBA7220_IBC_LREV_MASK; + break; + + case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ + maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, + OverrunThreshold); + if (maskr != val) { + ppd->cpspec->ibcctrl &= + ~SYM_MASK(IBCCtrl, OverrunThreshold); + ppd->cpspec->ibcctrl |= (u64) val << + SYM_LSB(IBCCtrl, OverrunThreshold); + qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + goto bail; + + case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ + maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, + PhyerrThreshold); + if (maskr != val) { + ppd->cpspec->ibcctrl &= + ~SYM_MASK(IBCCtrl, PhyerrThreshold); + ppd->cpspec->ibcctrl |= (u64) val << + SYM_LSB(IBCCtrl, PhyerrThreshold); + qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + goto bail; + + case QIB_IB_CFG_PKEYS: /* update pkeys */ + maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) | + ((u64) ppd->pkeys[2] << 32) | + ((u64) ppd->pkeys[3] << 48); + qib_write_kreg(dd, kr_partitionkey, maskr); + goto bail; + + case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ + /* will only take effect when the link state changes */ + if (val == IB_LINKINITCMD_POLL) + ppd->cpspec->ibcctrl &= + ~SYM_MASK(IBCCtrl, LinkDownDefaultState); + else /* SLEEP */ + ppd->cpspec->ibcctrl |= + SYM_MASK(IBCCtrl, LinkDownDefaultState); + qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); + qib_write_kreg(dd, kr_scratch, 0); + goto bail; + + case QIB_IB_CFG_MTU: /* update the MTU in IBC */ + /* + * Update our housekeeping variables, and set IBC max + * size, same as init code; max IBC is max we allow in + * buffer, less the qword pbc, plus 1 for ICRC, in dwords + * Set even if it's unchanged, print debug message only + * on changes. 
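+		 * E.g. an 8KB ibmaxlen would give (8192 >> 2) + 1 = 2049
+		 * dwords here (the 8KB figure is purely illustrative).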
+		 */
+		val = (ppd->ibmaxlen >> 2) + 1;
+		ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
+		ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
+		qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+		qib_write_kreg(dd, kr_scratch, 0);
+		goto bail;
+
+	case QIB_IB_CFG_LSTATE: /* set the IB link state */
+		switch (val & 0xffff0000) {
+		case IB_LINKCMD_DOWN:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+			if (!ppd->cpspec->ibdeltainprog &&
+			    qib_compat_ddr_negotiate) {
+				ppd->cpspec->ibdeltainprog = 1;
+				ppd->cpspec->ibsymsnap =
+					read_7220_creg32(dd, cr_ibsymbolerr);
+				ppd->cpspec->iblnkerrsnap =
+					read_7220_creg32(dd, cr_iblinkerrrecov);
+			}
+			break;
+
+		case IB_LINKCMD_ARMED:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+			break;
+
+		case IB_LINKCMD_ACTIVE:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+			goto bail;
+		}
+		switch (val & 0xffff) {
+		case IB_LINKINITCMD_NOP:
+			licmd = 0;
+			break;
+
+		case IB_LINKINITCMD_POLL:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+			break;
+
+		case IB_LINKINITCMD_SLEEP:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+			break;
+
+		case IB_LINKINITCMD_DISABLE:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+			ppd->cpspec->chase_end = 0;
+			/*
+			 * stop state chase counter and timer, if running.
+			 * wait for pending timer, but don't clear .data
+			 * (ppd)!
+			 */
+			if (ppd->cpspec->chase_timer.expires) {
+				del_timer_sync(&ppd->cpspec->chase_timer);
+				ppd->cpspec->chase_timer.expires = 0;
+			}
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+				    val & 0xffff);
+			goto bail;
+		}
+		qib_set_ib_7220_lstate(ppd, lcmd, licmd);
+		goto bail;
+
+	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+		if (val > IBA7220_IBC_HRTBT_MASK) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		lsb = IBA7220_IBC_HRTBT_SHIFT;
+		maskr = IBA7220_IBC_HRTBT_MASK;
+		break;
+
+	default:
+		ret = -EINVAL;
+		goto bail;
+	}
+	ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
+	ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
+	qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+	qib_write_kreg(dd, kr_scratch, 0);
+	if (setforce) {
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	}
+bail:
+	return ret;
+}
+
+static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+	int ret = 0;
+	u64 val, ddr;
+
+	if (!strncmp(what, "ibc", 3)) {
+		ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
+		val = 0; /* disable heart beat, so link will come up */
+		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+			    ppd->dd->unit, ppd->port);
+	} else if (!strncmp(what, "off", 3)) {
+		ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
+		/* enable heart beat again */
+		val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+			    "(normal)\n", ppd->dd->unit, ppd->port);
+	} else
+		ret = -EINVAL;
+	if (!ret) {
+		qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+		ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
+			<< IBA7220_IBC_HRTBT_SHIFT);
+		ppd->cpspec->ibcddrctrl = ddr | val;
+		qib_write_kreg(ppd->dd, kr_ibcddrctrl,
+			       ppd->cpspec->ibcddrctrl);
+		qib_write_kreg(ppd->dd, kr_scratch, 0);
+	}
+	return ret;
+}
+
+static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+				    u32 updegr, u32 egrhd)
+{
+	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+	if (updegr)
+		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
+static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
+{
+	u32 head, tail;
+
+	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+	if (rcd->rcvhdrtail_kvaddr)
+		tail = qib_get_rcvhdrtail(rcd);
+	else
+		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+	return head == tail;
+}
+
+/*
+ * Modify the RCVCTRL register in chip-specific way.  This
+ * is a function because bit positions and (future) register
+ * location is chip-specific, but the needed operations are
+ * generic.  <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
+			     int ctxt)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 mask, val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+	if (op & QIB_RCVCTRL_TAILUPD_ENB)
+		dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
+	if (op & QIB_RCVCTRL_TAILUPD_DIS)
+		dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
+	if (op & QIB_RCVCTRL_PKEY_ENB)
+		dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
+	if (op & QIB_RCVCTRL_PKEY_DIS)
+		dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
+	if (ctxt < 0)
+		mask = (1ULL << dd->ctxtcnt) - 1;
+	else
+		mask = (1ULL << ctxt);
+	if (op & QIB_RCVCTRL_CTXT_ENB) {
+		/* always done for specific ctxt */
+		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
+		if (!(dd->flags & QIB_NODMA_RTAIL))
+			dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
+		/* Write these registers before the context is enabled. */
+		qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+				    dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+		qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+				    dd->rcd[ctxt]->rcvhdrq_phys);
+		dd->rcd[ctxt]->seq_cnt = 1;
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS)
+		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+		dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
+	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+		dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
+	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+		/* arm rcv interrupt */
+		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+			dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	}
+	if (op & QIB_RCVCTRL_CTXT_ENB) {
+		/*
+		 * Init the context registers also; if we were
+		 * disabled, tail and head should both be zero
+		 * already from the enable, but since we don't
+		 * know, we have to do it explicitly.
+		 */
+		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+		dd->rcd[ctxt]->head = val;
+		/* If kctxt, interrupt on next receive. */
+		if (ctxt < dd->first_user_ctxt)
+			val |= dd->rhdrhead_intr_off;
+		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+	}
+	if (op & QIB_RCVCTRL_CTXT_DIS) {
+		if (ctxt >= 0) {
+			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
+			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
+		} else {
+			unsigned i;
+
+			for (i = 0; i < dd->cfgctxts; i++) {
+				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+						    i, 0);
+				qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in chip-specific way.  This
+ * is a function because there may be multiple such registers with
+ * slightly different layouts.  To start, we assume the
+ * "canonical" register layout of the first chips.
+ * Chip requires no back-back sendctrl writes, so write + * scratch register after writing sendctrl + */ +static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op) +{ + struct qib_devdata *dd = ppd->dd; + u64 tmp_dd_sendctrl; + unsigned long flags; + + spin_lock_irqsave(&dd->sendctrl_lock, flags); + + /* First the ones that are "sticky", saved in shadow */ + if (op & QIB_SENDCTRL_CLEAR) + dd->sendctrl = 0; + if (op & QIB_SENDCTRL_SEND_DIS) + dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable); + else if (op & QIB_SENDCTRL_SEND_ENB) { + dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable); + if (dd->flags & QIB_USE_SPCL_TRIG) + dd->sendctrl |= SYM_MASK(SendCtrl, + SSpecialTriggerEn); + } + if (op & QIB_SENDCTRL_AVAIL_DIS) + dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); + else if (op & QIB_SENDCTRL_AVAIL_ENB) + dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); + + if (op & QIB_SENDCTRL_DISARM_ALL) { + u32 i, last; + + tmp_dd_sendctrl = dd->sendctrl; + /* + * disarm any that are not yet launched, disabling sends + * and updates until done. + */ + last = dd->piobcnt2k + dd->piobcnt4k; + tmp_dd_sendctrl &= + ~(SYM_MASK(SendCtrl, SPioEnable) | + SYM_MASK(SendCtrl, SendBufAvailUpd)); + for (i = 0; i < last; i++) { + qib_write_kreg(dd, kr_sendctrl, + tmp_dd_sendctrl | + SYM_MASK(SendCtrl, Disarm) | i); + qib_write_kreg(dd, kr_scratch, 0); + } + } + + tmp_dd_sendctrl = dd->sendctrl; + + if (op & QIB_SENDCTRL_FLUSH) + tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort); + if (op & QIB_SENDCTRL_DISARM) + tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) | + ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) << + SYM_LSB(SendCtrl, DisarmPIOBuf)); + if ((op & QIB_SENDCTRL_AVAIL_BLIP) && + (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) + tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); + + qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + + if (op & QIB_SENDCTRL_AVAIL_BLIP) { + qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + + spin_unlock_irqrestore(&dd->sendctrl_lock, flags); + + if (op & QIB_SENDCTRL_FLUSH) { + u32 v; + /* + * ensure writes have hit chip, then do a few + * more reads, to allow DMA of pioavail registers + * to occur, so in-memory copy is in sync with + * the chip. Not always safe to sleep. 
+		 */
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		v = qib_read_kreg32(dd, kr_scratch);
+		qib_write_kreg(dd, kr_scratch, v);
+		qib_read_kreg32(dd, kr_scratch);
+	}
+}
+
+/**
+ * qib_portcntr_7220 - read a per-port counter
+ * @ppd: the qlogic_ib port
+ * @reg: the counter to snapshot
+ */
+static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
+{
+	u64 ret = 0ULL;
+	struct qib_devdata *dd = ppd->dd;
+	u16 creg;
+	/* 0xffff for unimplemented or synthesized counters */
+	static const u16 xlator[] = {
+		[QIBPORTCNTR_PKTSEND] = cr_pktsend,
+		[QIBPORTCNTR_WORDSEND] = cr_wordsend,
+		[QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
+		[QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
+		[QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
+		[QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+		[QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+		[QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
+		[QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
+		[QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+		[QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+		[QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+		[QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+		[QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
+		[QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
+		[QIBPORTCNTR_ERRICRC] = cr_erricrc,
+		[QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+		[QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+		[QIBPORTCNTR_BADFORMAT] = cr_badformat,
+		[QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+		[QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+		[QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+		[QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+		[QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
+		[QIBPORTCNTR_ERRLINK] = cr_errlink,
+		[QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+		[QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+		[QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
+		[QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
+		[QIBPORTCNTR_PSSTART] = cr_psstart,
+		[QIBPORTCNTR_PSSTAT] = cr_psstat,
+		[QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
+		[QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+		[QIBPORTCNTR_KHDROVFL] = 0xffff,
+	};
+
+	if (reg >= ARRAY_SIZE(xlator)) {
+		qib_devinfo(ppd->dd->pcidev,
+			    "Unimplemented portcounter %u\n", reg);
+		goto done;
+	}
+	creg = xlator[reg];
+
+	if (reg == QIBPORTCNTR_KHDROVFL) {
+		int i;
+
+		/* sum over all kernel contexts */
+		for (i = 0; i < dd->first_user_ctxt; i++)
+			ret += read_7220_creg32(dd, cr_portovfl + i);
+	}
+	if (creg == 0xffff)
+		goto done;
+
+	/*
+	 * only fast incrementing counters are 64bit; use 32 bit reads to
+	 * avoid two independent reads when on opteron
+	 */
+	if ((creg == cr_wordsend || creg == cr_wordrcv ||
+	     creg == cr_pktsend || creg == cr_pktrcv))
+		ret = read_7220_creg(dd, creg);
+	else
+		ret = read_7220_creg32(dd, creg);
+	if (creg == cr_ibsymbolerr) {
+		if (dd->pport->cpspec->ibdeltainprog)
+			ret -= ret - ppd->cpspec->ibsymsnap;
+		ret -= dd->pport->cpspec->ibsymdelta;
+	} else if (creg == cr_iblinkerrrecov) {
+		if (dd->pport->cpspec->ibdeltainprog)
+			ret -= ret - ppd->cpspec->iblnkerrsnap;
+		ret -= dd->pport->cpspec->iblnkerrdelta;
+	}
+done:
+	return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string.  Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility.  Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" counters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured + * context count for the device. + * cntr7220indices contains the corresponding register indices. + */ +static const char cntr7220names[] = + "Interrupts\n" + "HostBusStall\n" + "E RxTIDFull\n" + "RxTIDInvalid\n" + "Ctxt0EgrOvfl\n" + "Ctxt1EgrOvfl\n" + "Ctxt2EgrOvfl\n" + "Ctxt3EgrOvfl\n" + "Ctxt4EgrOvfl\n" + "Ctxt5EgrOvfl\n" + "Ctxt6EgrOvfl\n" + "Ctxt7EgrOvfl\n" + "Ctxt8EgrOvfl\n" + "Ctxt9EgrOvfl\n" + "Ctx10EgrOvfl\n" + "Ctx11EgrOvfl\n" + "Ctx12EgrOvfl\n" + "Ctx13EgrOvfl\n" + "Ctx14EgrOvfl\n" + "Ctx15EgrOvfl\n" + "Ctx16EgrOvfl\n"; + +static const size_t cntr7220indices[] = { + cr_lbint, + cr_lbflowstall, + cr_errtidfull, + cr_errtidvalid, + cr_portovfl + 0, + cr_portovfl + 1, + cr_portovfl + 2, + cr_portovfl + 3, + cr_portovfl + 4, + cr_portovfl + 5, + cr_portovfl + 6, + cr_portovfl + 7, + cr_portovfl + 8, + cr_portovfl + 9, + cr_portovfl + 10, + cr_portovfl + 11, + cr_portovfl + 12, + cr_portovfl + 13, + cr_portovfl + 14, + cr_portovfl + 15, + cr_portovfl + 16, +}; + +/* + * same as cntr7220names and cntr7220indices, but for port-specific counters. + * portcntr7220indices is somewhat complicated by some registers needing + * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG + */ +static const char portcntr7220names[] = + "TxPkt\n" + "TxFlowPkt\n" + "TxWords\n" + "RxPkt\n" + "RxFlowPkt\n" + "RxWords\n" + "TxFlowStall\n" + "TxDmaDesc\n" /* 7220 and 7322-only */ + "E RxDlidFltr\n" /* 7220 and 7322-only */ + "IBStatusChng\n" + "IBLinkDown\n" + "IBLnkRecov\n" + "IBRxLinkErr\n" + "IBSymbolErr\n" + "RxLLIErr\n" + "RxBadFormat\n" + "RxBadLen\n" + "RxBufOvrfl\n" + "RxEBP\n" + "RxFlowCtlErr\n" + "RxICRCerr\n" + "RxLPCRCerr\n" + "RxVCRCerr\n" + "RxInvalLen\n" + "RxInvalPKey\n" + "RxPktDropped\n" + "TxBadLength\n" + "TxDropped\n" + "TxInvalLen\n" + "TxUnderrun\n" + "TxUnsupVL\n" + "RxLclPhyErr\n" /* 7220 and 7322-only */ + "RxVL15Drop\n" /* 7220 and 7322-only */ + "RxVlErr\n" /* 7220 and 7322-only */ + "XcessBufOvfl\n" /* 7220 and 7322-only */ + ; + +#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */ +static const size_t portcntr7220indices[] = { + QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG, + cr_pktsendflow, + QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG, + QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG, + cr_pktrcvflowctrl, + QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG, + QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG, + cr_txsdmadesc, + cr_rxdlidfltr, + cr_ibstatuschange, + QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG, + QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG, + QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG, + QIBPORTCNTR_LLI | _PORT_VIRT_FLAG, + QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG, + QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG, + QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG, + cr_rcvflowctrl_err, + QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG, + QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG, + QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG, + cr_invalidslen, + cr_senddropped, + cr_errslen, + cr_sendunderrun, + cr_txunsupvl, + QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG, + QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG, + QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG, + QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG, +}; + +/* do all the setup to make the counter reads efficient later */ +static void init_7220_cntrnames(struct qib_devdata *dd) +{ + int i, j = 0; + 
char *s;
+
+ for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr7220names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr7220names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
+ dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
+
+static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (!dd->cspec->cntrs) {
+ ret = 0;
+ goto done;
+ }
+
+ if (namep) {
+ *namep = (char *)cntr7220names;
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
+ }
+done:
+ return ret;
+}
+
+static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (!dd->cspec->portcntrs) {
+ ret = 0;
+ goto done;
+ }
+ if (namep) {
+ *namep = (char *)portcntr7220names;
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ } else {
+ u64 *cntr = dd->cspec->portcntrs;
+ struct qib_pportdata *ppd = &dd->pport[port];
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_7220(ppd,
+ portcntr7220indices[i] &
+ ~_PORT_VIRT_FLAG);
+ else
+ *cntr++ = read_7220_creg32(dd,
+ portcntr7220indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_get_7220_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * This needs more work; in particular, a decision on whether we really
+ * need traffic_wds done the way it is.
+ * Called from add_timer.
+ */
+static void qib_get_7220_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned long flags;
+ u64 traffic_wds;
+
+ /*
+ * don't access the chip while running diags, or memory diags can
+ * fail
+ */
+ if (!(dd->flags & QIB_INITTED) || dd->diag_client)
+ /* but re-arm the timer, for diags case; won't hurt other */
+ goto done;
+
+ /*
+ * We now try to maintain an activity timer, based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
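+ * (The delta below is new_total - dd->traffic_wds in unsigned
+ * arithmetic, so wraparound between samples still yields the
+ * correct difference for the activity-threshold test.)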
+ */ + traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) + + qib_portcntr_7220(ppd, cr_wordrcv); + spin_lock_irqsave(&dd->eep_st_lock, flags); + traffic_wds -= dd->traffic_wds; + dd->traffic_wds += traffic_wds; + if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) + atomic_add(5, &dd->active_time); /* S/B #define */ + spin_unlock_irqrestore(&dd->eep_st_lock, flags); +done: + mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); +} + +/* + * If we are using MSI, try to fallback to INTx. + */ +static int qib_7220_intr_fallback(struct qib_devdata *dd) +{ + if (!dd->msi_lo) + return 0; + + qib_devinfo(dd->pcidev, "MSI interrupt not detected," + " trying INTx interrupts\n"); + qib_7220_free_irq(dd); + qib_enable_intx(dd->pcidev); + /* + * Some newer kernels require free_irq before disable_msi, + * and irq can be changed during disable and INTx enable + * and we need to therefore use the pcidev->irq value, + * not our saved MSI value. + */ + dd->cspec->irq = dd->pcidev->irq; + qib_setup_7220_interrupt(dd); + return 1; +} + +/* + * Reset the XGXS (between serdes and IBC). Slightly less intrusive + * than resetting the IBC or external link state, and useful in some + * cases to cause some retraining. To do this right, we reset IBC + * as well. + */ +static void qib_7220_xgxs_reset(struct qib_pportdata *ppd) +{ + u64 val, prev_val; + struct qib_devdata *dd = ppd->dd; + + prev_val = qib_read_kreg64(dd, kr_xgxs_cfg); + val = prev_val | QLOGIC_IB_XGXS_RESET; + prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */ + qib_write_kreg(dd, kr_control, + dd->control & ~QLOGIC_IB_C_LINKENABLE); + qib_write_kreg(dd, kr_xgxs_cfg, val); + qib_read_kreg32(dd, kr_scratch); + qib_write_kreg(dd, kr_xgxs_cfg, prev_val); + qib_write_kreg(dd, kr_control, dd->control); +} + +/* + * For this chip, we want to use the same buffer every time + * when we are trying to bring the link up (they are always VL15 + * packets). At that link state the packet should always go out immediately + * (or at least be discarded at the tx interface if the link is down). + * If it doesn't, and the buffer isn't available, that means some other + * sender has gotten ahead of us, and is preventing our packet from going + * out. In that case, we flush all packets, and try again. If that still + * fails, we fail the request, and hope things work the next time around. + * + * We don't need very complicated heuristics on whether the packet had + * time to go out or not, since even at SDR 1X, it goes out in very short + * time periods, covered by the chip reads done here and as part of the + * flush. + */ +static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum) +{ + u32 __iomem *buf; + u32 lbuf = ppd->dd->cspec->lastbuf_for_pio; + int do_cleanup; + unsigned long flags; + + /* + * always blip to get avail list updated, since it's almost + * always needed, and is fairly cheap. 
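+ * (The blip path in sendctrl_7220_mod() momentarily clears
+ * SendBufAvailUpd and then restores it, forcing the chip to DMA a
+ * fresh copy of the buffer-available bits to memory.)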
+ */
+ sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ if (buf)
+ goto done;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
+ ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
+ __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
+ do_cleanup = 0;
+ } else {
+ do_cleanup = 1;
+ qib_7220_sdma_hw_clean_up(ppd);
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ if (do_cleanup) {
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ }
+done:
+ return buf;
+}
+
+/*
+ * This code for non-IBTA-compliant IB speed negotiation is only known to
+ * work for the SDR to DDR transition, and only between an HCA and a switch
+ * with recent firmware. It is based on observed heuristics, rather than
+ * actual knowledge of the non-compliant speed negotiation.
+ * It has a number of hard-coded fields, since the hope is to rewrite this
+ * when a spec is available on how the negotiation is intended to work.
+ */
+static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
+ u32 dcnt, u32 *data)
+{
+ int i;
+ u64 pbc;
+ u32 __iomem *piobuf;
+ u32 pnum;
+ struct qib_devdata *dd = ppd->dd;
+
+ i = 0;
+ pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
+ pbc |= PBC_7220_VL15_SEND;
+ while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
+ if (i++ > 5)
+ return;
+ udelay(2);
+ }
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
+ writeq(pbc, piobuf);
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, 7);
+ qib_pio_copy(piobuf + 9, data, dcnt);
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pnum);
+}
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
+{
+ struct qib_devdata *dd = ppd->dd;
+ static u32 swapped;
+ u32 dw, i, hcnt, dcnt, *data;
+ static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+ static u32 madpayload_start[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+ };
+ static u32 madpayload_done[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x40000001, 0x1388, 0x15e, /* rest 0's */
+ };
+
+ dcnt = ARRAY_SIZE(madpayload_start);
+ hcnt = ARRAY_SIZE(hdr);
+ if (!swapped) {
+ /* for maintainability, do it at runtime */
+ for (i = 0; i < hcnt; i++) {
+ dw = (__force u32) cpu_to_be32(hdr[i]);
+ hdr[i] = dw;
+ }
+ for (i = 0; i < dcnt; i++) {
+ dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+ madpayload_start[i] = dw;
+ dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+ madpayload_done[i] = dw;
+ }
+ swapped = 1;
+ }
+
+ data = which ? madpayload_done : madpayload_start;
+
+ autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+ autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+}
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change. The caller will
+ * do that when ready (if link is in Polling training state, it will
+ * happen immediately, otherwise when link next goes down).
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down and autoneg is enabled, or autoneg has
+ * failed and we give up until the next time, the caller sets both
+ * speeds; we then want IBTA negotiation enabled as well as "use max
+ * enabled speed".
+ */
+static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
+{
+ ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK);
+
+ if (speed == (QIB_IB_SDR | QIB_IB_DDR))
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ else
+ ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+
+ qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT
+ */
+static void try_7220_autoneg(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ /*
+ * Required for older non-IB1.2 DDR switches. Newer
+ * non-IB-compliant switches don't need it, but so far,
+ * aren't bothered by it either. "Magic constant"
+ */
+ qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ autoneg_7220_send(ppd, 0);
+ set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+
+ toggle_7220_rclkrls(ppd->dd);
+ /* 2 msec is minimum length of a poll cycle */
+ schedule_delayed_work(&ppd->cpspec->autoneg_work,
+ msecs_to_jiffies(2));
+}
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_7220_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd;
+ u64 startms;
+ u32 i;
+ unsigned long flags;
+
+ ppd = &container_of(work, struct qib_chippport_specific,
+ autoneg_work.work)->pportdata;
+ dd = ppd->dd;
+
+ startms = jiffies_to_msecs(jiffies);
+
+ /*
+ * Busy wait for this first part, it should be at most a
+ * few hundred usec, since we scheduled ourselves for 2msec.
+ */
+ for (i = 0; i < 25; i++) {
+ if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
+ == IB_7220_LT_STATE_POLLQUIET) {
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
+ break;
+ }
+ udelay(100);
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ goto done; /* we got there early or told to stop */
+
+ /* we expect this to time out */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(90)))
+ goto done;
+
+ toggle_7220_rclkrls(dd);
+
+ /* we expect this to time out */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(1700)))
+ goto done;
+
+ set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
+ toggle_7220_rclkrls(dd);
+
+ /*
+ * Wait up to 250 msec for link to train and get to INIT at DDR;
+ * this should terminate early.
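+ * (Rough sequence: the probe is sent at DDR, the rclk is toggled
+ * between waits, speed falls back to SDR, and this final wait picks
+ * up the training result.)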
+ */ + wait_event_timeout(ppd->cpspec->autoneg_wait, + !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), + msecs_to_jiffies(250)); +done: + if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) { + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; + if (dd->cspec->autoneg_tries == AUTONEG_TRIES) { + ppd->lflags |= QIBL_IB_AUTONEG_FAILED; + dd->cspec->autoneg_tries = 0; + } + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled); + } +} + +static u32 qib_7220_iblink_state(u64 ibcs) +{ + u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState); + + switch (state) { + case IB_7220_L_STATE_INIT: + state = IB_PORT_INIT; + break; + case IB_7220_L_STATE_ARM: + state = IB_PORT_ARMED; + break; + case IB_7220_L_STATE_ACTIVE: + /* fall through */ + case IB_7220_L_STATE_ACT_DEFER: + state = IB_PORT_ACTIVE; + break; + default: /* fall through */ + case IB_7220_L_STATE_DOWN: + state = IB_PORT_DOWN; + break; + } + return state; +} + +/* returns the IBTA port state, rather than the IBC link training state */ +static u8 qib_7220_phys_portstate(u64 ibcs) +{ + u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState); + return qib_7220_physportstate[state]; +} + +static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) +{ + int ret = 0, symadj = 0; + struct qib_devdata *dd = ppd->dd; + unsigned long flags; + + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + + if (!ibup) { + /* + * When the link goes down we don't want AEQ running, so it + * won't interfere with IBC training, etc., and we need + * to go back to the static SerDes preset values. + */ + if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | + QIBL_IB_AUTONEG_INPROG))) + set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled); + if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { + qib_sd7220_presets(dd); + qib_cancel_sends(ppd); /* initial disarm, etc. 
*/
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (__qib_sdma_running(ppd))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e70_go_idle);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ /* this might be better in qib_sd7220_presets() */
+ set_7220_relock_poll(dd, ibup);
+ } else {
+ if (qib_compat_ddr_negotiate &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)) &&
+ ppd->link_speed_active == QIB_IB_SDR &&
+ (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
+ (QIB_IB_DDR | QIB_IB_SDR) &&
+ dd->cspec->autoneg_tries < AUTONEG_TRIES) {
+ /* we are SDR, and DDR auto-negotiation enabled */
+ ++dd->cspec->autoneg_tries;
+ if (!ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
+ cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
+ cr_iblinkerrrecov);
+ }
+ try_7220_autoneg(ppd);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ ppd->link_speed_active == QIB_IB_SDR) {
+ autoneg_7220_send(ppd, 1);
+ set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+ udelay(2);
+ toggle_7220_rclkrls(dd);
+ ret = 1; /* no other IB status change processing */
+ } else {
+ if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ (ppd->link_speed_active & QIB_IB_DDR)) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
+ QIBL_IB_AUTONEG_FAILED);
+ spin_unlock_irqrestore(&ppd->lflags_lock,
+ flags);
+ dd->cspec->autoneg_tries = 0;
+ /* re-enable SDR, for next link down */
+ set_7220_ibspeed_fast(ppd,
+ ppd->link_speed_enabled);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ symadj = 1;
+ } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
+ /*
+ * Clear autoneg failure flag, and do setup
+ * so we'll try next time link goes down and
+ * back to INIT (possibly connected to a
+ * different device).
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock,
+ flags);
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_IBTA_1_2_MASK;
+ qib_write_kreg(dd, kr_ncmodectrl, 0);
+ symadj = 1;
+ }
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ symadj = 1;
+
+ if (!ret) {
+ ppd->delay_mult = rate_to_delay
+ [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
+ [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
+
+ set_7220_relock_poll(dd, ibup);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ /*
+ * Unlike 7322, the 7220 needs this, due to lack of
+ * interrupt in some cases when we have sdma active
+ * when the link goes down.
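+ * (Without forcing the go_hw_down event here, the sdma state
+ * machine could be left waiting for an interrupt that never
+ * arrives.)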
+ */
+ if (ppd->sdma_state.current_state !=
+ qib_sdma_state_s20_idle)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e00_go_hw_down);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ }
+
+ if (symadj) {
+ if (ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 0;
+ ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
+ cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
+ cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
+ }
+ } else if (!ibup && qib_compat_ddr_negotiate &&
+ !ppd->cpspec->ibdeltainprog &&
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
+ cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
+ cr_iblinkerrrecov);
+ }
+
+ if (!ret)
+ qib_setup_7220_setextled(ppd, ibup);
+ return ret;
+}
+
+/*
+ * Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
+static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
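+ * (Read once at init; later code uses these dd->* shadow copies
+ * rather than re-reading the chip.)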
+ */
+static void get_7220_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->palign = qib_read_kreg32(dd, kr_palign);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport->ibmtu = (u32)mtu;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
+ if (dd->piobcnt4k) {
+ dd->pio4kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+ * every buf allocate
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+ }
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k;
+
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_7220_chip_params(), so split out as separate function
+ */
+static void set_7220_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ /* init after possible re-map in init_chip_wc_pat() */
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+ dd->cspec->cregbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + cregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+}
+
+
+#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \
+ SYM_MASK(SendCtrl, SPioEnable) | \
+ SYM_MASK(SendCtrl, SSpecialTriggerEn) | \
+ SYM_MASK(SendCtrl, SendBufAvailUpd) | \
+ SYM_MASK(SendCtrl, AvailUpdThld) | \
+ SYM_MASK(SendCtrl, SDmaEnable) | \
+ SYM_MASK(SendCtrl, SDmaIntEnable) | \
+ SYM_MASK(SendCtrl, SDmaHalt) | \
+ SYM_MASK(SendCtrl, SDmaSingleDescriptor))
+
+static int sendctrl_hook(struct qib_devdata *dd,
+ const struct diag_observer *op,
+ u32 offs, u64 *data, u64 mask, int only_32)
+{
+ unsigned long flags;
+ unsigned idx = offs / sizeof(u64);
+ u64 local_data, all_bits;
+
+ if (idx != kr_sendctrl) {
+ qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
+ offs, only_32 ? "32" : "64");
+ return 0;
+ }
+
+ all_bits = ~0ULL;
+ if (only_32)
+ all_bits >>= 32;
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if ((mask & all_bits) != all_bits) {
+ /*
+ * At least some mask bits are zero, so we need
+ * to read. The judgement call is whether from
+ * reg or shadow. First-cut: read reg, and complain
+ * if any bits which should be shadowed are different
+ * from their shadowed value.
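+ * The merge below is *data = (local_data & ~mask) | (*data & mask),
+ * so a zero mask returns the live register unchanged, while an
+ * all-ones mask skips the read entirely.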
+ */ + if (only_32) + local_data = (u64)qib_read_kreg32(dd, idx); + else + local_data = qib_read_kreg64(dd, idx); + qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n", + (u32)local_data, (u32)dd->sendctrl); + if ((local_data & SENDCTRL_SHADOWED) != + (dd->sendctrl & SENDCTRL_SHADOWED)) + qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n", + (u32)local_data, (u32) dd->sendctrl); + *data = (local_data & ~mask) | (*data & mask); + } + if (mask) { + /* + * At least some mask bits are one, so we need + * to write, but only shadow some bits. + */ + u64 sval, tval; /* Shadowed, transient */ + + /* + * New shadow val is bits we don't want to touch, + * ORed with bits we do, that are intended for shadow. + */ + sval = (dd->sendctrl & ~mask); + sval |= *data & SENDCTRL_SHADOWED & mask; + dd->sendctrl = sval; + tval = sval | (*data & ~SENDCTRL_SHADOWED & mask); + qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n", + (u32)tval, (u32)sval); + qib_write_kreg(dd, kr_sendctrl, tval); + qib_write_kreg(dd, kr_scratch, 0Ull); + } + spin_unlock_irqrestore(&dd->sendctrl_lock, flags); + + return only_32 ? 4 : 8; +} + +static const struct diag_observer sendctrl_observer = { + sendctrl_hook, kr_sendctrl * sizeof(u64), + kr_sendctrl * sizeof(u64) +}; + +/* + * write the final few registers that depend on some of the + * init setup. Done late in init, just before bringing up + * the serdes. + */ +static int qib_late_7220_initreg(struct qib_devdata *dd) +{ + int ret = 0; + u64 val; + + qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); + qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); + qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); + qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); + val = qib_read_kreg64(dd, kr_sendpioavailaddr); + if (val != dd->pioavailregs_phys) { + qib_dev_err(dd, "Catastrophic software error, " + "SendPIOAvailAddr written as %lx, " + "read back as %llx\n", + (unsigned long) dd->pioavailregs_phys, + (unsigned long long) val); + ret = -EINVAL; + } + qib_register_observer(dd, &sendctrl_observer); + return ret; +} + +static int qib_init_7220_variables(struct qib_devdata *dd) +{ + struct qib_chippport_specific *cpspec; + struct qib_pportdata *ppd; + int ret = 0; + u32 sbufs, updthresh; + + cpspec = (struct qib_chippport_specific *)(dd + 1); + ppd = &cpspec->pportdata; + dd->pport = ppd; + dd->num_pports = 1; + + dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports); + ppd->cpspec = cpspec; + + spin_lock_init(&dd->cspec->sdepb_lock); + spin_lock_init(&dd->cspec->rcvmod_lock); + spin_lock_init(&dd->cspec->gpio_lock); + + /* we haven't yet set QIB_PRESENT, so use read directly */ + dd->revision = readq(&dd->kregbase[kr_revision]); + + if ((dd->revision & 0xffffffffU) == 0xffffffffU) { + qib_dev_err(dd, "Revision register read failure, " + "giving up initialization\n"); + ret = -ENODEV; + goto bail; + } + dd->flags |= QIB_PRESENT; /* now register routines work */ + + dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, + ChipRevMajor); + dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, + ChipRevMinor); + + get_7220_chip_params(dd); + qib_7220_boardname(dd); + + /* + * GPIO bits for TWSI data and clock, + * used for serial EEPROM. + */ + dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; + dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; + dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; + + dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | + QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE; + dd->flags |= qib_special_trigger ? 
+ QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA; + + /* + * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. + * 2 is Some Misc, 3 is reserved for future. + */ + dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr); + + dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr); + + dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated); + + init_waitqueue_head(&cpspec->autoneg_wait); + INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work); + + qib_init_pportdata(ppd, dd, 0, 1); + ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; + ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR; + + ppd->link_width_enabled = ppd->link_width_supported; + ppd->link_speed_enabled = ppd->link_speed_supported; + /* + * Set the initial values to reasonable default, will be set + * for real when link is up. + */ + ppd->link_width_active = IB_WIDTH_4X; + ppd->link_speed_active = QIB_IB_SDR; + ppd->delay_mult = rate_to_delay[0][1]; + ppd->vls_supported = IB_VL_VL0; + ppd->vls_operational = ppd->vls_supported; + + if (!qib_mini_init) + qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP); + + init_timer(&ppd->cpspec->chase_timer); + ppd->cpspec->chase_timer.function = reenable_7220_chase; + ppd->cpspec->chase_timer.data = (unsigned long)ppd; + + qib_num_cfg_vls = 1; /* if any 7220's, only one VL */ + + dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; + dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; + dd->rhf_offset = + dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); + + /* we always allocate at least 2048 bytes for eager buffers */ + ret = ib_mtu_enum_to_int(qib_ibmtu); + dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; + + qib_7220_tidtemplate(dd); + + /* + * We can request a receive interrupt for 1 or + * more packets from current offset. For now, we set this + * up for a single packet. + */ + dd->rhdrhead_intr_off = 1ULL << 32; + + /* setup the stats timer; the add_timer is done at end of init */ + init_timer(&dd->stats_timer); + dd->stats_timer.function = qib_get_7220_faststats; + dd->stats_timer.data = (unsigned long) dd; + dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ; + + /* + * Control[4] has been added to change the arbitration within + * the SDMA engine between favoring data fetches over descriptor + * fetches. qib_sdma_fetch_arb==0 gives data fetches priority. + */ + if (qib_sdma_fetch_arb) + dd->control |= 1 << 4; + + dd->ureg_align = 0x10000; /* 64KB alignment */ + + dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1; + qib_7220_config_ctxts(dd); + qib_set_ctxtcnt(dd); /* needed for PAT setup */ + + if (qib_wc_pat) { + ret = init_chip_wc_pat(dd, 0); + if (ret) + goto bail; + } + set_7220_baseaddrs(dd); /* set chip access pointers now */ + + ret = 0; + if (qib_mini_init) + goto bail; + + ret = qib_create_ctxts(dd); + init_7220_cntrnames(dd); + + /* use all of 4KB buffers for the kernel SDMA, zero if !SDMA. + * reserve the update threshold amount for other kernel use, such + * as sending SMI, MAD, and ACKs, or 3, whichever is greater, + * unless we aren't enabling SDMA, in which case we want to use + * all the 4k bufs for the kernel. + * if this was less than the update threshold, we could wait + * a long time for an update. Coded this way because we + * sometimes change the update threshold for various reasons, + * and we want this to remain robust. + */ + updthresh = 8U; /* update threshold */ + if (dd->flags & QIB_HAS_SEND_DMA) { + dd->cspec->sdmabufcnt = dd->piobcnt4k; + sbufs = updthresh > 3 ? 
updthresh : 3;
+ } else {
+ dd->cspec->sdmabufcnt = 0;
+ sbufs = dd->piobcnt4k;
+ }
+
+ dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
+ dd->cspec->sdmabufcnt;
+ dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
+ dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+ dd->pbufsctxt = dd->lastctxt_piobuf /
+ (dd->cfgctxts - dd->first_user_ctxt);
+
+ /*
+ * if we are at 16 user contexts, we will have only 7 sbufs
+ * per context, so drop the update threshold to match. We
+ * want to update before we actually run out, at low pbufs/ctxt
+ * so give ourselves some margin
+ */
+ if ((dd->pbufsctxt - 2) < updthresh)
+ updthresh = dd->pbufsctxt - 2;
+
+ dd->cspec->updthresh_dflt = updthresh;
+ dd->cspec->updthresh = updthresh;
+
+ /* before full enable, no interrupts, no locking needed */
+ dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+
+ dd->psxmitwait_supported = 1;
+ dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
+bail:
+ return ret;
+}
+
+static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *buf;
+
+ if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
+ buf = get_7220_link_buf(ppd, pbufnum);
+ else {
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ /* try 4k if all 2k busy, so same last for both sizes */
+ last = dd->cspec->lastbuf_for_pio;
+ buf = qib_getsendbuf_range(dd, pbufnum, first, last);
+ }
+ return buf;
+}
+
+/* these 2 "counters" are really control registers, and are always RW */
+static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ write_7220_creg(ppd->dd, cr_psinterval, intv);
+ write_7220_creg(ppd->dd, cr_psstart, start);
+}
+
+/*
+ * NOTE: no real attempt is made to generalize the SDMA stuff.
+ * At some point "soon" we will have a new, more generalized
+ * SDMA interface, and then we'll clean this up.
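+ * (Until then, the 7220-specific helpers below are hooked up
+ * through the dd->f_sdma_* pointers in qib_init_iba7220_funcs().)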
+ */ + +/* Must be called with sdma_lock held, or before init finished */ +static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail) +{ + /* Commit writes to memory and advance the tail on the chip */ + wmb(); + ppd->sdma_descq_tail = tail; + qib_write_kreg(ppd->dd, kr_senddmatail, tail); +} + +static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) +{ +} + +static struct sdma_set_state_action sdma_7220_action_table[] = { + [qib_sdma_state_s00_hw_down] = { + .op_enable = 0, + .op_intenable = 0, + .op_halt = 0, + .go_s99_running_tofalse = 1, + }, + [qib_sdma_state_s10_hw_start_up_wait] = { + .op_enable = 1, + .op_intenable = 1, + .op_halt = 1, + }, + [qib_sdma_state_s20_idle] = { + .op_enable = 1, + .op_intenable = 1, + .op_halt = 1, + }, + [qib_sdma_state_s30_sw_clean_up_wait] = { + .op_enable = 0, + .op_intenable = 1, + .op_halt = 0, + }, + [qib_sdma_state_s40_hw_clean_up_wait] = { + .op_enable = 1, + .op_intenable = 1, + .op_halt = 1, + }, + [qib_sdma_state_s50_hw_halt_wait] = { + .op_enable = 1, + .op_intenable = 1, + .op_halt = 1, + }, + [qib_sdma_state_s99_running] = { + .op_enable = 1, + .op_intenable = 1, + .op_halt = 0, + .go_s99_running_totrue = 1, + }, +}; + +static void qib_7220_sdma_init_early(struct qib_pportdata *ppd) +{ + ppd->sdma_state.set_state_action = sdma_7220_action_table; +} + +static int init_sdma_7220_regs(struct qib_pportdata *ppd) +{ + struct qib_devdata *dd = ppd->dd; + unsigned i, n; + u64 senddmabufmask[3] = { 0 }; + + /* Set SendDmaBase */ + qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys); + qib_sdma_7220_setlengen(ppd); + qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */ + /* Set SendDmaHeadAddr */ + qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys); + + /* + * Reserve all the former "kernel" piobufs, using high number range + * so we get as many 4K buffers as possible + */ + n = dd->piobcnt2k + dd->piobcnt4k; + i = n - dd->cspec->sdmabufcnt; + + for (; i < n; ++i) { + unsigned word = i / 64; + unsigned bit = i & 63; + + BUG_ON(word >= 3); + senddmabufmask[word] |= 1ULL << bit; + } + qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]); + qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]); + qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]); + + ppd->sdma_state.first_sendbuf = i; + ppd->sdma_state.last_sendbuf = n; + + return 0; +} + +/* sdma_lock must be held */ +static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd) +{ + struct qib_devdata *dd = ppd->dd; + int sane; + int use_dmahead; + u16 swhead; + u16 swtail; + u16 cnt; + u16 hwhead; + + use_dmahead = __qib_sdma_running(ppd) && + (dd->flags & QIB_HAS_SDMA_TIMEOUT); +retry: + hwhead = use_dmahead ? 
+ (u16)le64_to_cpu(*ppd->sdma_head_dma) : + (u16)qib_read_kreg32(dd, kr_senddmahead); + + swhead = ppd->sdma_descq_head; + swtail = ppd->sdma_descq_tail; + cnt = ppd->sdma_descq_cnt; + + if (swhead < swtail) { + /* not wrapped */ + sane = (hwhead >= swhead) & (hwhead <= swtail); + } else if (swhead > swtail) { + /* wrapped around */ + sane = ((hwhead >= swhead) && (hwhead < cnt)) || + (hwhead <= swtail); + } else { + /* empty */ + sane = (hwhead == swhead); + } + + if (unlikely(!sane)) { + if (use_dmahead) { + /* try one more time, directly from the register */ + use_dmahead = 0; + goto retry; + } + /* assume no progress */ + hwhead = swhead; + } + + return hwhead; +} + +static int qib_sdma_7220_busy(struct qib_pportdata *ppd) +{ + u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus); + + return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) || + (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) || + (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) || + !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty)); +} + +/* + * Compute the amount of delay before sending the next packet if the + * port's send rate differs from the static rate set for the QP. + * Since the delay affects this packet but the amount of the delay is + * based on the length of the previous packet, use the last delay computed + * and save the delay count for this packet to be used next time + * we get here. + */ +static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen, + u8 srate, u8 vl) +{ + u8 snd_mult = ppd->delay_mult; + u8 rcv_mult = ib_rate_to_delay[srate]; + u32 ret = ppd->cpspec->last_delay_mult; + + ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ? + (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0; + + /* Indicate VL15, if necessary */ + if (vl == 15) + ret |= PBC_7220_VL15_SEND_CTRL; + return ret; +} + +static void qib_7220_initvl15_bufs(struct qib_devdata *dd) +{ +} + +static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd) +{ + if (!rcd->ctxt) { + rcd->rcvegrcnt = IBA7220_KRCVEGRCNT; + rcd->rcvegr_tid_base = 0; + } else { + rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; + rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT + + (rcd->ctxt - 1) * rcd->rcvegrcnt; + } +} + +static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start, + u32 len, u32 which, struct qib_ctxtdata *rcd) +{ + int i; + unsigned long flags; + + switch (which) { + case TXCHK_CHG_TYPE_KERN: + /* see if we need to raise avail update threshold */ + spin_lock_irqsave(&dd->uctxt_lock, flags); + for (i = dd->first_user_ctxt; + dd->cspec->updthresh != dd->cspec->updthresh_dflt + && i < dd->cfgctxts; i++) + if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && + ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) + < dd->cspec->updthresh_dflt) + break; + spin_unlock_irqrestore(&dd->uctxt_lock, flags); + if (i == dd->cfgctxts) { + spin_lock_irqsave(&dd->sendctrl_lock, flags); + dd->cspec->updthresh = dd->cspec->updthresh_dflt; + dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); + dd->sendctrl |= (dd->cspec->updthresh & + SYM_RMASK(SendCtrl, AvailUpdThld)) << + SYM_LSB(SendCtrl, AvailUpdThld); + spin_unlock_irqrestore(&dd->sendctrl_lock, flags); + sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); + } + break; + case TXCHK_CHG_TYPE_USER: + spin_lock_irqsave(&dd->sendctrl_lock, flags); + if (rcd && rcd->subctxt_cnt && ((rcd->piocnt + / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { + dd->cspec->updthresh = (rcd->piocnt / + rcd->subctxt_cnt) - 1; + dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); + dd->sendctrl |= 
(dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ } else
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ break;
+ }
+}
+
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ qib_write_kreg(dd, kr_scratch, val);
+}
+
+#define VALID_TS_RD_REG_MASK 0xBF
+/**
+ * qib_7220_tempsense_rd - read register of temp sensor via TWSI
+ * @dd: the qlogic_ib device
+ * @regnum: register to read from
+ *
+ * returns reg contents (0..255) or < 0 for error
+ */
+static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ int ret;
+ u8 rdata;
+
+ if (regnum > 7) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* return a bogus value for (the one) register we do not have */
+ if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
+ ret = 0;
+ goto bail;
+ }
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto bail;
+
+ ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
+ if (!ret)
+ ret = rdata;
+
+ mutex_unlock(&dd->eep_lock);
+
+ /*
+ * There are three possibilities here:
+ * ret is actual value (0..255)
+ * ret is -ENXIO or -EINVAL from twsi code or this file
+ * ret is -EINTR from mutex_lock_interruptible.
+ */
+bail:
+ return ret;
+}
+
+/* Dummy function, as 7220 boards never disable EEPROM Write */
+static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ return 1;
+}
+
+/**
+ * qib_init_iba7220_funcs - set up the chip-specific function pointers
+ * @pdev: the pci_dev for the qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
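+ * Returns the allocated qib_devdata on success, or an ERR_PTR on
+ * failure; callers check the result with IS_ERR().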
+ */ +struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct qib_devdata *dd; + int ret; + u32 boardid, minwidth; + + dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) + + sizeof(struct qib_chippport_specific)); + if (IS_ERR(dd)) + goto bail; + + dd->f_bringup_serdes = qib_7220_bringup_serdes; + dd->f_cleanup = qib_setup_7220_cleanup; + dd->f_clear_tids = qib_7220_clear_tids; + dd->f_free_irq = qib_7220_free_irq; + dd->f_get_base_info = qib_7220_get_base_info; + dd->f_get_msgheader = qib_7220_get_msgheader; + dd->f_getsendbuf = qib_7220_getsendbuf; + dd->f_gpio_mod = gpio_7220_mod; + dd->f_eeprom_wen = qib_7220_eeprom_wen; + dd->f_hdrqempty = qib_7220_hdrqempty; + dd->f_ib_updown = qib_7220_ib_updown; + dd->f_init_ctxt = qib_7220_init_ctxt; + dd->f_initvl15_bufs = qib_7220_initvl15_bufs; + dd->f_intr_fallback = qib_7220_intr_fallback; + dd->f_late_initreg = qib_late_7220_initreg; + dd->f_setpbc_control = qib_7220_setpbc_control; + dd->f_portcntr = qib_portcntr_7220; + dd->f_put_tid = qib_7220_put_tid; + dd->f_quiet_serdes = qib_7220_quiet_serdes; + dd->f_rcvctrl = rcvctrl_7220_mod; + dd->f_read_cntrs = qib_read_7220cntrs; + dd->f_read_portcntrs = qib_read_7220portcntrs; + dd->f_reset = qib_setup_7220_reset; + dd->f_init_sdma_regs = init_sdma_7220_regs; + dd->f_sdma_busy = qib_sdma_7220_busy; + dd->f_sdma_gethead = qib_sdma_7220_gethead; + dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl; + dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt; + dd->f_sdma_update_tail = qib_sdma_update_7220_tail; + dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up; + dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up; + dd->f_sdma_init_early = qib_7220_sdma_init_early; + dd->f_sendctrl = sendctrl_7220_mod; + dd->f_set_armlaunch = qib_set_7220_armlaunch; + dd->f_set_cntr_sample = qib_set_cntr_7220_sample; + dd->f_iblink_state = qib_7220_iblink_state; + dd->f_ibphys_portstate = qib_7220_phys_portstate; + dd->f_get_ib_cfg = qib_7220_get_ib_cfg; + dd->f_set_ib_cfg = qib_7220_set_ib_cfg; + dd->f_set_ib_loopback = qib_7220_set_loopback; + dd->f_set_intr_state = qib_7220_set_intr_state; + dd->f_setextled = qib_setup_7220_setextled; + dd->f_txchk_change = qib_7220_txchk_change; + dd->f_update_usrhead = qib_update_7220_usrhead; + dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr; + dd->f_xgxs_reset = qib_7220_xgxs_reset; + dd->f_writescratch = writescratch; + dd->f_tempsense_rd = qib_7220_tempsense_rd; + /* + * Do remaining pcie setup and save pcie values in dd. + * Any error printing is already done by the init code. + * On return, we have the chip mapped, but chip registers + * are not set up until start of qib_init_7220_variables. 
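+ * (Until qib_init_7220_variables() sets QIB_PRESENT, any access to
+ * kregbase must use readq() directly, as qib_init_7220_variables()
+ * does for the revision register.)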
+ */ + ret = qib_pcie_ddinit(dd, pdev, ent); + if (ret < 0) + goto bail_free; + + /* initialize chip-specific variables */ + ret = qib_init_7220_variables(dd); + if (ret) + goto bail_cleanup; + + if (qib_mini_init) + goto bail; + + boardid = SYM_FIELD(dd->revision, Revision, + BoardID); + switch (boardid) { + case 0: + case 2: + case 10: + case 12: + minwidth = 16; /* x16 capable boards */ + break; + default: + minwidth = 8; /* x8 capable boards */ + break; + } + if (qib_pcie_params(dd, minwidth, NULL, NULL)) + qib_dev_err(dd, "Failed to setup PCIe or interrupts; " + "continuing anyway\n"); + + /* save IRQ for possible later use */ + dd->cspec->irq = pdev->irq; + + if (qib_read_kreg64(dd, kr_hwerrstatus) & + QLOGIC_IB_HWE_SERDESPLLFAILED) + qib_write_kreg(dd, kr_hwerrclear, + QLOGIC_IB_HWE_SERDESPLLFAILED); + + /* setup interrupt handler (interrupt type handled above) */ + qib_setup_7220_interrupt(dd); + qib_7220_init_hwerrors(dd); + + /* clear diagctrl register, in case diags were running and crashed */ + qib_write_kreg(dd, kr_hwdiagctrl, 0); + + goto bail; + +bail_cleanup: + qib_pcie_ddcleanup(dd); +bail_free: + qib_free_devdata(dd); + dd = ERR_PTR(ret); +bail: + return dd; +} diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c new file mode 100644 index 000000000000..503992d9c5ce --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -0,0 +1,7645 @@ +/* + * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +/* + * This file contains all of the code that is specific to the + * InfiniPath 7322 chip + */ + +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/jiffies.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_smi.h> + +#include "qib.h" +#include "qib_7322_regs.h" +#include "qib_qsfp.h" + +#include "qib_mad.h" + +static void qib_setup_7322_setextled(struct qib_pportdata *, u32); +static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t); +static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op); +static irqreturn_t qib_7322intr(int irq, void *data); +static irqreturn_t qib_7322bufavail(int irq, void *data); +static irqreturn_t sdma_intr(int irq, void *data); +static irqreturn_t sdma_idle_intr(int irq, void *data); +static irqreturn_t sdma_progress_intr(int irq, void *data); +static irqreturn_t sdma_cleanup_intr(int irq, void *data); +static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32, + struct qib_ctxtdata *rcd); +static u8 qib_7322_phys_portstate(u64); +static u32 qib_7322_iblink_state(u64); +static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd, + u16 linitcmd); +static void force_h1(struct qib_pportdata *); +static void adj_tx_serdes(struct qib_pportdata *); +static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8); +static void qib_7322_mini_pcs_reset(struct qib_pportdata *); + +static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32); +static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); + +#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) + +/* LE2 serdes values for different cases */ +#define LE2_DEFAULT 5 +#define LE2_5m 4 +#define LE2_QME 0 + +/* Below is special-purpose, so only really works for the IB SerDes blocks. */ +#define IBSD(hw_pidx) (hw_pidx + 2) + +/* these are variables for documentation and experimentation purposes */ +static const unsigned rcv_int_timeout = 375; +static const unsigned rcv_int_count = 16; +static const unsigned sdma_idle_cnt = 64; + +/* Time to stop altering Rx Equalization parameters, after link up. */ +#define RXEQ_DISABLE_MSECS 2500 + +/* + * Number of VLs we are configured to use (to allow for more + * credits per vl, etc.) 
+ */ +ushort qib_num_cfg_vls = 2; +module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO); +MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)"); + +static ushort qib_chase = 1; +module_param_named(chase, qib_chase, ushort, S_IRUGO); +MODULE_PARM_DESC(chase, "Enable state chase handling"); + +static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ +module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); +MODULE_PARM_DESC(long_attenuation, \ + "attenuation cutoff (dB) for long copper cable setup"); + +static ushort qib_singleport; +module_param_named(singleport, qib_singleport, ushort, S_IRUGO); +MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); + +#define MAX_ATTEN_LEN 64 /* plenty for any real system */ +/* for read back, default index is ~5m copper cable */ +static char txselect_list[MAX_ATTEN_LEN] = "10"; +static struct kparam_string kp_txselect = { + .string = txselect_list, + .maxlen = MAX_ATTEN_LEN +}; +static int setup_txselect(const char *, struct kernel_param *); +module_param_call(txselect, setup_txselect, param_get_string, + &kp_txselect, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(txselect, \ + "Tx serdes indices (for no QSFP or invalid QSFP data)"); + +#define BOARD_QME7342 5 +#define BOARD_QMH7342 6 +#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ + BOARD_QMH7342) +#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ + BOARD_QME7342) + +#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64)) + +#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64))) + +#define MASK_ACROSS(lsb, msb) \ + (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb)) + +#define SYM_RMASK(regname, fldname) ((u64) \ + QIB_7322_##regname##_##fldname##_RMASK) + +#define SYM_MASK(regname, fldname) ((u64) \ + QIB_7322_##regname##_##fldname##_RMASK << \ + QIB_7322_##regname##_##fldname##_LSB) + +#define SYM_FIELD(value, regname, fldname) ((u64) \ + (((value) >> SYM_LSB(regname, fldname)) & \ + SYM_RMASK(regname, fldname))) + +/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */ +#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \ + (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits)) + +#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) +#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) +#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask) +#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask) +#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port) +/* Below because most, but not all, fields of IntMask have that full suffix */ +#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port) + + +#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB) + +/* + * the size bits give us 2^N, in KB units. 0 marks as invalid, + * and 7 is reserved. 
We currently use only 2KB and 4KB + */ +#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB +#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */ +#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */ +#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ + +#define SendIBSLIDAssignMask \ + QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK +#define SendIBSLMCMask \ + QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK + +#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn) +#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn) +#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn) +#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn) +#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN) +#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN) + +#define _QIB_GPIO_SDA_NUM 1 +#define _QIB_GPIO_SCL_NUM 0 +#define QIB_EEPROM_WEN_NUM 14 +#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */ + +/* HW counter clock is at 4nsec */ +#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000 + +/* full speed IB port 1 only */ +#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR) +#define PORT_SPD_CAP_SHIFT 3 + +/* full speed featuremask, both ports */ +#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT)) + +/* + * This file contains almost all the chip-specific register information and + * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip. + */ + +/* Use defines to tie machine-generated names to lower-case names */ +#define kr_contextcnt KREG_IDX(ContextCnt) +#define kr_control KREG_IDX(Control) +#define kr_counterregbase KREG_IDX(CntrRegBase) +#define kr_errclear KREG_IDX(ErrClear) +#define kr_errmask KREG_IDX(ErrMask) +#define kr_errstatus KREG_IDX(ErrStatus) +#define kr_extctrl KREG_IDX(EXTCtrl) +#define kr_extstatus KREG_IDX(EXTStatus) +#define kr_gpio_clear KREG_IDX(GPIOClear) +#define kr_gpio_mask KREG_IDX(GPIOMask) +#define kr_gpio_out KREG_IDX(GPIOOut) +#define kr_gpio_status KREG_IDX(GPIOStatus) +#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) +#define kr_debugportval KREG_IDX(DebugPortValueReg) +#define kr_fmask KREG_IDX(feature_mask) +#define kr_act_fmask KREG_IDX(active_feature_mask) +#define kr_hwerrclear KREG_IDX(HwErrClear) +#define kr_hwerrmask KREG_IDX(HwErrMask) +#define kr_hwerrstatus KREG_IDX(HwErrStatus) +#define kr_intclear KREG_IDX(IntClear) +#define kr_intmask KREG_IDX(IntMask) +#define kr_intredirect KREG_IDX(IntRedirect0) +#define kr_intstatus KREG_IDX(IntStatus) +#define kr_pagealign KREG_IDX(PageAlign) +#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0) +#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */ +#define kr_rcvegrbase KREG_IDX(RcvEgrBase) +#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) +#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) +#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) +#define kr_rcvhdrsize KREG_IDX(RcvHdrSize) +#define kr_rcvtidbase KREG_IDX(RcvTIDBase) +#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) +#define kr_revision KREG_IDX(Revision) +#define kr_scratch KREG_IDX(Scratch) +#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */ +#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */ +#define kr_sendctrl KREG_IDX(SendCtrl) +#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */ +#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */ +#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr) +#define kr_sendpiobufbase KREG_IDX(SendBufBase) +#define 
kr_sendpiobufcnt KREG_IDX(SendBufCnt) +#define kr_sendpiosize KREG_IDX(SendBufSize) +#define kr_sendregbase KREG_IDX(SendRegBase) +#define kr_sendbufavail0 KREG_IDX(SendBufAvail0) +#define kr_userregbase KREG_IDX(UserRegBase) +#define kr_intgranted KREG_IDX(Int_Granted) +#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int) +#define kr_intblocked KREG_IDX(IntBlocked) +#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG) + +/* + * per-port kernel registers. Access only with qib_read_kreg_port() + * or qib_write_kreg_port() + */ +#define krp_errclear KREG_IBPORT_IDX(ErrClear) +#define krp_errmask KREG_IBPORT_IDX(ErrMask) +#define krp_errstatus KREG_IBPORT_IDX(ErrStatus) +#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0) +#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit) +#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID) +#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig) +#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA) +#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB) +#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC) +#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA) +#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB) +#define krp_txestatus KREG_IBPORT_IDX(TXEStatus) +#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0) +#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl) +#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey) +#define krp_psinterval KREG_IBPORT_IDX(PSInterval) +#define krp_psstart KREG_IBPORT_IDX(PSStart) +#define krp_psstat KREG_IBPORT_IDX(PSStat) +#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP) +#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl) +#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt) +#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA) +#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0) +#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15) +#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl) +#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl) +#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase) +#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0) +#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1) +#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2) +#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0) +#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1) +#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2) +#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt) +#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead) +#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr) +#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt) +#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen) +#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld) +#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt) +#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus) +#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail) +#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom) +#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign) +#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask) +#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX) +#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD) +#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE) +#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl) + +/* + * Per-context kernel registers. 
Access only with qib_read_kreg_ctxt()
+ * or qib_write_kreg_ctxt()
+ */
+#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+/*
+ * TID Flow table, per context. Reduces
+ * number of hdrq updates to one per flow (or on errors).
+ * context 0 and 1 share same memory, but have distinct
+ * addresses. Since for now, we never use expected sends
+ * on kernel contexts, we don't worry about that (we initialize
+ * those entries for ctxt 0/1 on driver load twice, for example).
+ */
+#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
+#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
+
+/* these are the error bits in the tid flows, and are W1C */
+#define TIDFLOW_ERRBITS ( \
+ (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
+ SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
+ (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
+ SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
+
+/* Most (not all) Counters are per-IBport.
+ * Requires LBIntCnt is at offset 0 in the group
+ */
+#define CREG_IDX(regname) \
+((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
+
+#define crp_badformat CREG_IDX(RxVersionErrCnt)
+#define crp_err_rlen CREG_IDX(RxLenErrCnt)
+#define crp_erricrc CREG_IDX(RxICRCErrCnt)
+#define crp_errlink CREG_IDX(RxLinkMalformCnt)
+#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
+#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
+#define crp_pktrcv CREG_IDX(RxDataPktCnt)
+#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define crp_pktsend CREG_IDX(TxDataPktCnt)
+#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
+#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
+#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
+#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
+#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
+#define crp_rcvebp CREG_IDX(RxEBPCnt)
+#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
+#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
+#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
+#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
+#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
+#define crp_sendstall CREG_IDX(TxFlowStallCnt)
+#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
+#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
+#define crp_txlenerr CREG_IDX(TxLenErrCnt)
+#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
+#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
+#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
+#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
+#define crp_wordrcv CREG_IDX(RxDwordCnt)
+#define crp_wordsend CREG_IDX(TxDwordCnt)
+#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
+
+/* these are the (few) counters that are not port-specific */
+#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
+ QIB_7322_LBIntCnt_OFFS) /
sizeof(u64)) +#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt) +#define cr_lbint CREG_DEVIDX(LBIntCnt) +#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt) +#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt) +#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt) +#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt) +#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt) + +/* no chip register for # of IB ports supported, so define */ +#define NUM_IB_PORTS 2 + +/* 1 VL15 buffer per hardware IB port, no register for this, so define */ +#define NUM_VL15_BUFS NUM_IB_PORTS + +/* + * context 0 and 1 are special, and there is no chip register that + * defines this value, so we have to define it here. + * These are all allocated to either 0 or 1 for single port + * hardware configuration, otherwise each gets half + */ +#define KCTXT0_EGRCNT 2048 + +/* values for vl and port fields in PBC, 7322-specific */ +#define PBC_PORT_SEL_LSB 26 +#define PBC_PORT_SEL_RMASK 1 +#define PBC_VL_NUM_LSB 27 +#define PBC_VL_NUM_RMASK 7 +#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */ +#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */ + +static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = { + [IB_RATE_2_5_GBPS] = 16, + [IB_RATE_5_GBPS] = 8, + [IB_RATE_10_GBPS] = 4, + [IB_RATE_20_GBPS] = 2, + [IB_RATE_30_GBPS] = 2, + [IB_RATE_40_GBPS] = 1 +}; + +#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive) +#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive) + +/* link training states, from IBC */ +#define IB_7322_LT_STATE_DISABLED 0x00 +#define IB_7322_LT_STATE_LINKUP 0x01 +#define IB_7322_LT_STATE_POLLACTIVE 0x02 +#define IB_7322_LT_STATE_POLLQUIET 0x03 +#define IB_7322_LT_STATE_SLEEPDELAY 0x04 +#define IB_7322_LT_STATE_SLEEPQUIET 0x05 +#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08 +#define IB_7322_LT_STATE_CFGRCVFCFG 0x09 +#define IB_7322_LT_STATE_CFGWAITRMT 0x0a +#define IB_7322_LT_STATE_CFGIDLE 0x0b +#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c +#define IB_7322_LT_STATE_TXREVLANES 0x0d +#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e +#define IB_7322_LT_STATE_RECOVERIDLE 0x0f +#define IB_7322_LT_STATE_CFGENH 0x10 +#define IB_7322_LT_STATE_CFGTEST 0x11 + +/* link state machine states from IBC */ +#define IB_7322_L_STATE_DOWN 0x0 +#define IB_7322_L_STATE_INIT 0x1 +#define IB_7322_L_STATE_ARM 0x2 +#define IB_7322_L_STATE_ACTIVE 0x3 +#define IB_7322_L_STATE_ACT_DEFER 0x4 + +static const u8 qib_7322_physportstate[0x20] = { + [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, + [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, + [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, + [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, + [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, + [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, + [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_7322_LT_STATE_CFGRCVFCFG] = + IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_7322_LT_STATE_CFGWAITRMT] = + IB_PHYSPORTSTATE_CFG_TRAIN, + [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE, + [IB_7322_LT_STATE_RECOVERRETRAIN] = + IB_PHYSPORTSTATE_LINK_ERR_RECOVER, + [IB_7322_LT_STATE_RECOVERWAITRMT] = + IB_PHYSPORTSTATE_LINK_ERR_RECOVER, + [IB_7322_LT_STATE_RECOVERIDLE] = + IB_PHYSPORTSTATE_LINK_ERR_RECOVER, + [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH, + [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH, + [0x14] = 
IB_PHYSPORTSTATE_CFG_TRAIN, + [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, + [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN +}; + +struct qib_chip_specific { + u64 __iomem *cregbase; + u64 *cntrs; + spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */ + spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */ + u64 main_int_mask; /* clear bits which have dedicated handlers */ + u64 int_enable_mask; /* for per port interrupts in single port mode */ + u64 errormask; + u64 hwerrmask; + u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */ + u64 gpio_mask; /* shadow the gpio mask register */ + u64 extctrl; /* shadow the gpio output enable, etc... */ + u32 ncntrs; + u32 nportcntrs; + u32 cntrnamelen; + u32 portcntrnamelen; + u32 numctxts; + u32 rcvegrcnt; + u32 updthresh; /* current AvailUpdThld */ + u32 updthresh_dflt; /* default AvailUpdThld */ + u32 r1; + int irq; + u32 num_msix_entries; + u32 sdmabufcnt; + u32 lastbuf_for_pio; + u32 stay_in_freeze; + u32 recovery_ports_initted; + struct msix_entry *msix_entries; + void **msix_arg; + unsigned long *sendchkenable; + unsigned long *sendgrhchk; + unsigned long *sendibchk; + u32 rcvavail_timeout[18]; + char emsgbuf[128]; /* for device error interrupt msg buffer */ +}; + +/* Table of entries in "human readable" form Tx Emphasis. */ +struct txdds_ent { + u8 amp; + u8 pre; + u8 main; + u8 post; +}; + +struct vendor_txdds_ent { + u8 oui[QSFP_VOUI_LEN]; + u8 *partnum; + struct txdds_ent sdr; + struct txdds_ent ddr; + struct txdds_ent qdr; +}; + +static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); + +#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ +#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */ +#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ + +#define H1_FORCE_VAL 8 +#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */ +#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */ + +/* The static and dynamic registers are paired, and the pairs indexed by spd */ +#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \ + + ((spd) * 2)) + +#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */ +#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */ +#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */ +#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */ +#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */ + +struct qib_chippport_specific { + u64 __iomem *kpregbase; + u64 __iomem *cpregbase; + u64 *portcntrs; + struct qib_pportdata *ppd; + wait_queue_head_t autoneg_wait; + struct delayed_work autoneg_work; + struct delayed_work ipg_work; + struct timer_list chase_timer; + /* + * these 5 fields are used to establish deltas for IB symbol + * errors and linkrecovery errors. They can be reported on + * some chips during link negotiation prior to INIT, and with + * DDR when faking DDR negotiations with non-IBTA switches. + * The chip counters are adjusted at driver unload if there is + * a non-zero delta. 
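+ * (Editorial gloss, inferred from the field names: the *snap fields
+ * capture the raw counter value at the event of interest, and the
+ * *delta fields accumulate the amount to subtract when the counters
+ * are finally reported.)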
+ */ + u64 ibdeltainprog; + u64 ibsymdelta; + u64 ibsymsnap; + u64 iblnkerrdelta; + u64 iblnkerrsnap; + u64 iblnkdownsnap; + u64 iblnkdowndelta; + u64 ibmalfdelta; + u64 ibmalfsnap; + u64 ibcctrl_a; /* krp_ibcctrl_a shadow */ + u64 ibcctrl_b; /* krp_ibcctrl_b shadow */ + u64 qdr_dfe_time; + u64 chase_end; + u32 autoneg_tries; + u32 recovery_init; + u32 qdr_dfe_on; + u32 qdr_reforce; + /* + * Per-bay per-channel rcv QMH H1 values and Tx values for QDR. + * entry zero is unused, to simplify indexing + */ + u8 h1_val; + u8 no_eep; /* txselect table index to use if no qsfp info */ + u8 ipg_tries; + u8 ibmalfusesnap; + struct qib_qsfp_data qsfp_data; + char epmsgbuf[192]; /* for port error interrupt msg buffer */ +}; + +static struct { + const char *name; + irq_handler_t handler; + int lsb; + int port; /* 0 if not port-specific, else port # */ +} irq_table[] = { + { QIB_DRV_NAME, qib_7322intr, -1, 0 }, + { QIB_DRV_NAME " (buf avail)", qib_7322bufavail, + SYM_LSB(IntStatus, SendBufAvail), 0 }, + { QIB_DRV_NAME " (sdma 0)", sdma_intr, + SYM_LSB(IntStatus, SDmaInt_0), 1 }, + { QIB_DRV_NAME " (sdma 1)", sdma_intr, + SYM_LSB(IntStatus, SDmaInt_1), 2 }, + { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr, + SYM_LSB(IntStatus, SDmaIdleInt_0), 1 }, + { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr, + SYM_LSB(IntStatus, SDmaIdleInt_1), 2 }, + { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr, + SYM_LSB(IntStatus, SDmaProgressInt_0), 1 }, + { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr, + SYM_LSB(IntStatus, SDmaProgressInt_1), 2 }, + { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr, + SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 }, + { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr, + SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, +}; + +/* ibcctrl bits */ +#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 +/* cycle through TS1/TS2 till OK */ +#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2 +/* wait for TS1, then go on */ +#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3 +#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16 + +#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ +#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ +#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ + +#define BLOB_7322_IBCHG 0x101 + +static inline void qib_write_kreg(const struct qib_devdata *dd, + const u32 regno, u64 value); +static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32); +static void write_7322_initregs(struct qib_devdata *); +static void write_7322_init_portregs(struct qib_pportdata *); +static void setup_7322_link_recovery(struct qib_pportdata *, u32); +static void check_7322_rxe_status(struct qib_pportdata *); +static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *); + +/** + * qib_read_ureg32 - read 32-bit virtualized per-context register + * @dd: device + * @regno: register number + * @ctxt: context number + * + * Return the contents of a register that is virtualized to be per context. + * Returns -1 on errors (not distinguishable from valid contents at + * runtime; we may add a separate error variable at some point). + */ +static inline u32 qib_read_ureg32(const struct qib_devdata *dd, + enum qib_ureg regno, int ctxt) +{ + if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) + return 0; + return readl(regno + (u64 __iomem *)( + (dd->ureg_align * ctxt) + (dd->userbase ? 
+ (char __iomem *)dd->userbase : + (char __iomem *)dd->kregbase + dd->uregbase))); +} + +/** + * qib_read_ureg - read virtualized per-context register + * @dd: device + * @regno: register number + * @ctxt: context number + * + * Return the contents of a register that is virtualized to be per context. + * Returns -1 on errors (not distinguishable from valid contents at + * runtime; we may add a separate error variable at some point). + */ +static inline u64 qib_read_ureg(const struct qib_devdata *dd, + enum qib_ureg regno, int ctxt) +{ + + if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) + return 0; + return readq(regno + (u64 __iomem *)( + (dd->ureg_align * ctxt) + (dd->userbase ? + (char __iomem *)dd->userbase : + (char __iomem *)dd->kregbase + dd->uregbase))); +} + +/** + * qib_write_ureg - write virtualized per-context register + * @dd: device + * @regno: register number + * @value: value + * @ctxt: context + * + * Write the contents of a register that is virtualized to be per context. + */ +static inline void qib_write_ureg(const struct qib_devdata *dd, + enum qib_ureg regno, u64 value, int ctxt) +{ + u64 __iomem *ubase; + if (dd->userbase) + ubase = (u64 __iomem *) + ((char __iomem *) dd->userbase + + dd->ureg_align * ctxt); + else + ubase = (u64 __iomem *) + (dd->uregbase + + (char __iomem *) dd->kregbase + + dd->ureg_align * ctxt); + + if (dd->kregbase && (dd->flags & QIB_PRESENT)) + writeq(value, &ubase[regno]); +} + +static inline u32 qib_read_kreg32(const struct qib_devdata *dd, + const u32 regno) +{ + if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) + return -1; + return readl((u32 __iomem *) &dd->kregbase[regno]); +} + +static inline u64 qib_read_kreg64(const struct qib_devdata *dd, + const u32 regno) +{ + if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) + return -1; + return readq(&dd->kregbase[regno]); +} + +static inline void qib_write_kreg(const struct qib_devdata *dd, + const u32 regno, u64 value) +{ + if (dd->kregbase && (dd->flags & QIB_PRESENT)) + writeq(value, &dd->kregbase[regno]); +} + +/* + * not many sanity checks for the port-specific kernel register routines, + * since they are only used when it's known to be safe. 
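+ */
+
+/*
+ * Illustrative sketch (editor's addition, not part of the patch): the
+ * per-context user-register accessors above all compute the same base
+ * address, ureg_align bytes per context, relative to userbase when the
+ * user registers have their own mapping, else relative to
+ * kregbase + uregbase. A hypothetical stand-alone helper:
+ */
+static inline u64 __iomem *example_ureg_addr(char __iomem *userbase,
+ char __iomem *kregbase, u64 uregbase, u32 ureg_align, int ctxt)
+{
+ /* pick whichever mapping the device actually provides */
+ char __iomem *base = userbase ? userbase : kregbase + uregbase;
+
+ return (u64 __iomem *)(base + (u64)ureg_align * ctxt);
+}
+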
+static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
+ const u16 regno)
+{
+ if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
+ return 0ULL;
+ return readq(&ppd->cpspec->kpregbase[regno]);
+}
+
+static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
+ const u16 regno, u64 value)
+{
+ if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
+ (ppd->dd->flags & QIB_PRESENT))
+ writeq(value, &ppd->cpspec->kpregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
+
+static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
+ u16 regno, u64 value)
+{
+ if (ppd->cpspec && ppd->cpspec->cpregbase &&
+ (ppd->dd->flags & QIB_PRESENT))
+ writeq(value, &ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
+ u16 regno)
+{
+ if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+ !(ppd->dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
+ u16 regno)
+{
+ if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+ !(ppd->dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&ppd->cpspec->cpregbase[regno]);
+}
+
+/* bits in Control register */
+#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
+#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
+
+/* bits in general interrupt regs */
+#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
+#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
+#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
+#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
+#define QIB_I_C_ERROR INT_MASK(Err)
+
+#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
+#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
+#define QIB_I_GPIO INT_MASK(AssertGPIO)
+#define QIB_I_P_SDMAINT(pidx) \
+ (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+ INT_MASK_P(SDmaProgress, pidx) | \
+ INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are "per port" */
+#define QIB_I_P_BITSEXTANT(pidx) \
+ (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
+ INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+ INT_MASK_P(SDmaProgress, pidx) | \
+ INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are common to a device */
+/* currently unused: QIB_I_SPIOSENT */
+#define QIB_I_C_BITSEXTANT \
+ (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
+ QIB_I_SPIOSENT | \
+ QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
+
+#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
+ QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
+
+/*
+ * Error bits that are "per port".
+ */
+#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
+#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
+#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
+#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
+#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
+#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
+#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
+#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
+#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
+#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
+#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
+#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
+#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
+#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
+#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
+#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
+#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
+#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
+#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
+#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
+#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
+#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
+#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
+#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
+#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
+#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
+#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
+#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
+
+#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
+#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
+#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
+#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
+#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
+#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
+#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
+#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
+#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
+#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
+#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
+
+/* Error bits that are common to a device */
+#define QIB_E_RESET ERR_MASK(ResetNegated)
+#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
+#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
+
+/*
+ * Per-chip (rather than per-port) errors. Most either do
+ * nothing but trigger a print (because they self-recover, or
+ * always occur in tandem with other errors that handle the
+ * issue), or indicate errors with no recovery, but we want
+ * to know that they happened.
+ */
+#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
+#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
+#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
+#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
+#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
+#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
+#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
+#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
+
+/* SDMA chip errors (not per port)
+ * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
+ * the SDMAHALT error immediately, so we just print the dup error via the
+ * E_AUTO mechanism. This is true of most of the per-port fatal errors
+ * as well, but since this is port-independent, by definition, it's
+ * handled a bit differently.
+ * SDMA_VL15 and SDMA_WRONG_PORT are per-packet send errors, and so
+ * are handled in the same manner as other per-packet errors.
+ */
+#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
+#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
+#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
+
+/*
+ * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
+ * it is used to print "common" packet errors.
+ */
+#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
+ QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
+ QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
+ QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+ QIB_E_P_REBP)
+
+/* Error bits that are packet-related (Receive, per-port) */
+#define QIB_E_P_RPKTERRS (\
+ QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
+ QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
+ QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
+ QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
+ QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
+ QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
+
+/*
+ * Error bits that are Send-related (per port)
+ * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
+ * All of these potentially need to have a buffer disarmed
+ */
+#define QIB_E_P_SPKTERRS (\
+ QIB_E_P_SUNEXP_PKTNUM |\
+ QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+ QIB_E_P_SMAXPKTLEN |\
+ QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+ QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
+ QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
+
+#define QIB_E_SPKTERRS ( \
+ QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
+ ERR_MASK_N(SendUnsupportedVLErr) | \
+ QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
+
+#define QIB_E_P_SDMAERRS ( \
+ QIB_E_P_SDMAHALT | \
+ QIB_E_P_SDMADESCADDRMISALIGN | \
+ QIB_E_P_SDMAUNEXPDATA | \
+ QIB_E_P_SDMAMISSINGDW | \
+ QIB_E_P_SDMADWEN | \
+ QIB_E_P_SDMARPYTAG | \
+ QIB_E_P_SDMA1STDESC | \
+ QIB_E_P_SDMABASE | \
+ QIB_E_P_SDMATAILOUTOFBOUND | \
+ QIB_E_P_SDMAOUTOFBOUND | \
+ QIB_E_P_SDMAGENMISMATCH)
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories, and the repeat definition
+ * is not a problem.
+ */
+#define QIB_E_P_BITSEXTANT ( \
+ QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
+ QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
+ QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
+ QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
+ )
+
+/*
+ * These are errors that can occur when the link
+ * changes state while a packet is being sent or received. This doesn't
+ * cover things like EBP or VCRC that can be the result of a send
+ * occurring while the link changes state, so we receive a "known bad" packet.
+ * All of these are "per port", so renamed:
+ */
+#define QIB_E_P_LINK_PKTERRS (\
+ QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+ QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
+ QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
+ QIB_E_P_RUNEXPCHAR)
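+
+/*
+ * Editor's aside (hypothetical, not from the patch): repeating a term
+ * in an OR-ed mask is harmless because OR is idempotent, which is why
+ * the overlapping definitions above and below are safe:
+ */
+static inline int example_or_is_idempotent(void)
+{
+ u64 a = 1ULL << 3, b = 1ULL << 7;
+
+ /* (a | b | a) has exactly the same bits set as (a | b) */
+ return (a | b | a) == (a | b); /* always 1 */
+}
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories (such as QIB_E_SPKTERRS),
+ * and the repeat definition is not a problem.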
+ */ +#define QIB_E_C_BITSEXTANT (\ + QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\ + QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\ + QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE) + +/* Likewise Neuter E_SPKT_ERRS_IGNORE */ +#define E_SPKT_ERRS_IGNORE 0 + +#define QIB_EXTS_MEMBIST_DISABLED \ + SYM_MASK(EXTStatus, MemBISTDisabled) +#define QIB_EXTS_MEMBIST_ENDTEST \ + SYM_MASK(EXTStatus, MemBISTEndTest) + +#define QIB_E_SPIOARMLAUNCH \ + ERR_MASK(SendArmLaunchErr) + +#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd) +#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd) + +/* + * IBTA_1_2 is set when multiple speeds are enabled (normal), + * and also if forced QDR (only QDR enabled). It's enabled for the + * forced QDR case so that scrambling will be enabled by the TS3 + * exchange, when supported by both sides of the link. + */ +#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE) +#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED) +#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR) +#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) +#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) +#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \ + SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)) +#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR) + +#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod) +#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod) + +#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS) +#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS)) +#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS)) + +#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP) +#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP) +#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \ + SYM_MASK(IBCCtrlB_0, HRTBT_ENB)) +#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \ + SYM_LSB(IBCCtrlB_0, HRTBT_ENB)) +#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB) + +#define IBA7322_REDIRECT_VEC_PER_REG 12 + +#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En) +#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En) +#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En) +#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En) +#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En) + +#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */ + +#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \ + .msg = #fldname } +#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \ + fldname##Mask##_##port), .msg = #fldname } +static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = { + HWE_AUTO_P(IBSerdesPClkNotDetect, 1), + HWE_AUTO_P(IBSerdesPClkNotDetect, 0), + HWE_AUTO(PCIESerdesPClkNotDetect), + HWE_AUTO(PowerOnBISTFailed), + HWE_AUTO(TempsenseTholdReached), + HWE_AUTO(MemoryErr), + HWE_AUTO(PCIeBusParityErr), + HWE_AUTO(PcieCplTimeout), + HWE_AUTO(PciePoisonedTLP), + HWE_AUTO_P(SDmaMemReadErr, 1), + HWE_AUTO_P(SDmaMemReadErr, 0), + HWE_AUTO_P(IBCBusFromSPCParityErr, 1), + HWE_AUTO_P(IBCBusFromSPCParityErr, 0), + HWE_AUTO_P(statusValidNoEop, 1), + HWE_AUTO_P(statusValidNoEop, 0), + HWE_AUTO(LATriggered), + { .mask = 0 } +}; + +#define 
E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \ + .msg = #fldname } +#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \ + .msg = #fldname } +static const struct qib_hwerror_msgs qib_7322error_msgs[] = { + E_AUTO(ResetNegated), + E_AUTO(HardwareErr), + E_AUTO(InvalidAddrErr), + E_AUTO(SDmaVL15Err), + E_AUTO(SBufVL15MisUseErr), + E_AUTO(InvalidEEPCmd), + E_AUTO(RcvContextShareErr), + E_AUTO(SendVLMismatchErr), + E_AUTO(SendArmLaunchErr), + E_AUTO(SendSpecialTriggerErr), + E_AUTO(SDmaWrongPortErr), + E_AUTO(SDmaBufMaskDuplicateErr), + E_AUTO(RcvHdrFullErr), + E_AUTO(RcvEgrFullErr), + { .mask = 0 } +}; + +static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = { + E_P_AUTO(IBStatusChanged), + E_P_AUTO(SHeadersErr), + E_P_AUTO(VL15BufMisuseErr), + /* + * SDmaHaltErr is not really an error, make it clearer; + */ + {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"}, + E_P_AUTO(SDmaDescAddrMisalignErr), + E_P_AUTO(SDmaUnexpDataErr), + E_P_AUTO(SDmaMissingDwErr), + E_P_AUTO(SDmaDwEnErr), + E_P_AUTO(SDmaRpyTagErr), + E_P_AUTO(SDma1stDescErr), + E_P_AUTO(SDmaBaseErr), + E_P_AUTO(SDmaTailOutOfBoundErr), + E_P_AUTO(SDmaOutOfBoundErr), + E_P_AUTO(SDmaGenMismatchErr), + E_P_AUTO(SendBufMisuseErr), + E_P_AUTO(SendUnsupportedVLErr), + E_P_AUTO(SendUnexpectedPktNumErr), + E_P_AUTO(SendDroppedDataPktErr), + E_P_AUTO(SendDroppedSmpPktErr), + E_P_AUTO(SendPktLenErr), + E_P_AUTO(SendUnderRunErr), + E_P_AUTO(SendMaxPktLenErr), + E_P_AUTO(SendMinPktLenErr), + E_P_AUTO(RcvIBLostLinkErr), + E_P_AUTO(RcvHdrErr), + E_P_AUTO(RcvHdrLenErr), + E_P_AUTO(RcvBadTidErr), + E_P_AUTO(RcvBadVersionErr), + E_P_AUTO(RcvIBFlowErr), + E_P_AUTO(RcvEBPErr), + E_P_AUTO(RcvUnsupportedVLErr), + E_P_AUTO(RcvUnexpectedCharErr), + E_P_AUTO(RcvShortPktLenErr), + E_P_AUTO(RcvLongPktLenErr), + E_P_AUTO(RcvMaxPktLenErr), + E_P_AUTO(RcvMinPktLenErr), + E_P_AUTO(RcvICRCErr), + E_P_AUTO(RcvVCRCErr), + E_P_AUTO(RcvFormatErr), + { .mask = 0 } +}; + +/* + * Below generates "auto-message" for interrupts not specific to any port or + * context + */ +#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \ + .msg = #fldname } +/* Below generates "auto-message" for interrupts specific to a port */ +#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\ + SYM_LSB(IntMask, fldname##Mask##_0), \ + SYM_LSB(IntMask, fldname##Mask##_1)), \ + .msg = #fldname "_P" } +/* For some reason, the SerDesTrimDone bits are reversed */ +#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\ + SYM_LSB(IntMask, fldname##Mask##_1), \ + SYM_LSB(IntMask, fldname##Mask##_0)), \ + .msg = #fldname "_P" } +/* + * Below generates "auto-message" for interrupts specific to a context, + * with ctxt-number appended + */ +#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\ + SYM_LSB(IntMask, fldname##0IntMask), \ + SYM_LSB(IntMask, fldname##17IntMask)), \ + .msg = #fldname "_C"} + +static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = { + INTR_AUTO_P(SDmaInt), + INTR_AUTO_P(SDmaProgressInt), + INTR_AUTO_P(SDmaIdleInt), + INTR_AUTO_P(SDmaCleanupDone), + INTR_AUTO_C(RcvUrg), + INTR_AUTO_P(ErrInt), + INTR_AUTO(ErrInt), /* non-port-specific errs */ + INTR_AUTO(AssertGPIOInt), + INTR_AUTO_P(SendDoneInt), + INTR_AUTO(SendBufAvailInt), + INTR_AUTO_C(RcvAvail), + { .mask = 0 } +}; + +#define TXSYMPTOM_AUTO_P(fldname) \ + { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname } +static const struct qib_hwerror_msgs hdrchk_msgs[] = { + TXSYMPTOM_AUTO_P(NonKeyPacket), + TXSYMPTOM_AUTO_P(GRHFail), + 
TXSYMPTOM_AUTO_P(PkeyFail),
+ TXSYMPTOM_AUTO_P(QPFail),
+ TXSYMPTOM_AUTO_P(SLIDFail),
+ TXSYMPTOM_AUTO_P(RawIPV6),
+ TXSYMPTOM_AUTO_P(PacketTooSmall),
+ { .mask = 0 }
+};
+
+#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer so it can be re-used;
+ * we don't need to force the update of pioavail.
+ */
+static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 i;
+ int any;
+ u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ unsigned long sbuf[4];
+
+ /*
+ * It's possible that sendbuffererror could have bits set; might
+ * have already done this as a result of hardware error handling.
+ */
+ any = 0;
+ for (i = 0; i < regcnt; ++i) {
+ sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
+ if (sbuf[i]) {
+ any = 1;
+ qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
+ }
+ }
+
+ if (any)
+ qib_disarm_piobufs_set(dd, sbuf, piobcnt);
+}
+
+/* No txe_recover yet, if ever */
+
+/* No decode_errors yet */
+static void err_decode(char *msg, size_t len, u64 errs,
+ const struct qib_hwerror_msgs *msp)
+{
+ u64 these, lmask;
+ int took, multi, n = 0;
+
+ while (msp && msp->mask) {
+ multi = (msp->mask & (msp->mask - 1));
+ while (errs & msp->mask) {
+ these = (errs & msp->mask);
+ lmask = (these & (these - 1)) ^ these;
+ if (len) {
+ if (n++) {
+ /* separate the strings */
+ *msg++ = ',';
+ len--;
+ }
+ took = scnprintf(msg, len, "%s", msp->msg);
+ len -= took;
+ msg += took;
+ }
+ errs &= ~lmask;
+ if (len && multi) {
+ /* More than one bit this mask */
+ int idx = -1;
+
+ while (lmask & msp->mask) {
+ ++idx;
+ lmask >>= 1;
+ }
+ took = scnprintf(msg, len, "_%d", idx);
+ len -= took;
+ msg += took;
+ }
+ }
+ ++msp;
+ }
+ /* If some bits are left, show in hex. */
+ if (len && errs)
+ snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
+ (unsigned long long) errs);
+}
+
+/* only called if r1 set */
+static void flush_fifo(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *piobuf;
+ u32 bufn;
+ u32 *hdr;
+ u64 pbc;
+ const unsigned hdrwords = 7;
+ static struct qib_ib_header ibhdr = {
+ .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
+ .lrh[1] = IB_LID_PERMISSIVE,
+ .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
+ .lrh[3] = IB_LID_PERMISSIVE,
+ .u.oth.bth[0] = cpu_to_be32(
+ (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
+ .u.oth.bth[1] = cpu_to_be32(0),
+ .u.oth.bth[2] = cpu_to_be32(0),
+ .u.oth.u.ud.deth[0] = cpu_to_be32(0),
+ .u.oth.u.ud.deth[1] = cpu_to_be32(0),
+ };
+
+ /*
+ * Send a dummy VL15 packet to flush the launch FIFO.
+ * This will not actually be sent since the TxeBypassIbc bit is set.
+ */
+ pbc = PBC_7322_VL15_SEND |
+ (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
+ (hdrwords + SIZE_OF_CRC);
+ piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
+ if (!piobuf)
+ return;
+ writeq(pbc, piobuf);
+ hdr = (u32 *) &ibhdr;
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf + 2, hdr, hdrwords);
+ qib_sendbuf_done(dd, bufn);
+}
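+
+/*
+ * Editor's aside (hypothetical helper, not part of the patch):
+ * err_decode() above leans on two classic bit tricks. x & (x - 1)
+ * clears the lowest set bit, so a nonzero result means more than one
+ * bit is set; ((x & (x - 1)) ^ x) isolates the lowest set bit:
+ */
+static inline u64 example_lowest_set_bit(u64 x)
+{
+ return (x & (x - 1)) ^ x; /* e.g. 0xC yields 0x4 */
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.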
+ */ +static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) +{ + struct qib_devdata *dd = ppd->dd; + u64 set_sendctrl = 0; + u64 clr_sendctrl = 0; + + if (op & QIB_SDMA_SENDCTRL_OP_ENABLE) + set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable); + else + clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable); + + if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE) + set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable); + else + clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable); + + if (op & QIB_SDMA_SENDCTRL_OP_HALT) + set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt); + else + clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt); + + if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) + set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) | + SYM_MASK(SendCtrl_0, TxeAbortIbc) | + SYM_MASK(SendCtrl_0, TxeDrainRmFifo); + else + clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) | + SYM_MASK(SendCtrl_0, TxeAbortIbc) | + SYM_MASK(SendCtrl_0, TxeDrainRmFifo); + + spin_lock(&dd->sendctrl_lock); + + /* If we are draining everything, block sends first */ + if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) { + ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable); + qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + + ppd->p_sendctrl |= set_sendctrl; + ppd->p_sendctrl &= ~clr_sendctrl; + + if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP) + qib_write_kreg_port(ppd, krp_sendctrl, + ppd->p_sendctrl | + SYM_MASK(SendCtrl_0, SDmaCleanup)); + else + qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + + if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) { + ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable); + qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + + spin_unlock(&dd->sendctrl_lock); + + if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1) + flush_fifo(ppd); +} + +static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd) +{ + __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned); +} + +static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd) +{ + /* + * Set SendDmaLenGen and clear and set + * the MSB of the generation count to enable generation checking + * and load the internal generation counter. + */ + qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt); + qib_write_kreg_port(ppd, krp_senddmalengen, + ppd->sdma_descq_cnt | + (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB)); +} + +/* + * Must be called with sdma_lock held, or before init finished. + */ +static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail) +{ + /* Commit writes to memory and advance the tail on the chip */ + wmb(); + ppd->sdma_descq_tail = tail; + qib_write_kreg_port(ppd, krp_senddmatail, tail); +} + +/* + * This is called with interrupts disabled and sdma_lock held. + */ +static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd) +{ + /* + * Drain all FIFOs. + * The hardware doesn't require this but we do it so that verbs + * and user applications don't wait for link active to send stale + * data. 
+ */
+ sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
+
+ qib_sdma_7322_setlengen(ppd);
+ qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
+ ppd->sdma_head_dma[0] = 0;
+ qib_7322_sdma_sendctrl(ppd,
+ ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
+}
+
+#define DISABLES_SDMA ( \
+ QIB_E_P_SDMAHALT | \
+ QIB_E_P_SDMADESCADDRMISALIGN | \
+ QIB_E_P_SDMAMISSINGDW | \
+ QIB_E_P_SDMADWEN | \
+ QIB_E_P_SDMARPYTAG | \
+ QIB_E_P_SDMA1STDESC | \
+ QIB_E_P_SDMABASE | \
+ QIB_E_P_SDMATAILOUTOFBOUND | \
+ QIB_E_P_SDMAOUTOFBOUND | \
+ QIB_E_P_SDMAGENMISMATCH)
+
+static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
+{
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+
+ errs &= QIB_E_P_SDMAERRS;
+
+ if (errs & QIB_E_P_SDMAUNEXPDATA)
+ qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
+ ppd->port);
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e20_hw_started);
+ break;
+
+ case qib_sdma_state_s20_idle:
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e50_hw_cleaned);
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e60_hw_halted);
+ break;
+
+ case qib_sdma_state_s99_running:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
+ __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * handle per-device errors (not per-port errors)
+ */
+static noinline void handle_7322_errors(struct qib_devdata *dd)
+{
+ char *msg;
+ u64 iserr = 0;
+ u64 errs;
+ u64 mask;
+ int log_idx;
+
+ qib_stats.sps_errints++;
+ errs = qib_read_kreg64(dd, kr_errstatus);
+ if (!errs) {
+ qib_devinfo(dd->pcidev, "device error interrupt, "
+ "but no error bits set!\n");
+ goto done;
+ }
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & QIB_E_HARDWARE) {
+ *msg = '\0';
+ qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ } else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & QIB_E_SPKTERRS) {
+ qib_disarm_7322_senderrbufs(dd->pport);
+ qib_stats.sps_txerrs++;
+ } else if (errs & QIB_E_INVALIDADDR)
+ qib_stats.sps_txerrs++;
+ else if (errs & QIB_E_ARMLAUNCH) {
+ qib_stats.sps_txerrs++;
+ qib_disarm_7322_senderrbufs(dd->pport);
+ }
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above. Also mask SDMADISABLED by default as it
+ * is too chatty.
+ */
+ mask = QIB_E_HARDWARE;
+ *msg = '\0';
+
+ err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+ qib_7322error_msgs);
+
+ /*
+ * Getting reset is a tragedy for all ports. Mark the device
+ * _and_ the ports as "offline" in a way meaningful to each.
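+ * (Concretely, as the code below does: QIB_STATUS_HWERROR is set for
+ * the device, and QIB_STATUS_IB_CONF is cleared for each port.)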
+ */ + if (errs & QIB_E_RESET) { + int pidx; + + qib_dev_err(dd, "Got reset, requires re-init " + "(unload and reload driver)\n"); + dd->flags &= ~QIB_INITTED; /* needs re-init */ + /* mark as having had error */ + *dd->devstatusp |= QIB_STATUS_HWERROR; + for (pidx = 0; pidx < dd->num_pports; ++pidx) + if (dd->pport[pidx].link_speed_supported) + *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF; + } + + if (*msg && iserr) + qib_dev_err(dd, "%s error\n", msg); + + /* + * If there were hdrq or egrfull errors, wake up any processes + * waiting in poll. We used to try to check which contexts had + * the overflow, but given the cost of that and the chip reads + * to support it, it's better to just wake everybody up if we + * get an overflow; waiters can poll again if it's not them. + */ + if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) { + qib_handle_urcv(dd, ~0U); + if (errs & ERR_MASK(RcvEgrFullErr)) + qib_stats.sps_buffull++; + else + qib_stats.sps_hdrfull++; + } + +done: + return; +} + +static void reenable_chase(unsigned long opaque) +{ + struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; + + ppd->cpspec->chase_timer.expires = 0; + qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, + QLOGIC_IB_IBCC_LINKINITCMD_POLL); +} + +static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt) +{ + ppd->cpspec->chase_end = 0; + + if (!qib_chase) + return; + + qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, + QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); + ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME; + add_timer(&ppd->cpspec->chase_timer); +} + +static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) +{ + u8 ibclt; + u64 tnow; + + ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState); + + /* + * Detect and handle the state chase issue, where we can + * get stuck if we are unlucky on timing on both sides of + * the link. If we are, we disable, set a timer, and + * then re-enable. + */ + switch (ibclt) { + case IB_7322_LT_STATE_CFGRCVFCFG: + case IB_7322_LT_STATE_CFGWAITRMT: + case IB_7322_LT_STATE_TXREVLANES: + case IB_7322_LT_STATE_CFGENH: + tnow = get_jiffies_64(); + if (ppd->cpspec->chase_end && + time_after64(tnow, ppd->cpspec->chase_end)) + disable_chase(ppd, tnow, ibclt); + else if (!ppd->cpspec->chase_end) + ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME; + break; + default: + ppd->cpspec->chase_end = 0; + break; + } + + if (ibclt == IB_7322_LT_STATE_CFGTEST && + (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { + force_h1(ppd); + ppd->cpspec->qdr_reforce = 1; + } else if (ppd->cpspec->qdr_reforce && + (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) && + (ibclt == IB_7322_LT_STATE_CFGENH || + ibclt == IB_7322_LT_STATE_CFGIDLE || + ibclt == IB_7322_LT_STATE_LINKUP)) + force_h1(ppd); + + if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) && + ppd->link_speed_enabled == QIB_IB_QDR && + (ibclt == IB_7322_LT_STATE_CFGTEST || + ibclt == IB_7322_LT_STATE_CFGENH || + (ibclt >= IB_7322_LT_STATE_POLLACTIVE && + ibclt <= IB_7322_LT_STATE_SLEEPQUIET))) + adj_tx_serdes(ppd); + + if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP && + ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { + ppd->cpspec->qdr_dfe_on = 1; + ppd->cpspec->qdr_dfe_time = 0; + /* On link down, reenable QDR adaptation */ + qib_write_kreg_port(ppd, krp_static_adapt_dis(2), + ppd->dd->cspec->r1 ? + QDR_STATIC_ADAPT_DOWN_R1 : + QDR_STATIC_ADAPT_DOWN); + } +} + +/* + * This is per-pport error handling. 
+ * It will likely get its own MSIx interrupt (one for each port,
+ * although just a single handler).
+ */
+static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
+{
+ char *msg;
+ u64 ignore_this_time = 0, iserr = 0, errs, fmask;
+ struct qib_devdata *dd = ppd->dd;
+
+ /* do this as soon as possible */
+ fmask = qib_read_kreg64(dd, kr_act_fmask);
+ if (!fmask)
+ check_7322_rxe_status(ppd);
+
+ errs = qib_read_kreg_port(ppd, krp_errstatus);
+ if (!errs)
+ qib_devinfo(dd->pcidev,
+ "Port%d error interrupt, but no error bits set!\n",
+ ppd->port);
+ if (!fmask)
+ errs &= ~QIB_E_P_IBSTATUSCHANGED;
+ if (!errs)
+ goto done;
+
+ msg = ppd->cpspec->epmsgbuf;
+ *msg = '\0';
+
+ if (errs & ~QIB_E_P_BITSEXTANT) {
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+ errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
+ if (!*msg)
+ snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+ "no others");
+ qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
+ " errors 0x%016Lx set (and %s)\n",
+ (errs & ~QIB_E_P_BITSEXTANT), msg);
+ *msg = '\0';
+ }
+
+ if (errs & QIB_E_P_SHDR) {
+ u64 symptom;
+
+ /* determine cause, then write to clear */
+ symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
+ qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+ hdrchk_msgs);
+ *msg = '\0';
+ /* senderrbuf cleared in SPKTERRS below */
+ }
+
+ if (errs & QIB_E_P_SPKTERRS) {
+ if ((errs & QIB_E_P_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug
+ */
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+ (errs & QIB_E_P_LINK_PKTERRS),
+ qib_7322p_error_msgs);
+ *msg = '\0';
+ ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+ }
+ qib_disarm_7322_senderrbufs(ppd);
+ } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+ qib_7322p_error_msgs);
+ ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+ *msg = '\0';
+ }
+
+ qib_write_kreg_port(ppd, krp_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ if (errs & QIB_E_P_RPKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & QIB_E_P_SPKTERRS)
+ qib_stats.sps_txerrs++;
+
+ iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
+
+ if (errs & QIB_E_P_SDMAERRS)
+ sdma_7322_p_errors(ppd, errs);
+
+ if (errs & QIB_E_P_IBSTATUSCHANGED) {
+ u64 ibcs;
+ u8 ltstate;
+
+ ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
+ ltstate = qib_7322_phys_portstate(ibcs);
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ handle_serdes_issues(ppd, ibcs);
+ if (!(ppd->cpspec->ibcctrl_a &
+ SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
+ /*
+ * We got our interrupt, so init code should be
+ * happy and not try alternatives.
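+ * (Editorial note: setting IBStatIntReductionEn below appears to
+ * throttle further IB status-change interrupts, quieting the
+ * pre-INIT link-negotiation chatter mentioned next.)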
Now squelch + * other "chatter" from link-negotiation (pre Init) + */ + ppd->cpspec->ibcctrl_a |= + SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); + qib_write_kreg_port(ppd, krp_ibcctrl_a, + ppd->cpspec->ibcctrl_a); + } + + /* Update our picture of width and speed from chip */ + ppd->link_width_active = + (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ? + IB_WIDTH_4X : IB_WIDTH_1X; + ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0, + LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs & + SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ? + QIB_IB_DDR : QIB_IB_SDR; + + if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate != + IB_PHYSPORTSTATE_DISABLED) + qib_set_ib_7322_lstate(ppd, 0, + QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); + else + /* + * Since going into a recovery state causes the link + * state to go down and since recovery is transitory, + * it is better if we "miss" ever seeing the link + * training state go into recovery (i.e., ignore this + * transition for link state special handling purposes) + * without updating lastibcstat. + */ + if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER && + ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN && + ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && + ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) + qib_handle_e_ibstatuschanged(ppd, ibcs); + } + if (*msg && iserr) + qib_dev_porterr(dd, ppd->port, "%s error\n", msg); + + if (ppd->state_wanted & ppd->lflags) + wake_up_interruptible(&ppd->state_wait); +done: + return; +} + +/* enable/disable chip from delivering interrupts */ +static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable) +{ + if (enable) { + if (dd->flags & QIB_BADINTR) + return; + qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask); + /* cause any pending enabled interrupts to be re-delivered */ + qib_write_kreg(dd, kr_intclear, 0ULL); + if (dd->cspec->num_msix_entries) { + /* and same for MSIx */ + u64 val = qib_read_kreg64(dd, kr_intgranted); + if (val) + qib_write_kreg(dd, kr_intgranted, val); + } + } else + qib_write_kreg(dd, kr_intmask, 0ULL); +} + +/* + * Try to cleanup as much as possible for anything that might have gone + * wrong while in freeze mode, such as pio buffers being written by user + * processes (causing armlaunch), send errors due to going into freeze mode, + * etc., and try to avoid causing extra interrupts while doing so. + * Forcibly update the in-memory pioavail register copies after cleanup + * because the chip won't do it while in freeze mode (the register values + * themselves are kept correct). + * Make sure that we don't lose any important interrupts by using the chip + * feature that says that writing 0 to a bit in *clear that is set in + * *status will cause an interrupt to be generated again (if allowed by + * the *mask value). + * This is in chip-specific code because of all of the register accesses, + * even though the details are similar on most chips. 
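+ */
+
+/*
+ * Illustrative sketch (editor's addition, not part of the patch) of
+ * the re-arm idiom described above: writing 0s to an interrupt clear
+ * register while status bits are still set makes the chip deliver the
+ * interrupt again, so nothing pending is lost across the freeze.
+ */
+static inline void example_force_reinterrupt(struct qib_devdata *dd)
+{
+ /* clears no status bits; still-set, unmasked bits re-interrupt */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+}
+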
+static void qib_7322_clear_freeze(struct qib_devdata *dd)
+{
+ int pidx;
+
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+ 0ULL);
+
+ /* also disable interrupts; errormask is sometimes overwritten */
+ qib_7322_set_intr_state(dd, 0);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /*
+ * Force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ /* We need to purge per-port errs and reset mask, too */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ if (!dd->pport[pidx].link_speed_supported)
+ continue;
+ qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
+ }
+ qib_7322_set_intr_state(dd, 1);
+}
+
+/* no error handling to speak of */
+/**
+ * qib_7322_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now,
+ * we'll print them and continue. We reuse the same message buffer as
+ * qib_handle_errors() to avoid excessive stack usage.
+ */
+static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 ctrl;
+ int isfatal = 0;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ goto bail;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ goto bail;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /* Always clear the error status register, except BIST fail */
+ qib_write_kreg(dd, kr_hwerrclear, hwerrs &
+ ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* no EEPROM logging, yet */
+
+ if (hwerrs)
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
+ /*
+ * No recovery yet...
+ */
+ if ((hwerrs & ~HWE_MASK(LATriggered)) ||
+ dd->cspec->stay_in_freeze) {
+ /*
+ * If any are set that we aren't ignoring, only make
+ * the complaint once, in case it's stuck or recurring
+ * and we get here multiple times. Force link down,
+ * so the switch knows, and LEDs are turned off.
+ */
+ if (dd->flags & QIB_INITTED)
+ isfatal = 1;
+ } else
+ qib_7322_clear_freeze(dd);
+ }
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcpy(msg, "[Memory BIST test failed, "
+ "InfiniPath hardware unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
+
+ /* Ignore esoteric PLL failures et al. */
+
+	qib_dev_err(dd, "%s hardware error\n", msg);
+
+	if (isfatal && !dd->diag_client) {
+		qib_dev_err(dd, "Fatal Hardware Error, no longer"
+			    " usable, SN %.16s\n", dd->serial);
+		/*
+		 * for /sys status file and user programs to print; if no
+		 * trailing brace is copied, we'll know it was truncated.
+		 */
+		if (dd->freezemsg)
+			snprintf(dd->freezemsg, dd->freezelen,
+				 "{%s}", msg);
+		qib_disable_after_error(dd);
+	}
+bail:;
+}
+
+/**
+ * qib_7322_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_7322_init_hwerrors(struct qib_devdata *dd)
+{
+	int pidx;
+	u64 extsval;
+
+	extsval = qib_read_kreg64(dd, kr_extstatus);
+	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
+			 QIB_EXTS_MEMBIST_ENDTEST)))
+		qib_dev_err(dd, "MemBIST did not complete!\n");
+
+	/* never clear BIST failure, so reported on each driver load */
+	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+	/* clear all */
+	qib_write_kreg(dd, kr_errclear, ~0ULL);
+	/* enable errors that are masked, at least this first time. */
+	qib_write_kreg(dd, kr_errmask, ~0ULL);
+	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+	for (pidx = 0; pidx < dd->num_pports; ++pidx)
+		if (dd->pport[pidx].link_speed_supported)
+			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+					    ~0ULL);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+	if (enable) {
+		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
+		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
+	} else
+		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assuming the zero was NOP.
+ */
+static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+				   u16 linitcmd)
+{
+	u64 mod_wd;
+	struct qib_devdata *dd = ppd->dd;
+	unsigned long flags;
+
+	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+		/*
+		 * If we are told to disable, note that so link-recovery
+		 * code does not attempt to bring us back up.
+		 * Also reset everything that we can, so we start
+		 * completely clean when re-enabled (before we
+		 * actually issue the disable to the IBC)
+		 */
+		qib_7322_mini_pcs_reset(ppd);
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags |= QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+		/*
+		 * Any other linkinitcmd will lead to LINKDOWN and then
+		 * to INIT (if all is well), so clear flag to let
+		 * link-recovery code attempt to bring us back up.
+		 */
+		spin_lock_irqsave(&ppd->lflags_lock, flags);
+		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		/*
+		 * Clear status change interrupt reduction so the
+		 * new state is seen.
+		 */
+		ppd->cpspec->ibcctrl_a &=
+			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+	}
+
+	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
+		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
+			    mod_wd);
+	/* write to chip to prevent back-to-back writes of ibc reg */
+	qib_write_kreg(dd, kr_scratch, 0);
+
+}
+
+/*
+ * The total RCV buffer memory is 64KB, used for both ports, and is
+ * in units of 64 bytes (same as IB flow control credit unit).
+ * The consumedVL units in the same registers are in 32 byte units!
+ * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
+ * and we can therefore allocate just 9 IB credits for 2 VL15 packets
+ * in krp_rxcreditvl15, rather than 10.
+ */
+#define RCV_BUF_UNITSZ 64
+#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
+
+static void set_vls(struct qib_pportdata *ppd)
+{
+	int i, numvls, totcred, cred_vl, vl0extra;
+	struct qib_devdata *dd = ppd->dd;
+	u64 val;
+
+	numvls = qib_num_vls(ppd->vls_operational);
+
+	/*
+	 * Set up per-VL credits. Below is a kluge based on these assumptions:
+	 * 1) port is disabled at the time early_init is called.
+	 * 2) give VL15 9 credits, for two max-plausible packets.
+	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
+	 */
+	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
+	totcred = NUM_RCV_BUF_UNITS(dd);
+	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
+	totcred -= cred_vl;
+	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
+	cred_vl = totcred / numvls;
+	vl0extra = totcred - cred_vl * numvls;
+	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
+	for (i = 1; i < numvls; i++)
+		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
+	for (; i < 8; i++) /* no buffer space for other VLs */
+		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
+
+	/* Notify IBC that credits need to be recalculated */
+	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
+	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+
+	for (i = 0; i < numvls; i++)
+		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
+	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
+
+	/* Change the number of operational VLs */
+	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
+				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
+		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
+	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+}
+
+/*
+ * The code that deals with actual SerDes is in serdes_7322_init().
+ * Compared to the code for iba7220, it is minimal.
+ */
+static int serdes_7322_init(struct qib_pportdata *ppd);
+
+/**
+ * qib_7322_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
+{
+	struct qib_devdata *dd = ppd->dd;
+	u64 val, guid, ibc;
+	unsigned long flags;
+	int ret = 0;
+
+	/*
+	 * SerDes model not in Pd, but still need to
+	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
+	 * eventually.
+	 */
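As a sanity check on the credit arithmetic in set_vls() above, the same partitioning in a standalone program (the constants mirror the comments: 64-byte buffer units shared by two ports, two 288-byte VL15 packets, rounding excess to VL0; everything else is made up):

	#include <stdio.h>

	#define RCV_BUF_UNITSZ	64
	#define RCV_BUF_TOTAL	(64 * 1024)

	int main(void)
	{
		int num_pports = 2, numvls = 4, i;
		int totcred = RCV_BUF_TOTAL / (RCV_BUF_UNITSZ * num_pports);
		/* 2 VL15 packets @ 288 bytes, rounded up to buffer units */
		int vl15 = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
		int cred_vl, vl0extra;

		totcred -= vl15;
		cred_vl = totcred / numvls;
		vl0extra = totcred - cred_vl * numvls;
		printf("vl15=%d vl0=%d", vl15, cred_vl + vl0extra);
		for (i = 1; i < numvls; i++)
			printf(" vl%d=%d", i, cred_vl);
		printf("\n");	/* vl15=9 vl0=128 vl1..vl3=125 each */
		return 0;
	}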
+	/* Put IBC in reset, sends disabled (should be in reset already) */
+	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+
+	if (qib_compat_ddr_negotiate) {
+		ppd->cpspec->ibdeltainprog = 1;
+		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+						crp_ibsymbolerr);
+		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
+						crp_iblinkerrrecov);
+	}
+
+	/* flowcontrolwatermark is in units of KBytes */
+	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
+	/*
+	 * Flow control is sent this often, even if no changes in
+	 * buffer space occur. Units are 128ns for this chip.
+	 * Set to 3usec.
+	 */
+	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
+	/* max error tolerance */
+	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+	/* IB credit flow control. */
+	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+	/*
+	 * set initial max size pkt IBC will send, including ICRC; it's the
+	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+	 */
+	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
+		SYM_LSB(IBCCtrlA_0, MaxPktLen);
+	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
+
+	/* initially come up waiting for TS1, without sending anything. */
+	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+	/*
+	 * Reset the PCS interface to the serdes (and also ibc, which is still
+	 * in reset from above). Writes new value of ibcctrl_a as last step.
+	 */
+	qib_7322_mini_pcs_reset(ppd);
+	qib_write_kreg(dd, kr_scratch, 0ULL);
+
+	if (!ppd->cpspec->ibcctrl_b) {
+		unsigned lse = ppd->link_speed_enabled;
+
+		/*
+		 * Not on re-init after reset, establish shadow
+		 * and force initial config.
+		 */
+		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
+							    krp_ibcctrl_b);
+		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
+				IBA7322_IBC_SPEED_DDR |
+				IBA7322_IBC_SPEED_SDR |
+				IBA7322_IBC_WIDTH_AUTONEG |
+				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
+		if (lse & (lse - 1)) /* Multiple speeds enabled */
+			ppd->cpspec->ibcctrl_b |=
+				(lse << IBA7322_IBC_SPEED_LSB) |
+				IBA7322_IBC_IBTA_1_2_MASK |
+				IBA7322_IBC_MAX_SPEED_MASK;
+		else
+			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
+				IBA7322_IBC_SPEED_QDR |
+				IBA7322_IBC_IBTA_1_2_MASK :
+				(lse == QIB_IB_DDR) ?
+					IBA7322_IBC_SPEED_DDR :
+					IBA7322_IBC_SPEED_SDR;
+		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
+		    (IB_WIDTH_1X | IB_WIDTH_4X))
+			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
+		else
+			ppd->cpspec->ibcctrl_b |=
+				ppd->link_width_enabled == IB_WIDTH_4X ?
+				IBA7322_IBC_WIDTH_4X_ONLY :
+				IBA7322_IBC_WIDTH_1X_ONLY;
+
+		/* always enable these on driver reload, not sticky */
+		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
+					   IBA7322_IBC_HRTBT_MASK);
+	}
+	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+
+	/* setup so we have more time at CFGTEST to change H1 */
+	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
+	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
+	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
+	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
+
+	serdes_7322_init(ppd);
+
+	guid = be64_to_cpu(ppd->guid);
+	if (!guid) {
+		if (dd->base_guid)
+			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
+		ppd->guid = cpu_to_be64(guid);
+	}
+
+	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
+	/* write to chip to prevent back-to-back writes of ibc reg */
+	qib_write_kreg(dd, kr_scratch, 0);
+
+	/* Enable port */
+	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
+	set_vls(ppd);
+
+	/* be paranoid against later code motion, etc. */
+	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
+	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+	/* Also enable IBSTATUSCHG interrupt. */
+	val = qib_read_kreg_port(ppd, krp_errmask);
+	qib_write_kreg_port(ppd, krp_errmask,
+			    val | ERR_MASK_N(IBStatusChanged));
+
+	/* Always zero until we start messing with SerDes for real */
+	return ret;
+}
+
+/**
+ * qib_7322_mini_quiet_serdes - set serdes to txidle
+ * @ppd: physical port on the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
+{
+	u64 val;
+	unsigned long flags;
+
+	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+	spin_lock_irqsave(&ppd->lflags_lock, flags);
+	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+	wake_up(&ppd->cpspec->autoneg_wait);
+	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	if (ppd->dd->cspec->r1)
+		cancel_delayed_work(&ppd->cpspec->ipg_work);
+	flush_scheduled_work();
+
+	ppd->cpspec->chase_end = 0;
+	if (ppd->cpspec->chase_timer.data) /* if initted */
+		del_timer_sync(&ppd->cpspec->chase_timer);
+
+	/*
+	 * Despite the name, actually disables IBC as well. Do it when
+	 * we are as sure as possible that no more packets can be
+	 * received, following the down and the PCS reset.
+	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
+	 * along with the PCS being reset.
+	 */
+	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+	qib_7322_mini_pcs_reset(ppd);
+
+	/*
+	 * Update the adjusted counters so the adjustment persists
+	 * across driver reload.
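The lse & (lse - 1) test used in qib_7322_bringup_serdes() above is the usual more-than-one-bit-set check. A small sketch of the same enable-bit decision, with placeholder QIB_IB_* values rather than the driver's real encodings:

	#include <stdio.h>

	#define QIB_IB_SDR 1	/* placeholder one-hot speed bits */
	#define QIB_IB_DDR 2
	#define QIB_IB_QDR 4

	int main(void)
	{
		unsigned lse = QIB_IB_DDR | QIB_IB_QDR;	/* enabled speeds */

		if (lse & (lse - 1))	/* more than one bit set? */
			printf("multiple speeds: autoneg range + IBTA 1.2\n");
		else if (lse == QIB_IB_QDR)
			printf("QDR only: fixed speed, IBTA 1.2 handshake\n");
		else
			printf("%s only: fixed speed\n",
			       lse == QIB_IB_DDR ? "DDR" : "SDR");
		return 0;
	}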
+ */ + if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta || + ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) { + struct qib_devdata *dd = ppd->dd; + u64 diagc; + + /* enable counter writes */ + diagc = qib_read_kreg64(dd, kr_hwdiagctrl); + qib_write_kreg(dd, kr_hwdiagctrl, + diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable)); + + if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) { + val = read_7322_creg32_port(ppd, crp_ibsymbolerr); + if (ppd->cpspec->ibdeltainprog) + val -= val - ppd->cpspec->ibsymsnap; + val -= ppd->cpspec->ibsymdelta; + write_7322_creg_port(ppd, crp_ibsymbolerr, val); + } + if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) { + val = read_7322_creg32_port(ppd, crp_iblinkerrrecov); + if (ppd->cpspec->ibdeltainprog) + val -= val - ppd->cpspec->iblnkerrsnap; + val -= ppd->cpspec->iblnkerrdelta; + write_7322_creg_port(ppd, crp_iblinkerrrecov, val); + } + if (ppd->cpspec->iblnkdowndelta) { + val = read_7322_creg32_port(ppd, crp_iblinkdown); + val += ppd->cpspec->iblnkdowndelta; + write_7322_creg_port(ppd, crp_iblinkdown, val); + } + /* + * No need to save ibmalfdelta since IB perfcounters + * are cleared on driver reload. + */ + + /* and disable counter writes */ + qib_write_kreg(dd, kr_hwdiagctrl, diagc); + } +} + +/** + * qib_setup_7322_setextled - set the state of the two external LEDs + * @ppd: physical port on the qlogic_ib device + * @on: whether the link is up or not + * + * The exact combo of LEDs if on is true is determined by looking + * at the ibcstatus. + * + * These LEDs indicate the physical and logical state of IB link. + * For this chip (at least with recommended board pinouts), LED1 + * is Yellow (logical state) and LED2 is Green (physical state), + * + * Note: We try to match the Mellanox HCA LED behavior as best + * we can. Green indicates physical link state is OK (something is + * plugged in, and we can train). + * Amber indicates the link is logically up (ACTIVE). + * Mellanox further blinks the amber LED to indicate data packet + * activity, but we have no hardware support for that, so it would + * require waking up every 10-20 msecs and checking the counters + * on the chip, and then turning the LED off if appropriate. That's + * visible overhead, so not something we will do. + */ +static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on) +{ + struct qib_devdata *dd = ppd->dd; + u64 extctl, ledblink = 0, val; + unsigned long flags; + int yel, grn; + + /* + * The diags use the LED to indicate diag info, so we leave + * the external LED alone when the diags are running. + */ + if (dd->diag_client) + return; + + /* Allow override of LED display for, e.g. Locating system in rack */ + if (ppd->led_override) { + grn = (ppd->led_override & QIB_LED_PHYS); + yel = (ppd->led_override & QIB_LED_LOG); + } else if (on) { + val = qib_read_kreg_port(ppd, krp_ibcstatus_a); + grn = qib_7322_phys_portstate(val) == + IB_PHYSPORTSTATE_LINKUP; + yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE; + } else { + grn = 0; + yel = 0; + } + + spin_lock_irqsave(&dd->cspec->gpio_lock, flags); + extctl = dd->cspec->extctrl & (ppd->port == 1 ? + ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK); + if (grn) { + extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN; + /* + * Counts are in chip clock (4ns) periods. + * This is 1/16 sec (66.6ms) on, + * 3/16 sec (187.5 ms) off, with packets rcvd. 
+ */ + ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) | + ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT); + } + if (yel) + extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL; + dd->cspec->extctrl = extctl; + qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); + spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); + + if (ledblink) /* blink the LED on packet receive */ + qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); +} + +/* + * Disable MSIx interrupt if enabled, call generic MSIx code + * to cleanup, and clear pending MSIx interrupts. + * Used for fallback to INTx, after reset, and when MSIx setup fails. + */ +static void qib_7322_nomsix(struct qib_devdata *dd) +{ + u64 intgranted; + int n; + + dd->cspec->main_int_mask = ~0ULL; + n = dd->cspec->num_msix_entries; + if (n) { + int i; + + dd->cspec->num_msix_entries = 0; + for (i = 0; i < n; i++) + free_irq(dd->cspec->msix_entries[i].vector, + dd->cspec->msix_arg[i]); + qib_nomsix(dd); + } + /* make sure no MSIx interrupts are left pending */ + intgranted = qib_read_kreg64(dd, kr_intgranted); + if (intgranted) + qib_write_kreg(dd, kr_intgranted, intgranted); +} + +static void qib_7322_free_irq(struct qib_devdata *dd) +{ + if (dd->cspec->irq) { + free_irq(dd->cspec->irq, dd); + dd->cspec->irq = 0; + } + qib_7322_nomsix(dd); +} + +static void qib_setup_7322_cleanup(struct qib_devdata *dd) +{ + int i; + + qib_7322_free_irq(dd); + kfree(dd->cspec->cntrs); + kfree(dd->cspec->sendchkenable); + kfree(dd->cspec->sendgrhchk); + kfree(dd->cspec->sendibchk); + kfree(dd->cspec->msix_entries); + kfree(dd->cspec->msix_arg); + for (i = 0; i < dd->num_pports; i++) { + unsigned long flags; + u32 mask = QSFP_GPIO_MOD_PRS_N | + (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT); + + kfree(dd->pport[i].cpspec->portcntrs); + if (dd->flags & QIB_HAS_QSFP) { + spin_lock_irqsave(&dd->cspec->gpio_lock, flags); + dd->cspec->gpio_mask &= ~mask; + qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); + spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); + qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data); + } + if (dd->pport[i].ibport_data.smi_ah) + ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah); + } +} + +/* handle SDMA interrupts */ +static void sdma_7322_intr(struct qib_devdata *dd, u64 istat) +{ + struct qib_pportdata *ppd0 = &dd->pport[0]; + struct qib_pportdata *ppd1 = &dd->pport[1]; + u64 intr0 = istat & (INT_MASK_P(SDma, 0) | + INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0)); + u64 intr1 = istat & (INT_MASK_P(SDma, 1) | + INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1)); + + if (intr0) + qib_sdma_intr(ppd0); + if (intr1) + qib_sdma_intr(ppd1); + + if (istat & INT_MASK_PM(SDmaCleanupDone, 0)) + qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started); + if (istat & INT_MASK_PM(SDmaCleanupDone, 1)) + qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started); +} + +/* + * Set or clear the Send buffer available interrupt enable bit. + */ +static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint) +{ + unsigned long flags; + + spin_lock_irqsave(&dd->sendctrl_lock, flags); + if (needint) + dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); + else + dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); + qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); + qib_write_kreg(dd, kr_scratch, 0ULL); + spin_unlock_irqrestore(&dd->sendctrl_lock, flags); +} + +/* + * Somehow got an interrupt with reserved bits set in interrupt status. + * Print a message so we know it happened, then clear them. 
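The LED blink constants a little further up are easiest to verify numerically: counts are in 4 ns chip-clock ticks, so 1/16 s on and 3/16 s off become two packed register fields. A sketch with assumed shift positions (the real IBA7322_LEDBLINK_* layout may differ):

	#include <stdint.h>
	#include <stdio.h>

	#define LEDBLINK_ON_SHIFT	0	/* assumed field layout */
	#define LEDBLINK_OFF_SHIFT	32

	int main(void)
	{
		/* 66.6 ms and 187.5 ms expressed in 4 ns ticks */
		uint64_t on  = 66600 * 1000UL / 4;	/* 16,650,000 ticks */
		uint64_t off = 187500 * 1000UL / 4;	/* 46,875,000 ticks */
		uint64_t reg = (on << LEDBLINK_ON_SHIFT) |
			       (off << LEDBLINK_OFF_SHIFT);

		printf("on=%llu off=%llu reg=0x%016llx\n",
		       (unsigned long long)on, (unsigned long long)off,
		       (unsigned long long)reg);
		return 0;
	}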
 * keep mainline interrupt handler cache-friendly
+ */
+static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
+{
+	u64 kills;
+
+	kills = istat & ~QIB_I_BITSEXTANT;
+	qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
+		    (unsigned long long) kills);
+	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
+}
+
+/* keep mainline interrupt handler cache-friendly */
+static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
+{
+	u32 gpiostatus;
+	int handled = 0;
+	int pidx;
+
+	/*
+	 * Boards for this chip currently don't use GPIO interrupts,
+	 * so clear by writing GPIOstatus to GPIOclear, and complain
+	 * to developer. To avoid endless repeats, clear
+	 * the bits in the mask, since there is some kind of
+	 * programming error or chip problem.
+	 */
+	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+	/*
+	 * In theory, writing GPIOstatus to GPIOclear could
+	 * have a bad side-effect on some diagnostic that wanted
+	 * to poll for a status-change, but the various shadows
+	 * make that problematic at best. Diags will just suppress
+	 * all GPIO interrupts during such tests.
+	 */
+	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+	/*
+	 * Check for QSFP MOD_PRS changes;
+	 * only works for single port if IB1 != pidx1
+	 */
+	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
+	     ++pidx) {
+		struct qib_pportdata *ppd;
+		struct qib_qsfp_data *qd;
+		u32 mask;
+		if (!dd->pport[pidx].link_speed_supported)
+			continue;
+		mask = QSFP_GPIO_MOD_PRS_N;
+		ppd = dd->pport + pidx;
+		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
+		if (gpiostatus & dd->cspec->gpio_mask & mask) {
+			u64 pins;
+			qd = &ppd->cpspec->qsfp_data;
+			gpiostatus &= ~mask;
+			pins = qib_read_kreg64(dd, kr_extstatus);
+			pins >>= SYM_LSB(EXTStatus, GPIOIn);
+			if (!(pins & mask)) {
+				++handled;
+				qd->t_insert = get_jiffies_64();
+				schedule_work(&qd->work);
+			}
+		}
+	}
+
+	if (gpiostatus && !handled) {
+		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+		u32 gpio_irq = mask & gpiostatus;
+
+		/*
+		 * Clear any troublemakers, and update chip from shadow
+		 */
+		dd->cspec->gpio_mask &= ~gpio_irq;
+		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+	}
+}
+
+/*
+ * Handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling.
+ */
+static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
+{
+	if (istat & ~QIB_I_BITSEXTANT)
+		unknown_7322_ibits(dd, istat);
+	if (istat & QIB_I_GPIO)
+		unknown_7322_gpio_intr(dd);
+	if (istat & QIB_I_C_ERROR)
+		handle_7322_errors(dd);
+	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
+		handle_7322_p_errors(dd->rcd[0]->ppd);
+	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
+		handle_7322_p_errors(dd->rcd[1]->ppd);
+}
+
+/*
+ * Dynamically adjust the rcv int timeout for a context based on incoming
+ * packet rate.
+ */
+static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
+{
+	struct qib_devdata *dd = rcd->dd;
+	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
+
+	/*
+	 * Dynamically adjust idle timeout on chip
+	 * based on number of packets processed.
+	 */
+	if (npkts < rcv_int_count && timeout > 2)
+		timeout >>= 1;
+	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
+		timeout = min(timeout << 1, rcv_int_timeout);
+	else
+		return;
+
+	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
+	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
+}
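adjust_rcv_timeout() above is a small multiplicative-increase/decrease controller. The same policy in isolation, with made-up defaults for the rcv_int_count and rcv_int_timeout knobs:

	#include <stdio.h>

	static unsigned rcv_int_count = 16;	/* packets-per-irq target */
	static unsigned rcv_int_timeout = 375;	/* max coalescing timeout */

	static unsigned adjust(unsigned timeout, int npkts)
	{
		if (npkts < rcv_int_count && timeout > 2)
			timeout >>= 1;		/* idle-ish: react faster */
		else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
			timeout = timeout << 1 > rcv_int_timeout ?
				rcv_int_timeout : timeout << 1; /* batch more */
		return timeout;
	}

	int main(void)
	{
		unsigned t = 64, i;
		int bursts[] = { 2, 2, 40, 40, 40, 1 };

		for (i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++) {
			t = adjust(t, bursts[i]);
			printf("npkts=%d -> timeout=%u\n", bursts[i], t);
		}
		return 0;
	}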
+/*
+ * This is the main interrupt handler.
+ * It will normally only be used for low frequency interrupts but may
+ * have to handle all interrupts if INTx is enabled or fewer than normal
+ * MSIx interrupts were allocated.
+ * This routine should ignore the interrupt bits for any of the
+ * dedicated MSIx handlers.
+ */
+static irqreturn_t qib_7322intr(int irq, void *data)
+{
+	struct qib_devdata *dd = data;
+	irqreturn_t ret;
+	u64 istat;
+	u64 ctxtrbits;
+	u64 rmask;
+	unsigned i;
+	u32 npkts;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		ret = IRQ_HANDLED;
+		goto bail;
+	}
+
+	istat = qib_read_kreg64(dd, kr_intstatus);
+
+	if (unlikely(istat == ~0ULL)) {
+		qib_bad_intrstatus(dd);
+		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
+		/* don't know if it was our interrupt or not */
+		ret = IRQ_NONE;
+		goto bail;
+	}
+
+	istat &= dd->cspec->main_int_mask;
+	if (unlikely(!istat)) {
+		/* already handled, or shared and not us */
+		ret = IRQ_NONE;
+		goto bail;
+	}
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+	/* handle "errors" of various kinds first, device ahead of port */
+	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
+			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
+			      INT_MASK_P(Err, 1))))
+		unlikely_7322_intr(dd, istat);
+
+	/*
+	 * Clear the interrupt bits we found set, relatively early, so we
+	 * "know" the chip will have seen this by the time we process
+	 * the queue, and will re-interrupt if necessary. The processor
+	 * itself won't take the interrupt again until we return.
+	 */
+	qib_write_kreg(dd, kr_intclear, istat);
+
+	/*
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway.
+	 */
+	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
+	if (ctxtrbits) {
+		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
+			(1ULL << QIB_I_RCVURG_LSB);
+		for (i = 0; i < dd->first_user_ctxt; i++) {
+			if (ctxtrbits & rmask) {
+				ctxtrbits &= ~rmask;
+				if (dd->rcd[i]) {
+					qib_kreceive(dd->rcd[i], NULL, &npkts);
+					adjust_rcv_timeout(dd->rcd[i], npkts);
+				}
+			}
+			rmask <<= 1;
+		}
+		if (ctxtrbits) {
+			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
+				(ctxtrbits >> QIB_I_RCVURG_LSB);
+			qib_handle_urcv(dd, ctxtrbits);
+		}
+	}
+
+	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
+		sdma_7322_intr(dd, istat);
+
+	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+		qib_ib_piobufavail(dd);
+
+	ret = IRQ_HANDLED;
+bail:
+	return ret;
+}
+
+/*
+ * Dedicated receive packet available interrupt handler.
+ */
+static irqreturn_t qib_7322pintr(int irq, void *data)
+{
+	struct qib_ctxtdata *rcd = data;
+	struct qib_devdata *dd = rcd->dd;
+	u32 npkts;
+
+	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+		/*
+		 * This return value is not great, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		return IRQ_HANDLED;
+
+	qib_stats.sps_ints++;
+	if (dd->int_counter != (u32) -1)
+		dd->int_counter++;
+
+	/* Clear the interrupt bit we expect to be set.
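The kernel-context dispatch loop in qib_7322intr() above walks one avail/urgent bit pair per context with a single shifting mask. Isolated into a sketch (the LSB positions are stand-ins, not the chip's):

	#include <stdint.h>
	#include <stdio.h>

	#define RCVAVAIL_LSB	0	/* stand-in bit positions */
	#define RCVURG_LSB	32

	int main(void)
	{
		uint64_t istat = (1ULL << 2) | (1ULL << (32 + 2)) | (1ULL << 5);
		uint64_t rmask = (1ULL << RCVAVAIL_LSB) | (1ULL << RCVURG_LSB);
		unsigned ctxt, nctxts = 8;

		for (ctxt = 0; ctxt < nctxts; ctxt++) {
			if (istat & rmask)	/* avail and/or urgent */
				printf("service context %u%s\n", ctxt,
				       istat & (1ULL << (RCVURG_LSB + ctxt))
				       ? " (urgent)" : "");
			rmask <<= 1;	/* next context's bit pair */
		}
		return 0;
	}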
*/ + qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | + (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); + + qib_kreceive(rcd, NULL, &npkts); + adjust_rcv_timeout(rcd, npkts); + + return IRQ_HANDLED; +} + +/* + * Dedicated Send buffer available interrupt handler. + */ +static irqreturn_t qib_7322bufavail(int irq, void *data) +{ + struct qib_devdata *dd = data; + + if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) + /* + * This return value is not great, but we do not want the + * interrupt core code to remove our interrupt handler + * because we don't appear to be handling an interrupt + * during a chip reset. + */ + return IRQ_HANDLED; + + qib_stats.sps_ints++; + if (dd->int_counter != (u32) -1) + dd->int_counter++; + + /* Clear the interrupt bit we expect to be set. */ + qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL); + + /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */ + if (dd->flags & QIB_INITTED) + qib_ib_piobufavail(dd); + else + qib_wantpiobuf_7322_intr(dd, 0); + + return IRQ_HANDLED; +} + +/* + * Dedicated Send DMA interrupt handler. + */ +static irqreturn_t sdma_intr(int irq, void *data) +{ + struct qib_pportdata *ppd = data; + struct qib_devdata *dd = ppd->dd; + + if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) + /* + * This return value is not great, but we do not want the + * interrupt core code to remove our interrupt handler + * because we don't appear to be handling an interrupt + * during a chip reset. + */ + return IRQ_HANDLED; + + qib_stats.sps_ints++; + if (dd->int_counter != (u32) -1) + dd->int_counter++; + + /* Clear the interrupt bit we expect to be set. */ + qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? + INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0)); + qib_sdma_intr(ppd); + + return IRQ_HANDLED; +} + +/* + * Dedicated Send DMA idle interrupt handler. + */ +static irqreturn_t sdma_idle_intr(int irq, void *data) +{ + struct qib_pportdata *ppd = data; + struct qib_devdata *dd = ppd->dd; + + if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) + /* + * This return value is not great, but we do not want the + * interrupt core code to remove our interrupt handler + * because we don't appear to be handling an interrupt + * during a chip reset. + */ + return IRQ_HANDLED; + + qib_stats.sps_ints++; + if (dd->int_counter != (u32) -1) + dd->int_counter++; + + /* Clear the interrupt bit we expect to be set. */ + qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? + INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0)); + qib_sdma_intr(ppd); + + return IRQ_HANDLED; +} + +/* + * Dedicated Send DMA progress interrupt handler. + */ +static irqreturn_t sdma_progress_intr(int irq, void *data) +{ + struct qib_pportdata *ppd = data; + struct qib_devdata *dd = ppd->dd; + + if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) + /* + * This return value is not great, but we do not want the + * interrupt core code to remove our interrupt handler + * because we don't appear to be handling an interrupt + * during a chip reset. + */ + return IRQ_HANDLED; + + qib_stats.sps_ints++; + if (dd->int_counter != (u32) -1) + dd->int_counter++; + + /* Clear the interrupt bit we expect to be set. */ + qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? + INT_MASK_P(SDmaProgress, 1) : + INT_MASK_P(SDmaProgress, 0)); + qib_sdma_intr(ppd); + + return IRQ_HANDLED; +} + +/* + * Dedicated Send DMA cleanup interrupt handler. 
+ */ +static irqreturn_t sdma_cleanup_intr(int irq, void *data) +{ + struct qib_pportdata *ppd = data; + struct qib_devdata *dd = ppd->dd; + + if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) + /* + * This return value is not great, but we do not want the + * interrupt core code to remove our interrupt handler + * because we don't appear to be handling an interrupt + * during a chip reset. + */ + return IRQ_HANDLED; + + qib_stats.sps_ints++; + if (dd->int_counter != (u32) -1) + dd->int_counter++; + + /* Clear the interrupt bit we expect to be set. */ + qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? + INT_MASK_PM(SDmaCleanupDone, 1) : + INT_MASK_PM(SDmaCleanupDone, 0)); + qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started); + + return IRQ_HANDLED; +} + +/* + * Set up our chip-specific interrupt handler. + * The interrupt type has already been setup, so + * we just need to do the registration and error checking. + * If we are using MSIx interrupts, we may fall back to + * INTx later, if the interrupt handler doesn't get called + * within 1/2 second (see verify_interrupt()). + */ +static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) +{ + int ret, i, msixnum; + u64 redirect[6]; + u64 mask; + + if (!dd->num_pports) + return; + + if (clearpend) { + /* + * if not switching interrupt types, be sure interrupts are + * disabled, and then clear anything pending at this point, + * because we are starting clean. + */ + qib_7322_set_intr_state(dd, 0); + + /* clear the reset error, init error/hwerror mask */ + qib_7322_init_hwerrors(dd); + + /* clear any interrupt bits that might be set */ + qib_write_kreg(dd, kr_intclear, ~0ULL); + + /* make sure no pending MSIx intr, and clear diag reg */ + qib_write_kreg(dd, kr_intgranted, ~0ULL); + qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL); + } + + if (!dd->cspec->num_msix_entries) { + /* Try to get INTx interrupt */ +try_intx: + if (!dd->pcidev->irq) { + qib_dev_err(dd, "irq is 0, BIOS error? " + "Interrupts won't work\n"); + goto bail; + } + ret = request_irq(dd->pcidev->irq, qib_7322intr, + IRQF_SHARED, QIB_DRV_NAME, dd); + if (ret) { + qib_dev_err(dd, "Couldn't setup INTx " + "interrupt (irq=%d): %d\n", + dd->pcidev->irq, ret); + goto bail; + } + dd->cspec->irq = dd->pcidev->irq; + dd->cspec->main_int_mask = ~0ULL; + goto bail; + } + + /* Try to get MSIx interrupts */ + memset(redirect, 0, sizeof redirect); + mask = ~0ULL; + msixnum = 0; + for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) { + irq_handler_t handler; + const char *name; + void *arg; + u64 val; + int lsb, reg, sh; + + if (i < ARRAY_SIZE(irq_table)) { + if (irq_table[i].port) { + /* skip if for a non-configured port */ + if (irq_table[i].port > dd->num_pports) + continue; + arg = dd->pport + irq_table[i].port - 1; + } else + arg = dd; + lsb = irq_table[i].lsb; + handler = irq_table[i].handler; + name = irq_table[i].name; + } else { + unsigned ctxt; + + ctxt = i - ARRAY_SIZE(irq_table); + /* per krcvq context receive interrupt */ + arg = dd->rcd[ctxt]; + if (!arg) + continue; + lsb = QIB_I_RCVAVAIL_LSB + ctxt; + handler = qib_7322pintr; + name = QIB_DRV_NAME " (kctx)"; + } + ret = request_irq(dd->cspec->msix_entries[msixnum].vector, + handler, 0, name, arg); + if (ret) { + /* + * Shouldn't happen since the enable said we could + * have as many as we are trying to setup here. 
+ */ + qib_dev_err(dd, "Couldn't setup MSIx " + "interrupt (vec=%d, irq=%d): %d\n", msixnum, + dd->cspec->msix_entries[msixnum].vector, + ret); + qib_7322_nomsix(dd); + goto try_intx; + } + dd->cspec->msix_arg[msixnum] = arg; + if (lsb >= 0) { + reg = lsb / IBA7322_REDIRECT_VEC_PER_REG; + sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * + SYM_LSB(IntRedirect0, vec1); + mask &= ~(1ULL << lsb); + redirect[reg] |= ((u64) msixnum) << sh; + } + val = qib_read_kreg64(dd, 2 * msixnum + 1 + + (QIB_7322_MsixTable_OFFS / sizeof(u64))); + msixnum++; + } + /* Initialize the vector mapping */ + for (i = 0; i < ARRAY_SIZE(redirect); i++) + qib_write_kreg(dd, kr_intredirect + i, redirect[i]); + dd->cspec->main_int_mask = mask; +bail:; +} + +/** + * qib_7322_boardname - fill in the board name and note features + * @dd: the qlogic_ib device + * + * info will be based on the board revision register + */ +static unsigned qib_7322_boardname(struct qib_devdata *dd) +{ + /* Will need enumeration of board-types here */ + char *n; + u32 boardid, namelen; + unsigned features = DUAL_PORT_CAP; + + boardid = SYM_FIELD(dd->revision, Revision, BoardID); + + switch (boardid) { + case 0: + n = "InfiniPath_QLE7342_Emulation"; + break; + case 1: + n = "InfiniPath_QLE7340"; + dd->flags |= QIB_HAS_QSFP; + features = PORT_SPD_CAP; + break; + case 2: + n = "InfiniPath_QLE7342"; + dd->flags |= QIB_HAS_QSFP; + break; + case 3: + n = "InfiniPath_QMI7342"; + break; + case 4: + n = "InfiniPath_Unsupported7342"; + qib_dev_err(dd, "Unsupported version of QMH7342\n"); + features = 0; + break; + case BOARD_QMH7342: + n = "InfiniPath_QMH7342"; + features = 0x24; + break; + case BOARD_QME7342: + n = "InfiniPath_QME7342"; + break; + case 15: + n = "InfiniPath_QLE7342_TEST"; + dd->flags |= QIB_HAS_QSFP; + break; + default: + n = "InfiniPath_QLE73xy_UNKNOWN"; + qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid); + break; + } + dd->board_atten = 1; /* index into txdds_Xdr */ + + namelen = strlen(n) + 1; + dd->boardname = kmalloc(namelen, GFP_KERNEL); + if (!dd->boardname) + qib_dev_err(dd, "Failed allocation for board name: %s\n", n); + else + snprintf(dd->boardname, namelen, "%s", n); + + snprintf(dd->boardversion, sizeof(dd->boardversion), + "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", + QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, + (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch), + dd->majrev, dd->minrev, + (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); + + if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) { + qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode" + " by module parameter\n", dd->unit); + features &= PORT_SPD_CAP; + } + + return features; +} + +/* + * This routine sleeps, so it can only be called from user context, not + * from interrupt context. + */ +static int qib_do_7322_reset(struct qib_devdata *dd) +{ + u64 val; + u64 *msix_vecsave; + int i, msix_entries, ret = 1; + u16 cmdval; + u8 int_line, clinesz; + unsigned long flags; + + /* Use dev_err so it shows up in logs, etc. 
 */
+	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+	msix_entries = dd->cspec->num_msix_entries;
+
+	/* no interrupts till re-initted */
+	qib_7322_set_intr_state(dd, 0);
+
+	if (msix_entries) {
+		qib_7322_nomsix(dd);
+		/* can be up to 512 bytes, too big for stack */
+		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
+				       sizeof(u64), GFP_KERNEL);
+		if (!msix_vecsave)
+			qib_dev_err(dd, "No mem to save MSIx data\n");
+	} else
+		msix_vecsave = NULL;
+
+	/*
+	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
+	 * info that is set up by the BIOS, so we have to save and restore
+	 * it ourselves. There is some risk something could change it,
+	 * after we save it, but since we have disabled the MSIx, it
+	 * shouldn't be touched...
+	 */
+	for (i = 0; i < msix_entries; i++) {
+		u64 vecaddr, vecdata;
+		vecaddr = qib_read_kreg64(dd, 2 * i +
+			  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
+			  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+		if (msix_vecsave) {
+			msix_vecsave[2 * i] = vecaddr;
+			/* save it without the masked bit set */
+			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
+		}
+	}
+
+	dd->pport->cpspec->ibdeltainprog = 0;
+	dd->pport->cpspec->ibsymdelta = 0;
+	dd->pport->cpspec->iblnkerrdelta = 0;
+	dd->pport->cpspec->ibmalfdelta = 0;
+	dd->int_counter = 0; /* so we check interrupts work again */
+
+	/*
+	 * Keep chip from being accessed until we are ready. Use
+	 * writeq() directly, to allow the write even though QIB_PRESENT
+	 * isn't set.
+	 */
+	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
+	dd->flags |= QIB_DOING_RESET;
+	val = dd->control | QLOGIC_IB_C_RESET;
+	writeq(val, &dd->kregbase[kr_control]);
+
+	for (i = 1; i <= 5; i++) {
+		/*
+		 * Allow MBIST, etc. to complete; longer on each retry.
+		 * We sometimes get machine checks from bus timeout if no
+		 * response, so for now, make it *really* long.
+		 */
+		msleep(1000 + (1 + i) * 3000);
+
+		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+		/*
+		 * Use readq directly, so we don't need to mark it as PRESENT
+		 * until we get a successful indication that all is well.
+		 */
+		val = readq(&dd->kregbase[kr_revision]);
+		if (val == dd->revision)
+			break;
+		if (i == 5) {
+			qib_dev_err(dd, "Failed to initialize after reset, "
+				    "unusable\n");
+			ret = 0;
+			goto bail;
+		}
+	}
+
+	dd->flags |= QIB_PRESENT; /* it's back */
+
+	if (msix_entries) {
+		/* restore the MSIx vector address and data if saved above */
+		for (i = 0; i < msix_entries; i++) {
+			dd->cspec->msix_entries[i].entry = i;
+			if (!msix_vecsave || !msix_vecsave[2 * i])
+				continue;
+			qib_write_kreg(dd, 2 * i +
+				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
+				msix_vecsave[2 * i]);
+			qib_write_kreg(dd, 1 + 2 * i +
+				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
+				msix_vecsave[1 + 2 * i]);
+		}
+	}
+
+	/* initialize the remaining registers.
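The MSI-X save/restore around the reset boils down to: read both qwords per vector, stash them with the mask bit stripped, and write them back once the chip answers with its revision. Schematically, with an array standing in for the MSI-X table (the mask-bit constant follows the driver comment above; everything else is invented):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define MSIX_ENTRY_MASKBIT	0x100000000ULL	/* per driver comment */

	int main(void)
	{
		int n = 4, i;
		uint64_t table[8];		/* addr,data per vector */
		uint64_t *save = malloc(2 * n * sizeof(*save));

		if (!save)
			return 1;
		for (i = 0; i < 2 * n; i++)	/* fake BIOS-programmed table */
			table[i] = 0xfee00000ULL + i;
		for (i = 0; i < n; i++) {	/* save, strip mask bit */
			save[2 * i] = table[2 * i];
			save[2 * i + 1] = table[2 * i + 1] & ~MSIX_ENTRY_MASKBIT;
		}
		/* ... a chip reset would scribble the table here ... */
		for (i = 0; i < 2 * n; i++)
			table[i] = save[i];	/* restore */
		printf("vec0 addr=0x%llx data=0x%llx\n",
		       (unsigned long long)table[0],
		       (unsigned long long)table[1]);
		free(save);
		return 0;
	}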
*/ + for (i = 0; i < dd->num_pports; ++i) + write_7322_init_portregs(&dd->pport[i]); + write_7322_initregs(dd); + + if (qib_pcie_params(dd, dd->lbus_width, + &dd->cspec->num_msix_entries, + dd->cspec->msix_entries)) + qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; " + "continuing anyway\n"); + + qib_setup_7322_interrupt(dd, 1); + + for (i = 0; i < dd->num_pports; ++i) { + struct qib_pportdata *ppd = &dd->pport[i]; + + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags |= QIBL_IB_FORCE_NOTIFY; + ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + } + +bail: + dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */ + kfree(msix_vecsave); + return ret; +} + +/** + * qib_7322_put_tid - write a TID to the chip + * @dd: the qlogic_ib device + * @tidptr: pointer to the expected TID (in chip) to update + * @tidtype: 0 for eager, 1 for expected + * @pa: physical address of in memory buffer; tidinvalid if freeing + */ +static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, + u32 type, unsigned long pa) +{ + if (!(dd->flags & QIB_PRESENT)) + return; + if (pa != dd->tidinvalid) { + u64 chippa = pa >> IBA7322_TID_PA_SHIFT; + + /* paranoia checks */ + if (pa != (chippa << IBA7322_TID_PA_SHIFT)) { + qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", + pa); + return; + } + if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) { + qib_dev_err(dd, "Physical page address 0x%lx " + "larger than supported\n", pa); + return; + } + + if (type == RCVHQ_RCV_TYPE_EAGER) + chippa |= dd->tidtemplate; + else /* for now, always full 4KB page */ + chippa |= IBA7322_TID_SZ_4K; + pa = chippa; + } + writeq(pa, tidptr); + mmiowb(); +} + +/** + * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager + * @dd: the qlogic_ib device + * @ctxt: the ctxt + * + * clear all TID entries for a ctxt, expected and eager. + * Used from qib_close(). + */ +static void qib_7322_clear_tids(struct qib_devdata *dd, + struct qib_ctxtdata *rcd) +{ + u64 __iomem *tidbase; + unsigned long tidinv; + u32 ctxt; + int i; + + if (!dd->kregbase || !rcd) + return; + + ctxt = rcd->ctxt; + + tidinv = dd->tidinvalid; + tidbase = (u64 __iomem *) + ((char __iomem *) dd->kregbase + + dd->rcvtidbase + + ctxt * dd->rcvtidcnt * sizeof(*tidbase)); + + for (i = 0; i < dd->rcvtidcnt; i++) + qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, + tidinv); + + tidbase = (u64 __iomem *) + ((char __iomem *) dd->kregbase + + dd->rcvegrbase + + rcd->rcvegr_tid_base * sizeof(*tidbase)); + + for (i = 0; i < rcd->rcvegrcnt; i++) + qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, + tidinv); +} + +/** + * qib_7322_tidtemplate - setup constants for TID updates + * @dd: the qlogic_ib device + * + * We setup stuff that we use a lot, to avoid calculating each time + */ +static void qib_7322_tidtemplate(struct qib_devdata *dd) +{ + /* + * For now, we always allocate 4KB buffers (at init) so we can + * receive max size packets. We may want a module parameter to + * specify 2KB or 4KB and/or make it per port instead of per device + * for those who want to reduce memory footprint. Note that the + * rcvhdrentsize size must be large enough to hold the largest + * IB header (currently 96 bytes) that we expect to handle (plus of + * course the 2 dwords of RHF). 
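qib_7322_put_tid() above is mostly address packing with guard rails. The same math in a standalone sketch; the shift and size constants are illustrative stand-ins for the IBA7322_TID_* values, not the real ones:

	#include <stdint.h>
	#include <stdio.h>

	#define TID_PA_SHIFT	11		/* 2KB alignment (assumed) */
	#define TID_SZ_SHIFT	37		/* max address bits (assumed) */
	#define TID_SZ_4K	(1ULL << 42)	/* stand-in "4KB buffer" flag */

	static int pack_tid(uint64_t pa, uint64_t *out)
	{
		uint64_t chippa = pa >> TID_PA_SHIFT;

		if (pa != (chippa << TID_PA_SHIFT))
			return -1;		/* not 2KB aligned */
		if (chippa >= (1ULL << TID_SZ_SHIFT))
			return -1;		/* beyond addressable range */
		*out = chippa | TID_SZ_4K;	/* expected-TID flavor */
		return 0;
	}

	int main(void)
	{
		uint64_t tid;

		if (!pack_tid(0x12345800ULL, &tid))
			printf("tid=0x%llx\n", (unsigned long long)tid);
		if (pack_tid(0x12345801ULL, &tid))
			printf("rejected unaligned address\n");
		return 0;
	}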
+	 */
+	if (dd->rcvegrbufsize == 2048)
+		dd->tidtemplate = IBA7322_TID_SZ_2K;
+	else if (dd->rcvegrbufsize == 4096)
+		dd->tidtemplate = IBA7322_TID_SZ_4K;
+	dd->tidinvalid = 0;
+}
+
+/**
+ * qib_7322_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
+				  struct qib_base_info *kinfo)
+{
+	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
+		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
+		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
+	if (rcd->dd->cspec->r1)
+		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
+	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+	return 0;
+}
+
+static struct qib_message_header *
+qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+	u32 offset = qib_hdrget_offset(rhf_addr);
+
+	return (struct qib_message_header *)
+		(rhf_addr - dd->rhf_offset + offset);
+}
+
+/*
+ * Configure number of contexts.
+ */
+static void qib_7322_config_ctxts(struct qib_devdata *dd)
+{
+	unsigned long flags;
+	u32 nchipctxts;
+
+	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
+	dd->cspec->numctxts = nchipctxts;
+	if (qib_n_krcv_queues > 1 && dd->num_pports) {
+		/*
+		 * Set the mask for which bits from the QPN are used
+		 * to select a context number.
+		 */
+		dd->qpn_mask = 0x3f;
+		dd->first_user_ctxt = NUM_IB_PORTS +
+			(qib_n_krcv_queues - 1) * dd->num_pports;
+		if (dd->first_user_ctxt > nchipctxts)
+			dd->first_user_ctxt = nchipctxts;
+		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
+	} else {
+		dd->first_user_ctxt = NUM_IB_PORTS;
+		dd->n_krcv_queues = 1;
+	}
+
+	if (!qib_cfgctxts) {
+		int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+		if (nctxts <= 6)
+			dd->ctxtcnt = 6;
+		else if (nctxts <= 10)
+			dd->ctxtcnt = 10;
+		else if (nctxts <= nchipctxts)
+			dd->ctxtcnt = nchipctxts;
+	} else if (qib_cfgctxts < dd->num_pports)
+		dd->ctxtcnt = dd->num_pports;
+	else if (qib_cfgctxts <= nchipctxts)
+		dd->ctxtcnt = qib_cfgctxts;
+	if (!dd->ctxtcnt) /* none of the above, set to max */
+		dd->ctxtcnt = nchipctxts;
+
+	/*
+	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
+	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+	 * Lock to be paranoid about later motion, etc.
+	 */
+	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+	if (dd->ctxtcnt > 10)
+		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
+	else if (dd->ctxtcnt > 6)
+		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
+	/* else configure for default 6 receive ctxts */
+
+	/* The XRC opcode is 5. */
+	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
+
+	/*
+	 * RcvCtrl *must* be written here so that the
+	 * chip understands how to change rcvegrcnt below.
+	 */
+	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+	/* kr_rcvegrcnt changes based on the number of contexts enabled */
+	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+			    dd->num_pports > 1 ?
1024U : 2048U); +} + +static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) +{ + + int lsb, ret = 0; + u64 maskr; /* right-justified mask */ + + switch (which) { + + case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */ + ret = ppd->link_width_enabled; + goto done; + + case QIB_IB_CFG_LWID: /* Get currently active Link-width */ + ret = ppd->link_width_active; + goto done; + + case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ + ret = ppd->link_speed_enabled; + goto done; + + case QIB_IB_CFG_SPD: /* Get current Link spd */ + ret = ppd->link_speed_active; + goto done; + + case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ + lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP); + maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP); + break; + + case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ + lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); + maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); + break; + + case QIB_IB_CFG_LINKLATENCY: + ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) & + SYM_MASK(IBCStatusB_0, LinkRoundTripLatency); + goto done; + + case QIB_IB_CFG_OP_VLS: + ret = ppd->vls_operational; + goto done; + + case QIB_IB_CFG_VL_HIGH_CAP: + ret = 16; + goto done; + + case QIB_IB_CFG_VL_LOW_CAP: + ret = 16; + goto done; + + case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ + ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, + OverrunThreshold); + goto done; + + case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ + ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, + PhyerrThreshold); + goto done; + + case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ + /* will only take effect when the link state changes */ + ret = (ppd->cpspec->ibcctrl_a & + SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ? + IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; + goto done; + + case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ + lsb = IBA7322_IBC_HRTBT_LSB; + maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */ + break; + + case QIB_IB_CFG_PMA_TICKS: + /* + * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs + * Since the clock is always 250MHz, the value is 3, 1 or 0. + */ + if (ppd->link_speed_active == QIB_IB_QDR) + ret = 3; + else if (ppd->link_speed_active == QIB_IB_DDR) + ret = 1; + else + ret = 0; + goto done; + + default: + ret = -EINVAL; + goto done; + } + ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr); +done: + return ret; +} + +/* + * Below again cribbed liberally from older version. Do not lean + * heavily on it. + */ +#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB +#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \ + | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16)) + +static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) +{ + struct qib_devdata *dd = ppd->dd; + u64 maskr; /* right-justified mask */ + int lsb, ret = 0; + u16 lcmd, licmd; + unsigned long flags; + + switch (which) { + case QIB_IB_CFG_LIDLMC: + /* + * Set LID and LMC. Combined to avoid possible hazard + * caller puts LMC in 16MSbits, DLID in 16LSbits of val + */ + lsb = IBA7322_IBC_DLIDLMC_SHIFT; + maskr = IBA7322_IBC_DLIDLMC_MASK; + /* + * For header-checking, the SLID in the packet will + * be masked with SendIBSLMCMask, and compared + * with SendIBSLIDAssignMask. Make sure we do not + * set any bits not covered by the mask, or we get + * false-positives. 
+		 */
+		qib_write_kreg_port(ppd, krp_sendslid,
+				    val & (val >> 16) & SendIBSLIDAssignMask);
+		qib_write_kreg_port(ppd, krp_sendslidmask,
+				    (val >> 16) & SendIBSLMCMask);
+		break;
+
+	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
+		ppd->link_width_enabled = val;
+		/* convert IB value to chip register value */
+		if (val == IB_WIDTH_1X)
+			val = 0;
+		else if (val == IB_WIDTH_4X)
+			val = 1;
+		else
+			val = 3;
+		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
+		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
+		break;
+
+	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+		/*
+		 * As with width, only write the actual register if the
+		 * link is currently down, otherwise takes effect on next
+		 * link change. Since setting is being explicitly requested
+		 * (via MAD or sysfs), clear autoneg failure status if speed
+		 * autoneg is enabled.
+		 */
+		ppd->link_speed_enabled = val;
+		val <<= IBA7322_IBC_SPEED_LSB;
+		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
+			IBA7322_IBC_MAX_SPEED_MASK;
+		if (val & (val - 1)) {
+			/* Multiple speeds enabled */
+			val |= IBA7322_IBC_IBTA_1_2_MASK |
+				IBA7322_IBC_MAX_SPEED_MASK;
+			spin_lock_irqsave(&ppd->lflags_lock, flags);
+			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+		} else if (val & IBA7322_IBC_SPEED_QDR)
+			val |= IBA7322_IBC_IBTA_1_2_MASK;
+		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
+		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
+		break;
+
+	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+		break;
+
+	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+		break;
+
+	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+				  OverrunThreshold);
+		if (maskr != val) {
+			ppd->cpspec->ibcctrl_a &=
+				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
+			ppd->cpspec->ibcctrl_a |= (u64) val <<
+				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+			qib_write_kreg_port(ppd, krp_ibcctrl_a,
+					    ppd->cpspec->ibcctrl_a);
+			qib_write_kreg(dd, kr_scratch, 0ULL);
+		}
+		goto bail;
+
+	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+				  PhyerrThreshold);
+		if (maskr != val) {
+			ppd->cpspec->ibcctrl_a &=
+				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
+			ppd->cpspec->ibcctrl_a |= (u64) val <<
+				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+			qib_write_kreg_port(ppd, krp_ibcctrl_a,
+					    ppd->cpspec->ibcctrl_a);
+			qib_write_kreg(dd, kr_scratch, 0ULL);
+		}
+		goto bail;
+
+	case QIB_IB_CFG_PKEYS: /* update pkeys */
+		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+			((u64) ppd->pkeys[2] << 32) |
+			((u64) ppd->pkeys[3] << 48);
+		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
+		goto bail;
+
+	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+		/* will only take effect when the link state changes */
+		if (val == IB_LINKINITCMD_POLL)
+			ppd->cpspec->ibcctrl_a &=
+				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+		else /* SLEEP */
+			ppd->cpspec->ibcctrl_a |=
+				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+		qib_write_kreg(dd, kr_scratch, 0ULL);
+		goto bail;
+
+	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+		/*
+		 * Update our housekeeping variables, and set IBC max
+		 * size, same as init code; max IBC is max we allow in
+		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
+		 * Set even if it's unchanged, print debug message only
+		 * on changes.
+		 */
+		val = (ppd->ibmaxlen >> 2) + 1;
+		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
+		ppd->cpspec->ibcctrl_a |= (u64)val <<
+			SYM_LSB(IBCCtrlA_0, MaxPktLen);
+		qib_write_kreg_port(ppd, krp_ibcctrl_a,
+				    ppd->cpspec->ibcctrl_a);
+		qib_write_kreg(dd, kr_scratch, 0ULL);
+		goto bail;
+
+	case QIB_IB_CFG_LSTATE: /* set the IB link state */
+		switch (val & 0xffff0000) {
+		case IB_LINKCMD_DOWN:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+			ppd->cpspec->ibmalfusesnap = 1;
+			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
+				crp_errlink);
+			if (!ppd->cpspec->ibdeltainprog &&
+			    qib_compat_ddr_negotiate) {
+				ppd->cpspec->ibdeltainprog = 1;
+				ppd->cpspec->ibsymsnap =
+					read_7322_creg32_port(ppd,
+							      crp_ibsymbolerr);
+				ppd->cpspec->iblnkerrsnap =
+					read_7322_creg32_port(ppd,
+						     crp_iblinkerrrecov);
+			}
+			break;
+
+		case IB_LINKCMD_ARMED:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+			if (ppd->cpspec->ibmalfusesnap) {
+				ppd->cpspec->ibmalfusesnap = 0;
+				ppd->cpspec->ibmalfdelta +=
+					read_7322_creg32_port(ppd,
+							      crp_errlink) -
+					ppd->cpspec->ibmalfsnap;
+			}
+			break;
+
+		case IB_LINKCMD_ACTIVE:
+			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+			goto bail;
+		}
+		switch (val & 0xffff) {
+		case IB_LINKINITCMD_NOP:
+			licmd = 0;
+			break;
+
+		case IB_LINKINITCMD_POLL:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+			break;
+
+		case IB_LINKINITCMD_SLEEP:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+			break;
+
+		case IB_LINKINITCMD_DISABLE:
+			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+			ppd->cpspec->chase_end = 0;
+			/*
+			 * stop state chase counter and timer, if running.
+			 * wait for pending timer, but don't clear .data
+			 * (ppd)!
+			 */
+			if (ppd->cpspec->chase_timer.expires) {
+				del_timer_sync(&ppd->cpspec->chase_timer);
+				ppd->cpspec->chase_timer.expires = 0;
+			}
+			break;
+
+		default:
+			ret = -EINVAL;
+			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+				    val & 0xffff);
+			goto bail;
+		}
+		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
+		goto bail;
+
+	case QIB_IB_CFG_OP_VLS:
+		if (ppd->vls_operational != val) {
+			ppd->vls_operational = val;
+			set_vls(ppd);
+		}
+		goto bail;
+
+	case QIB_IB_CFG_VL_HIGH_LIMIT:
+		qib_write_kreg_port(ppd, krp_highprio_limit, val);
+		goto bail;
+
+	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+		if (val > 3) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		lsb = IBA7322_IBC_HRTBT_LSB;
+		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
+		break;
+
+	case QIB_IB_CFG_PORT:
+		/* val is the port number of the switch we are connected to.
*/ + if (ppd->dd->cspec->r1) { + cancel_delayed_work(&ppd->cpspec->ipg_work); + ppd->cpspec->ipg_tries = 0; + } + goto bail; + + default: + ret = -EINVAL; + goto bail; + } + ppd->cpspec->ibcctrl_b &= ~(maskr << lsb); + ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb); + qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); + qib_write_kreg(dd, kr_scratch, 0); +bail: + return ret; +} + +static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what) +{ + int ret = 0; + u64 val, ctrlb; + + /* only IBC loopback, may add serdes and xgxs loopbacks later */ + if (!strncmp(what, "ibc", 3)) { + ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, + Loopback); + val = 0; /* disable heart beat, so link will come up */ + qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", + ppd->dd->unit, ppd->port); + } else if (!strncmp(what, "off", 3)) { + ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, + Loopback); + /* enable heart beat again */ + val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB; + qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " + "(normal)\n", ppd->dd->unit, ppd->port); + } else + ret = -EINVAL; + if (!ret) { + qib_write_kreg_port(ppd, krp_ibcctrl_a, + ppd->cpspec->ibcctrl_a); + ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK + << IBA7322_IBC_HRTBT_LSB); + ppd->cpspec->ibcctrl_b = ctrlb | val; + qib_write_kreg_port(ppd, krp_ibcctrl_b, + ppd->cpspec->ibcctrl_b); + qib_write_kreg(ppd->dd, kr_scratch, 0); + } + return ret; +} + +static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno, + struct ib_vl_weight_elem *vl) +{ + unsigned i; + + for (i = 0; i < 16; i++, regno++, vl++) { + u32 val = qib_read_kreg_port(ppd, regno); + + vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) & + SYM_RMASK(LowPriority0_0, VirtualLane); + vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) & + SYM_RMASK(LowPriority0_0, Weight); + } +} + +static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno, + struct ib_vl_weight_elem *vl) +{ + unsigned i; + + for (i = 0; i < 16; i++, regno++, vl++) { + u64 val; + + val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) << + SYM_LSB(LowPriority0_0, VirtualLane)) | + ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) << + SYM_LSB(LowPriority0_0, Weight)); + qib_write_kreg_port(ppd, regno, val); + } + if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) { + struct qib_devdata *dd = ppd->dd; + unsigned long flags; + + spin_lock_irqsave(&dd->sendctrl_lock, flags); + ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn); + qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + spin_unlock_irqrestore(&dd->sendctrl_lock, flags); + } +} + +static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t) +{ + switch (which) { + case QIB_IB_TBL_VL_HIGH_ARB: + get_vl_weights(ppd, krp_highprio_0, t); + break; + + case QIB_IB_TBL_VL_LOW_ARB: + get_vl_weights(ppd, krp_lowprio_0, t); + break; + + default: + return -EINVAL; + } + return 0; +} + +static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t) +{ + switch (which) { + case QIB_IB_TBL_VL_HIGH_ARB: + set_vl_weights(ppd, krp_highprio_0, t); + break; + + case QIB_IB_TBL_VL_LOW_ARB: + set_vl_weights(ppd, krp_lowprio_0, t); + break; + + default: + return -EINVAL; + } + return 0; +} + +static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, + u32 updegr, u32 egrhd) +{ + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + 
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + if (updegr) + qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); +} + +static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd) +{ + u32 head, tail; + + head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); + if (rcd->rcvhdrtail_kvaddr) + tail = qib_get_rcvhdrtail(rcd); + else + tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); + return head == tail; +} + +#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \ + QIB_RCVCTRL_CTXT_DIS | \ + QIB_RCVCTRL_TIDFLOW_ENB | \ + QIB_RCVCTRL_TIDFLOW_DIS | \ + QIB_RCVCTRL_TAILUPD_ENB | \ + QIB_RCVCTRL_TAILUPD_DIS | \ + QIB_RCVCTRL_INTRAVAIL_ENB | \ + QIB_RCVCTRL_INTRAVAIL_DIS | \ + QIB_RCVCTRL_BP_ENB | \ + QIB_RCVCTRL_BP_DIS) + +#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \ + QIB_RCVCTRL_CTXT_DIS | \ + QIB_RCVCTRL_PKEY_DIS | \ + QIB_RCVCTRL_PKEY_ENB) + +/* + * Modify the RCVCTRL register in a chip-specific way. This + * is a function because bit positions and (future) register + * locations are chip-specific, but the needed operations are + * generic. <op> is a bit-mask because we often want to + * do multiple modifications. + */ +static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op, + int ctxt) +{ + struct qib_devdata *dd = ppd->dd; + struct qib_ctxtdata *rcd; + u64 mask, val; + unsigned long flags; + + spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); + + if (op & QIB_RCVCTRL_TIDFLOW_ENB) + dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable); + if (op & QIB_RCVCTRL_TIDFLOW_DIS) + dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable); + if (op & QIB_RCVCTRL_TAILUPD_ENB) + dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); + if (op & QIB_RCVCTRL_TAILUPD_DIS) + dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd); + if (op & QIB_RCVCTRL_PKEY_ENB) + ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); + if (op & QIB_RCVCTRL_PKEY_DIS) + ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); + if (ctxt < 0) { + mask = (1ULL << dd->ctxtcnt) - 1; + rcd = NULL; + } else { + mask = (1ULL << ctxt); + rcd = dd->rcd[ctxt]; + } + if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) { + ppd->p_rcvctrl |= + (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel)); + if (!(dd->flags & QIB_NODMA_RTAIL)) { + op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */ + dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); + } + /* Write these registers before the context is enabled. */ + qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, + rcd->rcvhdrqtailaddr_phys); + qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, + rcd->rcvhdrq_phys); + rcd->seq_cnt = 1; + } + if (op & QIB_RCVCTRL_CTXT_DIS) + ppd->p_rcvctrl &= + ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel)); + if (op & QIB_RCVCTRL_BP_ENB) + dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull); + if (op & QIB_RCVCTRL_BP_DIS) + dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull)); + if (op & QIB_RCVCTRL_INTRAVAIL_ENB) + dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail)); + if (op & QIB_RCVCTRL_INTRAVAIL_DIS) + dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail)); + /* + * Decide which registers to write depending on the ops enabled. + * Special case is "flush" (no bits set at all) + * which needs to write both.
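+ * As a sketch (not a call made in this file): enabling a kernel + * context might look like rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB | + * QIB_RCVCTRL_INTRAVAIL_ENB, ctxt), while rcvctrl_7322_mod(ppd, 0, -1) + * is the "flush" case that rewrites both registers from their shadows.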
+ */ + if (op == 0 || (op & RCVCTRL_COMMON_MODS)) + qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); + if (op == 0 || (op & RCVCTRL_PORT_MODS)) + qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); + if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) { + /* + * Init the context registers also; if we were + * disabled, tail and head should both be zero + * already from the enable, but since we don't + * know, we have to do it explicitly. + */ + val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); + qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); + + /* be sure enabling write seen; hd/tl should be 0 */ + (void) qib_read_kreg32(dd, kr_scratch); + val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); + dd->rcd[ctxt]->head = val; + /* If kctxt, interrupt on next receive. */ + if (ctxt < dd->first_user_ctxt) + val |= dd->rhdrhead_intr_off; + qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); + } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && + dd->rcd[ctxt] && dd->rhdrhead_intr_off) { + /* arm rcv interrupt */ + val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off; + qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); + } + if (op & QIB_RCVCTRL_CTXT_DIS) { + unsigned f; + + /* Now that the context is disabled, clear these registers. */ + if (ctxt >= 0) { + qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0); + qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0); + for (f = 0; f < NUM_TIDFLOWS_CTXT; f++) + qib_write_ureg(dd, ur_rcvflowtable + f, + TIDFLOW_ERRBITS, ctxt); + } else { + unsigned i; + + for (i = 0; i < dd->cfgctxts; i++) { + qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, + i, 0); + qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0); + for (f = 0; f < NUM_TIDFLOWS_CTXT; f++) + qib_write_ureg(dd, ur_rcvflowtable + f, + TIDFLOW_ERRBITS, i); + } + } + } + spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); +} + +/* + * Modify the SENDCTRL register in a chip-specific way. This + * is a function where there are multiple such registers with + * slightly different layouts. + * The chip doesn't allow back-to-back sendctrl writes, so write + * the scratch register after writing sendctrl. + * + * Which register is written depends on the operation. + * Most operate on the common register, while + * SEND_ENB and SEND_DIS operate on the per-port ones.
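+ * In both cases the shadow copy (dd->sendctrl or ppd->p_sendctrl) is + * updated first, then written to the chip.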
+ * SEND_ENB is included in common because it can change SPCL_TRIG + */ +#define SENDCTRL_COMMON_MODS (\ + QIB_SENDCTRL_CLEAR | \ + QIB_SENDCTRL_AVAIL_DIS | \ + QIB_SENDCTRL_AVAIL_ENB | \ + QIB_SENDCTRL_AVAIL_BLIP | \ + QIB_SENDCTRL_DISARM | \ + QIB_SENDCTRL_DISARM_ALL | \ + QIB_SENDCTRL_SEND_ENB) + +#define SENDCTRL_PORT_MODS (\ + QIB_SENDCTRL_CLEAR | \ + QIB_SENDCTRL_SEND_ENB | \ + QIB_SENDCTRL_SEND_DIS | \ + QIB_SENDCTRL_FLUSH) + +static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op) +{ + struct qib_devdata *dd = ppd->dd; + u64 tmp_dd_sendctrl; + unsigned long flags; + + spin_lock_irqsave(&dd->sendctrl_lock, flags); + + /* First the dd ones that are "sticky", saved in shadow */ + if (op & QIB_SENDCTRL_CLEAR) + dd->sendctrl = 0; + if (op & QIB_SENDCTRL_AVAIL_DIS) + dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); + else if (op & QIB_SENDCTRL_AVAIL_ENB) { + dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); + if (dd->flags & QIB_USE_SPCL_TRIG) + dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn); + } + + /* Then the ppd ones that are "sticky", saved in shadow */ + if (op & QIB_SENDCTRL_SEND_DIS) + ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable); + else if (op & QIB_SENDCTRL_SEND_ENB) + ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable); + + if (op & QIB_SENDCTRL_DISARM_ALL) { + u32 i, last; + + tmp_dd_sendctrl = dd->sendctrl; + last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; + /* + * Disarm any buffers that are not yet launched, + * disabling updates until done. + */ + tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); + for (i = 0; i < last; i++) { + qib_write_kreg(dd, kr_sendctrl, + tmp_dd_sendctrl | + SYM_MASK(SendCtrl, Disarm) | i); + qib_write_kreg(dd, kr_scratch, 0); + } + } + + if (op & QIB_SENDCTRL_FLUSH) { + u64 tmp_ppd_sendctrl = ppd->p_sendctrl; + + /* + * Now drain all the fifos. The Abort bit should never be + * needed, so for now, at least, we don't use it. + */ + tmp_ppd_sendctrl |= + SYM_MASK(SendCtrl_0, TxeDrainRmFifo) | + SYM_MASK(SendCtrl_0, TxeDrainLaFifo) | + SYM_MASK(SendCtrl_0, TxeBypassIbc); + qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + + tmp_dd_sendctrl = dd->sendctrl; + + if (op & QIB_SENDCTRL_DISARM) + tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) | + ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) << + SYM_LSB(SendCtrl, DisarmSendBuf)); + if ((op & QIB_SENDCTRL_AVAIL_BLIP) && + (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) + tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); + + if (op == 0 || (op & SENDCTRL_COMMON_MODS)) { + qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + + if (op == 0 || (op & SENDCTRL_PORT_MODS)) { + qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + + if (op & QIB_SENDCTRL_AVAIL_BLIP) { + qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); + qib_write_kreg(dd, kr_scratch, 0); + } + + spin_unlock_irqrestore(&dd->sendctrl_lock, flags); + + if (op & QIB_SENDCTRL_FLUSH) { + u32 v; + /* + * ensure writes have hit chip, then do a few + * more reads, to allow DMA of pioavail registers + * to occur, so in-memory copy is in sync with + * the chip. Not always safe to sleep. 
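+ * (The first scratch read below presumably also flushes posted writes; + * the write/read pairs that follow just add bus time for that DMA.)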
+ */ + v = qib_read_kreg32(dd, kr_scratch); + qib_write_kreg(dd, kr_scratch, v); + v = qib_read_kreg32(dd, kr_scratch); + qib_write_kreg(dd, kr_scratch, v); + qib_read_kreg32(dd, kr_scratch); + } +} + +#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */ +#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */ +#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */ + +/** + * qib_portcntr_7322 - read a per-port chip counter + * @ppd: the qlogic_ib pport + * @reg: the counter to read (not a chip offset) + */ +static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg) +{ + struct qib_devdata *dd = ppd->dd; + u64 ret = 0ULL; + u16 creg; + /* 0xffff for unimplemented or synthesized counters */ + static const u32 xlator[] = { + [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG, + [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG, + [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount, + [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount, + [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount, + [QIBPORTCNTR_SENDSTALL] = crp_sendstall, + [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG, + [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount, + [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount, + [QIBPORTCNTR_RCVEBP] = crp_rcvebp, + [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl, + [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG, + [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */ + [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr, + [QIBPORTCNTR_RXVLERR] = crp_rxvlerr, + [QIBPORTCNTR_ERRICRC] = crp_erricrc, + [QIBPORTCNTR_ERRVCRC] = crp_errvcrc, + [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc, + [QIBPORTCNTR_BADFORMAT] = crp_badformat, + [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen, + [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr, + [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen, + [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl, + [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl, + [QIBPORTCNTR_ERRLINK] = crp_errlink, + [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown, + [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov, + [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr, + [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt, + [QIBPORTCNTR_ERRPKEY] = crp_errpkey, + /* + * the next 3 aren't really counters, but were implemented + * as counters in older chips, so still get accessed as + * though they were counters from this code. + */ + [QIBPORTCNTR_PSINTERVAL] = krp_psinterval, + [QIBPORTCNTR_PSSTART] = krp_psstart, + [QIBPORTCNTR_PSSTAT] = krp_psstat, + /* pseudo-counter, summed for all ports */ + [QIBPORTCNTR_KHDROVFL] = 0xffff, + }; + + if (reg >= ARRAY_SIZE(xlator)) { + qib_devinfo(ppd->dd->pcidev, + "Unimplemented portcounter %u\n", reg); + goto done; + } + creg = xlator[reg] & _PORT_CNTR_IDXMASK; + + /* handle non-counters and special cases first */ + if (reg == QIBPORTCNTR_KHDROVFL) { + int i; + + /* sum over all kernel contexts (skip if mini_init) */ + for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) { + struct qib_ctxtdata *rcd = dd->rcd[i]; + + if (!rcd || rcd->ppd != ppd) + continue; + ret += read_7322_creg32(dd, cr_base_egrovfl + i); + } + goto done; + } else if (reg == QIBPORTCNTR_RXDROPPKT) { + /* + * Used as part of the synthesis of port_rcv_errors + * in the verbs code for IBTA counters. Not needed for 7322, + * because all the errors are already counted by other cntrs.
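+ * We therefore just return the initial 0 value here.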
+ */ + goto done; + } else if (reg == QIBPORTCNTR_PSINTERVAL || + reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) { + /* were counters in older chips, now per-port kernel regs */ + ret = qib_read_kreg_port(ppd, creg); + goto done; + } + + /* + * Only fast increment counters are 64 bits; use 32 bit reads to + * avoid two independent reads when on Opteron. + */ + if (xlator[reg] & _PORT_64BIT_FLAG) + ret = read_7322_creg_port(ppd, creg); + else + ret = read_7322_creg32_port(ppd, creg); + if (creg == crp_ibsymbolerr) { + if (ppd->cpspec->ibdeltainprog) + ret -= ret - ppd->cpspec->ibsymsnap; + ret -= ppd->cpspec->ibsymdelta; + } else if (creg == crp_iblinkerrrecov) { + if (ppd->cpspec->ibdeltainprog) + ret -= ret - ppd->cpspec->iblnkerrsnap; + ret -= ppd->cpspec->iblnkerrdelta; + } else if (creg == crp_errlink) + ret -= ppd->cpspec->ibmalfdelta; + else if (creg == crp_iblinkdown) + ret += ppd->cpspec->iblnkdowndelta; +done: + return ret; +} + +/* + * Device counter names (not port-specific), one line per stat, + * single string. Used by utilities like ipathstats to print the stats + * in a way which works for different versions of drivers, without changing + * the utility. Names need to be 12 chars or less (w/o newline), for proper + * display by utility. + * Non-error counters are first. + * Start of "error" counters is indicated by a leading "E " on the first + * "error" counter, and doesn't count in label length. + * The EgrOvfl list needs to be last so we truncate them at the configured + * context count for the device. + * cntr7322indices contains the corresponding register indices. + */ +static const char cntr7322names[] = + "Interrupts\n" + "HostBusStall\n" + "E RxTIDFull\n" + "RxTIDInvalid\n" + "RxTIDFloDrop\n" /* 7322 only */ + "Ctxt0EgrOvfl\n" + "Ctxt1EgrOvfl\n" + "Ctxt2EgrOvfl\n" + "Ctxt3EgrOvfl\n" + "Ctxt4EgrOvfl\n" + "Ctxt5EgrOvfl\n" + "Ctxt6EgrOvfl\n" + "Ctxt7EgrOvfl\n" + "Ctxt8EgrOvfl\n" + "Ctxt9EgrOvfl\n" + "Ctx10EgrOvfl\n" + "Ctx11EgrOvfl\n" + "Ctx12EgrOvfl\n" + "Ctx13EgrOvfl\n" + "Ctx14EgrOvfl\n" + "Ctx15EgrOvfl\n" + "Ctx16EgrOvfl\n" + "Ctx17EgrOvfl\n" + ; + +static const u32 cntr7322indices[] = { + cr_lbint | _PORT_64BIT_FLAG, + cr_lbstall | _PORT_64BIT_FLAG, + cr_tidfull, + cr_tidinvalid, + cr_rxtidflowdrop, + cr_base_egrovfl + 0, + cr_base_egrovfl + 1, + cr_base_egrovfl + 2, + cr_base_egrovfl + 3, + cr_base_egrovfl + 4, + cr_base_egrovfl + 5, + cr_base_egrovfl + 6, + cr_base_egrovfl + 7, + cr_base_egrovfl + 8, + cr_base_egrovfl + 9, + cr_base_egrovfl + 10, + cr_base_egrovfl + 11, + cr_base_egrovfl + 12, + cr_base_egrovfl + 13, + cr_base_egrovfl + 14, + cr_base_egrovfl + 15, + cr_base_egrovfl + 16, + cr_base_egrovfl + 17, +}; + +/* + * Same as cntr7322names and cntr7322indices, but for port-specific counters.
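+ * The two arrays are parallel: one index entry per name line, in order.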
+ * portcntr7322indices is somewhat complicated by some registers needing + * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG + */ +static const char portcntr7322names[] = + "TxPkt\n" + "TxFlowPkt\n" + "TxWords\n" + "RxPkt\n" + "RxFlowPkt\n" + "RxWords\n" + "TxFlowStall\n" + "TxDmaDesc\n" /* 7220 and 7322-only */ + "E RxDlidFltr\n" /* 7220 and 7322-only */ + "IBStatusChng\n" + "IBLinkDown\n" + "IBLnkRecov\n" + "IBRxLinkErr\n" + "IBSymbolErr\n" + "RxLLIErr\n" + "RxBadFormat\n" + "RxBadLen\n" + "RxBufOvrfl\n" + "RxEBP\n" + "RxFlowCtlErr\n" + "RxICRCerr\n" + "RxLPCRCerr\n" + "RxVCRCerr\n" + "RxInvalLen\n" + "RxInvalPKey\n" + "RxPktDropped\n" + "TxBadLength\n" + "TxDropped\n" + "TxInvalLen\n" + "TxUnderrun\n" + "TxUnsupVL\n" + "RxLclPhyErr\n" /* 7220 and 7322-only from here down */ + "RxVL15Drop\n" + "RxVlErr\n" + "XcessBufOvfl\n" + "RxQPBadCtxt\n" /* 7322-only from here down */ + "TXBadHeader\n" + ; + +static const u32 portcntr7322indices[] = { + QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG, + crp_pktsendflow, + QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG, + QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG, + crp_pktrcvflowctrl, + QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG, + QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG, + crp_txsdmadesc | _PORT_64BIT_FLAG, + crp_rxdlidfltr, + crp_ibstatuschange, + QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG, + QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG, + QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG, + QIBPORTCNTR_LLI | _PORT_VIRT_FLAG, + QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG, + QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG, + QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG, + crp_rcvflowctrlviol, + QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG, + QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG, + QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG, + QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG, + crp_txminmaxlenerr, + crp_txdroppedpkt, + crp_txlenerr, + crp_txunderrun, + crp_txunsupvl, + QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG, + QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG, + QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG, + QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG, + crp_rxqpinvalidctxt, + crp_txhdrerr, +}; + +/* do all the setup to make the counter reads efficient later */ +static void init_7322_cntrnames(struct qib_devdata *dd) +{ + int i, j = 0; + char *s; + + for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts; + i++) { + /* we always have at least one counter before the egrovfl */ + if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12)) + j = 1; + s = strchr(s + 1, '\n'); + if (s && j) + j++; + } + dd->cspec->ncntrs = i; + if (!s) + /* full list; size is without terminating null */ + dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1; + else + dd->cspec->cntrnamelen = 1 + s - cntr7322names; + dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs + * sizeof(u64), GFP_KERNEL); + if (!dd->cspec->cntrs) + qib_dev_err(dd, "Failed allocation for counters\n"); + + for (i = 0, s = (char *)portcntr7322names; s; i++) + s = strchr(s + 1, '\n'); + dd->cspec->nportcntrs = i - 1; + dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1; + for (i = 0; i < dd->num_pports; ++i) { + dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs + * sizeof(u64), GFP_KERNEL); + if (!dd->pport[i].cpspec->portcntrs) + qib_dev_err(dd, "Failed allocation for" + " portcounters\n"); + } +} + +static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep, + u64 
**cntrp) +{ + u32 ret; + + if (namep) { + ret = dd->cspec->cntrnamelen; + if (pos >= ret) + ret = 0; /* final read after getting everything */ + else + *namep = (char *) cntr7322names; + } else { + u64 *cntr = dd->cspec->cntrs; + int i; + + ret = dd->cspec->ncntrs * sizeof(u64); + if (!cntr || pos >= ret) { + /* everything read, or couldn't get memory */ + ret = 0; + goto done; + } + *cntrp = cntr; + for (i = 0; i < dd->cspec->ncntrs; i++) + if (cntr7322indices[i] & _PORT_64BIT_FLAG) + *cntr++ = read_7322_creg(dd, + cntr7322indices[i] & + _PORT_CNTR_IDXMASK); + else + *cntr++ = read_7322_creg32(dd, + cntr7322indices[i]); + } +done: + return ret; +} + +static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, + char **namep, u64 **cntrp) +{ + u32 ret; + + if (namep) { + ret = dd->cspec->portcntrnamelen; + if (pos >= ret) + ret = 0; /* final read after getting everything */ + else + *namep = (char *)portcntr7322names; + } else { + struct qib_pportdata *ppd = &dd->pport[port]; + u64 *cntr = ppd->cpspec->portcntrs; + int i; + + ret = dd->cspec->nportcntrs * sizeof(u64); + if (!cntr || pos >= ret) { + /* everything read, or couldn't get memory */ + ret = 0; + goto done; + } + *cntrp = cntr; + for (i = 0; i < dd->cspec->nportcntrs; i++) { + if (portcntr7322indices[i] & _PORT_VIRT_FLAG) + *cntr++ = qib_portcntr_7322(ppd, + portcntr7322indices[i] & + _PORT_CNTR_IDXMASK); + else if (portcntr7322indices[i] & _PORT_64BIT_FLAG) + *cntr++ = read_7322_creg_port(ppd, + portcntr7322indices[i] & + _PORT_CNTR_IDXMASK); + else + *cntr++ = read_7322_creg32_port(ppd, + portcntr7322indices[i]); + } + } +done: + return ret; +} + +/** + * qib_get_7322_faststats - get word counters from chip before they overflow + * @opaque - contains a pointer to the qlogic_ib device qib_devdata + * + * VESTIGIAL: IBA7322 has no "small fast counters", so the only + * real purpose of this function is to maintain the notion of + * "active time", which in turn is only logged into the eeprom, + * which we don't have, yet, for 7322-based boards. + * + * called from add_timer + */ +static void qib_get_7322_faststats(unsigned long opaque) +{ + struct qib_devdata *dd = (struct qib_devdata *) opaque; + struct qib_pportdata *ppd; + unsigned long flags; + u64 traffic_wds; + int pidx; + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + + /* + * If the port isn't enabled or not operational, or + * diags is running (can cause memory diags to fail), + * skip this port this time. + */ + if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED) + || dd->diag_client) + continue; + + /* + * Maintain an activity timer, based on traffic + * exceeding a threshold, so we need to check the word-counts + * even if they are 64-bit. + */ + traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) + + qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND); + spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); + traffic_wds -= ppd->dd->traffic_wds; + ppd->dd->traffic_wds += traffic_wds; + if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) + atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time); + spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); + if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & + QIB_IB_QDR) && + (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | + QIBL_LINKACTIVE)) && + ppd->cpspec->qdr_dfe_time && + time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) { + ppd->cpspec->qdr_dfe_on = 0; + + qib_write_kreg_port(ppd, krp_static_adapt_dis(2), + ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_INIT_R1 : + QDR_STATIC_ADAPT_INIT); + force_h1(ppd); + } + } + mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); +} + +/* + * If we were using MSIx, try to fall back to INTx. + */ +static int qib_7322_intr_fallback(struct qib_devdata *dd) +{ + if (!dd->cspec->num_msix_entries) + return 0; /* already using INTx */ + + qib_devinfo(dd->pcidev, "MSIx interrupt not detected," + " trying INTx interrupts\n"); + qib_7322_nomsix(dd); + qib_enable_intx(dd->pcidev); + qib_setup_7322_interrupt(dd, 0); + return 1; +} + +/* + * Reset the XGXS (between serdes and IBC). Slightly less intrusive + * than resetting the IBC or external link state, and useful in some + * cases to cause some retraining. To do this right, we reset IBC + * as well, then return to previous state (which may still be in reset). + * NOTE: some callers of this "know" this writes the current value + * of cpspec->ibcctrl_a as part of its operation, so if that changes, + * check all callers. + */ +static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd) +{ + u64 val; + struct qib_devdata *dd = ppd->dd; + const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) | + SYM_MASK(IBPCSConfig_0, xcv_treset) | + SYM_MASK(IBPCSConfig_0, tx_rx_reset); + + val = qib_read_kreg_port(ppd, krp_ib_pcsconfig); + qib_write_kreg_port(ppd, krp_ibcctrl_a, + ppd->cpspec->ibcctrl_a & + ~SYM_MASK(IBCCtrlA_0, IBLinkEn)); + + qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits); + qib_read_kreg32(dd, kr_scratch); + qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits); + qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); + qib_write_kreg(dd, kr_scratch, 0ULL); +} + +/* + * This code for non-IBTA-compliant IB speed negotiation is only known to + * work for the SDR to DDR transition, and only between an HCA and a switch + * with recent firmware. It is based on observed heuristics, rather than + * actual knowledge of the non-compliant speed negotiation. + * It has a number of hard-coded fields, since the hope is to rewrite this + * when a spec is available on how the negotiation is intended to work. + */ +static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr, + u32 dcnt, u32 *data) +{ + int i; + u64 pbc; + u32 __iomem *piobuf; + u32 pnum, control, len; + struct qib_devdata *dd = ppd->dd; + + i = 0; + len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */ + control = qib_7322_setpbc_control(ppd, len, 0, 15); + pbc = ((u64) control << 32) | len; + while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) { + if (i++ > 15) + return; + udelay(2); + } + /* disable header check on this packet, since it can't be valid */ + dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL); + writeq(pbc, piobuf); + qib_flush_wc(); + qib_pio_copy(piobuf + 2, hdr, 7); + qib_pio_copy(piobuf + 9, data, dcnt); + if (dd->flags & QIB_USE_SPCL_TRIG) { + u32 spcl_off = (pnum >= dd->piobcnt2k) ?
2047 : 1023; + + qib_flush_wc(); + __raw_writel(0xaebecede, piobuf + spcl_off); + } + qib_flush_wc(); + qib_sendbuf_done(dd, pnum); + /* and re-enable hdr check */ + dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL); +} + +/* + * _start packet gets sent twice at start, _done gets sent twice at end + */ +static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which) +{ + struct qib_devdata *dd = ppd->dd; + static u32 swapped; + u32 dw, i, hcnt, dcnt, *data; + static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba }; + static u32 madpayload_start[0x40] = { + 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, + 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */ + }; + static u32 madpayload_done[0x40] = { + 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, + 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x40000001, 0x1388, 0x15e, /* rest 0's */ + }; + + dcnt = ARRAY_SIZE(madpayload_start); + hcnt = ARRAY_SIZE(hdr); + if (!swapped) { + /* for maintainability, do it at runtime */ + for (i = 0; i < hcnt; i++) { + dw = (__force u32) cpu_to_be32(hdr[i]); + hdr[i] = dw; + } + for (i = 0; i < dcnt; i++) { + dw = (__force u32) cpu_to_be32(madpayload_start[i]); + madpayload_start[i] = dw; + dw = (__force u32) cpu_to_be32(madpayload_done[i]); + madpayload_done[i] = dw; + } + swapped = 1; + } + + data = which ? madpayload_done : madpayload_start; + + autoneg_7322_sendpkt(ppd, hdr, dcnt, data); + qib_read_kreg64(dd, kr_scratch); + udelay(2); + autoneg_7322_sendpkt(ppd, hdr, dcnt, data); + qib_read_kreg64(dd, kr_scratch); + udelay(2); +} + +/* + * Do the absolute minimum to cause an IB speed change, and make it + * ready, but don't actually trigger the change. The caller will + * do that when ready (if link is in Polling training state, it will + * happen immediately, otherwise when link next goes down) + * + * This routine should only be used as part of the DDR autonegotiation + * code for devices that are not compliant with IB 1.2 (or code that + * fixes things up for same). + * + * When link has gone down and autoneg is enabled, or autoneg has + * failed and we give up until the next time we set both speeds, + * we then want IBTA enabled as well as "use max enabled speed". + */ +static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) +{ + u64 newctrlb; + newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | + IBA7322_IBC_IBTA_1_2_MASK | + IBA7322_IBC_MAX_SPEED_MASK); + + if (speed & (speed - 1)) /* multiple speeds */ + newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) | + IBA7322_IBC_IBTA_1_2_MASK | + IBA7322_IBC_MAX_SPEED_MASK; + else + newctrlb |= speed == QIB_IB_QDR ? + IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK : + ((speed == QIB_IB_DDR ? + IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR)); + + if (newctrlb == ppd->cpspec->ibcctrl_b) + return; + + ppd->cpspec->ibcctrl_b = newctrlb; + qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); + qib_write_kreg(ppd->dd, kr_scratch, 0); +} + +/* + * This routine is only used when we are not talking to another + * IB 1.2-compliant device that we think can do DDR. + * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT + */ +static void try_7322_autoneg(struct qib_pportdata *ppd) +{ + unsigned long flags; + + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags |= QIBL_IB_AUTONEG_INPROG; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + qib_autoneg_7322_send(ppd, 0); + set_7322_ibspeed_fast(ppd, QIB_IB_DDR); + qib_7322_mini_pcs_reset(ppd); + /* 2 msec is minimum length of a poll cycle */ + schedule_delayed_work(&ppd->cpspec->autoneg_work, + msecs_to_jiffies(2)); +} + +/* + * Handle the empirically determined mechanism for auto-negotiation + * of DDR speed with switches. + */ +static void autoneg_7322_work(struct work_struct *work) +{ + struct qib_pportdata *ppd; + struct qib_devdata *dd; + u64 startms; + u32 i; + unsigned long flags; + + ppd = container_of(work, struct qib_chippport_specific, + autoneg_work.work)->ppd; + dd = ppd->dd; + + startms = jiffies_to_msecs(jiffies); + + /* + * Busy wait for this first part, it should be at most a + * few hundred usec, since we scheduled ourselves for 2msec. + */ + for (i = 0; i < 25; i++) { + if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState) + == IB_7322_LT_STATE_POLLQUIET) { + qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE); + break; + } + udelay(100); + } + + if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) + goto done; /* we got there early or told to stop */ + + /* we expect this to timeout */ + if (wait_event_timeout(ppd->cpspec->autoneg_wait, + !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), + msecs_to_jiffies(90))) + goto done; + qib_7322_mini_pcs_reset(ppd); + + /* we expect this to timeout */ + if (wait_event_timeout(ppd->cpspec->autoneg_wait, + !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), + msecs_to_jiffies(1700))) + goto done; + qib_7322_mini_pcs_reset(ppd); + + set_7322_ibspeed_fast(ppd, QIB_IB_SDR); + + /* + * Wait up to 250 msec for link to train and get to INIT at DDR; + * this should terminate early. + */ + wait_event_timeout(ppd->cpspec->autoneg_wait, + !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), + msecs_to_jiffies(250)); +done: + if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) { + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; + if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) { + ppd->lflags |= QIBL_IB_AUTONEG_FAILED; + ppd->cpspec->autoneg_tries = 0; + } + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); + } +} + +/* + * This routine is used to request IPG set in the QLogic switch. + * Only called if r1. 
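+ * As a worked example of the backoff below: delay = 2 << ipg_tries, so + * successive retries are scheduled 2, 4, 8, ... msec out.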
+ */ +static void try_7322_ipg(struct qib_pportdata *ppd) +{ + struct qib_ibport *ibp = &ppd->ibport_data; + struct ib_mad_send_buf *send_buf; + struct ib_mad_agent *agent; + struct ib_smp *smp; + unsigned delay; + int ret; + + agent = ibp->send_agent; + if (!agent) + goto retry; + + send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, + IB_MGMT_MAD_DATA, GFP_ATOMIC); + if (IS_ERR(send_buf)) + goto retry; + + if (!ibp->smi_ah) { + struct ib_ah_attr attr; + struct ib_ah *ah; + + memset(&attr, 0, sizeof attr); + attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE); + attr.port_num = ppd->port; + ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); + if (IS_ERR(ah)) + ret = -EINVAL; + else { + send_buf->ah = ah; + ibp->smi_ah = to_iah(ah); + ret = 0; + } + } else { + send_buf->ah = &ibp->smi_ah->ibah; + ret = 0; + } + + smp = send_buf->mad; + smp->base_version = IB_MGMT_BASE_VERSION; + smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE; + smp->class_version = 1; + smp->method = IB_MGMT_METHOD_SEND; + smp->hop_cnt = 1; + smp->attr_id = QIB_VENDOR_IPG; + smp->attr_mod = 0; + + if (!ret) + ret = ib_post_send_mad(send_buf, NULL); + if (ret) + ib_free_send_mad(send_buf); +retry: + delay = 2 << ppd->cpspec->ipg_tries; + schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay)); +} + +/* + * Timeout handler for setting IPG. + * Only called if r1. + */ +static void ipg_7322_work(struct work_struct *work) +{ + struct qib_pportdata *ppd; + + ppd = container_of(work, struct qib_chippport_specific, + ipg_work.work)->ppd; + if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE)) + && ++ppd->cpspec->ipg_tries <= 10) + try_7322_ipg(ppd); +} + +static u32 qib_7322_iblink_state(u64 ibcs) +{ + u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState); + + switch (state) { + case IB_7322_L_STATE_INIT: + state = IB_PORT_INIT; + break; + case IB_7322_L_STATE_ARM: + state = IB_PORT_ARMED; + break; + case IB_7322_L_STATE_ACTIVE: + /* fall through */ + case IB_7322_L_STATE_ACT_DEFER: + state = IB_PORT_ACTIVE; + break; + default: /* fall through */ + case IB_7322_L_STATE_DOWN: + state = IB_PORT_DOWN; + break; + } + return state; +} + +/* returns the IBTA port state, rather than the IBC link training state */ +static u8 qib_7322_phys_portstate(u64 ibcs) +{ + u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState); + return qib_7322_physportstate[state]; +} + +static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) +{ + int ret = 0, symadj = 0; + unsigned long flags; + int mult; + + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + + /* Update our picture of width and speed from chip */ + if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) { + ppd->link_speed_active = QIB_IB_QDR; + mult = 4; + } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) { + ppd->link_speed_active = QIB_IB_DDR; + mult = 2; + } else { + ppd->link_speed_active = QIB_IB_SDR; + mult = 1; + } + if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) { + ppd->link_width_active = IB_WIDTH_4X; + mult *= 4; + } else + ppd->link_width_active = IB_WIDTH_1X; + ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)]; + + if (!ibup) { + u64 clr; + + /* Link went down. 
*/ + /* do IPG MAD again after linkdown, even if last time failed */ + ppd->cpspec->ipg_tries = 0; + clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) & + (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) | + SYM_MASK(IBCStatusB_0, heartbeat_crosstalk)); + if (clr) + qib_write_kreg_port(ppd, krp_ibcstatus_b, clr); + if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | + QIBL_IB_AUTONEG_INPROG))) + set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); + if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { + /* unlock the Tx settings, speed may change */ + qib_write_kreg_port(ppd, krp_tx_deemph_override, + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + reset_tx_deemphasis_override)); + qib_cancel_sends(ppd); + /* on link down, ensure sane pcs state */ + qib_7322_mini_pcs_reset(ppd); + spin_lock_irqsave(&ppd->sdma_lock, flags); + if (__qib_sdma_running(ppd)) + __qib_sdma_process_event(ppd, + qib_sdma_event_e70_go_idle); + spin_unlock_irqrestore(&ppd->sdma_lock, flags); + } + clr = read_7322_creg32_port(ppd, crp_iblinkdown); + if (clr == ppd->cpspec->iblnkdownsnap) + ppd->cpspec->iblnkdowndelta++; + } else { + if (qib_compat_ddr_negotiate && + !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | + QIBL_IB_AUTONEG_INPROG)) && + ppd->link_speed_active == QIB_IB_SDR && + (ppd->link_speed_enabled & QIB_IB_DDR) + && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) { + /* we are SDR, and auto-negotiation enabled */ + ++ppd->cpspec->autoneg_tries; + if (!ppd->cpspec->ibdeltainprog) { + ppd->cpspec->ibdeltainprog = 1; + ppd->cpspec->ibsymdelta += + read_7322_creg32_port(ppd, + crp_ibsymbolerr) - + ppd->cpspec->ibsymsnap; + ppd->cpspec->iblnkerrdelta += + read_7322_creg32_port(ppd, + crp_iblinkerrrecov) - + ppd->cpspec->iblnkerrsnap; + } + try_7322_autoneg(ppd); + ret = 1; /* no other IB status change processing */ + } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && + ppd->link_speed_active == QIB_IB_SDR) { + qib_autoneg_7322_send(ppd, 1); + set_7322_ibspeed_fast(ppd, QIB_IB_DDR); + qib_7322_mini_pcs_reset(ppd); + udelay(2); + ret = 1; /* no other IB status change processing */ + } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && + (ppd->link_speed_active & QIB_IB_DDR)) { + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG | + QIBL_IB_AUTONEG_FAILED); + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + ppd->cpspec->autoneg_tries = 0; + /* re-enable SDR, for next link down */ + set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); + wake_up(&ppd->cpspec->autoneg_wait); + symadj = 1; + } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) { + /* + * Clear autoneg failure flag, and do setup + * so we'll try next time link goes down and + * back to INIT (possibly connected to a + * different device). 
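+ * Re-enabling IBTA 1.2 negotiation in ibcctrl_b below is what makes + * that retry possible.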
+ */ + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK; + symadj = 1; + } + if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { + symadj = 1; + if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10) + try_7322_ipg(ppd); + if (!ppd->cpspec->recovery_init) + setup_7322_link_recovery(ppd, 0); + ppd->cpspec->qdr_dfe_time = jiffies + + msecs_to_jiffies(QDR_DFE_DISABLE_DELAY); + } + ppd->cpspec->ibmalfusesnap = 0; + ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd, + crp_errlink); + } + if (symadj) { + ppd->cpspec->iblnkdownsnap = + read_7322_creg32_port(ppd, crp_iblinkdown); + if (ppd->cpspec->ibdeltainprog) { + ppd->cpspec->ibdeltainprog = 0; + ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd, + crp_ibsymbolerr) - ppd->cpspec->ibsymsnap; + ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd, + crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap; + } + } else if (!ibup && qib_compat_ddr_negotiate && + !ppd->cpspec->ibdeltainprog && + !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { + ppd->cpspec->ibdeltainprog = 1; + ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, + crp_ibsymbolerr); + ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd, + crp_iblinkerrrecov); + } + + if (!ret) + qib_setup_7322_setextled(ppd, ibup); + return ret; +} + +/* + * Does read/modify/write to appropriate registers to + * set output and direction bits selected by mask. + * These are in their canonical positions (e.g. lsb of + * dir will end up in D48 of extctrl on existing chips). + * Returns contents of GP Inputs. + */ +static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) +{ + u64 read_val, new_out; + unsigned long flags; + + if (mask) { + /* some bits being written, lock access to GPIO */ + dir &= mask; + out &= mask; + spin_lock_irqsave(&dd->cspec->gpio_lock, flags); + dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); + dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); + new_out = (dd->cspec->gpio_out & ~mask) | out; + + qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); + qib_write_kreg(dd, kr_gpio_out, new_out); + dd->cspec->gpio_out = new_out; + spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); + } + /* + * It is unlikely that a read at this time would get valid + * data on a pin whose direction line was set in the same + * call to this function. We include the read here because + * that allows us to potentially combine a change on one pin with + * a read on another, and because the old code did something like + * this. + */ + read_val = qib_read_kreg64(dd, kr_extstatus); + return SYM_FIELD(read_val, EXTStatus, GPIOIn); +} + +/* Enable writes to config EEPROM, if possible. Returns previous state */ +static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen) +{ + int prev_wen; + u32 mask; + + mask = 1 << QIB_EEPROM_WEN_NUM; + prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM; + gpio_7322_mod(dd, wen ? 0 : mask, mask, mask); + + return prev_wen & 1; +} + +/* + * Read fundamental info we need to use the chip. These are + * the registers that describe chip capabilities, and are + * saved in shadow registers.
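+ * (page alignment, PIO buffer counts and sizes, TID counts, and the + * various buffer base addresses)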
+ */ +static void get_7322_chip_params(struct qib_devdata *dd) +{ + u64 val; + u32 piobufs; + int mtu; + + dd->palign = qib_read_kreg32(dd, kr_pagealign); + + dd->uregbase = qib_read_kreg32(dd, kr_userregbase); + + dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); + dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); + dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); + dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); + dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; + + val = qib_read_kreg64(dd, kr_sendpiobufcnt); + dd->piobcnt2k = val & ~0U; + dd->piobcnt4k = val >> 32; + val = qib_read_kreg64(dd, kr_sendpiosize); + dd->piosize2k = val & ~0U; + dd->piosize4k = val >> 32; + + mtu = ib_mtu_enum_to_int(qib_ibmtu); + if (mtu == -1) + mtu = QIB_DEFAULT_MTU; + dd->pport[0].ibmtu = (u32)mtu; + dd->pport[1].ibmtu = (u32)mtu; + + /* these may be adjusted in init_chip_wc_pat() */ + dd->pio2kbase = (u32 __iomem *) + ((char __iomem *) dd->kregbase + dd->pio2k_bufbase); + dd->pio4kbase = (u32 __iomem *) + ((char __iomem *) dd->kregbase + + (dd->piobufbase >> 32)); + /* + * 4K buffers take 2 pages; we use roundup just to be + * paranoid; we calculate it once here, rather than on + * every buf allocate + */ + dd->align4k = ALIGN(dd->piosize4k, dd->palign); + + piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS; + + dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / + (sizeof(u64) * BITS_PER_BYTE / 2); +} + +/* + * The chip base addresses in cspec and cpspec have to be set + * after possible init_chip_wc_pat(), rather than in + * get_7322_chip_params(), so this is split out as a separate function. + */ +static void qib_7322_set_baseaddrs(struct qib_devdata *dd) +{ + u32 cregbase; + cregbase = qib_read_kreg32(dd, kr_counterregbase); + + dd->cspec->cregbase = (u64 __iomem *)(cregbase + + (char __iomem *)dd->kregbase); + + dd->egrtidbase = (u64 __iomem *) + ((char __iomem *) dd->kregbase + dd->rcvegrbase); + + /* port registers are defined as relative to base of chip */ + dd->pport[0].cpspec->kpregbase = + (u64 __iomem *)((char __iomem *)dd->kregbase); + dd->pport[1].cpspec->kpregbase = + (u64 __iomem *)(dd->palign + + (char __iomem *)dd->kregbase); + dd->pport[0].cpspec->cpregbase = + (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0], + kr_counterregbase) + (char __iomem *)dd->kregbase); + dd->pport[1].cpspec->cpregbase = + (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1], + kr_counterregbase) + (char __iomem *)dd->kregbase); +} + +/* + * This is a fairly special-purpose observer, so we only support + * the port-specific parts of SendCtrl + */ + +#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \ + SYM_MASK(SendCtrl_0, SDmaEnable) | \ + SYM_MASK(SendCtrl_0, SDmaIntEnable) | \ + SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \ + SYM_MASK(SendCtrl_0, SDmaHalt) | \ + SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \ + SYM_MASK(SendCtrl_0, ForceCreditUpToDate)) + +static int sendctrl_hook(struct qib_devdata *dd, + const struct diag_observer *op, u32 offs, + u64 *data, u64 mask, int only_32) +{ + unsigned long flags; + unsigned idx; + unsigned pidx; + struct qib_pportdata *ppd = NULL; + u64 local_data, all_bits; + + /* + * The fixed correspondence between Physical ports and pports is + * severed. We need to hunt for the ppd that corresponds + * to the offset we got. And we have to do that without admitting + * we know the stride, apparently.
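+ * The loop below recomputes each port's SendCtrl offset from its + * kpregbase and compares it with the offset we were handed.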
+ */ + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + u64 __iomem *psptr; + u32 psoffs; + + ppd = dd->pport + pidx; + if (!ppd->cpspec->kpregbase) + continue; + + psptr = ppd->cpspec->kpregbase + krp_sendctrl; + psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr); + if (psoffs == offs) + break; + } + + /* If pport is not being managed by driver, just avoid shadows. */ + if (pidx >= dd->num_pports) + ppd = NULL; + + /* In any case, "idx" is flat index in kreg space */ + idx = offs / sizeof(u64); + + all_bits = ~0ULL; + if (only_32) + all_bits >>= 32; + + spin_lock_irqsave(&dd->sendctrl_lock, flags); + if (!ppd || (mask & all_bits) != all_bits) { + /* + * At least some mask bits are zero, so we need + * to read. The judgement call is whether from + * reg or shadow. First-cut: read reg, and complain + * if any bits which should be shadowed are different + * from their shadowed value. + */ + if (only_32) + local_data = (u64)qib_read_kreg32(dd, idx); + else + local_data = qib_read_kreg64(dd, idx); + *data = (local_data & ~mask) | (*data & mask); + } + if (mask) { + /* + * At least some mask bits are one, so we need + * to write, but only shadow some bits. + */ + u64 sval, tval; /* Shadowed, transient */ + + /* + * New shadow val is bits we don't want to touch, + * ORed with bits we do, that are intended for shadow. + */ + if (ppd) { + sval = ppd->p_sendctrl & ~mask; + sval |= *data & SENDCTRL_SHADOWED & mask; + ppd->p_sendctrl = sval; + } else + sval = *data & SENDCTRL_SHADOWED & mask; + tval = sval | (*data & ~SENDCTRL_SHADOWED & mask); + qib_write_kreg(dd, idx, tval); + qib_write_kreg(dd, kr_scratch, 0Ull); + } + spin_unlock_irqrestore(&dd->sendctrl_lock, flags); + return only_32 ? 4 : 8; +} + +static const struct diag_observer sendctrl_0_observer = { + sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64), + KREG_IDX(SendCtrl_0) * sizeof(u64) +}; + +static const struct diag_observer sendctrl_1_observer = { + sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64), + KREG_IDX(SendCtrl_1) * sizeof(u64) +}; + +static ushort sdma_fetch_prio = 8; +module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO); +MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority"); + +/* Besides logging QSFP events, we set appropriate TxDDS values */ +static void init_txdds_table(struct qib_pportdata *ppd, int override); + +static void qsfp_7322_event(struct work_struct *work) +{ + struct qib_qsfp_data *qd; + struct qib_pportdata *ppd; + u64 pwrup; + int ret; + u32 le2; + + qd = container_of(work, struct qib_qsfp_data, work); + ppd = qd->ppd; + pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC); + + /* + * Some QSFP's not only do not respond until the full power-up + * time, but may behave badly if we try. So hold off responding + * to insertion. + */ + while (1) { + u64 now = get_jiffies_64(); + if (time_after64(now, pwrup)) + break; + msleep(1); + } + ret = qib_refresh_qsfp_cache(ppd, &qd->cache); + /* + * Need to change LE2 back to defaults if we couldn't + * read the cable type (to handle cable swaps), so do this + * even on failure to read cable information. We don't + * get here for QME, so IS_QME check not needed here. + */ + le2 = (!ret && qd->cache.atten[1] >= qib_long_atten && + !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ? + LE2_5m : LE2_DEFAULT; + ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7)); + init_txdds_table(ppd, 0); +} + +/* + * There is little we can do but complain to the user if QSFP + * initialization fails. 
+ */ +static void qib_init_7322_qsfp(struct qib_pportdata *ppd) +{ + unsigned long flags; + struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data; + struct qib_devdata *dd = ppd->dd; + u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N; + + mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); + qd->ppd = ppd; + qib_qsfp_init(qd, qsfp_7322_event); + spin_lock_irqsave(&dd->cspec->gpio_lock, flags); + dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert)); + dd->cspec->gpio_mask |= mod_prs_bit; + qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); + qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); + spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); +} + +/* + * called at device initialization time, and also if the txselect + * module parameter is changed. This is used for cables that don't + * have valid QSFP EEPROMs (not present, or attenuation is zero). + * We initialize to the default, then if there is a specific + * unit,port match, we use that (and set it immediately, for the + * current speed, if the link is at INIT or better). + * String format is "default# unit#,port#=# ... u,p=#", separators must + * be a SPACE character. A newline terminates. The u,p=# tuples may + * optionally have "u,p=#,#", where the final # is the H1 value + * The last specific match is used (actually, all are used, but last + * one is the one that winds up set); if none at all, fall back on default. + */ +static void set_no_qsfp_atten(struct qib_devdata *dd, int change) +{ + char *nxt, *str; + u32 pidx, unit, port, deflt, h1; + unsigned long val; + int any = 0, seth1; + + str = txselect_list; + + /* default number is validated in setup_txselect() */ + deflt = simple_strtoul(str, &nxt, 0); + for (pidx = 0; pidx < dd->num_pports; ++pidx) + dd->pport[pidx].cpspec->no_eep = deflt; + + while (*nxt && nxt[1]) { + str = ++nxt; + unit = simple_strtoul(str, &nxt, 0); + if (nxt == str || !*nxt || *nxt != ',') { + while (*nxt && *nxt++ != ' ') /* skip to next, if any */ + ; + continue; + } + str = ++nxt; + port = simple_strtoul(str, &nxt, 0); + if (nxt == str || *nxt != '=') { + while (*nxt && *nxt++ != ' ') /* skip to next, if any */ + ; + continue; + } + str = ++nxt; + val = simple_strtoul(str, &nxt, 0); + if (nxt == str) { + while (*nxt && *nxt++ != ' ') /* skip to next, if any */ + ; + continue; + } + if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ) + continue; + seth1 = 0; + h1 = 0; /* gcc thinks it might be used uninitted */ + if (*nxt == ',' && nxt[1]) { + str = ++nxt; + h1 = (u32)simple_strtoul(str, &nxt, 0); + if (nxt == str) + while (*nxt && *nxt++ != ' ') /* skip */ + ; + else + seth1 = 1; + } + for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; + ++pidx) { + struct qib_pportdata *ppd = &dd->pport[pidx]; + + if (ppd->port != port || !ppd->link_speed_supported) + continue; + ppd->cpspec->no_eep = val; + /* now change the IBC and serdes, overriding generic */ + init_txdds_table(ppd, 1); + any++; + } + if (*nxt == '\n') + break; /* done */ + } + if (change && !any) { + /* no specific setting, use the default. + * Change the IBC and serdes, but since it's + * general, don't override specific settings. 
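+ * (e.g. a txselect string of just "5" -- a hypothetical value -- takes + * this path for every port)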
+ */ + for (pidx = 0; pidx < dd->num_pports; ++pidx) + if (dd->pport[pidx].link_speed_supported) + init_txdds_table(&dd->pport[pidx], 0); +} + +/* handle the txselect parameter changing */ +static int setup_txselect(const char *str, struct kernel_param *kp) +{ + struct qib_devdata *dd; + unsigned long val; + char *n; + if (strlen(str) >= MAX_ATTEN_LEN) { + printk(KERN_INFO QIB_DRV_NAME " txselect_values string " + "too long\n"); + return -ENOSPC; + } + val = simple_strtoul(str, &n, 0); + if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { + printk(KERN_INFO QIB_DRV_NAME + " txselect_values must start with a number < %d\n", + TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); + return -EINVAL; + } + strcpy(txselect_list, str); + + list_for_each_entry(dd, &qib_dev_list, list) + if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) + set_no_qsfp_atten(dd, 1); + return 0; +} + +/* + * Write the final few registers that depend on some of the + * init setup. Done late in init, just before bringing up + * the serdes. + */ +static int qib_late_7322_initreg(struct qib_devdata *dd) +{ + int ret = 0, n; + u64 val; + + qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); + qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); + qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); + qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); + val = qib_read_kreg64(dd, kr_sendpioavailaddr); + if (val != dd->pioavailregs_phys) { + qib_dev_err(dd, "Catastrophic software error, " + "SendPIOAvailAddr written as %lx, " + "read back as %llx\n", + (unsigned long) dd->pioavailregs_phys, + (unsigned long long) val); + ret = -EINVAL; + } + + n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; + qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL); + /* driver sends get pkey, lid, etc. checking also, to catch bugs */ + qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL); + + qib_register_observer(dd, &sendctrl_0_observer); + qib_register_observer(dd, &sendctrl_1_observer); + + dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN; + qib_write_kreg(dd, kr_control, dd->control); + /* + * Set SendDmaFetchPriority and init Tx params, including + * QSFP handler on boards that have QSFP. + * First set our default attenuation entry for cables that + * don't have valid attenuation. + */ + set_no_qsfp_atten(dd, 0); + for (n = 0; n < dd->num_pports; ++n) { + struct qib_pportdata *ppd = dd->pport + n; + + qib_write_kreg_port(ppd, krp_senddmaprioritythld, + sdma_fetch_prio & 0xf); + /* Initialize qsfp if present on board. */ + if (dd->flags & QIB_HAS_QSFP) + qib_init_7322_qsfp(ppd); + } + dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN; + qib_write_kreg(dd, kr_control, dd->control); + + return ret; +} + +/* per IB port errors. */ +#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \ + MASK_ACROSS(8, 15)) +#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41)) +#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \ + MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \ + MASK_ACROSS(0, 11)) + +/* + * Write the initialization per-port registers that need to be done at + * driver load and after reset completes (i.e., that aren't done as part + * of other init procedures called from qib_init.c). + * Some of these should be redundant on reset, but play safe.
+ */ +static void write_7322_init_portregs(struct qib_pportdata *ppd) +{ + u64 val; + int i; + + if (!ppd->link_speed_supported) { + /* no buffer credits for this port */ + for (i = 1; i < 8; i++) + qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0); + qib_write_kreg_port(ppd, krp_ibcctrl_b, 0); + qib_write_kreg(ppd->dd, kr_scratch, 0); + return; + } + + /* + * Set the number of supported virtual lanes in IBC, + * for flow control packet handling on unsupported VLs + */ + val = qib_read_kreg_port(ppd, krp_ibsdtestiftx); + val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP); + val |= (u64)(ppd->vls_supported - 1) << + SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP); + qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); + + qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP); + + /* enable tx header checking */ + qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY | + IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID | + IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ); + + qib_write_kreg_port(ppd, krp_ncmodectrl, + SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal)); + + /* + * Unconditionally clear the bufmask bits. If SDMA is + * enabled, we'll set them appropriately later. + */ + qib_write_kreg_port(ppd, krp_senddmabufmask0, 0); + qib_write_kreg_port(ppd, krp_senddmabufmask1, 0); + qib_write_kreg_port(ppd, krp_senddmabufmask2, 0); + if (ppd->dd->cspec->r1) + ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate); +} + +/* + * Write the initialization per-device registers that need to be done at + * driver load and after reset completes (i.e., that aren't done as part + * of other init procedures called from qib_init.c). Also write per-port + * registers that are affected by overall device config, such as QP mapping. + * Some of these should be redundant on reset, but play safe. + */ +static void write_7322_initregs(struct qib_devdata *dd) +{ + struct qib_pportdata *ppd; + int i, pidx; + u64 val; + + /* Set Multicast QPs received by port 2 to map to context one. */ + qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1); + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + unsigned n, regno; + unsigned long flags; + + if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported) + continue; + + ppd = &dd->pport[pidx]; + + /* be paranoid against later code motion, etc. */ + spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); + ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable); + spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); + + /* Initialize QP to context mapping */ + regno = krp_rcvqpmaptable; + val = 0; + if (dd->num_pports > 1) + n = dd->first_user_ctxt / dd->num_pports; + else + n = dd->first_user_ctxt - 1; + for (i = 0; i < 32; ) { + unsigned ctxt; + + if (dd->num_pports > 1) + ctxt = (i % n) * dd->num_pports + pidx; + else if (i % n) + ctxt = (i % n) + 1; + else + ctxt = ppd->hw_pidx; + val |= ctxt << (5 * (i % 6)); + i++; + if (i % 6 == 0) { + qib_write_kreg_port(ppd, regno, val); + val = 0; + regno++; + } + } + qib_write_kreg_port(ppd, regno, val); + } + + /* + * Set up interrupt mitigation for kernel contexts, but + * not user contexts (user contexts use interrupts when + * stalled waiting for any packet, so want those interrupts + * right away). + */ + for (i = 0; i < dd->first_user_ctxt; i++) { + dd->cspec->rcvavail_timeout[i] = rcv_int_timeout; + qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout); + } + + /* + * Initialize as (disabled) rcvflow tables. Application code + * will set up each flow as it uses the flow.
+ * Doesn't clear any of the error bits that might be set. + */ + val = TIDFLOW_ERRBITS; /* these are W1C */ + for (i = 0; i < dd->ctxtcnt; i++) { + int flow; + for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) + qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); + } + + /* + * dual cards init to dual port recovery, single port cards to + * the one port. Dual port cards may later adjust to 1 port, + * and then back to dual port if both ports are connected + * */ + if (dd->num_pports) + setup_7322_link_recovery(dd->pport, dd->num_pports > 1); +} + +static int qib_init_7322_variables(struct qib_devdata *dd) +{ + struct qib_pportdata *ppd; + unsigned features, pidx, sbufcnt; + int ret, mtu; + u32 sbufs, updthresh; + + /* pport structs are contiguous, allocated after devdata */ + ppd = (struct qib_pportdata *)(dd + 1); + dd->pport = ppd; + ppd[0].dd = dd; + ppd[1].dd = dd; + + dd->cspec = (struct qib_chip_specific *)(ppd + 2); + + ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1); + ppd[1].cpspec = &ppd[0].cpspec[1]; + ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */ + ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */ + + spin_lock_init(&dd->cspec->rcvmod_lock); + spin_lock_init(&dd->cspec->gpio_lock); + + /* we haven't yet set QIB_PRESENT, so use read directly */ + dd->revision = readq(&dd->kregbase[kr_revision]); + + if ((dd->revision & 0xffffffffU) == 0xffffffffU) { + qib_dev_err(dd, "Revision register read failure, " + "giving up initialization\n"); + ret = -ENODEV; + goto bail; + } + dd->flags |= QIB_PRESENT; /* now register routines work */ + + dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor); + dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor); + dd->cspec->r1 = dd->minrev == 1; + + get_7322_chip_params(dd); + features = qib_7322_boardname(dd); + + /* now that piobcnt2k and 4k set, we can allocate these */ + sbufcnt = dd->piobcnt2k + dd->piobcnt4k + + NUM_VL15_BUFS + BITS_PER_LONG - 1; + sbufcnt /= BITS_PER_LONG; + dd->cspec->sendchkenable = kmalloc(sbufcnt * + sizeof(*dd->cspec->sendchkenable), GFP_KERNEL); + dd->cspec->sendgrhchk = kmalloc(sbufcnt * + sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL); + dd->cspec->sendibchk = kmalloc(sbufcnt * + sizeof(*dd->cspec->sendibchk), GFP_KERNEL); + if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk || + !dd->cspec->sendibchk) { + qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n"); + ret = -ENOMEM; + goto bail; + } + + ppd = dd->pport; + + /* + * GPIO bits for TWSI data and clock, + * used for serial EEPROM. + */ + dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; + dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; + dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; + + dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | + QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP | + QIB_HAS_THRESH_UPDATE | + (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0); + dd->flags |= qib_special_trigger ? + QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA; + + /* + * Setup initial values. These may change when PAT is enabled, but + * we need these to do initial chip register accesses. 
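+ * (qib_7322_set_baseaddrs() is called a second time further down, + * after a possible init_chip_wc_pat().)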
+ */
+ qib_7322_set_baseaddrs(dd);
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+
+ dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
+ /* all hwerrors become interrupts, unless special purposed */
+ dd->cspec->hwerrmask = ~0ULL;
+ /*
+ * link_recovery setup causes these errors, so ignore them,
+ * other than clearing them when they occur.
+ */
+ dd->cspec->hwerrmask &=
+ ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
+ SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
+ HWE_MASK(LATriggered));
+
+ for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
+ struct qib_chippport_specific *cp = ppd->cpspec;
+ ppd->link_speed_supported = features & PORT_SPD_CAP;
+ features >>= PORT_SPD_CAP_SHIFT;
+ if (!ppd->link_speed_supported) {
+ /* single port mode (7340, or configured) */
+ dd->skip_kctxt_mask |= 1 << pidx;
+ if (pidx == 0) {
+ /* Make sure port is disabled. */
+ qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+ ppd[0] = ppd[1];
+ dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+ IBSerdesPClkNotDetectMask_0)
+ | SYM_MASK(HwErrMask,
+ SDmaMemReadErrMask_0));
+ dd->cspec->int_enable_mask &= ~(
+ SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
+ SYM_MASK(IntMask, SDmaIdleIntMask_0) |
+ SYM_MASK(IntMask, SDmaProgressIntMask_0) |
+ SYM_MASK(IntMask, SDmaIntMask_0) |
+ SYM_MASK(IntMask, ErrIntMask_0) |
+ SYM_MASK(IntMask, SendDoneIntMask_0));
+ } else {
+ /* Make sure port is disabled. */
+ qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+ dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+ IBSerdesPClkNotDetectMask_1)
+ | SYM_MASK(HwErrMask,
+ SDmaMemReadErrMask_1));
+ dd->cspec->int_enable_mask &= ~(
+ SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
+ SYM_MASK(IntMask, SDmaIdleIntMask_1) |
+ SYM_MASK(IntMask, SDmaProgressIntMask_1) |
+ SYM_MASK(IntMask, SDmaIntMask_1) |
+ SYM_MASK(IntMask, ErrIntMask_1) |
+ SYM_MASK(IntMask, SendDoneIntMask_1));
+ }
+ continue;
+ }
+
+ dd->num_pports++;
+ qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
+
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_width_enabled = IB_WIDTH_4X;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /*
+ * Set the initial values to reasonable defaults; they will
+ * be set for real when the link is up.
+ */
+ ppd->link_width_active = IB_WIDTH_4X;
+ ppd->link_speed_active = QIB_IB_SDR;
+ ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
+ switch (qib_num_cfg_vls) {
+ case 1:
+ ppd->vls_supported = IB_VL_VL0;
+ break;
+ case 2:
+ ppd->vls_supported = IB_VL_VL0_1;
+ break;
+ default:
+ qib_devinfo(dd->pcidev,
+ "Invalid num_vls %u, using 4 VLs\n",
+ qib_num_cfg_vls);
+ qib_num_cfg_vls = 4;
+ /* fall through */
+ case 4:
+ ppd->vls_supported = IB_VL_VL0_3;
+ break;
+ case 8:
+ if (mtu <= 2048)
+ ppd->vls_supported = IB_VL_VL0_7;
+ else {
+ qib_devinfo(dd->pcidev,
+ "Invalid num_vls %u for MTU %d, "
+ "using 4 VLs\n",
+ qib_num_cfg_vls, mtu);
+ ppd->vls_supported = IB_VL_VL0_3;
+ qib_num_cfg_vls = 4;
+ }
+ break;
+ }
+ ppd->vls_operational = ppd->vls_supported;
+
+ init_waitqueue_head(&cp->autoneg_wait);
+ INIT_DELAYED_WORK(&cp->autoneg_work,
+ autoneg_7322_work);
+ if (ppd->dd->cspec->r1)
+ INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
+
+ /*
+ * For Mez and similar cards, no QSFP info, so do
+ * the "cable info" setup here. Can be overridden
+ * in adapter-specific routines.
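+ * (Defaults applied just below, as a sketch: QMH-class boards get
+ * h1_val = H1_FORCE_QMH and txdds index TXDDS_TABLE_SZ + 2; QME-class
+ * boards get H1_FORCE_QME and TXDDS_TABLE_SZ + 4, i.e. entries in
+ * the "extra" serdes tables rather than the attenuation tables.)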
+ */ + if (!(ppd->dd->flags & QIB_HAS_QSFP)) { + if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd)) + qib_devinfo(ppd->dd->pcidev, "IB%u:%u: " + "Unknown mezzanine card type\n", + dd->unit, ppd->port); + cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; + /* + * Choose center value as default tx serdes setting + * until changed through module parameter. + */ + ppd->cpspec->no_eep = IS_QMH(dd) ? + TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4; + } else + cp->h1_val = H1_FORCE_VAL; + + /* Avoid writes to chip for mini_init */ + if (!qib_mini_init) + write_7322_init_portregs(ppd); + + init_timer(&cp->chase_timer); + cp->chase_timer.function = reenable_chase; + cp->chase_timer.data = (unsigned long)ppd; + + ppd++; + } + + dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; + dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; + dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); + + /* we always allocate at least 2048 bytes for eager buffers */ + dd->rcvegrbufsize = max(mtu, 2048); + + qib_7322_tidtemplate(dd); + + /* + * We can request a receive interrupt for 1 or + * more packets from current offset. + */ + dd->rhdrhead_intr_off = + (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT; + + /* setup the stats timer; the add_timer is done at end of init */ + init_timer(&dd->stats_timer); + dd->stats_timer.function = qib_get_7322_faststats; + dd->stats_timer.data = (unsigned long) dd; + + dd->ureg_align = 0x10000; /* 64KB alignment */ + + dd->piosize2kmax_dwords = dd->piosize2k >> 2; + + qib_7322_config_ctxts(dd); + qib_set_ctxtcnt(dd); + + if (qib_wc_pat) { + ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k); + if (ret) + goto bail; + } + qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ + + ret = 0; + if (qib_mini_init) + goto bail; + if (!dd->num_pports) { + qib_dev_err(dd, "No ports enabled, giving up initialization\n"); + goto bail; /* no error, so can still figure out why err */ + } + + write_7322_initregs(dd); + ret = qib_create_ctxts(dd); + init_7322_cntrnames(dd); + + updthresh = 8U; /* update threshold */ + + /* use all of 4KB buffers for the kernel SDMA, zero if !SDMA. + * reserve the update threshold amount for other kernel use, such + * as sending SMI, MAD, and ACKs, or 3, whichever is greater, + * unless we aren't enabling SDMA, in which case we want to use + * all the 4k bufs for the kernel. + * if this was less than the update threshold, we could wait + * a long time for an update. Coded this way because we + * sometimes change the update threshold for various reasons, + * and we want this to remain robust. + */ + if (dd->flags & QIB_HAS_SEND_DMA) { + dd->cspec->sdmabufcnt = dd->piobcnt4k; + sbufs = updthresh > 3 ? updthresh : 3; + } else { + dd->cspec->sdmabufcnt = 0; + sbufs = dd->piobcnt4k; + } + dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - + dd->cspec->sdmabufcnt; + dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; + dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ + dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? + dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; + + /* + * If we have 16 user contexts, we will have 7 sbufs + * per context, so reduce the update threshold to match. We + * want to update before we actually run out, at low pbufs/ctxt + * so give ourselves some margin. 
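+ * A rough worked example (numbers illustrative only): with
+ * pbufsctxt == 7, the check below yields updthresh = 7 - 2 = 5,
+ * i.e. an avail update is requested while 5 buffers still remain,
+ * instead of at the default threshold of 8.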
+ */ + if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh) + updthresh = dd->pbufsctxt - 2; + dd->cspec->updthresh_dflt = updthresh; + dd->cspec->updthresh = updthresh; + + /* before full enable, no interrupts, no locking needed */ + dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) + << SYM_LSB(SendCtrl, AvailUpdThld)) | + SYM_MASK(SendCtrl, SendBufAvailPad64Byte); + + dd->psxmitwait_supported = 1; + dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE; +bail: + if (!dd->ctxtcnt) + dd->ctxtcnt = 1; /* for other initialization code */ + + return ret; +} + +static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc, + u32 *pbufnum) +{ + u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK; + struct qib_devdata *dd = ppd->dd; + + /* last is same for 2k and 4k, because we use 4k if all 2k busy */ + if (pbc & PBC_7322_VL15_SEND) { + first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx; + last = first; + } else { + if ((plen + 1) > dd->piosize2kmax_dwords) + first = dd->piobcnt2k; + else + first = 0; + last = dd->cspec->lastbuf_for_pio; + } + return qib_getsendbuf_range(dd, pbufnum, first, last); +} + +static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv, + u32 start) +{ + qib_write_kreg_port(ppd, krp_psinterval, intv); + qib_write_kreg_port(ppd, krp_psstart, start); +} + +/* + * Must be called with sdma_lock held, or before init finished. + */ +static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) +{ + qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt); +} + +static struct sdma_set_state_action sdma_7322_action_table[] = { + [qib_sdma_state_s00_hw_down] = { + .go_s99_running_tofalse = 1, + .op_enable = 0, + .op_intenable = 0, + .op_halt = 0, + .op_drain = 0, + }, + [qib_sdma_state_s10_hw_start_up_wait] = { + .op_enable = 0, + .op_intenable = 1, + .op_halt = 1, + .op_drain = 0, + }, + [qib_sdma_state_s20_idle] = { + .op_enable = 1, + .op_intenable = 1, + .op_halt = 1, + .op_drain = 0, + }, + [qib_sdma_state_s30_sw_clean_up_wait] = { + .op_enable = 0, + .op_intenable = 1, + .op_halt = 1, + .op_drain = 0, + }, + [qib_sdma_state_s40_hw_clean_up_wait] = { + .op_enable = 1, + .op_intenable = 1, + .op_halt = 1, + .op_drain = 0, + }, + [qib_sdma_state_s50_hw_halt_wait] = { + .op_enable = 1, + .op_intenable = 1, + .op_halt = 1, + .op_drain = 1, + }, + [qib_sdma_state_s99_running] = { + .op_enable = 1, + .op_intenable = 1, + .op_halt = 0, + .op_drain = 0, + .go_s99_running_totrue = 1, + }, +}; + +static void qib_7322_sdma_init_early(struct qib_pportdata *ppd) +{ + ppd->sdma_state.set_state_action = sdma_7322_action_table; +} + +static int init_sdma_7322_regs(struct qib_pportdata *ppd) +{ + struct qib_devdata *dd = ppd->dd; + unsigned lastbuf, erstbuf; + u64 senddmabufmask[3] = { 0 }; + int n, ret = 0; + + qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys); + qib_sdma_7322_setlengen(ppd); + qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */ + qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt); + qib_write_kreg_port(ppd, krp_senddmadesccnt, 0); + qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys); + + if (dd->num_pports) + n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */ + else + n = dd->cspec->sdmabufcnt; /* failsafe for init */ + erstbuf = (dd->piobcnt2k + dd->piobcnt4k) - + ((dd->num_pports == 1 || ppd->port == 2) ? 
n :
+ dd->cspec->sdmabufcnt);
+ lastbuf = erstbuf + n;
+
+ ppd->sdma_state.first_sendbuf = erstbuf;
+ ppd->sdma_state.last_sendbuf = lastbuf;
+ for (; erstbuf < lastbuf; ++erstbuf) {
+ unsigned word = erstbuf / BITS_PER_LONG;
+ unsigned bit = erstbuf & (BITS_PER_LONG - 1);
+
+ BUG_ON(word >= 3);
+ senddmabufmask[word] |= 1ULL << bit;
+ }
+ qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
+ qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
+ qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
+ return ret;
+}
+
+/* sdma_lock must be held */
+static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int sane;
+ int use_dmahead;
+ u16 swhead;
+ u16 swtail;
+ u16 cnt;
+ u16 hwhead;
+
+ use_dmahead = __qib_sdma_running(ppd) &&
+ (dd->flags & QIB_HAS_SDMA_TIMEOUT);
+retry:
+ hwhead = use_dmahead ?
+ (u16) le64_to_cpu(*ppd->sdma_head_dma) :
+ (u16) qib_read_kreg_port(ppd, krp_senddmahead);
+
+ swhead = ppd->sdma_descq_head;
+ swtail = ppd->sdma_descq_tail;
+ cnt = ppd->sdma_descq_cnt;
+
+ if (swhead < swtail)
+ /* not wrapped */
+ sane = (hwhead >= swhead) && (hwhead <= swtail);
+ else if (swhead > swtail)
+ /* wrapped around */
+ sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+ (hwhead <= swtail);
+ else
+ /* empty */
+ sane = (hwhead == swhead);
+
+ if (unlikely(!sane)) {
+ if (use_dmahead) {
+ /* try one more time, directly from the register */
+ use_dmahead = 0;
+ goto retry;
+ }
+ /* proceed as if no progress */
+ hwhead = swhead;
+ }
+
+ return hwhead;
+}
+
+static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
+{
+ u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
+
+ return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
+}
+
+/*
+ * Compute the amount of delay before sending the next packet if the
+ * port's send rate differs from the static rate set for the QP.
+ * The delay affects the next packet and the amount of the delay is
+ * based on the length of this packet.
+ */
+static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ u8 snd_mult = ppd->delay_mult;
+ u8 rcv_mult = ib_rate_to_delay[srate];
+ u32 ret;
+
+ ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
+
+ /* Indicate VL15, else set the VL in the control word */
+ if (vl == 15)
+ ret |= PBC_7322_VL15_SEND_CTRL;
+ else
+ ret |= vl << PBC_VL_NUM_LSB;
+ ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
+
+ return ret;
+}
+
+/*
+ * Enable the per-port VL15 send buffers for use.
+ * They follow the rest of the buffers, without a config parameter.
+ * This was in initregs, but that is done before the shadow
+ * is set up, and this has to be done after the shadow is
+ * set up.
+ */
+static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
+{
+ unsigned vl15bufs;
+
+ vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
+ qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
+ TXCHK_CHG_TYPE_KERN, NULL);
+}
+
+static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
+{
+ if (rcd->ctxt < NUM_IB_PORTS) {
+ if (rcd->dd->num_pports > 1) {
+ rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
+ rcd->rcvegr_tid_base = rcd->ctxt ?
rcd->rcvegrcnt : 0; + } else { + rcd->rcvegrcnt = KCTXT0_EGRCNT; + rcd->rcvegr_tid_base = 0; + } + } else { + rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; + rcd->rcvegr_tid_base = KCTXT0_EGRCNT + + (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt; + } +} + +#define QTXSLEEPS 5000 +static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, + u32 len, u32 which, struct qib_ctxtdata *rcd) +{ + int i; + const int last = start + len - 1; + const int lastr = last / BITS_PER_LONG; + u32 sleeps = 0; + int wait = rcd != NULL; + unsigned long flags; + + while (wait) { + unsigned long shadow; + int cstart, previ = -1; + + /* + * when flipping from kernel to user, we can't change + * the checking type if the buffer is allocated to the + * driver. It's OK the other direction, because it's + * from close, and we have just disarm'ed all the + * buffers. All the kernel to kernel changes are also + * OK. + */ + for (cstart = start; cstart <= last; cstart++) { + i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) + / BITS_PER_LONG; + if (i != previ) { + shadow = (unsigned long) + le64_to_cpu(dd->pioavailregs_dma[i]); + previ = i; + } + if (test_bit(((2 * cstart) + + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) + % BITS_PER_LONG, &shadow)) + break; + } + + if (cstart > last) + break; + + if (sleeps == QTXSLEEPS) + break; + /* make sure we see an updated copy next time around */ + sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); + sleeps++; + msleep(1); + } + + switch (which) { + case TXCHK_CHG_TYPE_DIS1: + /* + * disable checking on a range; used by diags; just + * one buffer, but still written generically + */ + for (i = start; i <= last; i++) + clear_bit(i, dd->cspec->sendchkenable); + break; + + case TXCHK_CHG_TYPE_ENAB1: + /* + * (re)enable checking on a range; used by diags; just + * one buffer, but still written generically; read + * scratch to be sure buffer actually triggered, not + * just flushed from processor. 
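+ * (This kr_scratch read is the driver's usual barrier idiom: a
+ * 32-bit read forces prior posted writes to reach the chip before
+ * their effect is tested or relied upon.)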
+ */
+ qib_read_kreg32(dd, kr_scratch);
+ for (i = start; i <= last; i++)
+ set_bit(i, dd->cspec->sendchkenable);
+ break;
+
+ case TXCHK_CHG_TYPE_KERN:
+ /* usable by kernel */
+ for (i = start; i <= last; i++) {
+ set_bit(i, dd->cspec->sendibchk);
+ clear_bit(i, dd->cspec->sendgrhchk);
+ }
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ /* see if we need to raise avail update threshold */
+ for (i = dd->first_user_ctxt;
+ dd->cspec->updthresh != dd->cspec->updthresh_dflt
+ && i < dd->cfgctxts; i++)
+ if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
+ ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
+ < dd->cspec->updthresh_dflt)
+ break;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ if (i == dd->cfgctxts) {
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ dd->cspec->updthresh = dd->cspec->updthresh_dflt;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld)) <<
+ SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ }
+ break;
+
+ case TXCHK_CHG_TYPE_USER:
+ /* for user process */
+ for (i = start; i <= last; i++) {
+ clear_bit(i, dd->cspec->sendibchk);
+ set_bit(i, dd->cspec->sendgrhchk);
+ }
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
+ / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
+ dd->cspec->updthresh = (rcd->piocnt /
+ rcd->subctxt_cnt) - 1;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ } else
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
+ qib_write_kreg(dd, kr_sendcheckmask + i,
+ dd->cspec->sendchkenable[i]);
+
+ for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
+ qib_write_kreg(dd, kr_sendgrhcheckmask + i,
+ dd->cspec->sendgrhchk[i]);
+ qib_write_kreg(dd, kr_sendibpktmask + i,
+ dd->cspec->sendibchk[i]);
+ }
+
+ /*
+ * Be sure whatever we did was seen by the chip and acted upon,
+ * before we return. Mostly important for which >= 2.
+ */
+ qib_read_kreg32(dd, kr_scratch);
+}
+
+/* useful for trigger analyzers, etc. */
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ qib_write_kreg(dd, kr_scratch, val);
+}
+
+/* Dummy for now, use chip regs soon */
+static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ return -ENXIO;
+}
+
+/**
+ * qib_init_iba7322_funcs - set up the chip-specific function pointers
+ * @pdev: the pci_dev for the qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * Also allocates, inits, and returns the devdata struct for this
+ * device instance
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
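+ *
+ * Return: the allocated devdata structure on success, or an
+ * ERR_PTR-encoded errno if allocation or PCIe setup fails.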
+ */ +struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct qib_devdata *dd; + int ret, i; + u32 tabsize, actual_cnt = 0; + + dd = qib_alloc_devdata(pdev, + NUM_IB_PORTS * sizeof(struct qib_pportdata) + + sizeof(struct qib_chip_specific) + + NUM_IB_PORTS * sizeof(struct qib_chippport_specific)); + if (IS_ERR(dd)) + goto bail; + + dd->f_bringup_serdes = qib_7322_bringup_serdes; + dd->f_cleanup = qib_setup_7322_cleanup; + dd->f_clear_tids = qib_7322_clear_tids; + dd->f_free_irq = qib_7322_free_irq; + dd->f_get_base_info = qib_7322_get_base_info; + dd->f_get_msgheader = qib_7322_get_msgheader; + dd->f_getsendbuf = qib_7322_getsendbuf; + dd->f_gpio_mod = gpio_7322_mod; + dd->f_eeprom_wen = qib_7322_eeprom_wen; + dd->f_hdrqempty = qib_7322_hdrqempty; + dd->f_ib_updown = qib_7322_ib_updown; + dd->f_init_ctxt = qib_7322_init_ctxt; + dd->f_initvl15_bufs = qib_7322_initvl15_bufs; + dd->f_intr_fallback = qib_7322_intr_fallback; + dd->f_late_initreg = qib_late_7322_initreg; + dd->f_setpbc_control = qib_7322_setpbc_control; + dd->f_portcntr = qib_portcntr_7322; + dd->f_put_tid = qib_7322_put_tid; + dd->f_quiet_serdes = qib_7322_mini_quiet_serdes; + dd->f_rcvctrl = rcvctrl_7322_mod; + dd->f_read_cntrs = qib_read_7322cntrs; + dd->f_read_portcntrs = qib_read_7322portcntrs; + dd->f_reset = qib_do_7322_reset; + dd->f_init_sdma_regs = init_sdma_7322_regs; + dd->f_sdma_busy = qib_sdma_7322_busy; + dd->f_sdma_gethead = qib_sdma_7322_gethead; + dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl; + dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt; + dd->f_sdma_update_tail = qib_sdma_update_7322_tail; + dd->f_sendctrl = sendctrl_7322_mod; + dd->f_set_armlaunch = qib_set_7322_armlaunch; + dd->f_set_cntr_sample = qib_set_cntr_7322_sample; + dd->f_iblink_state = qib_7322_iblink_state; + dd->f_ibphys_portstate = qib_7322_phys_portstate; + dd->f_get_ib_cfg = qib_7322_get_ib_cfg; + dd->f_set_ib_cfg = qib_7322_set_ib_cfg; + dd->f_set_ib_loopback = qib_7322_set_loopback; + dd->f_get_ib_table = qib_7322_get_ib_table; + dd->f_set_ib_table = qib_7322_set_ib_table; + dd->f_set_intr_state = qib_7322_set_intr_state; + dd->f_setextled = qib_setup_7322_setextled; + dd->f_txchk_change = qib_7322_txchk_change; + dd->f_update_usrhead = qib_update_7322_usrhead; + dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr; + dd->f_xgxs_reset = qib_7322_mini_pcs_reset; + dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up; + dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up; + dd->f_sdma_init_early = qib_7322_sdma_init_early; + dd->f_writescratch = writescratch; + dd->f_tempsense_rd = qib_7322_tempsense_rd; + /* + * Do remaining PCIe setup and save PCIe values in dd. + * Any error printing is already done by the init code. + * On return, we have the chip mapped, but chip registers + * are not set up until start of qib_init_7322_variables. + */ + ret = qib_pcie_ddinit(dd, pdev, ent); + if (ret < 0) + goto bail_free; + + /* initialize chip-specific variables */ + ret = qib_init_7322_variables(dd); + if (ret) + goto bail_cleanup; + + if (qib_mini_init || !dd->num_pports) + goto bail; + + /* + * Determine number of vectors we want; depends on port count + * and number of configured kernel receive queues actually used. + * Should also depend on whether sdma is enabled or not, but + * that's such a rare testing case it's not worth worrying about. 
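+ * (Sketch of the count below: one vector per irq_table entry whose
+ * port exists on this board, plus one per kernel receive context
+ * that was actually allocated, i.e. has a non-NULL dd->rcd[] slot.)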
+ */
+ tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
+ for (i = 0; i < tabsize; i++)
+ if ((i < ARRAY_SIZE(irq_table) &&
+ irq_table[i].port <= dd->num_pports) ||
+ (i >= ARRAY_SIZE(irq_table) &&
+ dd->rcd[i - ARRAY_SIZE(irq_table)]))
+ actual_cnt++;
+ tabsize = actual_cnt;
+ dd->cspec->msix_entries = kmalloc(tabsize *
+ sizeof(struct msix_entry), GFP_KERNEL);
+ dd->cspec->msix_arg = kmalloc(tabsize *
+ sizeof(void *), GFP_KERNEL);
+ if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
+ qib_dev_err(dd, "No memory for MSI-X table\n");
+ tabsize = 0;
+ }
+ for (i = 0; i < tabsize; i++)
+ dd->cspec->msix_entries[i].entry = i;
+
+ if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
+ qib_dev_err(dd, "Failed to set up PCIe or interrupts; "
+ "continuing anyway\n");
+ /* may be less than we wanted, if not enough available */
+ dd->cspec->num_msix_entries = tabsize;
+
+ /* set up interrupt handler */
+ qib_setup_7322_interrupt(dd, 1);
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
+
+/*
+ * Set the table entry at the specified index from the table specified.
+ * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
+ * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
+ * 'ridx' below addresses the correct entry, while its 4 LSBs select the
+ * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
+ */
+#define DDS_ENT_AMP_LSB 14
+#define DDS_ENT_MAIN_LSB 9
+#define DDS_ENT_POST_LSB 5
+#define DDS_ENT_PRE_XTRA_LSB 3
+#define DDS_ENT_PRE_LSB 0
+
+/*
+ * Set one entry in the TxDDS table for the specified port.
+ * ridx picks one of the entries, while tp points
+ * to the appropriate table entry.
+ */
+static void set_txdds(struct qib_pportdata *ppd, int ridx,
+ const struct txdds_ent *tp)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 pack_ent;
+ int regidx;
+
+ /* Get correct offset in chip-space, and in source table */
+ regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
+ /*
+ * We do not use qib_write_kreg_port() because it was intended
+ * only for registers in the lower "port specific" pages.
+ * So do index calculation by hand.
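+ * (E.g., a sketch of the math, assuming the second port, hw_pidx 1:
+ *   regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx
+ *            + dd->palign / sizeof(u64);
+ * each port thus gets its own 3 * TXDDS_TABLE_SZ register window.)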
+ */ + if (ppd->hw_pidx) + regidx += (dd->palign / sizeof(u64)); + + pack_ent = tp->amp << DDS_ENT_AMP_LSB; + pack_ent |= tp->main << DDS_ENT_MAIN_LSB; + pack_ent |= tp->pre << DDS_ENT_PRE_LSB; + pack_ent |= tp->post << DDS_ENT_POST_LSB; + qib_write_kreg(dd, regidx, pack_ent); + /* Prevent back-to-back writes by hitting scratch */ + qib_write_kreg(ppd->dd, kr_scratch, 0); +} + +static const struct vendor_txdds_ent vendor_txdds[] = { + { /* Amphenol 1m 30awg NoEq */ + { 0x41, 0x50, 0x48 }, "584470002 ", + { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 }, + }, + { /* Amphenol 3m 28awg NoEq */ + { 0x41, 0x50, 0x48 }, "584470004 ", + { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 }, + }, + { /* Finisar 3m OM2 Optical */ + { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL", + { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 }, + }, + { /* Finisar 30m OM2 Optical */ + { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL", + { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 }, + }, + { /* Finisar Default OM2 Optical */ + { 0x00, 0x90, 0x65 }, NULL, + { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 }, + }, + { /* Gore 1m 30awg NoEq */ + { 0x00, 0x21, 0x77 }, "QSN3300-1 ", + { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 }, + }, + { /* Gore 2m 30awg NoEq */ + { 0x00, 0x21, 0x77 }, "QSN3300-2 ", + { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 }, + }, + { /* Gore 1m 28awg NoEq */ + { 0x00, 0x21, 0x77 }, "QSN3800-1 ", + { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 }, + }, + { /* Gore 3m 28awg NoEq */ + { 0x00, 0x21, 0x77 }, "QSN3800-3 ", + { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 }, + }, + { /* Gore 5m 24awg Eq */ + { 0x00, 0x21, 0x77 }, "QSN7000-5 ", + { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 }, + }, + { /* Gore 7m 24awg Eq */ + { 0x00, 0x21, 0x77 }, "QSN7000-7 ", + { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 }, + }, + { /* Gore 5m 26awg Eq */ + { 0x00, 0x21, 0x77 }, "QSN7600-5 ", + { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 }, + }, + { /* Gore 7m 26awg Eq */ + { 0x00, 0x21, 0x77 }, "QSN7600-7 ", + { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 }, + }, + { /* Intersil 12m 24awg Active */ + { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224", + { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 }, + }, + { /* Intersil 10m 28awg Active */ + { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028", + { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 }, + }, + { /* Intersil 7m 30awg Active */ + { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730", + { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 }, + }, + { /* Intersil 5m 32awg Active */ + { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532", + { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 }, + }, + { /* Intersil Default Active */ + { 0x00, 0x30, 0xB4 }, NULL, + { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 }, + }, + { /* Luxtera 20m Active Optical */ + { 0x00, 0x25, 0x63 }, NULL, + { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 }, + }, + { /* Molex 1M Cu loopback */ + { 0x00, 0x09, 0x3A }, "74763-0025 ", + { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, + }, + { /* Molex 2m 28awg NoEq */ + { 0x00, 0x09, 0x3A }, "74757-2201 ", + { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 }, + }, +}; + +static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = { + /* amp, pre, main, post */ + { 2, 2, 15, 6 }, /* Loopback */ + { 0, 0, 0, 1 }, /* 2 dB */ + { 0, 0, 0, 2 }, /* 3 dB */ + { 0, 0, 0, 3 }, /* 4 dB */ + { 0, 0, 0, 4 }, /* 5 dB */ + { 0, 0, 0, 5 }, /* 6 dB */ + { 0, 0, 0, 6 }, /* 7 dB */ + { 0, 0, 0, 7 }, /* 8 dB */ + { 0, 0, 0, 8 }, /* 9 dB */ + { 0, 0, 0, 9 }, /* 10 dB */ + { 0, 0, 0, 10 }, /* 11 dB */ + { 0, 0, 0, 11 }, /* 12 dB */ 
+ { 0, 0, 0, 12 }, /* 13 dB */ + { 0, 0, 0, 13 }, /* 14 dB */ + { 0, 0, 0, 14 }, /* 15 dB */ + { 0, 0, 0, 15 }, /* 16 dB */ +}; + +static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = { + /* amp, pre, main, post */ + { 2, 2, 15, 6 }, /* Loopback */ + { 0, 0, 0, 8 }, /* 2 dB */ + { 0, 0, 0, 8 }, /* 3 dB */ + { 0, 0, 0, 9 }, /* 4 dB */ + { 0, 0, 0, 9 }, /* 5 dB */ + { 0, 0, 0, 10 }, /* 6 dB */ + { 0, 0, 0, 10 }, /* 7 dB */ + { 0, 0, 0, 11 }, /* 8 dB */ + { 0, 0, 0, 11 }, /* 9 dB */ + { 0, 0, 0, 12 }, /* 10 dB */ + { 0, 0, 0, 12 }, /* 11 dB */ + { 0, 0, 0, 13 }, /* 12 dB */ + { 0, 0, 0, 13 }, /* 13 dB */ + { 0, 0, 0, 14 }, /* 14 dB */ + { 0, 0, 0, 14 }, /* 15 dB */ + { 0, 0, 0, 15 }, /* 16 dB */ +}; + +static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { + /* amp, pre, main, post */ + { 2, 2, 15, 6 }, /* Loopback */ + { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */ + { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */ + { 0, 1, 0, 11 }, /* 4 dB */ + { 0, 1, 0, 13 }, /* 5 dB */ + { 0, 1, 0, 15 }, /* 6 dB */ + { 0, 1, 3, 15 }, /* 7 dB */ + { 0, 1, 7, 15 }, /* 8 dB */ + { 0, 1, 7, 15 }, /* 9 dB */ + { 0, 1, 8, 15 }, /* 10 dB */ + { 0, 1, 9, 15 }, /* 11 dB */ + { 0, 1, 10, 15 }, /* 12 dB */ + { 0, 2, 6, 15 }, /* 13 dB */ + { 0, 2, 7, 15 }, /* 14 dB */ + { 0, 2, 8, 15 }, /* 15 dB */ + { 0, 2, 9, 15 }, /* 16 dB */ +}; + +/* + * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ. + * These are mostly used for mez cards going through connectors + * and backplane traces, but can be used to add other "unusual" + * table values as well. + */ +static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = { + /* amp, pre, main, post */ + { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ +}; + +static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { + /* amp, pre, main, post */ + { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ +}; + +static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { + /* amp, pre, main, post */ + { 0, 1, 0, 4 }, /* QMH7342 backplane settings */ + { 0, 1, 0, 5 }, /* QMH7342 backplane settings */ + { 0, 1, 0, 6 }, /* QMH7342 backplane settings */ + { 0, 1, 0, 8 }, /* QMH7342 backplane settings */ + { 0, 1, 12, 10 }, /* QME7342 backplane setting */ + { 0, 1, 12, 11 }, /* QME7342 backplane setting */ + { 0, 1, 12, 12 }, /* QME7342 backplane setting */ + { 0, 1, 12, 14 }, /* QME7342 backplane setting */ + { 0, 1, 12, 6 }, /* QME7342 backplane setting */ + { 0, 1, 12, 7 }, /* QME7342 backplane setting */ + { 0, 1, 12, 8 }, /* QME7342 
backplane setting */
+};
+
+static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
+ unsigned atten)
+{
+ /*
+ * The attenuation table starts at 2dB for entry 1,
+ * with entry 0 being the loopback entry.
+ */
+ if (atten <= 2)
+ atten = 1;
+ else if (atten > TXDDS_TABLE_SZ)
+ atten = TXDDS_TABLE_SZ - 1;
+ else
+ atten--;
+ return txdds + atten;
+}
+
+/*
+ * If override is set, the module parameter txselect has a value
+ * for this specific port, so use it, rather than our normal mechanism.
+ */
+static void find_best_ent(struct qib_pportdata *ppd,
+ const struct txdds_ent **sdr_dds,
+ const struct txdds_ent **ddr_dds,
+ const struct txdds_ent **qdr_dds, int override)
+{
+ struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
+ int idx;
+
+ /* Search table of known cables */
+ for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
+ const struct vendor_txdds_ent *v = vendor_txdds + idx;
+
+ if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
+ (!v->partnum ||
+ !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
+ *sdr_dds = &v->sdr;
+ *ddr_dds = &v->ddr;
+ *qdr_dds = &v->qdr;
+ return;
+ }
+ }
+
+ /* Look up serdes setting by cable type and attenuation */
+ if (!override && QSFP_IS_ACTIVE(qd->tech)) {
+ *sdr_dds = txdds_sdr + ppd->dd->board_atten;
+ *ddr_dds = txdds_ddr + ppd->dd->board_atten;
+ *qdr_dds = txdds_qdr + ppd->dd->board_atten;
+ return;
+ }
+
+ if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
+ qd->atten[1])) {
+ *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
+ *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
+ *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
+ return;
+ } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
+ /*
+ * If we have no (or incomplete) data from the cable
+ * EEPROM, or no QSFP, or override is set, use the
+ * module parameter value to index into the attenuation
+ * table.
+ */
+ idx = ppd->cpspec->no_eep;
+ *sdr_dds = &txdds_sdr[idx];
+ *ddr_dds = &txdds_ddr[idx];
+ *qdr_dds = &txdds_qdr[idx];
+ } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+ /* similar to above, but index into the "extra" table. */
+ idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
+ *sdr_dds = &txdds_extra_sdr[idx];
+ *ddr_dds = &txdds_extra_ddr[idx];
+ *qdr_dds = &txdds_extra_qdr[idx];
+ } else {
+ /* this shouldn't happen, it's range checked */
+ *sdr_dds = txdds_sdr + qib_long_atten;
+ *ddr_dds = txdds_ddr + qib_long_atten;
+ *qdr_dds = txdds_qdr + qib_long_atten;
+ }
+}
+
+static void init_txdds_table(struct qib_pportdata *ppd, int override)
+{
+ const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
+ struct txdds_ent *dds;
+ int idx;
+ int single_ent = 0;
+
+ find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
+
+ /* for mez cards or override, use the selected value for all entries */
+ if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
+ single_ent = 1;
+
+ /* Fill in the first entry with the best entry found. */
+ set_txdds(ppd, 0, sdr_dds);
+ set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
+ set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
+ if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE)) {
+ dds = (struct txdds_ent *)(ppd->link_speed_active ==
+ QIB_IB_QDR ? qdr_dds :
+ (ppd->link_speed_active ==
+ QIB_IB_DDR ? ddr_dds : sdr_dds));
+ write_tx_serdes_param(ppd, dds);
+ }
+
+ /* Fill in the remaining entries with the default table values. */
+ for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
+ set_txdds(ppd, idx, single_ent ?
sdr_dds : txdds_sdr + idx); + set_txdds(ppd, idx + TXDDS_TABLE_SZ, + single_ent ? ddr_dds : txdds_ddr + idx); + set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ, + single_ent ? qdr_dds : txdds_qdr + idx); + } +} + +#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl) +#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg) +#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy) +#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address) +#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data) +#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read) +#define AHB_TRANS_TRIES 10 + +/* + * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4, + * 5=subsystem which is why most calls have "chan + chan >> 1" + * for the channel argument. + */ +static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr, + u32 data, u32 mask) +{ + u32 rd_data, wr_data, sz_mask; + u64 trans, acc, prev_acc; + u32 ret = 0xBAD0BAD; + int tries; + + prev_acc = qib_read_kreg64(dd, KR_AHB_ACC); + /* From this point on, make sure we return access */ + acc = (quad << 1) | 1; + qib_write_kreg(dd, KR_AHB_ACC, acc); + + for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { + trans = qib_read_kreg64(dd, KR_AHB_TRANS); + if (trans & AHB_TRANS_RDY) + break; + } + if (tries >= AHB_TRANS_TRIES) { + qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES); + goto bail; + } + + /* If mask is not all 1s, we need to read, but different SerDes + * entities have different sizes + */ + sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1; + wr_data = data & mask & sz_mask; + if ((~mask & sz_mask) != 0) { + trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); + qib_write_kreg(dd, KR_AHB_TRANS, trans); + + for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { + trans = qib_read_kreg64(dd, KR_AHB_TRANS); + if (trans & AHB_TRANS_RDY) + break; + } + if (tries >= AHB_TRANS_TRIES) { + qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n", + AHB_TRANS_TRIES); + goto bail; + } + /* Re-read in case host split reads and read data first */ + trans = qib_read_kreg64(dd, KR_AHB_TRANS); + rd_data = (uint32_t)(trans >> AHB_DATA_LSB); + wr_data |= (rd_data & ~mask & sz_mask); + } + + /* If mask is not zero, we need to write. */ + if (mask & sz_mask) { + trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); + trans |= ((uint64_t)wr_data << AHB_DATA_LSB); + trans |= AHB_WR; + qib_write_kreg(dd, KR_AHB_TRANS, trans); + + for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { + trans = qib_read_kreg64(dd, KR_AHB_TRANS); + if (trans & AHB_TRANS_RDY) + break; + } + if (tries >= AHB_TRANS_TRIES) { + qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n", + AHB_TRANS_TRIES); + goto bail; + } + } + ret = wr_data; +bail: + qib_write_kreg(dd, KR_AHB_ACC, prev_acc); + return ret; +} + +static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data, + unsigned mask) +{ + struct qib_devdata *dd = ppd->dd; + int chan; + u32 rbc; + + for (chan = 0; chan < SERDES_CHANS; ++chan) { + ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, + data, mask); + rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), + addr, 0, 0); + } +} + +static int serdes_7322_init(struct qib_pportdata *ppd) +{ + u64 data; + u32 le_val; + + /* + * Initialize the Tx DDS tables. 
Also done every QSFP event, + * for adapters with QSFP + */ + init_txdds_table(ppd, 0); + + /* ensure no tx overrides from earlier driver loads */ + qib_write_kreg_port(ppd, krp_tx_deemph_override, + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + reset_tx_deemphasis_override)); + + /* Patch some SerDes defaults to "Better for IB" */ + /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */ + ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); + + /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ + ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); + /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */ + ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6)); + + /* May be overridden in qsfp_7322_event */ + le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; + ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); + + /* enable LE1 adaptation for all but QME, which is disabled */ + le_val = IS_QME(ppd->dd) ? 0 : 1; + ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5)); + + /* Clear cmode-override, may be set from older driver */ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); + + /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */ + ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8)); + + /* setup LoS params; these are subsystem, so chan == 5 */ + /* LoS filter threshold_count on, ch 0-3, set to 8 */ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); + + /* LoS filter threshold_count off, ch 0-3, set to 4 */ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); + + /* LoS filter select enabled */ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); + + /* LoS target data: SDR=4, DDR=2, QDR=1 */ + ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ + ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ + ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ + + data = qib_read_kreg_port(ppd, krp_serdesctrl); + qib_write_kreg_port(ppd, krp_serdesctrl, data | + SYM_MASK(IBSerdesCtrl_0, RXLOSEN)); + + /* rxbistena; set 0 to avoid effects of it switch later */ + ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); + + /* Configure 4 DFE taps, and only they adapt */ + ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0)); + + /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ + le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; + ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); + + /* + * Set receive adaptation mode. SDR and DDR adaptation are + * always on, and QDR is initially enabled; later disabled. + */ + qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); + qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); + qib_write_kreg_port(ppd, krp_static_adapt_dis(2), + ppd->dd->cspec->r1 ? 
+ QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); + ppd->cpspec->qdr_dfe_on = 1; + + /* FLoop LOS gate: PPM filter enabled */ + ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); + + /* rx offset center enabled */ + ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4); + + if (!ppd->dd->cspec->r1) { + ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12); + ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8); + } + + /* Set the frequency loop bandwidth to 15 */ + ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5)); + + return 0; +} + +/* start adjust QMH serdes parameters */ + +static void set_man_code(struct qib_pportdata *ppd, int chan, int code) +{ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), + 9, code << 9, 0x3f << 9); +} + +static void set_man_mode_h1(struct qib_pportdata *ppd, int chan, + int enable, u32 tapenable) +{ + if (enable) + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), + 1, 3 << 10, 0x1f << 10); + else + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), + 1, 0, 0x1f << 10); +} + +/* Set clock to 1, 0, 1, 0 */ +static void clock_man(struct qib_pportdata *ppd, int chan) +{ + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), + 4, 0x4000, 0x4000); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), + 4, 0, 0x4000); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), + 4, 0x4000, 0x4000); + ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), + 4, 0, 0x4000); +} + +/* + * write the current Tx serdes pre,post,main,amp settings into the serdes. + * The caller must pass the settings appropriate for the current speed, + * or not care if they are correct for the current speed. + */ +static void write_tx_serdes_param(struct qib_pportdata *ppd, + struct txdds_ent *txdds) +{ + u64 deemph; + + deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override); + /* field names for amp, main, post, pre, respectively */ + deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) | + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) | + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) | + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)); + + deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + tx_override_deemphasis_select); + deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txampcntl_d2a); + deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txc0_ena); + deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txcp1_ena); + deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txcn1_ena); + qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph); +} + +/* + * Set the parameters for mez cards on link bounce, so they are + * always exactly what was requested. Similar logic to init_txdds + * but does just the serdes. + */ +static void adj_tx_serdes(struct qib_pportdata *ppd) +{ + const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds; + struct txdds_ent *dds; + + find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1); + dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? + qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ? 
+ ddr_dds : sdr_dds)); + write_tx_serdes_param(ppd, dds); +} + +/* set QDR forced value for H1, if needed */ +static void force_h1(struct qib_pportdata *ppd) +{ + int chan; + + ppd->cpspec->qdr_reforce = 0; + if (!ppd->dd->cspec->r1) + return; + + for (chan = 0; chan < SERDES_CHANS; chan++) { + set_man_mode_h1(ppd, chan, 1, 0); + set_man_code(ppd, chan, ppd->cpspec->h1_val); + clock_man(ppd, chan); + set_man_mode_h1(ppd, chan, 0, 0); + } +} + +#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN) +#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en) + +#define R_OPCODE_LSB 3 +#define R_OP_NOP 0 +#define R_OP_SHIFT 2 +#define R_OP_UPDATE 3 +#define R_TDI_LSB 2 +#define R_TDO_LSB 1 +#define R_RDY 1 + +static int qib_r_grab(struct qib_devdata *dd) +{ + u64 val; + val = SJA_EN; + qib_write_kreg(dd, kr_r_access, val); + qib_read_kreg32(dd, kr_scratch); + return 0; +} + +/* qib_r_wait_for_rdy() not only waits for the ready bit, it + * returns the current state of R_TDO + */ +static int qib_r_wait_for_rdy(struct qib_devdata *dd) +{ + u64 val; + int timeout; + for (timeout = 0; timeout < 100 ; ++timeout) { + val = qib_read_kreg32(dd, kr_r_access); + if (val & R_RDY) + return (val >> R_TDO_LSB) & 1; + } + return -1; +} + +static int qib_r_shift(struct qib_devdata *dd, int bisten, + int len, u8 *inp, u8 *outp) +{ + u64 valbase, val; + int ret, pos; + + valbase = SJA_EN | (bisten << BISTEN_LSB) | + (R_OP_SHIFT << R_OPCODE_LSB); + ret = qib_r_wait_for_rdy(dd); + if (ret < 0) + goto bail; + for (pos = 0; pos < len; ++pos) { + val = valbase; + if (outp) { + outp[pos >> 3] &= ~(1 << (pos & 7)); + outp[pos >> 3] |= (ret << (pos & 7)); + } + if (inp) { + int tdi = inp[pos >> 3] >> (pos & 7); + val |= ((tdi & 1) << R_TDI_LSB); + } + qib_write_kreg(dd, kr_r_access, val); + qib_read_kreg32(dd, kr_scratch); + ret = qib_r_wait_for_rdy(dd); + if (ret < 0) + break; + } + /* Restore to NOP between operations. */ + val = SJA_EN | (bisten << BISTEN_LSB); + qib_write_kreg(dd, kr_r_access, val); + qib_read_kreg32(dd, kr_scratch); + ret = qib_r_wait_for_rdy(dd); + + if (ret >= 0) + ret = pos; +bail: + return ret; +} + +static int qib_r_update(struct qib_devdata *dd, int bisten) +{ + u64 val; + int ret; + + val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB); + ret = qib_r_wait_for_rdy(dd); + if (ret >= 0) { + qib_write_kreg(dd, kr_r_access, val); + qib_read_kreg32(dd, kr_scratch); + } + return ret; +} + +#define BISTEN_PORT_SEL 15 +#define LEN_PORT_SEL 625 +#define BISTEN_AT 17 +#define LEN_AT 156 +#define BISTEN_ETM 16 +#define LEN_ETM 632 + +#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE) + +/* these are common for all IB port use cases. 
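+ * (BIT2BYTE() rounds a bit count up to whole bytes, e.g.
+ * BIT2BYTE(LEN_AT) = (156 + 7) / 8 = 20 bytes and
+ * BIT2BYTE(LEN_ETM) = (632 + 7) / 8 = 79 bytes, which is why the
+ * arrays below have 20 and 79 initializers respectively.)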
*/ +static u8 reset_at[BIT2BYTE(LEN_AT)] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, +}; +static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e, + 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7, + 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70, + 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00, + 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, +}; +static u8 at[BIT2BYTE(LEN_AT)] = { + 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, +}; + +/* used for IB1 or IB2, only one in use */ +static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03, + 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00, + 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00, +}; + +/* used when both IB1 and IB2 are in use */ +static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, + 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05, + 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, + 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07, + 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, +}; + +/* used when only IB1 is in use */ +static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = { + 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13, + 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32, + 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, + 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, + 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +/* used when only IB2 is in use */ +static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = { + 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39, + 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32, + 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, + 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, + 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32, + 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, + 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, + 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, +}; + +/* used when both IB1 and IB2 are in use */ +static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = { + 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13, + 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 
0x13, 0x13, 0x13, + 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32, + 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a, + 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, + 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +/* + * Do setup to properly handle IB link recovery; if port is zero, we + * are initializing to cover both ports; otherwise we are initializing + * to cover a single port card, or the port has reached INIT and we may + * need to switch coverage types. + */ +static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both) +{ + u8 *portsel, *etm; + struct qib_devdata *dd = ppd->dd; + + if (!ppd->dd->cspec->r1) + return; + if (!both) { + dd->cspec->recovery_ports_initted++; + ppd->cpspec->recovery_init = 1; + } + if (!both && dd->cspec->recovery_ports_initted == 1) { + portsel = ppd->port == 1 ? portsel_port1 : portsel_port2; + etm = atetm_1port; + } else { + portsel = portsel_2port; + etm = atetm_2port; + } + + if (qib_r_grab(dd) < 0 || + qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 || + qib_r_update(dd, BISTEN_ETM) < 0 || + qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 || + qib_r_update(dd, BISTEN_AT) < 0 || + qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL, + portsel, NULL) < 0 || + qib_r_update(dd, BISTEN_PORT_SEL) < 0 || + qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 || + qib_r_update(dd, BISTEN_AT) < 0 || + qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 || + qib_r_update(dd, BISTEN_ETM) < 0) + qib_dev_err(dd, "Failed IB link recovery setup\n"); +} + +static void check_7322_rxe_status(struct qib_pportdata *ppd) +{ + struct qib_devdata *dd = ppd->dd; + u64 fmask; + + if (dd->cspec->recovery_ports_initted != 1) + return; /* rest doesn't apply to dualport */ + qib_write_kreg(dd, kr_control, dd->control | + SYM_MASK(Control, FreezeMode)); + (void)qib_read_kreg64(dd, kr_scratch); + udelay(3); /* ibcreset asserted 400ns, be sure that's over */ + fmask = qib_read_kreg64(dd, kr_act_fmask); + if (!fmask) { + /* + * require a powercycle before we'll work again, and make + * sure we get no more interrupts, and don't turn off + * freeze. + */ + ppd->dd->cspec->stay_in_freeze = 1; + qib_7322_set_intr_state(ppd->dd, 0); + qib_write_kreg(dd, kr_fmask, 0ULL); + qib_dev_err(dd, "HCA unusable until powercycled\n"); + return; /* eventually reset */ + } + + qib_write_kreg(ppd->dd, kr_hwerrclear, + SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1)); + + /* don't do the full clear_freeze(), not needed for this */ + qib_write_kreg(dd, kr_control, dd->control); + qib_read_kreg32(dd, kr_scratch); + /* take IBC out of reset */ + if (ppd->link_speed_supported) { + ppd->cpspec->ibcctrl_a &= + ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); + qib_write_kreg_port(ppd, krp_ibcctrl_a, + ppd->cpspec->ibcctrl_a); + qib_read_kreg32(dd, kr_scratch); + if (ppd->lflags & QIBL_IB_LINK_DISABLED) + qib_set_ib_7322_lstate(ppd, 0, + QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); + } +} diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c new file mode 100644 index 000000000000..9b40f345ac3f --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -0,0 +1,1586 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> +#include <linux/idr.h> + +#include "qib.h" +#include "qib_common.h" + +/* + * min buffers we want to have per context, after driver + */ +#define QIB_MIN_USER_CTXT_BUFCNT 7 + +#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF +#define QLOGIC_IB_R_SOFTWARE_SHIFT 24 +#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62) + +/* + * Number of ctxts we are configured to use (to allow for more pio + * buffers per ctxt, etc.) Zero means use chip value. + */ +ushort qib_cfgctxts; +module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO); +MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use"); + +/* + * If set, do not write to any regs if avoidable, hack to allow + * check for deranged default register values. + */ +ushort qib_mini_init; +module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO); +MODULE_PARM_DESC(mini_init, "If set, do minimal diag init"); + +unsigned qib_n_krcv_queues; +module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO); +MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); + +/* + * qib_wc_pat parameter: + * 0 is WC via MTRR + * 1 is WC via PAT + * If PAT initialization fails, code reverts back to MTRR + */ +unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ +module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); +MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); + +struct workqueue_struct *qib_wq; +struct workqueue_struct *qib_cq_wq; + +static void verify_interrupt(unsigned long); + +static struct idr qib_unit_table; +u32 qib_cpulist_count; +unsigned long *qib_cpulist; + +/* set number of contexts we'll actually use */ +void qib_set_ctxtcnt(struct qib_devdata *dd) +{ + if (!qib_cfgctxts) + dd->cfgctxts = dd->ctxtcnt; + else if (qib_cfgctxts < dd->num_pports) + dd->cfgctxts = dd->ctxtcnt; + else if (qib_cfgctxts <= dd->ctxtcnt) + dd->cfgctxts = qib_cfgctxts; + else + dd->cfgctxts = dd->ctxtcnt; +} + +/* + * Common code for creating the receive context array. + */ +int qib_create_ctxts(struct qib_devdata *dd) +{ + unsigned i; + int ret; + + /* + * Allocate full ctxtcnt array, rather than just cfgctxts, because + * cleanup iterates across all possible ctxts. 
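+ * (E.g., if the chip exposes more contexts than cfgctxts will use,
+ * the array is still sized for every possible context so teardown
+ * can walk all slots without tracking which subset was configured.)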
+ */ + dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); + if (!dd->rcd) { + qib_dev_err(dd, "Unable to allocate ctxtdata array, " + "failing\n"); + ret = -ENOMEM; + goto done; + } + + /* create (one or more) kctxt */ + for (i = 0; i < dd->first_user_ctxt; ++i) { + struct qib_pportdata *ppd; + struct qib_ctxtdata *rcd; + + if (dd->skip_kctxt_mask & (1 << i)) + continue; + + ppd = dd->pport + (i % dd->num_pports); + rcd = qib_create_ctxtdata(ppd, i); + if (!rcd) { + qib_dev_err(dd, "Unable to allocate ctxtdata" + " for Kernel ctxt, failing\n"); + ret = -ENOMEM; + goto done; + } + rcd->pkeys[0] = QIB_DEFAULT_P_KEY; + rcd->seq_cnt = 1; + } + ret = 0; +done: + return ret; +} + +/* + * Common code for user and kernel context setup. + */ +struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) +{ + struct qib_devdata *dd = ppd->dd; + struct qib_ctxtdata *rcd; + + rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); + if (rcd) { + INIT_LIST_HEAD(&rcd->qp_wait_list); + rcd->ppd = ppd; + rcd->dd = dd; + rcd->cnt = 1; + rcd->ctxt = ctxt; + dd->rcd[ctxt] = rcd; + + dd->f_init_ctxt(rcd); + + /* + * To avoid wasting a lot of memory, we allocate 32KB chunks + * of physically contiguous memory, advance through it until + * used up and then allocate more. Of course, we need + * memory to store those extra pointers, now. 32KB seems to + * be the most that is "safe" under memory pressure + * (creating large files and then copying them over + * NFS while doing lots of MPI jobs). The OOM killer can + * get invoked, even though we say we can sleep and this can + * cause significant system problems.... + */ + rcd->rcvegrbuf_size = 0x8000; + rcd->rcvegrbufs_perchunk = + rcd->rcvegrbuf_size / dd->rcvegrbufsize; + rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + + rcd->rcvegrbufs_perchunk - 1) / + rcd->rcvegrbufs_perchunk; + } + return rcd; +} + +/* + * Common code for initializing the physical port structure. + */ +void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, + u8 hw_pidx, u8 port) +{ + ppd->dd = dd; + ppd->hw_pidx = hw_pidx; + ppd->port = port; /* IB port number, not index */ + + spin_lock_init(&ppd->sdma_lock); + spin_lock_init(&ppd->lflags_lock); + init_waitqueue_head(&ppd->state_wait); + + init_timer(&ppd->symerr_clear_timer); + ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup; + ppd->symerr_clear_timer.data = (unsigned long)ppd; +} + +static int init_pioavailregs(struct qib_devdata *dd) +{ + int ret, pidx; + u64 *status_page; + + dd->pioavailregs_dma = dma_alloc_coherent( + &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys, + GFP_KERNEL); + if (!dd->pioavailregs_dma) { + qib_dev_err(dd, "failed to allocate PIOavail reg area " + "in memory\n"); + ret = -ENOMEM; + goto done; + } + + /* + * We really want L2 cache aligned, but for current CPUs of + * interest, they are the same. + */ + status_page = (u64 *) + ((char *) dd->pioavailregs_dma + + ((2 * L1_CACHE_BYTES + + dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); + /* device status comes first, for backwards compatibility */ + dd->devstatusp = status_page; + *status_page++ = 0; + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + dd->pport[pidx].statusp = status_page; + *status_page++ = 0; + } + + /* + * Setup buffer to hold freeze and other messages, accessible to + * apps, following statusp. This is per-unit, not per port. 
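The 32KB eager-buffer chunking in qib_create_ctxtdata() above is plain ceiling-division arithmetic. A worked userspace example; the buffer size and count are assumptions:

#include <stdio.h>

int main(void)
{
        unsigned chunk_size = 0x8000;   /* 32KB per chunk, as in the driver */
        unsigned egrbufsize = 4096;     /* assumed eager buffer size */
        unsigned egrcnt = 512;          /* assumed eager entries for the ctxt */

        unsigned perchunk = chunk_size / egrbufsize;            /* 8 */
        unsigned chunks = (egrcnt + perchunk - 1) / perchunk;   /* ceil: 64 */

        printf("%u bufs per chunk, %u chunks\n", perchunk, chunks);
        return 0;
}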
+ */
+ dd->freezemsg = (char *) status_page;
+ *dd->freezemsg = 0;
+ /* length of msg buffer is "whatever is left" */
+ ret = (char *) status_page - (char *) dd->pioavailregs_dma;
+ dd->freezelen = PAGE_SIZE - ret;
+
+ ret = 0;
+
+done:
+ return ret;
+}
+
+/**
+ * init_shadow_tids - allocate the shadow TID array
+ * @dd: the qlogic_ib device
+ *
+ * allocate the shadow TID array, so we can qib_munlock previous
+ * entries. It may make more sense to move the pageshadow to the
+ * ctxt data structure, so we only allocate memory for ctxts actually
+ * in use, since we are at 8k per ctxt now.
+ * We don't want failures here to prevent use of the driver/chip,
+ * so no return value.
+ */
+static void init_shadow_tids(struct qib_devdata *dd)
+{
+ struct page **pages;
+ dma_addr_t *addrs;
+
+ pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+ if (!pages) {
+ qib_dev_err(dd, "failed to allocate shadow page * "
+ "array, no expected sends!\n");
+ goto bail;
+ }
+
+ addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+ if (!addrs) {
+ qib_dev_err(dd, "failed to allocate shadow dma handle "
+ "array, no expected sends!\n");
+ goto bail_free;
+ }
+
+ memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+ memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+
+ dd->pageshadow = pages;
+ dd->physshadow = addrs;
+ return;
+
+bail_free:
+ vfree(pages);
+bail:
+ dd->pageshadow = NULL;
+}
+
+/*
+ * Do initialization for device that is only needed on
+ * first detect, not on resets.
+ */
+static int loadtime_init(struct qib_devdata *dd)
+{
+ int ret = 0;
+
+ if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
+ QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
+ qib_dev_err(dd, "Driver only handles version %d, "
+ "chip swversion is %d (%llx), failing\n",
+ QIB_CHIP_SWVERSION,
+ (int)(dd->revision >>
+ QLOGIC_IB_R_SOFTWARE_SHIFT) &
+ QLOGIC_IB_R_SOFTWARE_MASK,
+ (unsigned long long) dd->revision);
+ ret = -ENOSYS;
+ goto done;
+ }
+
+ if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
+ qib_devinfo(dd->pcidev, "%s", dd->boardversion);
+
+ spin_lock_init(&dd->pioavail_lock);
+ spin_lock_init(&dd->sendctrl_lock);
+ spin_lock_init(&dd->uctxt_lock);
+ spin_lock_init(&dd->qib_diag_trans_lock);
+ spin_lock_init(&dd->eep_st_lock);
+ mutex_init(&dd->eep_lock);
+
+ if (qib_mini_init)
+ goto done;
+
+ ret = init_pioavailregs(dd);
+ init_shadow_tids(dd);
+
+ qib_get_eeprom_info(dd);
+
+ /* setup time (don't start yet) to verify we got interrupt */
+ init_timer(&dd->intrchk_timer);
+ dd->intrchk_timer.function = verify_interrupt;
+ dd->intrchk_timer.data = (unsigned long) dd;
+
+done:
+ return ret;
+}
+
+/**
+ * init_after_reset - re-initialize after a reset
+ * @dd: the qlogic_ib device
+ *
+ * sanity check at least some of the values after reset, and
+ * ensure no receive or transmit (explicitly, in case reset
+ * failed)
+ */
+static int init_after_reset(struct qib_devdata *dd)
+{
+ int i;
+
+ /*
+ * Ensure chip does no sends or receives, tail updates, or
+ * pioavail updates while we re-initialize. This is mostly
+ * for the driver data structures, not chip registers.
+ */
+ for (i = 0; i < dd->num_pports; ++i) {
+ /*
+ * ctxt == -1 means "all contexts". Only really safe for
+ * _dis_abling things, as here.
+ */
+ dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS |
+ QIB_RCVCTRL_TAILUPD_DIS, -1);
+ /* Redundant across ports for some, but no big deal.
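The shadow-TID sizing in init_shadow_tids() above is one page pointer plus one DMA address per TID entry per context; with 512 TIDs that is 512 x 8 bytes per array, i.e. the "8k per ctxt" the comment mentions. A userspace model with assumed counts:

#include <stdio.h>

int main(void)
{
        unsigned long cfgctxts = 16, rcvtidcnt = 512;   /* assumed */
        unsigned long entries = cfgctxts * rcvtidcnt;

        printf("pages[]: %lu bytes, addrs[]: %lu bytes\n",
               entries * sizeof(void *),
               entries * sizeof(unsigned long long));
        return 0;
}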
*/ + dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS | + QIB_SENDCTRL_AVAIL_DIS); + } + + return 0; +} + +static void enable_chip(struct qib_devdata *dd) +{ + u64 rcvmask; + int i; + + /* + * Enable PIO send, and update of PIOavail regs to memory. + */ + for (i = 0; i < dd->num_pports; ++i) + dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB | + QIB_SENDCTRL_AVAIL_ENB); + /* + * Enable kernel ctxts' receive and receive interrupt. + * Other ctxts done as user opens and inits them. + */ + rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB; + rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ? + QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB; + for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { + struct qib_ctxtdata *rcd = dd->rcd[i]; + + if (rcd) + dd->f_rcvctrl(rcd->ppd, rcvmask, i); + } +} + +static void verify_interrupt(unsigned long opaque) +{ + struct qib_devdata *dd = (struct qib_devdata *) opaque; + + if (!dd) + return; /* being torn down */ + + /* + * If we don't have a lid or any interrupts, let the user know and + * don't bother checking again. + */ + if (dd->int_counter == 0) { + if (!dd->f_intr_fallback(dd)) + dev_err(&dd->pcidev->dev, "No interrupts detected, " + "not usable.\n"); + else /* re-arm the timer to see if fallback works */ + mod_timer(&dd->intrchk_timer, jiffies + HZ/2); + } +} + +static void init_piobuf_state(struct qib_devdata *dd) +{ + int i, pidx; + u32 uctxts; + + /* + * Ensure all buffers are free, and fifos empty. Buffers + * are common, so only do once for port 0. + * + * After enable and qib_chg_pioavailkernel so we can safely + * enable pioavail updates and PIOENABLE. After this, packets + * are ready and able to go out. + */ + dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL); + for (pidx = 0; pidx < dd->num_pports; ++pidx) + dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH); + + /* + * If not all sendbufs are used, add the one to each of the lower + * numbered contexts. pbufsctxt and lastctxt_piobuf are + * calculated in chip-specific code because it may cause some + * chip-specific adjustments to be made. + */ + uctxts = dd->cfgctxts - dd->first_user_ctxt; + dd->ctxts_extrabuf = dd->pbufsctxt ? + dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0; + + /* + * Set up the shadow copies of the piobufavail registers, + * which we compare against the chip registers for now, and + * the in memory DMA'ed copies of the registers. + * By now pioavail updates to memory should have occurred, so + * copy them into our working/shadow registers; this is in + * case something went wrong with abort, but mostly to get the + * initial values of the generation bit correct. + */ + for (i = 0; i < dd->pioavregs; i++) { + __le64 tmp; + + tmp = dd->pioavailregs_dma[i]; + /* + * Don't need to worry about pioavailkernel here + * because we will call qib_chg_pioavailkernel() later + * in initialization, to busy out buffers as needed. + */ + dd->pioavailshadow[i] = le64_to_cpu(tmp); + } + while (i < ARRAY_SIZE(dd->pioavailshadow)) + dd->pioavailshadow[i++] = 0; /* for debugging sanity */ + + /* after pioavailshadow is setup */ + qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k, + TXCHK_CHG_TYPE_KERN, NULL); + dd->f_initvl15_bufs(dd); +} + +/** + * qib_init - do the actual initialization sequence on the chip + * @dd: the qlogic_ib device + * @reinit: reinitializing, so don't allocate new memory + * + * Do the actual initialization sequence on the chip. 
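verify_interrupt() above is an instance of a self-rearming check timer. A hedged sketch against the same pre-timer_setup() timer API this patch uses; my_dev, check_dev and arm_check are illustrative stand-ins, not driver symbols:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
        struct timer_list chk_timer;
        unsigned long int_counter;      /* bumped by the IRQ handler */
};

static void check_dev(unsigned long opaque)
{
        struct my_dev *dev = (struct my_dev *) opaque;

        if (dev->int_counter == 0)
                /* nothing seen yet: try a fallback, then look again */
                mod_timer(&dev->chk_timer, jiffies + HZ / 2);
}

static void arm_check(struct my_dev *dev)
{
        init_timer(&dev->chk_timer);
        dev->chk_timer.function = check_dev;
        dev->chk_timer.data = (unsigned long) dev;
        mod_timer(&dev->chk_timer, jiffies + HZ / 2);
}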
This is done + * both from the init routine called from the PCI infrastructure, and + * when we reset the chip, or detect that it was reset internally, + * or it's administratively re-enabled. + * + * Memory allocation here and in called routines is only done in + * the first case (reinit == 0). We have to be careful, because even + * without memory allocation, we need to re-write all the chip registers + * TIDs, etc. after the reset or enable has completed. + */ +int qib_init(struct qib_devdata *dd, int reinit) +{ + int ret = 0, pidx, lastfail = 0; + u32 portok = 0; + unsigned i; + struct qib_ctxtdata *rcd; + struct qib_pportdata *ppd; + unsigned long flags; + + /* Set linkstate to unknown, so we can watch for a transition. */ + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED | + QIBL_LINKDOWN | QIBL_LINKINIT | + QIBL_LINKV); + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + } + + if (reinit) + ret = init_after_reset(dd); + else + ret = loadtime_init(dd); + if (ret) + goto done; + + /* Bypass most chip-init, to get to device creation */ + if (qib_mini_init) + return 0; + + ret = dd->f_late_initreg(dd); + if (ret) + goto done; + + /* dd->rcd can be NULL if early init failed */ + for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { + /* + * Set up the (kernel) rcvhdr queue and egr TIDs. If doing + * re-init, the simplest way to handle this is to free + * existing, and re-allocate. + * Need to re-create rest of ctxt 0 ctxtdata as well. + */ + rcd = dd->rcd[i]; + if (!rcd) + continue; + + lastfail = qib_create_rcvhdrq(dd, rcd); + if (!lastfail) + lastfail = qib_setup_eagerbufs(rcd); + if (lastfail) { + qib_dev_err(dd, "failed to allocate kernel ctxt's " + "rcvhdrq and/or egr bufs\n"); + continue; + } + } + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + int mtu; + if (lastfail) + ret = lastfail; + ppd = dd->pport + pidx; + mtu = ib_mtu_enum_to_int(qib_ibmtu); + if (mtu == -1) { + mtu = QIB_DEFAULT_MTU; + qib_ibmtu = 0; /* don't leave invalid value */ + } + /* set max we can ever have for this driver load */ + ppd->init_ibmaxlen = min(mtu > 2048 ? + dd->piosize4k : dd->piosize2k, + dd->rcvegrbufsize + + (dd->rcvhdrentsize << 2)); + /* + * Have to initialize ibmaxlen, but this will normally + * change immediately in qib_set_mtu(). + */ + ppd->ibmaxlen = ppd->init_ibmaxlen; + qib_set_mtu(ppd, mtu); + + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags |= QIBL_IB_LINK_DISABLED; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + + lastfail = dd->f_bringup_serdes(ppd); + if (lastfail) { + qib_devinfo(dd->pcidev, + "Failed to bringup IB port %u\n", ppd->port); + lastfail = -ENETDOWN; + continue; + } + + /* let link come up, and enable IBC */ + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_IB_LINK_DISABLED; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + portok++; + } + + if (!portok) { + /* none of the ports initialized */ + if (!ret && lastfail) + ret = lastfail; + else if (!ret) + ret = -ENETDOWN; + /* but continue on, so we can debug cause */ + } + + enable_chip(dd); + + init_piobuf_state(dd); + +done: + if (!ret) { + /* chip is OK for user apps; mark it as initialized */ + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + /* + * Set status even if port serdes is not initialized + * so that diags will work. 
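The init_ibmaxlen computation in qib_init() above takes the smaller of the PIO buffer size the MTU requires and the receive-side eager limit. A userspace model with assumed sizes:

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
        unsigned piosize2k = 2048, piosize4k = 4096;        /* assumed */
        unsigned rcvegrbufsize = 4096, rcvhdrentsize = 32;  /* assumed */
        unsigned mtu = 4096;

        /* MTUs over 2048 need the 4K PIO buffers; the receive side
         * caps at eager buffer plus header */
        unsigned ibmaxlen = min_u(mtu > 2048 ? piosize4k : piosize2k,
                                  rcvegrbufsize + (rcvhdrentsize << 2));

        printf("init_ibmaxlen = %u\n", ibmaxlen);   /* 4096 here */
        return 0;
}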
+ */ + *ppd->statusp |= QIB_STATUS_CHIP_PRESENT | + QIB_STATUS_INITTED; + if (!ppd->link_speed_enabled) + continue; + if (dd->flags & QIB_HAS_SEND_DMA) + ret = qib_setup_sdma(ppd); + init_timer(&ppd->hol_timer); + ppd->hol_timer.function = qib_hol_event; + ppd->hol_timer.data = (unsigned long)ppd; + ppd->hol_state = QIB_HOL_UP; + } + + /* now we can enable all interrupts from the chip */ + dd->f_set_intr_state(dd, 1); + + /* + * Setup to verify we get an interrupt, and fallback + * to an alternate if necessary and possible. + */ + mod_timer(&dd->intrchk_timer, jiffies + HZ/2); + /* start stats retrieval timer */ + mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); + } + + /* if ret is non-zero, we probably should do some cleanup here... */ + return ret; +} + +/* + * These next two routines are placeholders in case we don't have per-arch + * code for controlling write combining. If explicit control of write + * combining is not available, performance will probably be awful. + */ + +int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd) +{ + return -EOPNOTSUPP; +} + +void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd) +{ +} + +static inline struct qib_devdata *__qib_lookup(int unit) +{ + return idr_find(&qib_unit_table, unit); +} + +struct qib_devdata *qib_lookup(int unit) +{ + struct qib_devdata *dd; + unsigned long flags; + + spin_lock_irqsave(&qib_devs_lock, flags); + dd = __qib_lookup(unit); + spin_unlock_irqrestore(&qib_devs_lock, flags); + + return dd; +} + +/* + * Stop the timers during unit shutdown, or after an error late + * in initialization. + */ +static void qib_stop_timers(struct qib_devdata *dd) +{ + struct qib_pportdata *ppd; + int pidx; + + if (dd->stats_timer.data) { + del_timer_sync(&dd->stats_timer); + dd->stats_timer.data = 0; + } + if (dd->intrchk_timer.data) { + del_timer_sync(&dd->intrchk_timer); + dd->intrchk_timer.data = 0; + } + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + if (ppd->hol_timer.data) + del_timer_sync(&ppd->hol_timer); + if (ppd->led_override_timer.data) { + del_timer_sync(&ppd->led_override_timer); + atomic_set(&ppd->led_override_timer_active, 0); + } + if (ppd->symerr_clear_timer.data) + del_timer_sync(&ppd->symerr_clear_timer); + } +} + +/** + * qib_shutdown_device - shut down a device + * @dd: the qlogic_ib device + * + * This is called to make the device quiet when we are about to + * unload the driver, and also when the device is administratively + * disabled. It does not free any data structures. + * Everything it does has to be setup again by qib_init(dd, 1) + */ +static void qib_shutdown_device(struct qib_devdata *dd) +{ + struct qib_pportdata *ppd; + unsigned pidx; + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + + spin_lock_irq(&ppd->lflags_lock); + ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT | + QIBL_LINKARMED | QIBL_LINKACTIVE | + QIBL_LINKV); + spin_unlock_irq(&ppd->lflags_lock); + *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY); + } + dd->flags &= ~QIB_INITTED; + + /* mask interrupts, but not errors */ + dd->f_set_intr_state(dd, 0); + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS | + QIB_RCVCTRL_CTXT_DIS | + QIB_RCVCTRL_INTRAVAIL_DIS | + QIB_RCVCTRL_PKEY_ENB, -1); + /* + * Gracefully stop all sends allowing any in progress to + * trickle out first. 
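The weak qib_enable_wc()/qib_disable_wc() stubs above rely on standard link-time override semantics: a strong definition in an arch-specific object silently replaces the weak one. A runnable single-file illustration; enable_wc is a stand-in name:

#include <stdio.h>
#include <errno.h>

/* generic fallback; an arch file providing a non-weak
 *     int enable_wc(void) { ... }
 * would win at link time */
int __attribute__((weak)) enable_wc(void)
{
        return -EOPNOTSUPP;
}

int main(void)
{
        printf("enable_wc() = %d\n", enable_wc());  /* -EOPNOTSUPP here */
        return 0;
}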
+ */ + dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR); + } + + /* + * Enough for anything that's going to trickle out to have actually + * done so. + */ + udelay(20); + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + dd->f_setextled(ppd, 0); /* make sure LEDs are off */ + + if (dd->flags & QIB_HAS_SEND_DMA) + qib_teardown_sdma(ppd); + + dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS | + QIB_SENDCTRL_SEND_DIS); + /* + * Clear SerdesEnable. + * We can't count on interrupts since we are stopping. + */ + dd->f_quiet_serdes(ppd); + } + + qib_update_eeprom_log(dd); +} + +/** + * qib_free_ctxtdata - free a context's allocated data + * @dd: the qlogic_ib device + * @rcd: the ctxtdata structure + * + * free up any allocated data for a context + * This should not touch anything that would affect a simultaneous + * re-allocation of context data, because it is called after qib_mutex + * is released (and can be called from reinit as well). + * It should never change any chip state, or global driver state. + */ +void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd) +{ + if (!rcd) + return; + + if (rcd->rcvhdrq) { + dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size, + rcd->rcvhdrq, rcd->rcvhdrq_phys); + rcd->rcvhdrq = NULL; + if (rcd->rcvhdrtail_kvaddr) { + dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, + rcd->rcvhdrtail_kvaddr, + rcd->rcvhdrqtailaddr_phys); + rcd->rcvhdrtail_kvaddr = NULL; + } + } + if (rcd->rcvegrbuf) { + unsigned e; + + for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { + void *base = rcd->rcvegrbuf[e]; + size_t size = rcd->rcvegrbuf_size; + + dma_free_coherent(&dd->pcidev->dev, size, + base, rcd->rcvegrbuf_phys[e]); + } + kfree(rcd->rcvegrbuf); + rcd->rcvegrbuf = NULL; + kfree(rcd->rcvegrbuf_phys); + rcd->rcvegrbuf_phys = NULL; + rcd->rcvegrbuf_chunks = 0; + } + + kfree(rcd->tid_pg_list); + vfree(rcd->user_event_mask); + vfree(rcd->subctxt_uregbase); + vfree(rcd->subctxt_rcvegrbuf); + vfree(rcd->subctxt_rcvhdr_base); + kfree(rcd); +} + +/* + * Perform a PIO buffer bandwidth write test, to verify proper system + * configuration. Even when all the setup calls work, occasionally + * BIOS or other issues can prevent write combining from working, or + * can cause other bandwidth problems to the chip. + * + * This test simply writes the same buffer over and over again, and + * measures close to the peak bandwidth to the chip (not testing + * data bandwidth to the wire). On chips that use an address-based + * trigger to send packets to the wire, this is easy. On chips that + * use a count to trigger, we want to make sure that the packet doesn't + * go out on the wire, or trigger flow control checks. + */ +static void qib_verify_pioperf(struct qib_devdata *dd) +{ + u32 pbnum, cnt, lcnt; + u32 __iomem *piobuf; + u32 *addr; + u64 msecs, emsecs; + + piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum); + if (!piobuf) { + qib_devinfo(dd->pcidev, + "No PIObufs for checking perf, skipping\n"); + return; + } + + /* + * Enough to give us a reasonable test, less than piobuf size, and + * likely multiple of store buffer length. 
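The pass/fail arithmetic this test applies is simple: each loop iteration copies 1024 bytes, so iterations per elapsed millisecond approximate MiB/sec, and lcnt < emsecs * 1024 flags anything under roughly 1 GiB/sec. A userspace model; the counts here are assumed, not measured:

#include <stdio.h>

int main(void)
{
        unsigned lcnt = 3500;   /* assumed 1KB copies completed */
        unsigned emsecs = 5;    /* assumed elapsed milliseconds */

        if (lcnt < emsecs * 1024U)  /* below 1024 KiB/ms, ~1 GiB/sec */
                printf("Performance problem: only %u MiB/sec\n",
                       lcnt / emsecs);
        else
                printf("ok: about %u MiB/sec\n", lcnt / emsecs);
        return 0;
}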
+ */ + cnt = 1024; + + addr = vmalloc(cnt); + if (!addr) { + qib_devinfo(dd->pcidev, + "Couldn't get memory for checking PIO perf," + " skipping\n"); + goto done; + } + + preempt_disable(); /* we want reasonably accurate elapsed time */ + msecs = 1 + jiffies_to_msecs(jiffies); + for (lcnt = 0; lcnt < 10000U; lcnt++) { + /* wait until we cross msec boundary */ + if (jiffies_to_msecs(jiffies) >= msecs) + break; + udelay(1); + } + + dd->f_set_armlaunch(dd, 0); + + /* + * length 0, no dwords actually sent + */ + writeq(0, piobuf); + qib_flush_wc(); + + /* + * This is only roughly accurate, since even with preempt we + * still take interrupts that could take a while. Running for + * >= 5 msec seems to get us "close enough" to accurate values. + */ + msecs = jiffies_to_msecs(jiffies); + for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) { + qib_pio_copy(piobuf + 64, addr, cnt >> 2); + emsecs = jiffies_to_msecs(jiffies) - msecs; + } + + /* 1 GiB/sec, slightly over IB SDR line rate */ + if (lcnt < (emsecs * 1024U)) + qib_dev_err(dd, + "Performance problem: bandwidth to PIO buffers is " + "only %u MiB/sec\n", + lcnt / (u32) emsecs); + + preempt_enable(); + + vfree(addr); + +done: + /* disarm piobuf, so it's available again */ + dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum)); + qib_sendbuf_done(dd, pbnum); + dd->f_set_armlaunch(dd, 1); +} + + +void qib_free_devdata(struct qib_devdata *dd) +{ + unsigned long flags; + + spin_lock_irqsave(&qib_devs_lock, flags); + idr_remove(&qib_unit_table, dd->unit); + list_del(&dd->list); + spin_unlock_irqrestore(&qib_devs_lock, flags); + + ib_dealloc_device(&dd->verbs_dev.ibdev); +} + +/* + * Allocate our primary per-unit data structure. Must be done via verbs + * allocator, because the verbs cleanup process both does cleanup and + * free of the data structure. + * "extra" is for chip-specific data. + * + * Use the idr mechanism to get a unit number for this unit. + */ +struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) +{ + unsigned long flags; + struct qib_devdata *dd; + int ret; + + if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { + dd = ERR_PTR(-ENOMEM); + goto bail; + } + + dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); + if (!dd) { + dd = ERR_PTR(-ENOMEM); + goto bail; + } + + spin_lock_irqsave(&qib_devs_lock, flags); + ret = idr_get_new(&qib_unit_table, dd, &dd->unit); + if (ret >= 0) + list_add(&dd->list, &qib_dev_list); + spin_unlock_irqrestore(&qib_devs_lock, flags); + + if (ret < 0) { + qib_early_err(&pdev->dev, + "Could not allocate unit ID: error %d\n", -ret); + ib_dealloc_device(&dd->verbs_dev.ibdev); + dd = ERR_PTR(ret); + goto bail; + } + + if (!qib_cpulist_count) { + u32 count = num_online_cpus(); + qib_cpulist = kzalloc(BITS_TO_LONGS(count) * + sizeof(long), GFP_KERNEL); + if (qib_cpulist) + qib_cpulist_count = count; + else + qib_early_err(&pdev->dev, "Could not alloc cpulist " + "info, cpu affinity might be wrong\n"); + } + +bail: + return dd; +} + +/* + * Called from freeze mode handlers, and from PCI error + * reporting code. Should be paranoid about state of + * system and data structures. 
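qib_alloc_devdata() above uses the classic two-step idr pattern of this kernel generation: reserve memory with idr_pre_get() outside the lock, then idr_get_new() under it. A hedged sketch with illustrative names (my_table, my_lock, my_obj):

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDR(my_table);
static DEFINE_SPINLOCK(my_lock);

static int assign_unit(void *my_obj, int *unit)
{
        int ret;

        if (!idr_pre_get(&my_table, GFP_KERNEL))    /* preload memory */
                return -ENOMEM;

        spin_lock(&my_lock);
        ret = idr_get_new(&my_table, my_obj, unit); /* 0 on success */
        spin_unlock(&my_lock);

        return ret;
}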
+ */ +void qib_disable_after_error(struct qib_devdata *dd) +{ + if (dd->flags & QIB_INITTED) { + u32 pidx; + + dd->flags &= ~QIB_INITTED; + if (dd->pport) + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + struct qib_pportdata *ppd; + + ppd = dd->pport + pidx; + if (dd->flags & QIB_PRESENT) { + qib_set_linkstate(ppd, + QIB_IB_LINKDOWN_DISABLE); + dd->f_setextled(ppd, 0); + } + *ppd->statusp &= ~QIB_STATUS_IB_READY; + } + } + + /* + * Mark as having had an error for driver, and also + * for /sys and status word mapped to user programs. + * This marks unit as not usable, until reset. + */ + if (dd->devstatusp) + *dd->devstatusp |= QIB_STATUS_HWERROR; +} + +static void __devexit qib_remove_one(struct pci_dev *); +static int __devinit qib_init_one(struct pci_dev *, + const struct pci_device_id *); + +#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: " +#define PFX QIB_DRV_NAME ": " + +static const struct pci_device_id qib_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, qib_pci_tbl); + +struct pci_driver qib_driver = { + .name = QIB_DRV_NAME, + .probe = qib_init_one, + .remove = __devexit_p(qib_remove_one), + .id_table = qib_pci_tbl, + .err_handler = &qib_pci_err_handler, +}; + +/* + * Do all the generic driver unit- and chip-independent memory + * allocation and initialization. + */ +static int __init qlogic_ib_init(void) +{ + int ret; + + ret = qib_dev_init(); + if (ret) + goto bail; + + /* + * We create our own workqueue mainly because we want to be + * able to flush it when devices are being removed. We can't + * use schedule_work()/flush_scheduled_work() because both + * unregister_netdev() and linkwatch_event take the rtnl lock, + * so flush_scheduled_work() can deadlock during device + * removal. + */ + qib_wq = create_workqueue("qib"); + if (!qib_wq) { + ret = -ENOMEM; + goto bail_dev; + } + + qib_cq_wq = create_workqueue("qib_cq"); + if (!qib_cq_wq) { + ret = -ENOMEM; + goto bail_wq; + } + + /* + * These must be called before the driver is registered with + * the PCI subsystem. + */ + idr_init(&qib_unit_table); + if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { + printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n"); + ret = -ENOMEM; + goto bail_cq_wq; + } + + ret = pci_register_driver(&qib_driver); + if (ret < 0) { + printk(KERN_ERR QIB_DRV_NAME + ": Unable to register driver: error %d\n", -ret); + goto bail_unit; + } + + /* not fatal if it doesn't work */ + if (qib_init_qibfs()) + printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n"); + goto bail; /* all OK */ + +bail_unit: + idr_destroy(&qib_unit_table); +bail_cq_wq: + destroy_workqueue(qib_cq_wq); +bail_wq: + destroy_workqueue(qib_wq); +bail_dev: + qib_dev_cleanup(); +bail: + return ret; +} + +module_init(qlogic_ib_init); + +/* + * Do the non-unit driver cleanup, memory free, etc. at unload. 
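qlogic_ib_init() above uses the staged goto-unwind idiom: each failure label releases exactly what was acquired before it, in reverse order. A runnable miniature with stand-in setup/teardown functions:

#include <stdio.h>

static int setup_a(void) { puts("a: up"); return 0; }
static void teardown_a(void) { puts("a: down"); }
static int setup_b(void) { puts("b: failed"); return -1; }

static int my_init(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                goto bail;
        ret = setup_b();
        if (ret)
                goto bail_a;
        return 0;       /* all OK */

bail_a:
        teardown_a();   /* undo only what succeeded */
bail:
        return ret;
}

int main(void)
{
        return my_init() ? 1 : 0;
}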
+ */
+static void __exit qlogic_ib_cleanup(void)
+{
+ int ret;
+
+ ret = qib_exit_qibfs();
+ if (ret)
+ printk(KERN_ERR QIB_DRV_NAME ": "
+ "Unable to cleanup counter filesystem: "
+ "error %d\n", -ret);
+
+ pci_unregister_driver(&qib_driver);
+
+ destroy_workqueue(qib_wq);
+ destroy_workqueue(qib_cq_wq);
+
+ qib_cpulist_count = 0;
+ kfree(qib_cpulist);
+
+ idr_destroy(&qib_unit_table);
+ qib_dev_cleanup();
+}
+
+module_exit(qlogic_ib_cleanup);
+
+/* this can only be called after a successful initialization */
+static void cleanup_device_data(struct qib_devdata *dd)
+{
+ int ctxt;
+ int pidx;
+ struct qib_ctxtdata **tmp;
+ unsigned long flags;
+
+ /* users can't do anything more with chip */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].statusp)
+ *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
+
+ if (!qib_wc_pat)
+ qib_disable_wc(dd);
+
+ if (dd->pioavailregs_dma) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *) dd->pioavailregs_dma,
+ dd->pioavailregs_phys);
+ dd->pioavailregs_dma = NULL;
+ }
+
+ if (dd->pageshadow) {
+ struct page **tmpp = dd->pageshadow;
+ dma_addr_t *tmpd = dd->physshadow;
+ int i, cnt = 0;
+
+ for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
+ int ctxt_tidbase = ctxt * dd->rcvtidcnt;
+ int maxtid = ctxt_tidbase + dd->rcvtidcnt;
+
+ for (i = ctxt_tidbase; i < maxtid; i++) {
+ if (!tmpp[i])
+ continue;
+ pci_unmap_page(dd->pcidev, tmpd[i],
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&tmpp[i], 1);
+ tmpp[i] = NULL;
+ cnt++;
+ }
+ }
+
+ tmpp = dd->pageshadow;
+ dd->pageshadow = NULL;
+ vfree(tmpp);
+ }
+
+ /*
+ * Free any resources still in use (usually just kernel contexts)
+ * at unload; we do this for ctxtcnt, because that's what we allocate.
+ * We acquire lock to be really paranoid that rcd isn't being
+ * accessed from some interrupt-related code (that should not happen,
+ * but best to be sure).
+ */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ tmp = dd->rcd;
+ dd->rcd = NULL;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
+ struct qib_ctxtdata *rcd = tmp[ctxt];
+
+ tmp[ctxt] = NULL; /* debugging paranoia */
+ qib_free_ctxtdata(dd, rcd);
+ }
+ kfree(tmp);
+ kfree(dd->boardname);
+}
+
+/*
+ * Clean up on unit shutdown, or error during unit load after
+ * successful initialization.
+ */
+static void qib_postinit_cleanup(struct qib_devdata *dd)
+{
+ /*
+ * Clean up chip-specific stuff.
+ * We check for NULL here, because it's outside
+ * the kregbase check, and we need to call it
+ * after the free_irq. Thus it's possible that
+ * the function pointers were never initialized.
+ */
+ if (dd->f_cleanup)
+ dd->f_cleanup(dd);
+
+ qib_pcie_ddcleanup(dd);
+
+ cleanup_device_data(dd);
+
+ qib_free_devdata(dd);
+}
+
+static int __devinit qib_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret, j, pidx, initfail;
+ struct qib_devdata *dd = NULL;
+
+ ret = qib_pcie_init(pdev, ent);
+ if (ret)
+ goto bail;
+
+ /*
+ * Do device-specific initialization, function table setup, dd
+ * allocation, etc.
+ */ + switch (ent->device) { + case PCI_DEVICE_ID_QLOGIC_IB_6120: +#ifdef CONFIG_PCI_MSI + dd = qib_init_iba6120_funcs(pdev, ent); +#else + qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " + "work if CONFIG_PCI_MSI is not enabled\n", + ent->device); +#endif + break; + + case PCI_DEVICE_ID_QLOGIC_IB_7220: + dd = qib_init_iba7220_funcs(pdev, ent); + break; + + case PCI_DEVICE_ID_QLOGIC_IB_7322: + dd = qib_init_iba7322_funcs(pdev, ent); + break; + + default: + qib_early_err(&pdev->dev, "Failing on unknown QLogic " + "deviceid 0x%x\n", ent->device); + ret = -ENODEV; + } + + if (IS_ERR(dd)) + ret = PTR_ERR(dd); + if (ret) + goto bail; /* error already printed */ + + /* do the generic initialization */ + initfail = qib_init(dd, 0); + + ret = qib_register_ib_device(dd); + + /* + * Now ready for use. this should be cleared whenever we + * detect a reset, or initiate one. If earlier failure, + * we still create devices, so diags, etc. can be used + * to determine cause of problem. + */ + if (!qib_mini_init && !initfail && !ret) + dd->flags |= QIB_INITTED; + + j = qib_device_create(dd); + if (j) + qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j); + j = qibfs_add(dd); + if (j) + qib_dev_err(dd, "Failed filesystem setup for counters: %d\n", + -j); + + if (qib_mini_init || initfail || ret) { + qib_stop_timers(dd); + for (pidx = 0; pidx < dd->num_pports; ++pidx) + dd->f_quiet_serdes(dd->pport + pidx); + if (initfail) + ret = initfail; + goto bail; + } + + if (!qib_wc_pat) { + ret = qib_enable_wc(dd); + if (ret) { + qib_dev_err(dd, "Write combining not enabled " + "(err %d): performance may be poor\n", + -ret); + ret = 0; + } + } + + qib_verify_pioperf(dd); +bail: + return ret; +} + +static void __devexit qib_remove_one(struct pci_dev *pdev) +{ + struct qib_devdata *dd = pci_get_drvdata(pdev); + int ret; + + /* unregister from IB core */ + qib_unregister_ib_device(dd); + + /* + * Disable the IB link, disable interrupts on the device, + * clear dma engines, etc. + */ + if (!qib_mini_init) + qib_shutdown_device(dd); + + qib_stop_timers(dd); + + /* wait until all of our (qsfp) schedule_work() calls complete */ + flush_scheduled_work(); + + ret = qibfs_remove(dd); + if (ret) + qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n", + -ret); + + qib_device_remove(dd); + + qib_postinit_cleanup(dd); +} + +/** + * qib_create_rcvhdrq - create a receive header queue + * @dd: the qlogic_ib device + * @rcd: the context data + * + * This must be contiguous memory (from an i/o perspective), and must be + * DMA'able (which means for some systems, it will go through an IOMMU, + * or be forced into a low address range). + */ +int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) +{ + unsigned amt; + + if (!rcd->rcvhdrq) { + dma_addr_t phys_hdrqtail; + gfp_t gfp_flags; + + amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize * + sizeof(u32), PAGE_SIZE); + gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? 
+ GFP_USER : GFP_KERNEL;
+ rcd->rcvhdrq = dma_alloc_coherent(
+ &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
+ gfp_flags | __GFP_COMP);
+
+ if (!rcd->rcvhdrq) {
+ qib_dev_err(dd, "attempt to allocate %d bytes "
+ "for ctxt %u rcvhdrq failed\n",
+ amt, rcd->ctxt);
+ goto bail;
+ }
+
+ if (rcd->ctxt >= dd->first_user_ctxt) {
+ rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
+ if (!rcd->user_event_mask)
+ goto bail_free_hdrq;
+ }
+
+ if (!(dd->flags & QIB_NODMA_RTAIL)) {
+ rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+ gfp_flags);
+ if (!rcd->rcvhdrtail_kvaddr)
+ goto bail_free;
+ rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
+ }
+
+ rcd->rcvhdrq_size = amt;
+ }
+
+ /* clear for security and sanity on each use */
+ memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
+ if (rcd->rcvhdrtail_kvaddr)
+ memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
+ return 0;
+
+bail_free:
+ qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
+ "rcvhdrqtailaddr failed\n", rcd->ctxt);
+ vfree(rcd->user_event_mask);
+ rcd->user_event_mask = NULL;
+bail_free_hdrq:
+ dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
+ rcd->rcvhdrq_phys);
+ rcd->rcvhdrq = NULL;
+bail:
+ return -ENOMEM;
+}
+
+/**
+ * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
+ * @rcd: the context we are setting up.
+ *
+ * Allocate the eager TID buffers and program them into the chip.
+ * They are no longer completely contiguous; we do multiple allocation
+ * calls. Otherwise we get the OOM code involved, by asking for too
+ * much per call, with disastrous results on some kernels.
+ */
+int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
+ size_t size;
+ gfp_t gfp_flags;
+
+ /*
+ * GFP_USER, but without GFP_FS, so buffer cache can be
+ * coalesced (we hope); otherwise, even at order 4,
+ * heavy filesystem activity makes these fail, and we can
+ * use compound pages.
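The rcvhdrq sizing in qib_create_rcvhdrq() above is entry count times entry size in 32-bit words, rounded up to whole pages. A worked userspace example; the counts are assumptions:

#include <stdio.h>

#define MY_ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long rcvhdrcnt = 1024, rcvhdrentsize = 32; /* assumed */
        unsigned long page = 4096;

        unsigned long amt = MY_ALIGN(rcvhdrcnt * rcvhdrentsize *
                                     sizeof(unsigned int), page);
        printf("rcvhdrq bytes: %lu (%lu pages)\n", amt, amt / page);
        return 0;
}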
+ */ + gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; + + egrcnt = rcd->rcvegrcnt; + egroff = rcd->rcvegr_tid_base; + egrsize = dd->rcvegrbufsize; + + chunk = rcd->rcvegrbuf_chunks; + egrperchunk = rcd->rcvegrbufs_perchunk; + size = rcd->rcvegrbuf_size; + if (!rcd->rcvegrbuf) { + rcd->rcvegrbuf = + kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]), + GFP_KERNEL); + if (!rcd->rcvegrbuf) + goto bail; + } + if (!rcd->rcvegrbuf_phys) { + rcd->rcvegrbuf_phys = + kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]), + GFP_KERNEL); + if (!rcd->rcvegrbuf_phys) + goto bail_rcvegrbuf; + } + for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { + if (rcd->rcvegrbuf[e]) + continue; + rcd->rcvegrbuf[e] = + dma_alloc_coherent(&dd->pcidev->dev, size, + &rcd->rcvegrbuf_phys[e], + gfp_flags); + if (!rcd->rcvegrbuf[e]) + goto bail_rcvegrbuf_phys; + } + + rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0]; + + for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) { + dma_addr_t pa = rcd->rcvegrbuf_phys[chunk]; + unsigned i; + + for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { + dd->f_put_tid(dd, e + egroff + + (u64 __iomem *) + ((char __iomem *) + dd->kregbase + + dd->rcvegrbase), + RCVHQ_RCV_TYPE_EAGER, pa); + pa += egrsize; + } + cond_resched(); /* don't hog the cpu */ + } + + return 0; + +bail_rcvegrbuf_phys: + for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++) + dma_free_coherent(&dd->pcidev->dev, size, + rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]); + kfree(rcd->rcvegrbuf_phys); + rcd->rcvegrbuf_phys = NULL; +bail_rcvegrbuf: + kfree(rcd->rcvegrbuf); + rcd->rcvegrbuf = NULL; +bail: + return -ENOMEM; +} + +int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) +{ + u64 __iomem *qib_kregbase = NULL; + void __iomem *qib_piobase = NULL; + u64 __iomem *qib_userbase = NULL; + u64 qib_kreglen; + u64 qib_pio2koffset = dd->piobufbase & 0xffffffff; + u64 qib_pio4koffset = dd->piobufbase >> 32; + u64 qib_pio2klen = dd->piobcnt2k * dd->palign; + u64 qib_pio4klen = dd->piobcnt4k * dd->align4k; + u64 qib_physaddr = dd->physaddr; + u64 qib_piolen; + u64 qib_userlen = 0; + + /* + * Free the old mapping because the kernel will try to reuse the + * old mapping and not create a new mapping with the + * write combining attribute. 
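init_chip_wc_pat() below splits the BAR so registers stay uncached while the PIO buffers get one write-combining window; the split point is the lower of the two PIO offsets. A userspace model of that arithmetic (the offsets and lengths are invented, and the piobcnt4k == 0 case is skipped):

#include <stdio.h>

int main(void)
{
        unsigned long long pio2koff = 0x100000, pio2klen = 0x20000;
        unsigned long long pio4koff = 0x140000, pio4klen = 0x40000;
        unsigned long long kreglen, piolen;

        if (pio2koff < pio4koff) {
                kreglen = pio2koff;
                piolen = pio4koff + pio4klen - kreglen;
        } else {
                kreglen = pio4koff;
                piolen = pio2koff + pio2klen - kreglen;
        }

        printf("kregs: [0, %#llx)  pio (WC): [%#llx, %#llx)\n",
               kreglen, kreglen, kreglen + piolen);
        return 0;
}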
+ */ + iounmap(dd->kregbase); + dd->kregbase = NULL; + + /* + * Assumes chip address space looks like: + * - kregs + sregs + cregs + uregs (in any order) + * - piobufs (2K and 4K bufs in either order) + * or: + * - kregs + sregs + cregs (in any order) + * - piobufs (2K and 4K bufs in either order) + * - uregs + */ + if (dd->piobcnt4k == 0) { + qib_kreglen = qib_pio2koffset; + qib_piolen = qib_pio2klen; + } else if (qib_pio2koffset < qib_pio4koffset) { + qib_kreglen = qib_pio2koffset; + qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen; + } else { + qib_kreglen = qib_pio4koffset; + qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen; + } + qib_piolen += vl15buflen; + /* Map just the configured ports (not all hw ports) */ + if (dd->uregbase > qib_kreglen) + qib_userlen = dd->ureg_align * dd->cfgctxts; + + /* Sanity checks passed, now create the new mappings */ + qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen); + if (!qib_kregbase) + goto bail; + + qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen); + if (!qib_piobase) + goto bail_kregbase; + + if (qib_userlen) { + qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase, + qib_userlen); + if (!qib_userbase) + goto bail_piobase; + } + + dd->kregbase = qib_kregbase; + dd->kregend = (u64 __iomem *) + ((char __iomem *) qib_kregbase + qib_kreglen); + dd->piobase = qib_piobase; + dd->pio2kbase = (void __iomem *) + (((char __iomem *) dd->piobase) + + qib_pio2koffset - qib_kreglen); + if (dd->piobcnt4k) + dd->pio4kbase = (void __iomem *) + (((char __iomem *) dd->piobase) + + qib_pio4koffset - qib_kreglen); + if (qib_userlen) + /* ureg will now be accessed relative to dd->userbase */ + dd->userbase = qib_userbase; + return 0; + +bail_piobase: + iounmap(qib_piobase); +bail_kregbase: + iounmap(qib_kregbase); +bail: + return -ENOMEM; +} diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c new file mode 100644 index 000000000000..54a40828a106 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_intr.c @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/pci.h> +#include <linux/delay.h> + +#include "qib.h" +#include "qib_common.h" + +/** + * qib_format_hwmsg - format a single hwerror message + * @msg message buffer + * @msgl length of message buffer + * @hwmsg message to add to message buffer + */ +static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg) +{ + strlcat(msg, "[", msgl); + strlcat(msg, hwmsg, msgl); + strlcat(msg, "]", msgl); +} + +/** + * qib_format_hwerrors - format hardware error messages for display + * @hwerrs hardware errors bit vector + * @hwerrmsgs hardware error descriptions + * @nhwerrmsgs number of hwerrmsgs + * @msg message buffer + * @msgl message buffer length + */ +void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs, + size_t nhwerrmsgs, char *msg, size_t msgl) +{ + int i; + + for (i = 0; i < nhwerrmsgs; i++) + if (hwerrs & hwerrmsgs[i].mask) + qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg); +} + +static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev) +{ + struct ib_event event; + struct qib_devdata *dd = ppd->dd; + + event.device = &dd->verbs_dev.ibdev; + event.element.port_num = ppd->port; + event.event = ev; + ib_dispatch_event(&event); +} + +void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs) +{ + struct qib_devdata *dd = ppd->dd; + unsigned long flags; + u32 lstate; + u8 ltstate; + enum ib_event_type ev = 0; + + lstate = dd->f_iblink_state(ibcs); /* linkstate */ + ltstate = dd->f_ibphys_portstate(ibcs); + + /* + * If linkstate transitions into INIT from any of the various down + * states, or if it transitions from any of the up (INIT or better) + * states into any of the down states (except link recovery), then + * call the chip-specific code to take appropriate actions. 
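qib_format_hwerrors() above is a bounded bitmask-to-text formatter. A userspace model using strncat as a stand-in for the kernel's strlcat; the mask/message pairs are invented for illustration:

#include <stdio.h>
#include <string.h>

struct hwerr_msg { unsigned long long mask; const char *msg; };

static void append(char *buf, size_t len, const char *s)
{
        /* bounded concatenation, roughly strlcat semantics */
        strncat(buf, "[", len - strlen(buf) - 1);
        strncat(buf, s, len - strlen(buf) - 1);
        strncat(buf, "]", len - strlen(buf) - 1);
}

int main(void)
{
        static const struct hwerr_msg tbl[] = {
                { 1ULL << 0, "example error A" },
                { 1ULL << 3, "example error B" },
        };
        unsigned long long hwerrs = (1ULL << 0) | (1ULL << 3);
        char msg[128] = "";
        size_t i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                if (hwerrs & tbl[i].mask)
                        append(msg, sizeof(msg), tbl[i].msg);
        printf("%s\n", msg);    /* [example error A][example error B] */
        return 0;
}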
+ */
+ if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+ ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ /* transitioned to UP */
+ if (dd->f_ib_updown(ppd, 1, ibcs))
+ goto skip_ibchange; /* chip-code handled */
+ } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
+ if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
+ ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
+ dd->f_ib_updown(ppd, 0, ibcs))
+ goto skip_ibchange; /* chip-code handled */
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
+ }
+
+ if (lstate != IB_PORT_DOWN) {
+ /* lstate is INIT, ARMED, or ACTIVE */
+ if (lstate != IB_PORT_ACTIVE) {
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ ev = IB_EVENT_PORT_ERR;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ if (lstate == IB_PORT_ARMED) {
+ ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKDOWN | QIBL_LINKACTIVE);
+ } else {
+ ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKARMED |
+ QIBL_LINKDOWN | QIBL_LINKACTIVE);
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ /* start a 75msec timer to clear symbol errors */
+ mod_timer(&ppd->symerr_clear_timer,
+ jiffies + msecs_to_jiffies(75));
+ } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ /* active, but not active deferred */
+ qib_hol_up(ppd); /* useful only for 6120 now */
+ *ppd->statusp |=
+ QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
+ qib_clear_symerror_on_linkup((unsigned long)ppd);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKDOWN | QIBL_LINKARMED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_sdma_process_event(ppd,
+ qib_sdma_event_e30_go_running);
+ ev = IB_EVENT_PORT_ACTIVE;
+ dd->f_setextled(ppd, 1);
+ }
+ } else { /* down */
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ ev = IB_EVENT_PORT_ERR;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKACTIVE | QIBL_LINKARMED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ }
+
+skip_ibchange:
+ ppd->lastibcstat = ibcs;
+ if (ev)
+ signal_ib_event(ppd, ev);
+ return;
+}
+
+void qib_clear_symerror_on_linkup(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ return;
+
+ ppd->ibport_data.z_symbol_error_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
+}
+
+/*
+ * Handle receive interrupts for user ctxts; this means a user
+ * process was waiting for a packet to arrive, and didn't want
+ * to poll.
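The lflags bookkeeping above keeps the four link states mutually exclusive and sets QIBL_LINKV to mark the state valid. A userspace model with illustrative bit values, not the driver's constants:

#include <stdio.h>

#define LINKDOWN   0x01
#define LINKINIT   0x02
#define LINKARMED  0x04
#define LINKACTIVE 0x08
#define LINKV      0x10
#define STATE_MASK (LINKDOWN | LINKINIT | LINKARMED | LINKACTIVE)

/* clear all state bits, set exactly one, mark the state valid */
static unsigned set_link_state(unsigned lflags, unsigned new_state)
{
        return (lflags & ~STATE_MASK) | new_state | LINKV;
}

int main(void)
{
        unsigned lflags = LINKDOWN | LINKV;

        lflags = set_link_state(lflags, LINKARMED);
        printf("lflags = %#x\n", lflags);   /* 0x14: ARMED + V */
        return 0;
}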
+ */ +void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr) +{ + struct qib_ctxtdata *rcd; + unsigned long flags; + int i; + + spin_lock_irqsave(&dd->uctxt_lock, flags); + for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) { + if (!(ctxtr & (1ULL << i))) + continue; + rcd = dd->rcd[i]; + if (!rcd || !rcd->cnt) + continue; + + if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) { + wake_up_interruptible(&rcd->wait); + dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS, + rcd->ctxt); + } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG, + &rcd->flag)) { + rcd->urgent++; + wake_up_interruptible(&rcd->wait); + } + } + spin_unlock_irqrestore(&dd->uctxt_lock, flags); +} + +void qib_bad_intrstatus(struct qib_devdata *dd) +{ + static int allbits; + + /* separate routine, for better optimization of qib_intr() */ + + /* + * We print the message and disable interrupts, in hope of + * having a better chance of debugging the problem. + */ + qib_dev_err(dd, "Read of chip interrupt status failed" + " disabling interrupts\n"); + if (allbits++) { + /* disable interrupt delivery, something is very wrong */ + if (allbits == 2) + dd->f_set_intr_state(dd, 0); + if (allbits == 3) { + qib_dev_err(dd, "2nd bad interrupt status, " + "unregistering interrupts\n"); + dd->flags |= QIB_BADINTR; + dd->flags &= ~QIB_INITTED; + dd->f_free_irq(dd); + } + } +} diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c new file mode 100644 index 000000000000..4b80eb153d57 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -0,0 +1,328 @@ +/* + * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "qib.h" + +/** + * qib_alloc_lkey - allocate an lkey + * @rkt: lkey table in which to allocate the lkey + * @mr: memory region that this lkey protects + * + * Returns 1 if successful, otherwise returns 0. 
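qib_handle_urcv() above walks the set bits of a 64-bit context mask and acts on each waiting context. A userspace model of the scan; the mask and bounds are assumed:

#include <stdio.h>

int main(void)
{
        unsigned long long ctxtr = (1ULL << 2) | (1ULL << 9); /* assumed */
        unsigned first_user_ctxt = 2, cfgctxts = 16;
        unsigned i;

        for (i = first_user_ctxt; i < cfgctxts; i++) {
                if (!(ctxtr & (1ULL << i)))
                        continue;
                printf("wake waiters on ctxt %u\n", i);
        }
        return 0;
}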
+ */ + +int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) +{ + unsigned long flags; + u32 r; + u32 n; + int ret; + + spin_lock_irqsave(&rkt->lock, flags); + + /* Find the next available LKEY */ + r = rkt->next; + n = r; + for (;;) { + if (rkt->table[r] == NULL) + break; + r = (r + 1) & (rkt->max - 1); + if (r == n) { + spin_unlock_irqrestore(&rkt->lock, flags); + ret = 0; + goto bail; + } + } + rkt->next = (r + 1) & (rkt->max - 1); + /* + * Make sure lkey is never zero which is reserved to indicate an + * unrestricted LKEY. + */ + rkt->gen++; + mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | + ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen) + << 8); + if (mr->lkey == 0) { + mr->lkey |= 1 << 8; + rkt->gen++; + } + rkt->table[r] = mr; + spin_unlock_irqrestore(&rkt->lock, flags); + + ret = 1; + +bail: + return ret; +} + +/** + * qib_free_lkey - free an lkey + * @rkt: table from which to free the lkey + * @lkey: lkey id to free + */ +int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr) +{ + unsigned long flags; + u32 lkey = mr->lkey; + u32 r; + int ret; + + spin_lock_irqsave(&dev->lk_table.lock, flags); + if (lkey == 0) { + if (dev->dma_mr && dev->dma_mr == mr) { + ret = atomic_read(&dev->dma_mr->refcount); + if (!ret) + dev->dma_mr = NULL; + } else + ret = 0; + } else { + r = lkey >> (32 - ib_qib_lkey_table_size); + ret = atomic_read(&dev->lk_table.table[r]->refcount); + if (!ret) + dev->lk_table.table[r] = NULL; + } + spin_unlock_irqrestore(&dev->lk_table.lock, flags); + + if (ret) + ret = -EBUSY; + return ret; +} + +/** + * qib_lkey_ok - check IB SGE for validity and initialize + * @rkt: table containing lkey to check SGE against + * @isge: outgoing internal SGE + * @sge: SGE to check + * @acc: access flags + * + * Return 1 if valid and successful, otherwise returns 0. + * + * Check the IB SGE for validity and initialize our internal version + * of it. + */ +int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, + struct qib_sge *isge, struct ib_sge *sge, int acc) +{ + struct qib_mregion *mr; + unsigned n, m; + size_t off; + int ret = 0; + unsigned long flags; + + /* + * We use LKEY == zero for kernel virtual addresses + * (see qib_get_dma_mr and qib_dma.c). 
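The lkey encoding in qib_alloc_lkey() above packs the table index into the top bits and a truncated generation count into bits 8 and up, keeping 0 reserved for the unrestricted DMA key. A worked example assuming a 16-bit table size:

#include <stdio.h>

#define LKEY_TABLE_BITS 16  /* assumed stand-in for ib_qib_lkey_table_size */

static unsigned make_lkey(unsigned r, unsigned gen)
{
        unsigned lkey = (r << (32 - LKEY_TABLE_BITS)) |
                ((((1u << (24 - LKEY_TABLE_BITS)) - 1) & gen) << 8);

        if (lkey == 0)          /* 0 is reserved: force a nonzero gen bit */
                lkey |= 1u << 8;
        return lkey;
}

int main(void)
{
        printf("lkey(r=5, gen=3) = %#x\n", make_lkey(5, 3));  /* 0x50300 */
        printf("lkey(r=0, gen=0) = %#x\n", make_lkey(0, 0));  /* 0x100 */
        return 0;
}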
+ */
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (sge->lkey == 0) {
+ struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ if (!dev->dma_mr)
+ goto bail;
+ atomic_inc(&dev->dma_mr->refcount);
+ isge->mr = dev->dma_mr;
+ isge->vaddr = (void *) sge->addr;
+ isge->length = sge->length;
+ isge->sge_length = sge->length;
+ isge->m = 0;
+ isge->n = 0;
+ goto ok;
+ }
+ mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
+ mr->pd != &pd->ibpd))
+ goto bail;
+
+ off = sge->addr - mr->user_base;
+ if (unlikely(sge->addr < mr->user_base ||
+ off + sge->length > mr->length ||
+ (mr->access_flags & acc) != acc))
+ goto bail;
+
+ off += mr->offset;
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ atomic_inc(&mr->refcount);
+ isge->mr = mr;
+ isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ isge->length = mr->map[m]->segs[n].length - off;
+ isge->sge_length = sge->length;
+ isge->m = m;
+ isge->n = n;
+ok:
+ ret = 1;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
+
+/**
+ * qib_rkey_ok - check the IB virtual address, length, and RKEY
+ * @qp: the queue pair of the request
+ * @sge: SGE to check and initialize
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Return 1 if successful, otherwise 0.
+ */
+int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc)
+{
+ struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct qib_mregion *mr;
+ unsigned n, m;
+ size_t off;
+ int ret = 0;
+ unsigned long flags;
+
+ /*
+ * We use RKEY == zero for kernel virtual addresses
+ * (see qib_get_dma_mr and qib_dma.c).
+ */
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (rkey == 0) {
+ struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+ struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ if (!dev->dma_mr)
+ goto bail;
+ atomic_inc(&dev->dma_mr->refcount);
+ sge->mr = dev->dma_mr;
+ sge->vaddr = (void *) vaddr;
+ sge->length = len;
+ sge->sge_length = len;
+ sge->m = 0;
+ sge->n = 0;
+ goto ok;
+ }
+
+ mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ off = vaddr - mr->iova;
+ if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+ (mr->access_flags & acc) == 0))
+ goto bail;
+
+ off += mr->offset;
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ atomic_inc(&mr->refcount);
+ sge->mr = mr;
+ sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ sge->length = mr->map[m]->segs[n].length - off;
+ sge->sge_length = len;
+ sge->m = m;
+ sge->n = n;
+ok:
+ ret = 1;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
+
+/*
+ * Initialize the memory region specified by the work request.
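Both qib_lkey_ok() and qib_rkey_ok() above locate an offset with the same (m, n) segment walk. A simplified userspace model where every segment is one page; the driver allows per-segment lengths to vary:

#include <stdio.h>

#define SEGSZ 4     /* segments per map block, stands in for QIB_SEGSZ */

int main(void)
{
        unsigned seg_len = 4096;            /* assumed: all segments equal */
        unsigned long off = 3 * 4096 + 100; /* byte offset into the region */
        unsigned m = 0, n = 0;

        while (off >= seg_len) {
                off -= seg_len;
                if (++n == SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        printf("map block %u, segment %u, offset %lu\n", m, n, off);
        return 0;
}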
+ */ +int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) +{ + struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; + struct qib_pd *pd = to_ipd(qp->ibqp.pd); + struct qib_mregion *mr; + u32 rkey = wr->wr.fast_reg.rkey; + unsigned i, n, m; + int ret = -EINVAL; + unsigned long flags; + u64 *page_list; + size_t ps; + + spin_lock_irqsave(&rkt->lock, flags); + if (pd->user || rkey == 0) + goto bail; + + mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]; + if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) + goto bail; + + if (wr->wr.fast_reg.page_list_len > mr->max_segs) + goto bail; + + ps = 1UL << wr->wr.fast_reg.page_shift; + if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len) + goto bail; + + mr->user_base = wr->wr.fast_reg.iova_start; + mr->iova = wr->wr.fast_reg.iova_start; + mr->lkey = rkey; + mr->length = wr->wr.fast_reg.length; + mr->access_flags = wr->wr.fast_reg.access_flags; + page_list = wr->wr.fast_reg.page_list->page_list; + m = 0; + n = 0; + for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { + mr->map[m]->segs[n].vaddr = (void *) page_list[i]; + mr->map[m]->segs[n].length = ps; + if (++n == QIB_SEGSZ) { + m++; + n = 0; + } + } + + ret = 0; +bail: + spin_unlock_irqrestore(&rkt->lock, flags); + return ret; +} diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c new file mode 100644 index 000000000000..94b0d1f3a8f0 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -0,0 +1,2173 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <rdma/ib_smi.h> + +#include "qib.h" +#include "qib_mad.h" + +static int reply(struct ib_smp *smp) +{ + /* + * The verbs framework will handle the directed/LID route + * packet changes. 
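The o14-2 rate limit in qib_send_trap() above spaces repeat traps by 4.096 usec times 2^subnet_timeout. A worked example with an assumed SM-assigned timeout value:

#include <stdio.h>

int main(void)
{
        unsigned subnet_timeout = 18;   /* assumed SM-assigned value */

        /* 4096 ns * 2^t, divided by 1000 to get microseconds */
        unsigned long usec = (4096UL * (1UL << subnet_timeout)) / 1000;

        printf("min trap interval: %lu usec (~%lu ms)\n", usec, usec / 1000);
        return 0;
}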
+ */ + smp->method = IB_MGMT_METHOD_GET_RESP; + if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + smp->status |= IB_SMP_DIRECTION; + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) +{ + struct ib_mad_send_buf *send_buf; + struct ib_mad_agent *agent; + struct ib_smp *smp; + int ret; + unsigned long flags; + unsigned long timeout; + + agent = ibp->send_agent; + if (!agent) + return; + + /* o14-3.2.1 */ + if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE)) + return; + + /* o14-2 */ + if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout)) + return; + + send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, + IB_MGMT_MAD_DATA, GFP_ATOMIC); + if (IS_ERR(send_buf)) + return; + + smp = send_buf->mad; + smp->base_version = IB_MGMT_BASE_VERSION; + smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + smp->class_version = 1; + smp->method = IB_MGMT_METHOD_TRAP; + ibp->tid++; + smp->tid = cpu_to_be64(ibp->tid); + smp->attr_id = IB_SMP_ATTR_NOTICE; + /* o14-1: smp->mkey = 0; */ + memcpy(smp->data, data, len); + + spin_lock_irqsave(&ibp->lock, flags); + if (!ibp->sm_ah) { + if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { + struct ib_ah *ah; + struct ib_ah_attr attr; + + memset(&attr, 0, sizeof attr); + attr.dlid = ibp->sm_lid; + attr.port_num = ppd_from_ibp(ibp)->port; + ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); + if (IS_ERR(ah)) + ret = -EINVAL; + else { + send_buf->ah = ah; + ibp->sm_ah = to_iah(ah); + ret = 0; + } + } else + ret = -EINVAL; + } else { + send_buf->ah = &ibp->sm_ah->ibah; + ret = 0; + } + spin_unlock_irqrestore(&ibp->lock, flags); + + if (!ret) + ret = ib_post_send_mad(send_buf, NULL); + if (!ret) { + /* 4.096 usec. */ + timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000; + ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout); + } else { + ib_free_send_mad(send_buf); + ibp->trap_timeout = 0; + } +} + +/* + * Send a bad [PQ]_Key trap (ch. 14.3.8). + */ +void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, + u32 qp1, u32 qp2, __be16 lid1, __be16 lid2) +{ + struct ib_mad_notice_attr data; + + if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) + ibp->pkey_violations++; + else + ibp->qkey_violations++; + ibp->n_pkt_drops++; + + /* Send violation trap */ + data.generic_type = IB_NOTICE_TYPE_SECURITY; + data.prod_type_msb = 0; + data.prod_type_lsb = IB_NOTICE_PROD_CA; + data.trap_num = trap_num; + data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); + data.toggle_count = 0; + memset(&data.details, 0, sizeof data.details); + data.details.ntc_257_258.lid1 = lid1; + data.details.ntc_257_258.lid2 = lid2; + data.details.ntc_257_258.key = cpu_to_be32(key); + data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); + data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); + + qib_send_trap(ibp, &data, sizeof data); +} + +/* + * Send a bad M_Key trap (ch. 14.3.9). 
+ */ +static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) +{ + struct ib_mad_notice_attr data; + + /* Send violation trap */ + data.generic_type = IB_NOTICE_TYPE_SECURITY; + data.prod_type_msb = 0; + data.prod_type_lsb = IB_NOTICE_PROD_CA; + data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; + data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); + data.toggle_count = 0; + memset(&data.details, 0, sizeof data.details); + data.details.ntc_256.lid = data.issuer_lid; + data.details.ntc_256.method = smp->method; + data.details.ntc_256.attr_id = smp->attr_id; + data.details.ntc_256.attr_mod = smp->attr_mod; + data.details.ntc_256.mkey = smp->mkey; + if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + u8 hop_cnt; + + data.details.ntc_256.dr_slid = smp->dr_slid; + data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE; + hop_cnt = smp->hop_cnt; + if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) { + data.details.ntc_256.dr_trunc_hop |= + IB_NOTICE_TRAP_DR_TRUNC; + hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path); + } + data.details.ntc_256.dr_trunc_hop |= hop_cnt; + memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path, + hop_cnt); + } + + qib_send_trap(ibp, &data, sizeof data); +} + +/* + * Send a Port Capability Mask Changed trap (ch. 14.3.11). + */ +void qib_cap_mask_chg(struct qib_ibport *ibp) +{ + struct ib_mad_notice_attr data; + + data.generic_type = IB_NOTICE_TYPE_INFO; + data.prod_type_msb = 0; + data.prod_type_lsb = IB_NOTICE_PROD_CA; + data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; + data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); + data.toggle_count = 0; + memset(&data.details, 0, sizeof data.details); + data.details.ntc_144.lid = data.issuer_lid; + data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); + + qib_send_trap(ibp, &data, sizeof data); +} + +/* + * Send a System Image GUID Changed trap (ch. 14.3.12). + */ +void qib_sys_guid_chg(struct qib_ibport *ibp) +{ + struct ib_mad_notice_attr data; + + data.generic_type = IB_NOTICE_TYPE_INFO; + data.prod_type_msb = 0; + data.prod_type_lsb = IB_NOTICE_PROD_CA; + data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; + data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); + data.toggle_count = 0; + memset(&data.details, 0, sizeof data.details); + data.details.ntc_145.lid = data.issuer_lid; + data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; + + qib_send_trap(ibp, &data, sizeof data); +} + +/* + * Send a Node Description Changed trap (ch. 14.3.13). 
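+ * There is no dedicated trap number for this event; it is sent + * as trap 144 with local_changes set and + * IB_NOTICE_TRAP_NODE_DESC_CHG in change_flags.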
+ */ +void qib_node_desc_chg(struct qib_ibport *ibp) +{ + struct ib_mad_notice_attr data; + + data.generic_type = IB_NOTICE_TYPE_INFO; + data.prod_type_msb = 0; + data.prod_type_lsb = IB_NOTICE_PROD_CA; + data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; + data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); + data.toggle_count = 0; + memset(&data.details, 0, sizeof data.details); + data.details.ntc_144.lid = data.issuer_lid; + data.details.ntc_144.local_changes = 1; + data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; + + qib_send_trap(ibp, &data, sizeof data); +} + +static int subn_get_nodedescription(struct ib_smp *smp, + struct ib_device *ibdev) +{ + if (smp->attr_mod) + smp->status |= IB_SMP_INVALID_FIELD; + + memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); + + return reply(smp); +} + +static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + struct ib_node_info *nip = (struct ib_node_info *)&smp->data; + struct qib_devdata *dd = dd_from_ibdev(ibdev); + u32 vendor, majrev, minrev; + unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ + + /* GUID 0 is illegal */ + if (smp->attr_mod || pidx >= dd->num_pports || + dd->pport[pidx].guid == 0) + smp->status |= IB_SMP_INVALID_FIELD; + else + nip->port_guid = dd->pport[pidx].guid; + + nip->base_version = 1; + nip->class_version = 1; + nip->node_type = 1; /* channel adapter */ + nip->num_ports = ibdev->phys_port_cnt; + /* This is already in network order */ + nip->sys_guid = ib_qib_sys_image_guid; + nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */ + nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd)); + nip->device_id = cpu_to_be16(dd->deviceid); + majrev = dd->majrev; + minrev = dd->minrev; + nip->revision = cpu_to_be32((majrev << 16) | minrev); + nip->local_port_num = port; + vendor = dd->vendorid; + nip->vendor_id[0] = QIB_SRC_OUI_1; + nip->vendor_id[1] = QIB_SRC_OUI_2; + nip->vendor_id[2] = QIB_SRC_OUI_3; + + return reply(smp); +} + +static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + struct qib_devdata *dd = dd_from_ibdev(ibdev); + u32 startgx = 8 * be32_to_cpu(smp->attr_mod); + __be64 *p = (__be64 *) smp->data; + unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ + + /* 32 blocks of 8 64-bit GUIDs per block */ + + memset(smp->data, 0, sizeof(smp->data)); + + if (startgx == 0 && pidx < dd->num_pports) { + struct qib_pportdata *ppd = dd->pport + pidx; + struct qib_ibport *ibp = &ppd->ibport_data; + __be64 g = ppd->guid; + unsigned i; + + /* GUID 0 is illegal */ + if (g == 0) + smp->status |= IB_SMP_INVALID_FIELD; + else { + /* The first is a copy of the read-only HW GUID. */ + p[0] = g; + for (i = 1; i < QIB_GUIDS_PER_PORT; i++) + p[i] = ibp->guids[i - 1]; + } + } else + smp->status |= IB_SMP_INVALID_FIELD; + + return reply(smp); +} + +static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w) +{ + (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w); +} + +static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s) +{ + (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s); +} + +static int get_overrunthreshold(struct qib_pportdata *ppd) +{ + return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH); +} + +/** + * set_overrunthreshold - set the overrun threshold + * @ppd: the physical port data + * @n: the new threshold + * + * Note that this will only take effect when the link state changes. 
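+ * The current value is reported back in the low nibble of + * PortInfo:LocalPhyErrors/OverrunErrors, so only values 0..15 + * are meaningful.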
+ */ +static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n) +{ + (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH, + (u32)n); + return 0; +} + +static int get_phyerrthreshold(struct qib_pportdata *ppd) +{ + return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH); +} + +/** + * set_phyerrthreshold - set the physical error threshold + * @ppd: the physical port data + * @n: the new threshold + * + * Note that this will only take effect when the link state changes. + */ +static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n) +{ + (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH, + (u32)n); + return 0; +} + +/** + * get_linkdowndefaultstate - get the default linkdown state + * @ppd: the physical port data + * + * Returns zero if the default is POLL, 1 if the default is SLEEP. + */ +static int get_linkdowndefaultstate(struct qib_pportdata *ppd) +{ + return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) == + IB_LINKINITCMD_SLEEP; +} + +static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) +{ + int ret = 0; + + /* Is the mkey in the process of expiring? */ + if (ibp->mkey_lease_timeout && + time_after_eq(jiffies, ibp->mkey_lease_timeout)) { + /* Clear timeout and mkey protection field. */ + ibp->mkey_lease_timeout = 0; + ibp->mkeyprot = 0; + } + + /* M_Key checking depends on Portinfo:M_Key_protect_bits */ + if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 && + ibp->mkey != smp->mkey && + (smp->method == IB_MGMT_METHOD_SET || + smp->method == IB_MGMT_METHOD_TRAP_REPRESS || + (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) { + if (ibp->mkey_violations != 0xFFFF) + ++ibp->mkey_violations; + if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) + ibp->mkey_lease_timeout = jiffies + + ibp->mkey_lease_period * HZ; + /* Generate a trap notice. */ + qib_bad_mkey(ibp, smp); + ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + } else if (ibp->mkey_lease_timeout) + ibp->mkey_lease_timeout = 0; + + return ret; +} + +static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + struct qib_devdata *dd; + struct qib_pportdata *ppd; + struct qib_ibport *ibp; + struct ib_port_info *pip = (struct ib_port_info *)smp->data; + u16 lid; + u8 mtu; + int ret; + u32 state; + u32 port_num = be32_to_cpu(smp->attr_mod); + + if (port_num == 0) + port_num = port; + else { + if (port_num > ibdev->phys_port_cnt) { + smp->status |= IB_SMP_INVALID_FIELD; + ret = reply(smp); + goto bail; + } + if (port_num != port) { + ibp = to_iport(ibdev, port_num); + ret = check_mkey(ibp, smp, 0); + if (ret) + goto bail; + } + } + + dd = dd_from_ibdev(ibdev); + /* IB numbers ports from 1, hdw from 0 */ + ppd = dd->pport + (port_num - 1); + ibp = &ppd->ibport_data; + + /* Clear all fields. Only set the non-zero fields. */ + memset(smp->data, 0, sizeof(smp->data)); + + /* Only return the mkey if the protection field allows it. */ + if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey || + ibp->mkeyprot == 0) + pip->mkey = ibp->mkey; + pip->gid_prefix = ibp->gid_prefix; + lid = ppd->lid; + pip->lid = lid ? 
cpu_to_be16(lid) : IB_LID_PERMISSIVE; + pip->sm_lid = cpu_to_be16(ibp->sm_lid); + pip->cap_mask = cpu_to_be32(ibp->port_cap_flags); + /* pip->diag_code; */ + pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period); + pip->local_port_num = port; + pip->link_width_enabled = ppd->link_width_enabled; + pip->link_width_supported = ppd->link_width_supported; + pip->link_width_active = ppd->link_width_active; + state = dd->f_iblink_state(ppd->lastibcstat); + pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state; + + pip->portphysstate_linkdown = + (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) | + (get_linkdowndefaultstate(ppd) ? 1 : 2); + pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc; + pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) | + ppd->link_speed_enabled; + switch (ppd->ibmtu) { + default: /* something is wrong; fall through */ + case 4096: + mtu = IB_MTU_4096; + break; + case 2048: + mtu = IB_MTU_2048; + break; + case 1024: + mtu = IB_MTU_1024; + break; + case 512: + mtu = IB_MTU_512; + break; + case 256: + mtu = IB_MTU_256; + break; + } + pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl; + pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */ + pip->vl_high_limit = ibp->vl_high_limit; + pip->vl_arb_high_cap = + dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP); + pip->vl_arb_low_cap = + dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP); + /* InitTypeReply = 0 */ + pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; + /* HCAs ignore VLStallCount and HOQLife */ + /* pip->vlstallcnt_hoqlife; */ + pip->operationalvl_pei_peo_fpi_fpo = + dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4; + pip->mkey_violations = cpu_to_be16(ibp->mkey_violations); + /* P_KeyViolations are counted by hardware. */ + pip->pkey_violations = cpu_to_be16(ibp->pkey_violations); + pip->qkey_violations = cpu_to_be16(ibp->qkey_violations); + /* Only the hardware GUID is supported for now */ + pip->guid_cap = QIB_GUIDS_PER_PORT; + pip->clientrereg_resv_subnetto = ibp->subnet_timeout; + /* 32.768 usec. response time (guessing) */ + pip->resv_resptimevalue = 3; + pip->localphyerrors_overrunerrors = + (get_phyerrthreshold(ppd) << 4) | + get_overrunthreshold(ppd); + /* pip->max_credit_hint; */ + if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { + u32 v; + + v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY); + pip->link_roundtrip_latency[0] = v >> 16; + pip->link_roundtrip_latency[1] = v >> 8; + pip->link_roundtrip_latency[2] = v; + } + + ret = reply(smp); + +bail: + return ret; +} + +/** + * get_pkeys - return the PKEY table + * @dd: the qlogic_ib device + * @port: the IB port number + * @pkeys: the pkey table is placed here + */ +static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) +{ + struct qib_pportdata *ppd = dd->pport + port - 1; + /* + * always a kernel context, no locking needed. + * If we get here with ppd setup, no need to check + * that rcd is valid.
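+ * (IB port one/two always maps to receive context zero/one; + * set_pkeys() below relies on the same invariant.)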
+ */ + struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx]; + + memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys)); + + return 0; +} + +static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); + u16 *p = (u16 *) smp->data; + __be16 *q = (__be16 *) smp->data; + + /* 64 blocks of 32 16-bit P_Key entries */ + + memset(smp->data, 0, sizeof(smp->data)); + if (startpx == 0) { + struct qib_devdata *dd = dd_from_ibdev(ibdev); + unsigned i, n = qib_get_npkeys(dd); + + get_pkeys(dd, port, p); + + for (i = 0; i < n; i++) + q[i] = cpu_to_be16(p[i]); + } else + smp->status |= IB_SMP_INVALID_FIELD; + + return reply(smp); +} + +static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + struct qib_devdata *dd = dd_from_ibdev(ibdev); + u32 startgx = 8 * be32_to_cpu(smp->attr_mod); + __be64 *p = (__be64 *) smp->data; + unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ + + /* 32 blocks of 8 64-bit GUIDs per block */ + + if (startgx == 0 && pidx < dd->num_pports) { + struct qib_pportdata *ppd = dd->pport + pidx; + struct qib_ibport *ibp = &ppd->ibport_data; + unsigned i; + + /* The first entry is read-only. */ + for (i = 1; i < QIB_GUIDS_PER_PORT; i++) + ibp->guids[i - 1] = p[i]; + } else + smp->status |= IB_SMP_INVALID_FIELD; + + /* The only GUID we support is the first read-only entry. */ + return subn_get_guidinfo(smp, ibdev, port); +} + +/** + * subn_set_portinfo - set port information + * @smp: the incoming SM packet + * @ibdev: the infiniband device + * @port: the port on the device + * + * Set Portinfo (see ch. 14.2.5.6). + */ +static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + struct ib_port_info *pip = (struct ib_port_info *)smp->data; + struct ib_event event; + struct qib_devdata *dd; + struct qib_pportdata *ppd; + struct qib_ibport *ibp; + char clientrereg = 0; + unsigned long flags; + u16 lid, smlid; + u8 lwe; + u8 lse; + u8 state; + u8 vls; + u8 msl; + u16 lstate; + int ret, ore, mtu; + u32 port_num = be32_to_cpu(smp->attr_mod); + + if (port_num == 0) + port_num = port; + else { + if (port_num > ibdev->phys_port_cnt) + goto err; + /* Port attributes can only be set on the receiving port */ + if (port_num != port) + goto get_only; + } + + dd = dd_from_ibdev(ibdev); + /* IB numbers ports from 1, hdw from 0 */ + ppd = dd->pport + (port_num - 1); + ibp = &ppd->ibport_data; + event.device = ibdev; + event.element.port_num = port; + + ibp->mkey = pip->mkey; + ibp->gid_prefix = pip->gid_prefix; + ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); + + lid = be16_to_cpu(pip->lid); + /* Must be a valid unicast LID address. */ + if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) + goto err; + if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { + if (ppd->lid != lid) + qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); + if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) + qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT); + qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7); + event.event = IB_EVENT_LID_CHANGE; + ib_dispatch_event(&event); + } + + smlid = be16_to_cpu(pip->sm_lid); + msl = pip->neighbormtu_mastersmsl & 0xF; + /* Must be a valid unicast LID address. 
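+ * Zero and anything at or above QIB_MULTICAST_LID_BASE (the + * start of the multicast range) are rejected.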
*/ + if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) + goto err; + if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { + spin_lock_irqsave(&ibp->lock, flags); + if (ibp->sm_ah) { + if (smlid != ibp->sm_lid) + ibp->sm_ah->attr.dlid = smlid; + if (msl != ibp->sm_sl) + ibp->sm_ah->attr.sl = msl; + } + spin_unlock_irqrestore(&ibp->lock, flags); + if (smlid != ibp->sm_lid) + ibp->sm_lid = smlid; + if (msl != ibp->sm_sl) + ibp->sm_sl = msl; + event.event = IB_EVENT_SM_CHANGE; + ib_dispatch_event(&event); + } + + /* Allow 1x or 4x to be set (see 14.2.6.6). */ + lwe = pip->link_width_enabled; + if (lwe) { + if (lwe == 0xFF) + lwe = ppd->link_width_supported; + else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) + goto err; + set_link_width_enabled(ppd, lwe); + } + + lse = pip->linkspeedactive_enabled & 0xF; + if (lse) { + /* + * The IB 1.2 spec. only allows link speed values + * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific + * speeds. + */ + if (lse == 15) + lse = ppd->link_speed_supported; + else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) + goto err; + set_link_speed_enabled(ppd, lse); + } + + /* Set link down default state. */ + switch (pip->portphysstate_linkdown & 0xF) { + case 0: /* NOP */ + break; + case 1: /* SLEEP */ + (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, + IB_LINKINITCMD_SLEEP); + break; + case 2: /* POLL */ + (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, + IB_LINKINITCMD_POLL); + break; + default: + goto err; + } + + ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; + ibp->vl_high_limit = pip->vl_high_limit; + (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT, + ibp->vl_high_limit); + + mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); + if (mtu == -1) + goto err; + qib_set_mtu(ppd, mtu); + + /* Set operational VLs */ + vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; + if (vls) { + if (vls > ppd->vls_supported) + goto err; + (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); + } + + if (pip->mkey_violations == 0) + ibp->mkey_violations = 0; + + if (pip->pkey_violations == 0) + ibp->pkey_violations = 0; + + if (pip->qkey_violations == 0) + ibp->qkey_violations = 0; + + ore = pip->localphyerrors_overrunerrors; + if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) + goto err; + + if (set_overrunthreshold(ppd, (ore & 0xF))) + goto err; + + ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; + + if (pip->clientrereg_resv_subnetto & 0x80) { + clientrereg = 1; + event.event = IB_EVENT_CLIENT_REREGISTER; + ib_dispatch_event(&event); + } + + /* + * Do the port state change now that the other link parameters + * have been set. + * Changing the port physical state only makes sense if the link + * is down or is being set to down. + */ + state = pip->linkspeed_portstate & 0xF; + lstate = (pip->portphysstate_linkdown >> 4) & 0xF; + if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) + goto err; + + /* + * Only state changes of DOWN, ARM, and ACTIVE are valid + * and must be in the correct state to take effect (see 7.2.6). 
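+ * For IB_PORT_DOWN the physical state nibble refines the + * request: 0 maps to QIB_IB_LINKDOWN_ONLY (keep the linkdown + * default), 1, 2 and 3 to the SLEEP, POLL and DISABLE variants.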
+ */ + switch (state) { + case IB_PORT_NOP: + if (lstate == 0) + break; + /* FALLTHROUGH */ + case IB_PORT_DOWN: + if (lstate == 0) + lstate = QIB_IB_LINKDOWN_ONLY; + else if (lstate == 1) + lstate = QIB_IB_LINKDOWN_SLEEP; + else if (lstate == 2) + lstate = QIB_IB_LINKDOWN; + else if (lstate == 3) + lstate = QIB_IB_LINKDOWN_DISABLE; + else + goto err; + spin_lock_irqsave(&ppd->lflags_lock, flags); + ppd->lflags &= ~QIBL_LINKV; + spin_unlock_irqrestore(&ppd->lflags_lock, flags); + qib_set_linkstate(ppd, lstate); + /* + * Don't send a reply if the response would be sent + * through the disabled port. + */ + if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) { + ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + goto done; + } + qib_wait_linkstate(ppd, QIBL_LINKV, 10); + break; + case IB_PORT_ARMED: + qib_set_linkstate(ppd, QIB_IB_LINKARM); + break; + case IB_PORT_ACTIVE: + qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); + break; + default: + /* XXX We have already partially updated our state! */ + goto err; + } + + ret = subn_get_portinfo(smp, ibdev, port); + + if (clientrereg) + pip->clientrereg_resv_subnetto |= 0x80; + + goto done; + +err: + smp->status |= IB_SMP_INVALID_FIELD; +get_only: + ret = subn_get_portinfo(smp, ibdev, port); +done: + return ret; +} + +/** + * rm_pkey - decrement the reference count for the given PKEY + * @ppd: the physical port data + * @key: the PKEY value + * + * Return true if this was the last reference and the hardware table entry + * needs to be changed. + */ +static int rm_pkey(struct qib_pportdata *ppd, u16 key) +{ + int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { + if (ppd->pkeys[i] != key) + continue; + if (atomic_dec_and_test(&ppd->pkeyrefs[i])) { + ppd->pkeys[i] = 0; + ret = 1; + goto bail; + } + break; + } + + ret = 0; + +bail: + return ret; +} + +/** + * add_pkey - add the given PKEY to the hardware table + * @ppd: the physical port data + * @key: the PKEY + * + * Return an error code if unable to add the entry, zero if no change, + * or 1 if the hardware PKEY register needs to be updated. + */ +static int add_pkey(struct qib_pportdata *ppd, u16 key) +{ + int i; + u16 lkey = key & 0x7FFF; + int any = 0; + int ret; + + if (lkey == 0x7FFF) { + ret = 0; + goto bail; + } + + /* Look for an empty slot or a matching PKEY. */ + for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { + if (!ppd->pkeys[i]) { + any++; + continue; + } + /* If it matches exactly, try to increment the ref count */ + if (ppd->pkeys[i] == key) { + if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) { + ret = 0; + goto bail; + } + /* Lost the race. Look for an empty slot below. */ + atomic_dec(&ppd->pkeyrefs[i]); + any++; + } + /* + * It makes no sense to have both the limited and unlimited + * PKEY set at the same time since the unlimited one will + * disable the limited one. + */ + if ((ppd->pkeys[i] & 0x7FFF) == lkey) { + ret = -EEXIST; + goto bail; + } + } + if (!any) { + ret = -EBUSY; + goto bail; + } + for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { + if (!ppd->pkeys[i] && + atomic_inc_return(&ppd->pkeyrefs[i]) == 1) { + /* for qibstats, etc.
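+ * Claiming the slot by raising its refcount from zero to one + * ensures only one racing caller wins it; the key itself is + * stored alongside the refcount.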
*/ + ppd->pkeys[i] = key; + ret = 1; + goto bail; + } + } + ret = -EBUSY; + +bail: + return ret; +} + +/** + * set_pkeys - set the PKEY table for ctxt 0 + * @dd: the qlogic_ib device + * @port: the IB port number + * @pkeys: the PKEY table + */ +static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) +{ + struct qib_pportdata *ppd; + struct qib_ctxtdata *rcd; + int i; + int changed = 0; + + /* + * IB port one/two always maps to context zero/one, + * always a kernel context, no locking needed + * If we get here with ppd setup, no need to check + * that rcd is valid. + */ + ppd = dd->pport + (port - 1); + rcd = dd->rcd[ppd->hw_pidx]; + + for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) { + u16 key = pkeys[i]; + u16 okey = rcd->pkeys[i]; + + if (key == okey) + continue; + /* + * The value of this PKEY table entry is changing. + * Remove the old entry in the hardware's array of PKEYs. + */ + if (okey & 0x7FFF) + changed |= rm_pkey(ppd, okey); + if (key & 0x7FFF) { + int ret = add_pkey(ppd, key); + + if (ret < 0) + key = 0; + else + changed |= ret; + } + rcd->pkeys[i] = key; + } + if (changed) { + struct ib_event event; + + (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); + + event.event = IB_EVENT_PKEY_CHANGE; + event.device = &dd->verbs_dev.ibdev; + event.element.port_num = 1; + ib_dispatch_event(&event); + } + return 0; +} + +static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); + __be16 *p = (__be16 *) smp->data; + u16 *q = (u16 *) smp->data; + struct qib_devdata *dd = dd_from_ibdev(ibdev); + unsigned i, n = qib_get_npkeys(dd); + + for (i = 0; i < n; i++) + q[i] = be16_to_cpu(p[i]); + + if (startpx != 0 || set_pkeys(dd, port, q) != 0) + smp->status |= IB_SMP_INVALID_FIELD; + + return subn_get_pkeytable(smp, ibdev, port); +} + +static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + struct qib_ibport *ibp = to_iport(ibdev, port); + u8 *p = (u8 *) smp->data; + unsigned i; + + memset(smp->data, 0, sizeof(smp->data)); + + if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) + smp->status |= IB_SMP_UNSUP_METHOD; + else + for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2) + *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1]; + + return reply(smp); +} + +static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + struct qib_ibport *ibp = to_iport(ibdev, port); + u8 *p = (u8 *) smp->data; + unsigned i; + + if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) { + smp->status |= IB_SMP_UNSUP_METHOD; + return reply(smp); + } + + for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) { + ibp->sl_to_vl[i] = *p >> 4; + ibp->sl_to_vl[i + 1] = *p & 0xF; + } + qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)), + _QIB_EVENT_SL2VL_CHANGE_BIT); + + return subn_get_sl_to_vl(smp, ibdev, port); +} + +static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + unsigned which = be32_to_cpu(smp->attr_mod) >> 16; + struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); + + memset(smp->data, 0, sizeof(smp->data)); + + if (ppd->vls_supported == IB_VL_VL0) + smp->status |= IB_SMP_UNSUP_METHOD; + else if (which == IB_VLARB_LOWPRI_0_31) + (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, + smp->data); + else if (which == IB_VLARB_HIGHPRI_0_31) + (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, + smp->data); + else + smp->status |= IB_SMP_INVALID_FIELD; + + return reply(smp); +} + +static int 
subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + unsigned which = be32_to_cpu(smp->attr_mod) >> 16; + struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); + + if (ppd->vls_supported == IB_VL_VL0) + smp->status |= IB_SMP_UNSUP_METHOD; + else if (which == IB_VLARB_LOWPRI_0_31) + (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, + smp->data); + else if (which == IB_VLARB_HIGHPRI_0_31) + (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, + smp->data); + else + smp->status |= IB_SMP_INVALID_FIELD; + + return subn_get_vl_arb(smp, ibdev, port); +} + +static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev, + u8 port) +{ + /* + * For now, we only send the trap once so no need to process this. + * o13-6, o13-7, + * o14-3.a4 The SMA shall not send any message in response to a valid + * SubnTrapRepress() message. + */ + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; +} + +static int pma_get_classportinfo(struct ib_perf *pmp, + struct ib_device *ibdev) +{ + struct ib_pma_classportinfo *p = + (struct ib_pma_classportinfo *)pmp->data; + struct qib_devdata *dd = dd_from_ibdev(ibdev); + + memset(pmp->data, 0, sizeof(pmp->data)); + + if (pmp->attr_mod != 0) + pmp->status |= IB_SMP_INVALID_FIELD; + + /* Note that AllPortSelect is not valid */ + p->base_version = 1; + p->class_version = 1; + p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; + /* + * Set the most significant bit of CM2 to indicate support for + * congestion statistics + */ + p->reserved[0] = dd->psxmitwait_supported << 7; + /* + * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. + */ + p->resp_time_value = 18; + + return reply((struct ib_smp *) pmp); +} + +static int pma_get_portsamplescontrol(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portsamplescontrol *p = + (struct ib_pma_portsamplescontrol *)pmp->data; + struct qib_ibdev *dev = to_idev(ibdev); + struct qib_devdata *dd = dd_from_dev(dev); + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + unsigned long flags; + u8 port_select = p->port_select; + + memset(pmp->data, 0, sizeof(pmp->data)); + + p->port_select = port_select; + if (pmp->attr_mod != 0 || port_select != port) { + pmp->status |= IB_SMP_INVALID_FIELD; + goto bail; + } + spin_lock_irqsave(&ibp->lock, flags); + p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS); + p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); + p->counter_width = 4; /* 32 bit counters */ + p->counter_mask0_9 = COUNTER_MASK0_9; + p->sample_start = cpu_to_be32(ibp->pma_sample_start); + p->sample_interval = cpu_to_be32(ibp->pma_sample_interval); + p->tag = cpu_to_be16(ibp->pma_tag); + p->counter_select[0] = ibp->pma_counter_select[0]; + p->counter_select[1] = ibp->pma_counter_select[1]; + p->counter_select[2] = ibp->pma_counter_select[2]; + p->counter_select[3] = ibp->pma_counter_select[3]; + p->counter_select[4] = ibp->pma_counter_select[4]; + spin_unlock_irqrestore(&ibp->lock, flags); + +bail: + return reply((struct ib_smp *) pmp); +} + +static int pma_set_portsamplescontrol(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portsamplescontrol *p = + (struct ib_pma_portsamplescontrol *)pmp->data; + struct qib_ibdev *dev = to_idev(ibdev); + struct qib_devdata *dd = dd_from_dev(dev); + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + unsigned long flags; + u8 status, xmit_flags; + int ret; + + if 
(pmp->attr_mod != 0 || p->port_select != port) { + pmp->status |= IB_SMP_INVALID_FIELD; + ret = reply((struct ib_smp *) pmp); + goto bail; + } + + spin_lock_irqsave(&ibp->lock, flags); + + /* Port Sampling code owns the PS* HW counters */ + xmit_flags = ppd->cong_stats.flags; + ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE; + status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); + if (status == IB_PMA_SAMPLE_STATUS_DONE || + (status == IB_PMA_SAMPLE_STATUS_RUNNING && + xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) { + ibp->pma_sample_start = be32_to_cpu(p->sample_start); + ibp->pma_sample_interval = be32_to_cpu(p->sample_interval); + ibp->pma_tag = be16_to_cpu(p->tag); + ibp->pma_counter_select[0] = p->counter_select[0]; + ibp->pma_counter_select[1] = p->counter_select[1]; + ibp->pma_counter_select[2] = p->counter_select[2]; + ibp->pma_counter_select[3] = p->counter_select[3]; + ibp->pma_counter_select[4] = p->counter_select[4]; + dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval, + ibp->pma_sample_start); + } + spin_unlock_irqrestore(&ibp->lock, flags); + + ret = pma_get_portsamplescontrol(pmp, ibdev, port); + +bail: + return ret; +} + +static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd, + __be16 sel) +{ + u64 ret; + + switch (sel) { + case IB_PMA_PORT_XMIT_DATA: + ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA); + break; + case IB_PMA_PORT_RCV_DATA: + ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA); + break; + case IB_PMA_PORT_XMIT_PKTS: + ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS); + break; + case IB_PMA_PORT_RCV_PKTS: + ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS); + break; + case IB_PMA_PORT_XMIT_WAIT: + ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT); + break; + default: + ret = 0; + } + + return ret; +} + +/* This function assumes that the xmit_wait lock is already held */ +static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd) +{ + u32 delta; + + delta = get_counter(&ppd->ibport_data, ppd, + IB_PMA_PORT_XMIT_WAIT); + return ppd->cong_stats.counter + delta; +} + +static void cache_hw_sample_counters(struct qib_pportdata *ppd) +{ + struct qib_ibport *ibp = &ppd->ibport_data; + + ppd->cong_stats.counter_cache.psxmitdata = + get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA); + ppd->cong_stats.counter_cache.psrcvdata = + get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA); + ppd->cong_stats.counter_cache.psxmitpkts = + get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS); + ppd->cong_stats.counter_cache.psrcvpkts = + get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS); + ppd->cong_stats.counter_cache.psxmitwait = + get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT); +} + +static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd, + __be16 sel) +{ + u64 ret; + + switch (sel) { + case IB_PMA_PORT_XMIT_DATA: + ret = ppd->cong_stats.counter_cache.psxmitdata; + break; + case IB_PMA_PORT_RCV_DATA: + ret = ppd->cong_stats.counter_cache.psrcvdata; + break; + case IB_PMA_PORT_XMIT_PKTS: + ret = ppd->cong_stats.counter_cache.psxmitpkts; + break; + case IB_PMA_PORT_RCV_PKTS: + ret = ppd->cong_stats.counter_cache.psrcvpkts; + break; + case IB_PMA_PORT_XMIT_WAIT: + ret = ppd->cong_stats.counter_cache.psxmitwait; + break; + default: + ret = 0; + } + + return ret; +} + +static int pma_get_portsamplesresult(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portsamplesresult *p = + (struct ib_pma_portsamplesresult *)pmp->data; + struct qib_ibdev *dev = to_idev(ibdev); + struct qib_devdata *dd = dd_from_dev(dev); + struct 
qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + unsigned long flags; + u8 status; + int i; + + memset(pmp->data, 0, sizeof(pmp->data)); + spin_lock_irqsave(&ibp->lock, flags); + p->tag = cpu_to_be16(ibp->pma_tag); + if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) + p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; + else { + status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); + p->sample_status = cpu_to_be16(status); + if (status == IB_PMA_SAMPLE_STATUS_DONE) { + cache_hw_sample_counters(ppd); + ppd->cong_stats.counter = + xmit_wait_get_value_delta(ppd); + dd->f_set_cntr_sample(ppd, + QIB_CONG_TIMER_PSINTERVAL, 0); + ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; + } + } + for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) + p->counter[i] = cpu_to_be32( + get_cache_hw_sample_counters( + ppd, ibp->pma_counter_select[i])); + spin_unlock_irqrestore(&ibp->lock, flags); + + return reply((struct ib_smp *) pmp); +} + +static int pma_get_portsamplesresult_ext(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portsamplesresult_ext *p = + (struct ib_pma_portsamplesresult_ext *)pmp->data; + struct qib_ibdev *dev = to_idev(ibdev); + struct qib_devdata *dd = dd_from_dev(dev); + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + unsigned long flags; + u8 status; + int i; + + /* Port Sampling code owns the PS* HW counters */ + memset(pmp->data, 0, sizeof(pmp->data)); + spin_lock_irqsave(&ibp->lock, flags); + p->tag = cpu_to_be16(ibp->pma_tag); + if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) + p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; + else { + status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); + p->sample_status = cpu_to_be16(status); + /* 64 bits */ + p->extended_width = cpu_to_be32(0x80000000); + if (status == IB_PMA_SAMPLE_STATUS_DONE) { + cache_hw_sample_counters(ppd); + ppd->cong_stats.counter = + xmit_wait_get_value_delta(ppd); + dd->f_set_cntr_sample(ppd, + QIB_CONG_TIMER_PSINTERVAL, 0); + ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; + } + } + for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) + p->counter[i] = cpu_to_be64( + get_cache_hw_sample_counters( + ppd, ibp->pma_counter_select[i])); + spin_unlock_irqrestore(&ibp->lock, flags); + + return reply((struct ib_smp *) pmp); +} + +static int pma_get_portcounters(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) + pmp->data; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_verbs_counters cntrs; + u8 port_select = p->port_select; + + qib_get_counters(ppd, &cntrs); + + /* Adjust counters for any resets done. 
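+ * The hardware cannot clear its counters, so a PMA Set saves + * per-counter z_* baselines and every Get subtracts them.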
*/ + cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; + cntrs.link_error_recovery_counter -= + ibp->z_link_error_recovery_counter; + cntrs.link_downed_counter -= ibp->z_link_downed_counter; + cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; + cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors; + cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; + cntrs.port_xmit_data -= ibp->z_port_xmit_data; + cntrs.port_rcv_data -= ibp->z_port_rcv_data; + cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; + cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; + cntrs.local_link_integrity_errors -= + ibp->z_local_link_integrity_errors; + cntrs.excessive_buffer_overrun_errors -= + ibp->z_excessive_buffer_overrun_errors; + cntrs.vl15_dropped -= ibp->z_vl15_dropped; + cntrs.vl15_dropped += ibp->n_vl15_dropped; + + memset(pmp->data, 0, sizeof(pmp->data)); + + p->port_select = port_select; + if (pmp->attr_mod != 0 || port_select != port) + pmp->status |= IB_SMP_INVALID_FIELD; + + if (cntrs.symbol_error_counter > 0xFFFFUL) + p->symbol_error_counter = cpu_to_be16(0xFFFF); + else + p->symbol_error_counter = + cpu_to_be16((u16)cntrs.symbol_error_counter); + if (cntrs.link_error_recovery_counter > 0xFFUL) + p->link_error_recovery_counter = 0xFF; + else + p->link_error_recovery_counter = + (u8)cntrs.link_error_recovery_counter; + if (cntrs.link_downed_counter > 0xFFUL) + p->link_downed_counter = 0xFF; + else + p->link_downed_counter = (u8)cntrs.link_downed_counter; + if (cntrs.port_rcv_errors > 0xFFFFUL) + p->port_rcv_errors = cpu_to_be16(0xFFFF); + else + p->port_rcv_errors = + cpu_to_be16((u16) cntrs.port_rcv_errors); + if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) + p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); + else + p->port_rcv_remphys_errors = + cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); + if (cntrs.port_xmit_discards > 0xFFFFUL) + p->port_xmit_discards = cpu_to_be16(0xFFFF); + else + p->port_xmit_discards = + cpu_to_be16((u16)cntrs.port_xmit_discards); + if (cntrs.local_link_integrity_errors > 0xFUL) + cntrs.local_link_integrity_errors = 0xFUL; + if (cntrs.excessive_buffer_overrun_errors > 0xFUL) + cntrs.excessive_buffer_overrun_errors = 0xFUL; + p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) | + cntrs.excessive_buffer_overrun_errors; + if (cntrs.vl15_dropped > 0xFFFFUL) + p->vl15_dropped = cpu_to_be16(0xFFFF); + else + p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); + if (cntrs.port_xmit_data > 0xFFFFFFFFUL) + p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); + else + p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); + if (cntrs.port_rcv_data > 0xFFFFFFFFUL) + p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); + else + p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); + if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) + p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); + else + p->port_xmit_packets = + cpu_to_be32((u32)cntrs.port_xmit_packets); + if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) + p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); + else + p->port_rcv_packets = + cpu_to_be32((u32) cntrs.port_rcv_packets); + + return reply((struct ib_smp *) pmp); +} + +static int pma_get_portcounters_cong(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + /* Congestion PMA packets start at offset 24 not 64 */ + struct ib_pma_portcounters_cong *p = + (struct ib_pma_portcounters_cong *)pmp->reserved; + struct qib_verbs_counters cntrs; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct 
qib_devdata *dd = dd_from_ppd(ppd); + u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF; + u64 xmit_wait_counter; + unsigned long flags; + + /* + * This check is performed only in the GET method because the + * SET method ends up calling this anyway. + */ + if (!dd->psxmitwait_supported) + pmp->status |= IB_SMP_UNSUP_METH_ATTR; + if (port_select != port) + pmp->status |= IB_SMP_INVALID_FIELD; + + qib_get_counters(ppd, &cntrs); + spin_lock_irqsave(&ppd->ibport_data.lock, flags); + xmit_wait_counter = xmit_wait_get_value_delta(ppd); + spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); + + /* Adjust counters for any resets done. */ + cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; + cntrs.link_error_recovery_counter -= + ibp->z_link_error_recovery_counter; + cntrs.link_downed_counter -= ibp->z_link_downed_counter; + cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; + cntrs.port_rcv_remphys_errors -= + ibp->z_port_rcv_remphys_errors; + cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; + cntrs.local_link_integrity_errors -= + ibp->z_local_link_integrity_errors; + cntrs.excessive_buffer_overrun_errors -= + ibp->z_excessive_buffer_overrun_errors; + cntrs.vl15_dropped -= ibp->z_vl15_dropped; + cntrs.vl15_dropped += ibp->n_vl15_dropped; + cntrs.port_xmit_data -= ibp->z_port_xmit_data; + cntrs.port_rcv_data -= ibp->z_port_rcv_data; + cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; + cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; + + memset(pmp->reserved, 0, sizeof(pmp->reserved) + + sizeof(pmp->data)); + + /* + * Set top 3 bits to indicate interval in picoseconds in + * remaining bits. + */ + p->port_check_rate = + cpu_to_be16((QIB_XMIT_RATE_PICO << 13) | + (dd->psxmitwait_check_rate & + ~(QIB_XMIT_RATE_PICO << 13))); + p->port_adr_events = cpu_to_be64(0); + p->port_xmit_wait = cpu_to_be64(xmit_wait_counter); + p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data); + p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data); + p->port_xmit_packets = + cpu_to_be64(cntrs.port_xmit_packets); + p->port_rcv_packets = + cpu_to_be64(cntrs.port_rcv_packets); + if (cntrs.symbol_error_counter > 0xFFFFUL) + p->symbol_error_counter = cpu_to_be16(0xFFFF); + else + p->symbol_error_counter = + cpu_to_be16( + (u16)cntrs.symbol_error_counter); + if (cntrs.link_error_recovery_counter > 0xFFUL) + p->link_error_recovery_counter = 0xFF; + else + p->link_error_recovery_counter = + (u8)cntrs.link_error_recovery_counter; + if (cntrs.link_downed_counter > 0xFFUL) + p->link_downed_counter = 0xFF; + else + p->link_downed_counter = + (u8)cntrs.link_downed_counter; + if (cntrs.port_rcv_errors > 0xFFFFUL) + p->port_rcv_errors = cpu_to_be16(0xFFFF); + else + p->port_rcv_errors = + cpu_to_be16((u16) cntrs.port_rcv_errors); + if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) + p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); + else + p->port_rcv_remphys_errors = + cpu_to_be16( + (u16)cntrs.port_rcv_remphys_errors); + if (cntrs.port_xmit_discards > 0xFFFFUL) + p->port_xmit_discards = cpu_to_be16(0xFFFF); + else + p->port_xmit_discards = + cpu_to_be16((u16)cntrs.port_xmit_discards); + if (cntrs.local_link_integrity_errors > 0xFUL) + cntrs.local_link_integrity_errors = 0xFUL; + if (cntrs.excessive_buffer_overrun_errors > 0xFUL) + cntrs.excessive_buffer_overrun_errors = 0xFUL; + p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) | + cntrs.excessive_buffer_overrun_errors; + if (cntrs.vl15_dropped > 0xFFFFUL) + p->vl15_dropped = cpu_to_be16(0xFFFF); + else + p->vl15_dropped = 
cpu_to_be16((u16)cntrs.vl15_dropped); + + return reply((struct ib_smp *)pmp); +} + +static int pma_get_portcounters_ext(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portcounters_ext *p = + (struct ib_pma_portcounters_ext *)pmp->data; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + u64 swords, rwords, spkts, rpkts, xwait; + u8 port_select = p->port_select; + + memset(pmp->data, 0, sizeof(pmp->data)); + + p->port_select = port_select; + if (pmp->attr_mod != 0 || port_select != port) { + pmp->status |= IB_SMP_INVALID_FIELD; + goto bail; + } + + qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); + + /* Adjust counters for any resets done. */ + swords -= ibp->z_port_xmit_data; + rwords -= ibp->z_port_rcv_data; + spkts -= ibp->z_port_xmit_packets; + rpkts -= ibp->z_port_rcv_packets; + + p->port_xmit_data = cpu_to_be64(swords); + p->port_rcv_data = cpu_to_be64(rwords); + p->port_xmit_packets = cpu_to_be64(spkts); + p->port_rcv_packets = cpu_to_be64(rpkts); + p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit); + p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv); + p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit); + p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv); + +bail: + return reply((struct ib_smp *) pmp); +} + +static int pma_set_portcounters(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) + pmp->data; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_verbs_counters cntrs; + + /* + * Since the HW doesn't support clearing counters, we save the + * current count and subtract it from future responses. 
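+ * For example, clearing at a hardware count of 100 makes a + * later hardware reading of 120 come back as 20.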
+ */ + qib_get_counters(ppd, &cntrs); + + if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) + ibp->z_symbol_error_counter = cntrs.symbol_error_counter; + + if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY) + ibp->z_link_error_recovery_counter = + cntrs.link_error_recovery_counter; + + if (p->counter_select & IB_PMA_SEL_LINK_DOWNED) + ibp->z_link_downed_counter = cntrs.link_downed_counter; + + if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS) + ibp->z_port_rcv_errors = cntrs.port_rcv_errors; + + if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS) + ibp->z_port_rcv_remphys_errors = + cntrs.port_rcv_remphys_errors; + + if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS) + ibp->z_port_xmit_discards = cntrs.port_xmit_discards; + + if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS) + ibp->z_local_link_integrity_errors = + cntrs.local_link_integrity_errors; + + if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS) + ibp->z_excessive_buffer_overrun_errors = + cntrs.excessive_buffer_overrun_errors; + + if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { + ibp->n_vl15_dropped = 0; + ibp->z_vl15_dropped = cntrs.vl15_dropped; + } + + if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA) + ibp->z_port_xmit_data = cntrs.port_xmit_data; + + if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA) + ibp->z_port_rcv_data = cntrs.port_rcv_data; + + if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS) + ibp->z_port_xmit_packets = cntrs.port_xmit_packets; + + if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS) + ibp->z_port_rcv_packets = cntrs.port_rcv_packets; + + return pma_get_portcounters(pmp, ibdev, port); +} + +static int pma_set_portcounters_cong(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_devdata *dd = dd_from_ppd(ppd); + struct qib_verbs_counters cntrs; + u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF; + int ret = 0; + unsigned long flags; + + qib_get_counters(ppd, &cntrs); + /* Get counter values before we save them */ + ret = pma_get_portcounters_cong(pmp, ibdev, port); + + if (counter_select & IB_PMA_SEL_CONG_XMIT) { + spin_lock_irqsave(&ppd->ibport_data.lock, flags); + ppd->cong_stats.counter = 0; + dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, + 0x0); + spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); + } + if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) { + ibp->z_port_xmit_data = cntrs.port_xmit_data; + ibp->z_port_rcv_data = cntrs.port_rcv_data; + ibp->z_port_xmit_packets = cntrs.port_xmit_packets; + ibp->z_port_rcv_packets = cntrs.port_rcv_packets; + } + if (counter_select & IB_PMA_SEL_CONG_ALL) { + ibp->z_symbol_error_counter = + cntrs.symbol_error_counter; + ibp->z_link_error_recovery_counter = + cntrs.link_error_recovery_counter; + ibp->z_link_downed_counter = + cntrs.link_downed_counter; + ibp->z_port_rcv_errors = cntrs.port_rcv_errors; + ibp->z_port_rcv_remphys_errors = + cntrs.port_rcv_remphys_errors; + ibp->z_port_xmit_discards = + cntrs.port_xmit_discards; + ibp->z_local_link_integrity_errors = + cntrs.local_link_integrity_errors; + ibp->z_excessive_buffer_overrun_errors = + cntrs.excessive_buffer_overrun_errors; + ibp->n_vl15_dropped = 0; + ibp->z_vl15_dropped = cntrs.vl15_dropped; + } + + return ret; +} + +static int pma_set_portcounters_ext(struct ib_perf *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) + pmp->data; + struct 
qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + u64 swords, rwords, spkts, rpkts, xwait; + + qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); + + if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) + ibp->z_port_xmit_data = swords; + + if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA) + ibp->z_port_rcv_data = rwords; + + if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS) + ibp->z_port_xmit_packets = spkts; + + if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) + ibp->z_port_rcv_packets = rpkts; + + if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) + ibp->n_unicast_xmit = 0; + + if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) + ibp->n_unicast_rcv = 0; + + if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) + ibp->n_multicast_xmit = 0; + + if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) + ibp->n_multicast_rcv = 0; + + return pma_get_portcounters_ext(pmp, ibdev, port); +} + +static int process_subn(struct ib_device *ibdev, int mad_flags, + u8 port, struct ib_mad *in_mad, + struct ib_mad *out_mad) +{ + struct ib_smp *smp = (struct ib_smp *)out_mad; + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + int ret; + + *out_mad = *in_mad; + if (smp->class_version != 1) { + smp->status |= IB_SMP_UNSUP_VERSION; + ret = reply(smp); + goto bail; + } + + ret = check_mkey(ibp, smp, mad_flags); + if (ret) { + u32 port_num = be32_to_cpu(smp->attr_mod); + + /* + * If this is a get/set portinfo, we already check the + * M_Key if the MAD is for another port and the M_Key + * is OK on the receiving port. This check is needed + * to increment the error counters when the M_Key + * fails to match on *both* ports. + */ + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && + (smp->method == IB_MGMT_METHOD_GET || + smp->method == IB_MGMT_METHOD_SET) && + port_num && port_num <= ibdev->phys_port_cnt && + port != port_num) + (void) check_mkey(to_iport(ibdev, port_num), smp, 0); + goto bail; + } + + switch (smp->method) { + case IB_MGMT_METHOD_GET: + switch (smp->attr_id) { + case IB_SMP_ATTR_NODE_DESC: + ret = subn_get_nodedescription(smp, ibdev); + goto bail; + case IB_SMP_ATTR_NODE_INFO: + ret = subn_get_nodeinfo(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_GUID_INFO: + ret = subn_get_guidinfo(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_PORT_INFO: + ret = subn_get_portinfo(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_PKEY_TABLE: + ret = subn_get_pkeytable(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_SL_TO_VL_TABLE: + ret = subn_get_sl_to_vl(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_VL_ARB_TABLE: + ret = subn_get_vl_arb(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_SM_INFO: + if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { + ret = IB_MAD_RESULT_SUCCESS | + IB_MAD_RESULT_CONSUMED; + goto bail; + } + if (ibp->port_cap_flags & IB_PORT_SM) { + ret = IB_MAD_RESULT_SUCCESS; + goto bail; + } + /* FALLTHROUGH */ + default: + smp->status |= IB_SMP_UNSUP_METH_ATTR; + ret = reply(smp); + goto bail; + } + + case IB_MGMT_METHOD_SET: + switch (smp->attr_id) { + case IB_SMP_ATTR_GUID_INFO: + ret = subn_set_guidinfo(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_PORT_INFO: + ret = subn_set_portinfo(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_PKEY_TABLE: + ret = subn_set_pkeytable(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_SL_TO_VL_TABLE: + ret = subn_set_sl_to_vl(smp, ibdev, port); + goto bail; + case 
IB_SMP_ATTR_VL_ARB_TABLE: + ret = subn_set_vl_arb(smp, ibdev, port); + goto bail; + case IB_SMP_ATTR_SM_INFO: + if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { + ret = IB_MAD_RESULT_SUCCESS | + IB_MAD_RESULT_CONSUMED; + goto bail; + } + if (ibp->port_cap_flags & IB_PORT_SM) { + ret = IB_MAD_RESULT_SUCCESS; + goto bail; + } + /* FALLTHROUGH */ + default: + smp->status |= IB_SMP_UNSUP_METH_ATTR; + ret = reply(smp); + goto bail; + } + + case IB_MGMT_METHOD_TRAP_REPRESS: + if (smp->attr_id == IB_SMP_ATTR_NOTICE) + ret = subn_trap_repress(smp, ibdev, port); + else { + smp->status |= IB_SMP_UNSUP_METH_ATTR; + ret = reply(smp); + } + goto bail; + + case IB_MGMT_METHOD_TRAP: + case IB_MGMT_METHOD_REPORT: + case IB_MGMT_METHOD_REPORT_RESP: + case IB_MGMT_METHOD_GET_RESP: + /* + * The ib_mad module will call us to process responses + * before checking for other consumers. + * Just tell the caller to process it normally. + */ + ret = IB_MAD_RESULT_SUCCESS; + goto bail; + + case IB_MGMT_METHOD_SEND: + if (ib_get_smp_direction(smp) && + smp->attr_id == QIB_VENDOR_IPG) { + ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT, + smp->data[0]); + ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + } else + ret = IB_MAD_RESULT_SUCCESS; + goto bail; + + default: + smp->status |= IB_SMP_UNSUP_METHOD; + ret = reply(smp); + } + +bail: + return ret; +} + +static int process_perf(struct ib_device *ibdev, u8 port, + struct ib_mad *in_mad, + struct ib_mad *out_mad) +{ + struct ib_perf *pmp = (struct ib_perf *)out_mad; + int ret; + + *out_mad = *in_mad; + if (pmp->class_version != 1) { + pmp->status |= IB_SMP_UNSUP_VERSION; + ret = reply((struct ib_smp *) pmp); + goto bail; + } + + switch (pmp->method) { + case IB_MGMT_METHOD_GET: + switch (pmp->attr_id) { + case IB_PMA_CLASS_PORT_INFO: + ret = pma_get_classportinfo(pmp, ibdev); + goto bail; + case IB_PMA_PORT_SAMPLES_CONTROL: + ret = pma_get_portsamplescontrol(pmp, ibdev, port); + goto bail; + case IB_PMA_PORT_SAMPLES_RESULT: + ret = pma_get_portsamplesresult(pmp, ibdev, port); + goto bail; + case IB_PMA_PORT_SAMPLES_RESULT_EXT: + ret = pma_get_portsamplesresult_ext(pmp, ibdev, port); + goto bail; + case IB_PMA_PORT_COUNTERS: + ret = pma_get_portcounters(pmp, ibdev, port); + goto bail; + case IB_PMA_PORT_COUNTERS_EXT: + ret = pma_get_portcounters_ext(pmp, ibdev, port); + goto bail; + case IB_PMA_PORT_COUNTERS_CONG: + ret = pma_get_portcounters_cong(pmp, ibdev, port); + goto bail; + default: + pmp->status |= IB_SMP_UNSUP_METH_ATTR; + ret = reply((struct ib_smp *) pmp); + goto bail; + } + + case IB_MGMT_METHOD_SET: + switch (pmp->attr_id) { + case IB_PMA_PORT_SAMPLES_CONTROL: + ret = pma_set_portsamplescontrol(pmp, ibdev, port); + goto bail; + case IB_PMA_PORT_COUNTERS: + ret = pma_set_portcounters(pmp, ibdev, port); + goto bail; + case IB_PMA_PORT_COUNTERS_EXT: + ret = pma_set_portcounters_ext(pmp, ibdev, port); + goto bail; + case IB_PMA_PORT_COUNTERS_CONG: + ret = pma_set_portcounters_cong(pmp, ibdev, port); + goto bail; + default: + pmp->status |= IB_SMP_UNSUP_METH_ATTR; + ret = reply((struct ib_smp *) pmp); + goto bail; + } + + case IB_MGMT_METHOD_TRAP: + case IB_MGMT_METHOD_GET_RESP: + /* + * The ib_mad module will call us to process responses + * before checking for other consumers. + * Just tell the caller to process it normally. 
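+ * (IB_MAD_RESULT_SUCCESS without IB_MAD_RESULT_REPLY or + * IB_MAD_RESULT_CONSUMED leaves the response to the ib_mad + * layer's normal completion handling.)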
+ */ + ret = IB_MAD_RESULT_SUCCESS; + goto bail; + + default: + pmp->status |= IB_SMP_UNSUP_METHOD; + ret = reply((struct ib_smp *) pmp); + } + +bail: + return ret; +} + +/** + * qib_process_mad - process an incoming MAD packet + * @ibdev: the infiniband device this packet came in on + * @mad_flags: MAD flags + * @port: the port number this packet came in on + * @in_wc: the work completion entry for this packet + * @in_grh: the global route header for this packet + * @in_mad: the incoming MAD + * @out_mad: any outgoing MAD reply + * + * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not + * interested in processing. + * + * Note that the verbs framework has already done the MAD sanity checks, + * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE + * MADs. + * + * This is called by the ib_mad module. + */ +int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, + struct ib_wc *in_wc, struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad) +{ + int ret; + + switch (in_mad->mad_hdr.mgmt_class) { + case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: + case IB_MGMT_CLASS_SUBN_LID_ROUTED: + ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad); + goto bail; + + case IB_MGMT_CLASS_PERF_MGMT: + ret = process_perf(ibdev, port, in_mad, out_mad); + goto bail; + + default: + ret = IB_MAD_RESULT_SUCCESS; + } + +bail: + return ret; +} + +static void send_handler(struct ib_mad_agent *agent, + struct ib_mad_send_wc *mad_send_wc) +{ + ib_free_send_mad(mad_send_wc->send_buf); +} + +static void xmit_wait_timer_func(unsigned long opaque) +{ + struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; + struct qib_devdata *dd = dd_from_ppd(ppd); + unsigned long flags; + u8 status; + + spin_lock_irqsave(&ppd->ibport_data.lock, flags); + if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) { + status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); + if (status == IB_PMA_SAMPLE_STATUS_DONE) { + /* save counter cache */ + cache_hw_sample_counters(ppd); + ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; + } else + goto done; + } + ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); + dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); +done: + spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); + mod_timer(&ppd->cong_stats.timer, jiffies + HZ); +} + +int qib_create_agents(struct qib_ibdev *dev) +{ + struct qib_devdata *dd = dd_from_dev(dev); + struct ib_mad_agent *agent; + struct qib_ibport *ibp; + int p; + int ret; + + for (p = 0; p < dd->num_pports; p++) { + ibp = &dd->pport[p].ibport_data; + agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, + NULL, 0, send_handler, + NULL, NULL); + if (IS_ERR(agent)) { + ret = PTR_ERR(agent); + goto err; + } + + /* Initialize xmit_wait structure */ + dd->pport[p].cong_stats.counter = 0; + init_timer(&dd->pport[p].cong_stats.timer); + dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func; + dd->pport[p].cong_stats.timer.data = + (unsigned long)(&dd->pport[p]); + dd->pport[p].cong_stats.timer.expires = 0; + add_timer(&dd->pport[p].cong_stats.timer); + + ibp->send_agent = agent; + } + + return 0; + +err: + for (p = 0; p < dd->num_pports; p++) { + ibp = &dd->pport[p].ibport_data; + if (ibp->send_agent) { + agent = ibp->send_agent; + ibp->send_agent = NULL; + ib_unregister_mad_agent(agent); + } + } + + return ret; +} + +void qib_free_agents(struct qib_ibdev *dev) +{ + struct qib_devdata *dd = dd_from_dev(dev); + struct ib_mad_agent *agent; + struct qib_ibport *ibp; + int p; + + for (p = 
0; p < dd->num_pports; p++) { + ibp = &dd->pport[p].ibport_data; + if (ibp->send_agent) { + agent = ibp->send_agent; + ibp->send_agent = NULL; + ib_unregister_mad_agent(agent); + } + if (ibp->sm_ah) { + ib_destroy_ah(&ibp->sm_ah->ibah); + ibp->sm_ah = NULL; + } + if (dd->pport[p].cong_stats.timer.data) + del_timer_sync(&dd->pport[p].cong_stats.timer); + } +} diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h new file mode 100644 index 000000000000..147aff9117d7 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_mad.h @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
+
+struct ib_node_info {
+    u8 base_version;
+    u8 class_version;
+    u8 node_type;
+    u8 num_ports;
+    __be64 sys_guid;
+    __be64 node_guid;
+    __be64 port_guid;
+    __be16 partition_cap;
+    __be16 device_id;
+    __be32 revision;
+    u8 local_port_num;
+    u8 vendor_id[3];
+} __attribute__ ((packed));
+
+struct ib_mad_notice_attr {
+    u8 generic_type;
+    u8 prod_type_msb;
+    __be16 prod_type_lsb;
+    __be16 trap_num;
+    __be16 issuer_lid;
+    __be16 toggle_count;
+
+    union {
+        struct {
+            u8 details[54];
+        } raw_data;
+
+        struct {
+            __be16 reserved;
+            __be16 lid; /* where violation happened */
+            u8 port_num; /* where violation happened */
+        } __attribute__ ((packed)) ntc_129_131;
+
+        struct {
+            __be16 reserved;
+            __be16 lid; /* LID where change occurred */
+            u8 reserved2;
+            u8 local_changes; /* low bit - local changes */
+            __be32 new_cap_mask; /* new capability mask */
+            u8 reserved3;
+            u8 change_flags; /* low 3 bits only */
+        } __attribute__ ((packed)) ntc_144;
+
+        struct {
+            __be16 reserved;
+            __be16 lid; /* lid where sys guid changed */
+            __be16 reserved2;
+            __be64 new_sys_guid;
+        } __attribute__ ((packed)) ntc_145;
+
+        struct {
+            __be16 reserved;
+            __be16 lid;
+            __be16 dr_slid;
+            u8 method;
+            u8 reserved2;
+            __be16 attr_id;
+            __be32 attr_mod;
+            __be64 mkey;
+            u8 reserved3;
+            u8 dr_trunc_hop;
+            u8 dr_rtn_path[30];
+        } __attribute__ ((packed)) ntc_256;
+
+        struct {
+            __be16 reserved;
+            __be16 lid1;
+            __be16 lid2;
+            __be32 key;
+            __be32 sl_qp1; /* SL: high 4 bits */
+            __be32 qp2; /* high 8 bits reserved */
+            union ib_gid gid1;
+            union ib_gid gid2;
+        } __attribute__ ((packed)) ntc_257_258;
+
+    } details;
+};
+
+/*
+ * Generic trap/notice types
+ */
+#define IB_NOTICE_TYPE_FATAL 0x80
+#define IB_NOTICE_TYPE_URGENT 0x81
+#define IB_NOTICE_TYPE_SECURITY 0x82
+#define IB_NOTICE_TYPE_SM 0x83
+#define IB_NOTICE_TYPE_INFO 0x84
+
+/*
+ * Generic trap/notice producers
+ */
+#define IB_NOTICE_PROD_CA cpu_to_be16(1)
+#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
+#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
+#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
+
+/*
+ * Generic trap/notice numbers
+ */
+#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
+#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
+#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
+#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
+#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
+#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
+#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
+#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
+
+/*
+ * Repress trap/notice flags
+ */
+#define IB_NOTICE_REPRESS_LLI_THRESH (1 << 0)
+#define IB_NOTICE_REPRESS_EBO_THRESH (1 << 1)
+#define IB_NOTICE_REPRESS_FLOW_UPDATE (1 << 2)
+#define IB_NOTICE_REPRESS_CAP_MASK_CHG (1 << 3)
+#define IB_NOTICE_REPRESS_SYS_GUID_CHG (1 << 4)
+#define IB_NOTICE_REPRESS_BAD_MKEY (1 << 5)
+#define IB_NOTICE_REPRESS_BAD_PKEY (1 << 6)
+#define IB_NOTICE_REPRESS_BAD_QKEY (1 << 7)
+
+/*
+ * Generic trap/notice other local changes flags (trap 144).
+ */
+#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
+#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
+#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
+
+/*
+ * Generic trap/notice M_Key violation flags in dr_trunc_hop (trap 256).
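The dr_trunc_hop byte above packs three things into eight bits: the two flag bits defined just below and, by our reading of the field name (an assumption, not something the header states), the directed-route hop count in the remaining low six bits. A standalone sketch of how a consumer might unpack it:

    #include <stdio.h>

    #define IB_NOTICE_TRAP_DR_NOTICE 0x80 /* notice came from a DR SMP */
    #define IB_NOTICE_TRAP_DR_TRUNC  0x40 /* return path was truncated */

    int main(void)
    {
        unsigned char dr_trunc_hop = 0xc3; /* hypothetical sample value */

        printf("directed route: %s\n",
               (dr_trunc_hop & IB_NOTICE_TRAP_DR_NOTICE) ? "yes" : "no");
        printf("truncated path: %s\n",
               (dr_trunc_hop & IB_NOTICE_TRAP_DR_TRUNC) ? "yes" : "no");
        printf("hop count: %u\n", dr_trunc_hop & 0x3f); /* assumed low 6 bits */
        return 0;
    }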
+ */ +#define IB_NOTICE_TRAP_DR_NOTICE 0x80 +#define IB_NOTICE_TRAP_DR_TRUNC 0x40 + +struct ib_vl_weight_elem { + u8 vl; /* Only low 4 bits, upper 4 bits reserved */ + u8 weight; +}; + +#define IB_VLARB_LOWPRI_0_31 1 +#define IB_VLARB_LOWPRI_32_63 2 +#define IB_VLARB_HIGHPRI_0_31 3 +#define IB_VLARB_HIGHPRI_32_63 4 + +/* + * PMA class portinfo capability mask bits + */ +#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8) +#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9) +#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12) + +#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) +#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010) +#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011) +#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012) +#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D) +#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E) +#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00) + +struct ib_perf { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + __be16 unused; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + u8 reserved[40]; + u8 data[192]; +} __attribute__ ((packed)); + +struct ib_pma_classportinfo { + u8 base_version; + u8 class_version; + __be16 cap_mask; + u8 reserved[3]; + u8 resp_time_value; /* only lower 5 bits */ + union ib_gid redirect_gid; + __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */ + __be16 redirect_lid; + __be16 redirect_pkey; + __be32 redirect_qp; /* only lower 24 bits */ + __be32 redirect_qkey; + union ib_gid trap_gid; + __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */ + __be16 trap_lid; + __be16 trap_pkey; + __be32 trap_hl_qp; /* 8, 24 bits respectively */ + __be32 trap_qkey; +} __attribute__ ((packed)); + +struct ib_pma_portsamplescontrol { + u8 opcode; + u8 port_select; + u8 tick; + u8 counter_width; /* only lower 3 bits */ + __be32 counter_mask0_9; /* 2, 10 * 3, bits */ + __be16 counter_mask10_14; /* 1, 5 * 3, bits */ + u8 sample_mechanisms; + u8 sample_status; /* only lower 2 bits */ + __be64 option_mask; + __be64 vendor_mask; + __be32 sample_start; + __be32 sample_interval; + __be16 tag; + __be16 counter_select[15]; +} __attribute__ ((packed)); + +struct ib_pma_portsamplesresult { + __be16 tag; + __be16 sample_status; /* only lower 2 bits */ + __be32 counter[15]; +} __attribute__ ((packed)); + +struct ib_pma_portsamplesresult_ext { + __be16 tag; + __be16 sample_status; /* only lower 2 bits */ + __be32 extended_width; /* only upper 2 bits */ + __be64 counter[15]; +} __attribute__ ((packed)); + +struct ib_pma_portcounters { + u8 reserved; + u8 port_select; + __be16 counter_select; + __be16 symbol_error_counter; + u8 link_error_recovery_counter; + u8 link_downed_counter; + __be16 port_rcv_errors; + __be16 port_rcv_remphys_errors; + __be16 port_rcv_switch_relay_errors; + __be16 port_xmit_discards; + u8 port_xmit_constraint_errors; + u8 port_rcv_constraint_errors; + u8 reserved1; + u8 lli_ebor_errors; /* 4, 4, bits */ + __be16 reserved2; + __be16 vl15_dropped; + __be32 port_xmit_data; + __be32 port_rcv_data; + __be32 port_xmit_packets; + __be32 port_rcv_packets; +} __attribute__ ((packed)); + +struct ib_pma_portcounters_cong { + u8 reserved; + u8 reserved1; + __be16 port_check_rate; + __be16 symbol_error_counter; + u8 link_error_recovery_counter; + u8 link_downed_counter; + __be16 port_rcv_errors; + __be16 port_rcv_remphys_errors; + __be16 port_rcv_switch_relay_errors; + __be16 port_xmit_discards; + u8 port_xmit_constraint_errors; + u8 
port_rcv_constraint_errors; + u8 reserved2; + u8 lli_ebor_errors; /* 4, 4, bits */ + __be16 reserved3; + __be16 vl15_dropped; + __be64 port_xmit_data; + __be64 port_rcv_data; + __be64 port_xmit_packets; + __be64 port_rcv_packets; + __be64 port_xmit_wait; + __be64 port_adr_events; +} __attribute__ ((packed)); + +#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00 +#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01 + +#define QIB_XMIT_RATE_UNSUPPORTED 0x0 +#define QIB_XMIT_RATE_PICO 0x7 +/* number of 4nsec cycles equaling 2secs */ +#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC + +#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001) +#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002) +#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004) +#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008) +#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010) +#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040) +#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200) +#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400) +#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800) +#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000) +#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000) +#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000) +#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000) + +#define IB_PMA_SEL_CONG_ALL 0x01 +#define IB_PMA_SEL_CONG_PORT_DATA 0x02 +#define IB_PMA_SEL_CONG_XMIT 0x04 +#define IB_PMA_SEL_CONG_ROUTING 0x08 + +struct ib_pma_portcounters_ext { + u8 reserved; + u8 port_select; + __be16 counter_select; + __be32 reserved1; + __be64 port_xmit_data; + __be64 port_rcv_data; + __be64 port_xmit_packets; + __be64 port_rcv_packets; + __be64 port_unicast_xmit_packets; + __be64 port_unicast_rcv_packets; + __be64 port_multicast_xmit_packets; + __be64 port_multicast_rcv_packets; +} __attribute__ ((packed)); + +#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001) +#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002) +#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004) +#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008) +#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010) +#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020) +#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040) +#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080) + +/* + * The PortSamplesControl.CounterMasks field is an array of 3 bit fields + * which specify the N'th counter's capabilities. See ch. 16.1.3.2. + * We support 5 counters which only count the mandatory quantities. + */ +#define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) +#define COUNTER_MASK0_9 \ + cpu_to_be32(COUNTER_MASK(1, 0) | \ + COUNTER_MASK(1, 1) | \ + COUNTER_MASK(1, 2) | \ + COUNTER_MASK(1, 3) | \ + COUNTER_MASK(1, 4)) diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c new file mode 100644 index 000000000000..8b73a11d571c --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_mmap.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <asm/pgtable.h> + +#include "qib_verbs.h" + +/** + * qib_release_mmap_info - free mmap info structure + * @ref: a pointer to the kref within struct qib_mmap_info + */ +void qib_release_mmap_info(struct kref *ref) +{ + struct qib_mmap_info *ip = + container_of(ref, struct qib_mmap_info, ref); + struct qib_ibdev *dev = to_idev(ip->context->device); + + spin_lock_irq(&dev->pending_lock); + list_del(&ip->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + + vfree(ip->obj); + kfree(ip); +} + +/* + * open and close keep track of how many times the CQ is mapped, + * to avoid releasing it. + */ +static void qib_vma_open(struct vm_area_struct *vma) +{ + struct qib_mmap_info *ip = vma->vm_private_data; + + kref_get(&ip->ref); +} + +static void qib_vma_close(struct vm_area_struct *vma) +{ + struct qib_mmap_info *ip = vma->vm_private_data; + + kref_put(&ip->ref, qib_release_mmap_info); +} + +static struct vm_operations_struct qib_vm_ops = { + .open = qib_vma_open, + .close = qib_vma_close, +}; + +/** + * qib_mmap - create a new mmap region + * @context: the IB user context of the process making the mmap() call + * @vma: the VMA to be initialized + * Return zero if the mmap is OK. Otherwise, return an errno. + */ +int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + struct qib_ibdev *dev = to_idev(context->device); + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long size = vma->vm_end - vma->vm_start; + struct qib_mmap_info *ip, *pp; + int ret = -EINVAL; + + /* + * Search the device's list of objects waiting for a mmap call. + * Normally, this list is very short since a call to create a + * CQ, QP, or SRQ is soon followed by a call to mmap(). + */ + spin_lock_irq(&dev->pending_lock); + list_for_each_entry_safe(ip, pp, &dev->pending_mmaps, + pending_mmaps) { + /* Only the creator is allowed to mmap the object */ + if (context != ip->context || (__u64) offset != ip->offset) + continue; + /* Don't allow a mmap larger than the object. 
*/ + if (size > ip->size) + break; + + list_del_init(&ip->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + + ret = remap_vmalloc_range(vma, ip->obj, 0); + if (ret) + goto done; + vma->vm_ops = &qib_vm_ops; + vma->vm_private_data = ip; + qib_vma_open(vma); + goto done; + } + spin_unlock_irq(&dev->pending_lock); +done: + return ret; +} + +/* + * Allocate information for qib_mmap + */ +struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, + u32 size, + struct ib_ucontext *context, + void *obj) { + struct qib_mmap_info *ip; + + ip = kmalloc(sizeof *ip, GFP_KERNEL); + if (!ip) + goto bail; + + size = PAGE_ALIGN(size); + + spin_lock_irq(&dev->mmap_offset_lock); + if (dev->mmap_offset == 0) + dev->mmap_offset = PAGE_SIZE; + ip->offset = dev->mmap_offset; + dev->mmap_offset += size; + spin_unlock_irq(&dev->mmap_offset_lock); + + INIT_LIST_HEAD(&ip->pending_mmaps); + ip->size = size; + ip->context = context; + ip->obj = obj; + kref_init(&ip->ref); + +bail: + return ip; +} + +void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip, + u32 size, void *obj) +{ + size = PAGE_ALIGN(size); + + spin_lock_irq(&dev->mmap_offset_lock); + if (dev->mmap_offset == 0) + dev->mmap_offset = PAGE_SIZE; + ip->offset = dev->mmap_offset; + dev->mmap_offset += size; + spin_unlock_irq(&dev->mmap_offset_lock); + + ip->size = size; + ip->obj = obj; +} diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c new file mode 100644 index 000000000000..5f95f0f6385d --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_mr.c @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <rdma/ib_umem.h> +#include <rdma/ib_smi.h> + +#include "qib.h" + +/* Fast memory region */ +struct qib_fmr { + struct ib_fmr ibfmr; + u8 page_shift; + struct qib_mregion mr; /* must be last */ +}; + +static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr) +{ + return container_of(ibfmr, struct qib_fmr, ibfmr); +} + +/** + * qib_get_dma_mr - get a DMA memory region + * @pd: protection domain for this memory region + * @acc: access flags + * + * Returns the memory region on success, otherwise returns an errno. + * Note that all DMA addresses should be created via the + * struct ib_dma_mapping_ops functions (see qib_dma.c). + */ +struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct qib_ibdev *dev = to_idev(pd->device); + struct qib_mr *mr; + struct ib_mr *ret; + unsigned long flags; + + if (to_ipd(pd)->user) { + ret = ERR_PTR(-EPERM); + goto bail; + } + + mr = kzalloc(sizeof *mr, GFP_KERNEL); + if (!mr) { + ret = ERR_PTR(-ENOMEM); + goto bail; + } + + mr->mr.access_flags = acc; + atomic_set(&mr->mr.refcount, 0); + + spin_lock_irqsave(&dev->lk_table.lock, flags); + if (!dev->dma_mr) + dev->dma_mr = &mr->mr; + spin_unlock_irqrestore(&dev->lk_table.lock, flags); + + ret = &mr->ibmr; + +bail: + return ret; +} + +static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table) +{ + struct qib_mr *mr; + int m, i = 0; + + /* Allocate struct plus pointers to first level page tables. */ + m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; + mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); + if (!mr) + goto done; + + /* Allocate first level page tables. */ + for (; i < m; i++) { + mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL); + if (!mr->mr.map[i]) + goto bail; + } + mr->mr.mapsz = m; + mr->mr.max_segs = count; + + /* + * ib_reg_phys_mr() will initialize mr->ibmr except for + * lkey and rkey. + */ + if (!qib_alloc_lkey(lk_table, &mr->mr)) + goto bail; + mr->ibmr.lkey = mr->mr.lkey; + mr->ibmr.rkey = mr->mr.lkey; + + atomic_set(&mr->mr.refcount, 0); + goto done; + +bail: + while (i) + kfree(mr->mr.map[--i]); + kfree(mr); + mr = NULL; + +done: + return mr; +} + +/** + * qib_reg_phys_mr - register a physical memory region + * @pd: protection domain for this memory region + * @buffer_list: pointer to the list of physical buffers to register + * @num_phys_buf: the number of physical buffers to register + * @iova_start: the starting address passed over IB which maps to this MR + * + * Returns the memory region on success, otherwise returns an errno. 
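alloc_mr() above sizes a two-level table: m first-level map pages, each holding QIB_SEGSZ segments, and qib_reg_phys_mr() below fills them with a simple m/n walk. A user-space rendering of that walk; the QIB_SEGSZ value of 8 is a placeholder, the real constant lives in the driver's headers:

    #include <stdio.h>

    #define QIB_SEGSZ 8 /* hypothetical segments per map page */

    int main(void)
    {
        int count = 19; /* number of physical buffers to place */
        int m = 0, n = 0, i;

        /* mirror the m/n walk qib_reg_phys_mr() uses over mr.map[m]->segs[n] */
        for (i = 0; i < count; i++) {
            printf("buffer %2d -> map[%d].segs[%d]\n", i, m, n);
            if (++n == QIB_SEGSZ) {
                m++;
                n = 0;
            }
        }
        return 0;
    }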
+ */ +struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *buffer_list, + int num_phys_buf, int acc, u64 *iova_start) +{ + struct qib_mr *mr; + int n, m, i; + struct ib_mr *ret; + + mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table); + if (mr == NULL) { + ret = ERR_PTR(-ENOMEM); + goto bail; + } + + mr->mr.pd = pd; + mr->mr.user_base = *iova_start; + mr->mr.iova = *iova_start; + mr->mr.length = 0; + mr->mr.offset = 0; + mr->mr.access_flags = acc; + mr->umem = NULL; + + m = 0; + n = 0; + for (i = 0; i < num_phys_buf; i++) { + mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; + mr->mr.map[m]->segs[n].length = buffer_list[i].size; + mr->mr.length += buffer_list[i].size; + n++; + if (n == QIB_SEGSZ) { + m++; + n = 0; + } + } + + ret = &mr->ibmr; + +bail: + return ret; +} + +/** + * qib_reg_user_mr - register a userspace memory region + * @pd: protection domain for this memory region + * @start: starting userspace address + * @length: length of region to register + * @virt_addr: virtual address to use (from HCA's point of view) + * @mr_access_flags: access flags for this memory region + * @udata: unused by the QLogic_IB driver + * + * Returns the memory region on success, otherwise returns an errno. + */ +struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int mr_access_flags, + struct ib_udata *udata) +{ + struct qib_mr *mr; + struct ib_umem *umem; + struct ib_umem_chunk *chunk; + int n, m, i; + struct ib_mr *ret; + + if (length == 0) { + ret = ERR_PTR(-EINVAL); + goto bail; + } + + umem = ib_umem_get(pd->uobject->context, start, length, + mr_access_flags, 0); + if (IS_ERR(umem)) + return (void *) umem; + + n = 0; + list_for_each_entry(chunk, &umem->chunk_list, list) + n += chunk->nents; + + mr = alloc_mr(n, &to_idev(pd->device)->lk_table); + if (!mr) { + ret = ERR_PTR(-ENOMEM); + ib_umem_release(umem); + goto bail; + } + + mr->mr.pd = pd; + mr->mr.user_base = start; + mr->mr.iova = virt_addr; + mr->mr.length = length; + mr->mr.offset = umem->offset; + mr->mr.access_flags = mr_access_flags; + mr->umem = umem; + + m = 0; + n = 0; + list_for_each_entry(chunk, &umem->chunk_list, list) { + for (i = 0; i < chunk->nents; i++) { + void *vaddr; + + vaddr = page_address(sg_page(&chunk->page_list[i])); + if (!vaddr) { + ret = ERR_PTR(-EINVAL); + goto bail; + } + mr->mr.map[m]->segs[n].vaddr = vaddr; + mr->mr.map[m]->segs[n].length = umem->page_size; + n++; + if (n == QIB_SEGSZ) { + m++; + n = 0; + } + } + } + ret = &mr->ibmr; + +bail: + return ret; +} + +/** + * qib_dereg_mr - unregister and free a memory region + * @ibmr: the memory region to free + * + * Returns 0 on success. + * + * Note that this is called to free MRs created by qib_get_dma_mr() + * or qib_reg_user_mr(). + */ +int qib_dereg_mr(struct ib_mr *ibmr) +{ + struct qib_mr *mr = to_imr(ibmr); + struct qib_ibdev *dev = to_idev(ibmr->device); + int ret; + int i; + + ret = qib_free_lkey(dev, &mr->mr); + if (ret) + return ret; + + i = mr->mr.mapsz; + while (i) + kfree(mr->mr.map[--i]); + if (mr->umem) + ib_umem_release(mr->umem); + kfree(mr); + return 0; +} + +/* + * Allocate a memory region usable with the + * IB_WR_FAST_REG_MR send work request. + * + * Return the memory region on success, otherwise return an errno. 
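qib_alloc_fast_reg_page_list(), a little further down, refuses page lists larger than one page of u64 entries, which bounds how much a single fast-register work request can describe. A back-of-envelope check, assuming 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096; /* assumed PAGE_SIZE */
        unsigned long max_len = page_size / sizeof(unsigned long long);

        /* largest page_list_len the allocator accepts, and the largest
         * region one IB_WR_FAST_REG_MR request can then cover */
        printf("max page_list_len = %lu\n", max_len);
        printf("max region = %lu MiB\n", max_len * page_size / (1024 * 1024));
        return 0;
    }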
+ */ +struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) +{ + struct qib_mr *mr; + + mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table); + if (mr == NULL) + return ERR_PTR(-ENOMEM); + + mr->mr.pd = pd; + mr->mr.user_base = 0; + mr->mr.iova = 0; + mr->mr.length = 0; + mr->mr.offset = 0; + mr->mr.access_flags = 0; + mr->umem = NULL; + + return &mr->ibmr; +} + +struct ib_fast_reg_page_list * +qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len) +{ + unsigned size = page_list_len * sizeof(u64); + struct ib_fast_reg_page_list *pl; + + if (size > PAGE_SIZE) + return ERR_PTR(-EINVAL); + + pl = kmalloc(sizeof *pl, GFP_KERNEL); + if (!pl) + return ERR_PTR(-ENOMEM); + + pl->page_list = kmalloc(size, GFP_KERNEL); + if (!pl->page_list) + goto err_free; + + return pl; + +err_free: + kfree(pl); + return ERR_PTR(-ENOMEM); +} + +void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl) +{ + kfree(pl->page_list); + kfree(pl); +} + +/** + * qib_alloc_fmr - allocate a fast memory region + * @pd: the protection domain for this memory region + * @mr_access_flags: access flags for this memory region + * @fmr_attr: fast memory region attributes + * + * Returns the memory region on success, otherwise returns an errno. + */ +struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, + struct ib_fmr_attr *fmr_attr) +{ + struct qib_fmr *fmr; + int m, i = 0; + struct ib_fmr *ret; + + /* Allocate struct plus pointers to first level page tables. */ + m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; + fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); + if (!fmr) + goto bail; + + /* Allocate first level page tables. */ + for (; i < m; i++) { + fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0], + GFP_KERNEL); + if (!fmr->mr.map[i]) + goto bail; + } + fmr->mr.mapsz = m; + + /* + * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & + * rkey. + */ + if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) + goto bail; + fmr->ibfmr.rkey = fmr->mr.lkey; + fmr->ibfmr.lkey = fmr->mr.lkey; + /* + * Resources are allocated but no valid mapping (RKEY can't be + * used). + */ + fmr->mr.pd = pd; + fmr->mr.user_base = 0; + fmr->mr.iova = 0; + fmr->mr.length = 0; + fmr->mr.offset = 0; + fmr->mr.access_flags = mr_access_flags; + fmr->mr.max_segs = fmr_attr->max_pages; + fmr->page_shift = fmr_attr->page_shift; + + atomic_set(&fmr->mr.refcount, 0); + ret = &fmr->ibfmr; + goto done; + +bail: + while (i) + kfree(fmr->mr.map[--i]); + kfree(fmr); + ret = ERR_PTR(-ENOMEM); + +done: + return ret; +} + +/** + * qib_map_phys_fmr - set up a fast memory region + * @ibmfr: the fast memory region to set up + * @page_list: the list of pages to associate with the fast memory region + * @list_len: the number of pages to associate with the fast memory region + * @iova: the virtual address of the start of the fast memory region + * + * This may be called from interrupt context. 
+ */ + +int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova) +{ + struct qib_fmr *fmr = to_ifmr(ibfmr); + struct qib_lkey_table *rkt; + unsigned long flags; + int m, n, i; + u32 ps; + int ret; + + if (atomic_read(&fmr->mr.refcount)) + return -EBUSY; + + if (list_len > fmr->mr.max_segs) { + ret = -EINVAL; + goto bail; + } + rkt = &to_idev(ibfmr->device)->lk_table; + spin_lock_irqsave(&rkt->lock, flags); + fmr->mr.user_base = iova; + fmr->mr.iova = iova; + ps = 1 << fmr->page_shift; + fmr->mr.length = list_len * ps; + m = 0; + n = 0; + for (i = 0; i < list_len; i++) { + fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; + fmr->mr.map[m]->segs[n].length = ps; + if (++n == QIB_SEGSZ) { + m++; + n = 0; + } + } + spin_unlock_irqrestore(&rkt->lock, flags); + ret = 0; + +bail: + return ret; +} + +/** + * qib_unmap_fmr - unmap fast memory regions + * @fmr_list: the list of fast memory regions to unmap + * + * Returns 0 on success. + */ +int qib_unmap_fmr(struct list_head *fmr_list) +{ + struct qib_fmr *fmr; + struct qib_lkey_table *rkt; + unsigned long flags; + + list_for_each_entry(fmr, fmr_list, ibfmr.list) { + rkt = &to_idev(fmr->ibfmr.device)->lk_table; + spin_lock_irqsave(&rkt->lock, flags); + fmr->mr.user_base = 0; + fmr->mr.iova = 0; + fmr->mr.length = 0; + spin_unlock_irqrestore(&rkt->lock, flags); + } + return 0; +} + +/** + * qib_dealloc_fmr - deallocate a fast memory region + * @ibfmr: the fast memory region to deallocate + * + * Returns 0 on success. + */ +int qib_dealloc_fmr(struct ib_fmr *ibfmr) +{ + struct qib_fmr *fmr = to_ifmr(ibfmr); + int ret; + int i; + + ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr); + if (ret) + return ret; + + i = fmr->mr.mapsz; + while (i) + kfree(fmr->mr.map[--i]); + kfree(fmr); + return 0; +} diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c new file mode 100644 index 000000000000..c926bf4541df --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -0,0 +1,738 @@ +/* + * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/vmalloc.h> +#include <linux/aer.h> + +#include "qib.h" + +/* + * This file contains PCIe utility routines that are common to the + * various QLogic InfiniPath adapters + */ + +/* + * Code to adjust PCIe capabilities. + * To minimize the change footprint, we call it + * from qib_pcie_params, which every chip-specific + * file calls, even though this violates some + * expectations of harmlessness. + */ +static int qib_tune_pcie_caps(struct qib_devdata *); +static int qib_tune_pcie_coalesce(struct qib_devdata *); + +/* + * Do all the common PCIe setup and initialization. + * devdata is not yet allocated, and is not allocated until after this + * routine returns success. Therefore qib_dev_err() can't be used for error + * printing. + */ +int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + /* + * This can happen (in theory) iff: + * We did a chip reset, and then failed to reprogram the + * BAR, or the chip reset due to an internal error. We then + * unloaded the driver and reloaded it. + * + * Both reset cases set the BAR back to initial state. For + * the latter case, the AER sticky error bit at offset 0x718 + * should be set, but the Linux kernel doesn't yet know + * about that, it appears. If the original BAR was retained + * in the kernel data structures, this may be OK. + */ + qib_early_err(&pdev->dev, "pci enable failed: error %d\n", + -ret); + goto done; + } + + ret = pci_request_regions(pdev, QIB_DRV_NAME); + if (ret) { + qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret); + goto bail; + } + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) { + /* + * If the 64 bit setup fails, try 32 bit. Some systems + * do not setup 64 bit maps on systems with 2GB or less + * memory installed. + */ + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret); + goto bail; + } + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } else + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + qib_early_err(&pdev->dev, + "Unable to set DMA consistent mask: %d\n", ret); + + pci_set_master(pdev); + ret = pci_enable_pcie_error_reporting(pdev); + if (ret) + qib_early_err(&pdev->dev, + "Unable to enable pcie error reporting: %d\n", + ret); + goto done; + +bail: + pci_disable_device(pdev); + pci_release_regions(pdev); +done: + return ret; +} + +/* + * Do remaining PCIe setup, once dd is allocated, and save away + * fields required to re-initialize after a chip reset, or for + * various other purposes + */ +int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned long len; + resource_size_t addr; + + dd->pcidev = pdev; + pci_set_drvdata(pdev, dd); + + addr = pci_resource_start(pdev, 0); + len = pci_resource_len(pdev, 0); + +#if defined(__powerpc__) + /* There isn't a generic way to specify writethrough mappings */ + dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU); +#else + dd->kregbase = ioremap_nocache(addr, len); +#endif + + if (!dd->kregbase) + return -ENOMEM; + + dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len); + dd->physaddr = addr; /* used for io_remap, etc. */ + + /* + * Save BARs to rewrite after device reset. Save all 64 bits of + * BAR, just in case. 
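The two 32-bit saves below are rejoined by qib_pcie_reenable() when it rewrites PCI_BASE_ADDRESS_0/1 after a reset. A standalone sketch of the split and rejoin, with an invented bus address:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long addr = 0x383800000000ULL; /* hypothetical 64-bit BAR */
        unsigned int bar0 = (unsigned int)addr;         /* low 32 bits */
        unsigned int bar1 = (unsigned int)(addr >> 32); /* high 32 bits */
        unsigned long long restored =
            ((unsigned long long)bar1 << 32) | bar0;

        printf("saved: bar0=0x%08x bar1=0x%08x\n", bar0, bar1);
        printf("restored: 0x%llx (%s)\n", restored,
               restored == addr ? "matches" : "mismatch");
        return 0;
    }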
+ */
+    dd->pcibar0 = addr;
+    dd->pcibar1 = addr >> 32;
+    dd->deviceid = ent->device; /* save for later use */
+    dd->vendorid = ent->vendor;
+
+    return 0;
+}
+
+/*
+ * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
+ * to releasing the dd memory.
+ * void, because none of the core PCIe cleanup calls returns an
+ * error we could act on.
+ */
+void qib_pcie_ddcleanup(struct qib_devdata *dd)
+{
+    u64 __iomem *base = (void __iomem *) dd->kregbase;
+
+    dd->kregbase = NULL;
+    iounmap(base);
+    if (dd->piobase)
+        iounmap(dd->piobase);
+    if (dd->userbase)
+        iounmap(dd->userbase);
+
+    pci_disable_device(dd->pcidev);
+    pci_release_regions(dd->pcidev);
+
+    pci_set_drvdata(dd->pcidev, NULL);
+}
+
+static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
+               struct msix_entry *msix_entry)
+{
+    int ret;
+    u32 tabsize = 0;
+    u16 msix_flags;
+
+    pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
+    tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
+    if (tabsize > *msixcnt)
+        tabsize = *msixcnt;
+    ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+    if (ret > 0) {
+        tabsize = ret;
+        ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+    }
+    if (ret) {
+        qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
+                "falling back to INTx\n", tabsize, ret);
+        tabsize = 0;
+    }
+    *msixcnt = tabsize;
+
+    if (ret)
+        qib_enable_intx(dd->pcidev);
+
+}
+
+/*
+ * We save the msi lo and hi values, so we can restore them after
+ * chip reset (the kernel PCI infrastructure doesn't yet handle that
+ * correctly).
+ */
+static int qib_msi_setup(struct qib_devdata *dd, int pos)
+{
+    struct pci_dev *pdev = dd->pcidev;
+    u16 control;
+    int ret;
+
+    ret = pci_enable_msi(pdev);
+    if (ret)
+        qib_dev_err(dd, "pci_enable_msi failed: %d, "
+                "interrupts may not work\n", ret);
+    /* continue even if it fails, we may still be OK... */
+
+    pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
+                  &dd->msi_lo);
+    pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
+                  &dd->msi_hi);
+    pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+    /* now save the data (vector) info */
+    pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
+                      ? 12 : 8),
+                 &dd->msi_data);
+    return ret;
+}
+
+int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
+            struct msix_entry *entry)
+{
+    u16 linkstat, speed;
+    int pos = 0, pose, ret = 1;
+
+    pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+    if (!pose) {
+        qib_dev_err(dd, "Can't find PCI Express capability!\n");
+        /* set up something... */
+        dd->lbus_width = 1;
+        dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+        goto bail;
+    }
+
+    pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
+    if (nent && *nent && pos) {
+        qib_msix_setup(dd, pos, nent, entry);
+        ret = 0; /* did it, either MSIx or INTx */
+    } else {
+        pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+        if (pos)
+            ret = qib_msi_setup(dd, pos);
+        else
+            qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
+    }
+    if (!pos)
+        qib_enable_intx(dd->pcidev);
+
+    pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
+    /*
+     * speed is bits 0-3, linkwidth is bits 4-8
+     * no defines for them in headers
+     */
+    speed = linkstat & 0xf;
+    linkstat >>= 4;
+    linkstat &= 0x1f;
+    dd->lbus_width = linkstat;
+
+    switch (speed) {
+    case 1:
+        dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+        break;
+    case 2:
+        dd->lbus_speed = 5000; /* Gen2, 5GHz */
+        break;
+    default: /* not defined, assume gen1 */
+        dd->lbus_speed = 2500;
+        break;
+    }
+
+    /*
+     * Check against expected pcie width and complain if "wrong"
+     * on first initialization, not afterwards (i.e., reset).
+     */
+    if (minw && linkstat < minw)
+        qib_dev_err(dd,
+                "PCIe width %u (x%u HCA), performance reduced\n",
+                linkstat, minw);
+
+    qib_tune_pcie_caps(dd);
+
+    qib_tune_pcie_coalesce(dd);
+
+bail:
+    /* fill in string, even on errors */
+    snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+         "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
+    return ret;
+}
+
+/*
+ * Setup pcie interrupt stuff again after a reset. I'd like to just call
+ * pci_enable_msi() again for msi, but when I do that,
+ * the MSI enable bit doesn't get set in the command word, and
+ * we switch to a different interrupt vector, which is confusing,
+ * so I instead just do it all inline. Perhaps somehow can tie this
+ * into the PCIe hotplug support at some point
+ */
+int qib_reinit_intr(struct qib_devdata *dd)
+{
+    int pos;
+    u16 control;
+    int ret = 0;
+
+    /* If we aren't using MSI, don't restore it */
+    if (!dd->msi_lo)
+        goto bail;
+
+    pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+    if (!pos) {
+        qib_dev_err(dd, "Can't find MSI capability, "
+                "can't restore MSI settings\n");
+        ret = 0;
+        /* nothing special for MSIx, just MSI */
+        goto bail;
+    }
+    pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+                   dd->msi_lo);
+    pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+                   dd->msi_hi);
+    pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
+    if (!(control & PCI_MSI_FLAGS_ENABLE)) {
+        control |= PCI_MSI_FLAGS_ENABLE;
+        pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+                      control);
+    }
+    /* now rewrite the data (vector) info */
+    pci_write_config_word(dd->pcidev, pos +
+                  ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
+                  dd->msi_data);
+    ret = 1;
+bail:
+    if (!ret && (dd->flags & QIB_HAS_INTX)) {
+        qib_enable_intx(dd->pcidev);
+        ret = 1;
+    }
+
+    /* and now set the pci master bit again */
+    pci_set_master(dd->pcidev);
+
+    return ret;
+}
+
+/*
+ * Disable msi interrupt if enabled, and clear msi_lo.
+ * This is used primarily for the fallback to INTx, but
+ * is also used in reinit after reset, and during cleanup.
+ */
+void qib_nomsi(struct qib_devdata *dd)
+{
+    dd->msi_lo = 0;
+    pci_disable_msi(dd->pcidev);
+}
+
+/*
+ * Same as qib_nomsi, but for MSIx.
+ */
+void qib_nomsix(struct qib_devdata *dd)
+{
+    pci_disable_msix(dd->pcidev);
+}
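The link-status decode in qib_pcie_params() above masks out the two PCI_EXP_LNKSTA fields by hand. A standalone rendering of the same arithmetic; the sample register value is invented:

    #include <stdio.h>

    int main(void)
    {
        unsigned short linkstat = 0x0082; /* hypothetical LNKSTA: 5GT/s, x8 */
        unsigned speed = linkstat & 0xf;        /* speed code, bits 0-3 */
        unsigned width = (linkstat >> 4) & 0x1f; /* lane count, bits 4-8 */

        printf("width: x%u\n", width);
        printf("speed: %u MHz\n", speed == 2 ? 5000 : 2500);
        return 0;
    }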
+
+/*
+ * Similar to pci_intx(pdev, 1), except that we make sure
+ * msi(x) is off.
+ */
+void qib_enable_intx(struct pci_dev *pdev)
+{
+    u16 cw, new;
+    int pos;
+
+    /* first, turn on INTx */
+    pci_read_config_word(pdev, PCI_COMMAND, &cw);
+    new = cw & ~PCI_COMMAND_INTX_DISABLE;
+    if (new != cw)
+        pci_write_config_word(pdev, PCI_COMMAND, new);
+
+    pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+    if (pos) {
+        /* then turn off MSI */
+        pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
+        new = cw & ~PCI_MSI_FLAGS_ENABLE;
+        if (new != cw)
+            pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
+    }
+    pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+    if (pos) {
+        /* then turn off MSIx */
+        pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
+        new = cw & ~PCI_MSIX_FLAGS_ENABLE;
+        if (new != cw)
+            pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
+    }
+}
+
+/*
+ * These two routines are helper routines for the device reset code
+ * to move all the pcie code out of the chip-specific driver code.
+ */
+void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
+{
+    pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
+    pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+    pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+}
+
+void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
+{
+    int r;
+    r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+                   dd->pcibar0);
+    if (r)
+        qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
+    r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+                   dd->pcibar1);
+    if (r)
+        qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
+    /* now re-enable memory access, and restore cosmetic settings */
+    pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
+    pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+    pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+    r = pci_enable_device(dd->pcidev);
+    if (r)
+        qib_dev_err(dd, "pci_enable_device failed after "
+                "reset: %d\n", r);
+}
+
+/* code to adjust PCIe capabilities. */
+
+static int fld2val(int wd, int mask)
+{
+    int lsbmask;
+
+    if (!mask)
+        return 0;
+    wd &= mask;
+    lsbmask = mask ^ (mask & (mask - 1));
+    wd /= lsbmask;
+    return wd;
+}
+
+static int val2fld(int wd, int mask)
+{
+    int lsbmask;
+
+    if (!mask)
+        return 0;
+    lsbmask = mask ^ (mask & (mask - 1));
+    wd *= lsbmask;
+    return wd;
+}
+
+static int qib_pcie_coalesce;
+module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
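fld2val() and val2fld() above shift a field down to (or up from) bit 0 using only its mask, by dividing or multiplying by the mask's lowest set bit. A round trip against the Device Control max-payload field (mask 0x00e0, the standard pci_regs.h value for PCI_EXP_DEVCTL_PAYLOAD); the register value is invented:

    #include <stdio.h>

    static int fld2val(int wd, int mask)
    {
        int lsbmask;

        if (!mask)
            return 0;
        wd &= mask;
        lsbmask = mask ^ (mask & (mask - 1)); /* lowest set bit of mask */
        return wd / lsbmask;
    }

    static int val2fld(int wd, int mask)
    {
        int lsbmask;

        if (!mask)
            return 0;
        lsbmask = mask ^ (mask & (mask - 1));
        return wd * lsbmask;
    }

    int main(void)
    {
        int devctl = 0x2050;       /* hypothetical DEVCTL register value */
        int payload_mask = 0x00e0; /* PCI_EXP_DEVCTL_PAYLOAD, bits 5-7 */
        int code = fld2val(devctl, payload_mask);

        /* code k encodes a 128 << k byte payload; here 2 -> 512 bytes */
        printf("payload code %d -> %d bytes\n", code, 128 << code);
        printf("rebuilt field: 0x%04x\n", val2fld(code, payload_mask));
        return 0;
    }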
+
+/*
+ * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
+ * chipsets. This is known to be unsafe for some revisions of some
+ * of these chipsets, with some BIOS settings, and enabling it on those
+ * systems may result in the system crashing, and/or data corruption.
+ */
+static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
+{
+    int r;
+    struct pci_dev *parent;
+    int ppos;
+    u16 devid;
+    u32 mask, bits, val;
+
+    if (!qib_pcie_coalesce)
+        return 0;
+
+    /* Find out supported and configured values for parent (root) */
+    parent = dd->pcidev->bus->self;
+    if (parent->bus->parent) {
+        qib_devinfo(dd->pcidev, "Parent not root\n");
+        return 1;
+    }
+    ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+    if (!ppos)
+        return 1;
+    if (parent->vendor != 0x8086)
+        return 1;
+
+    /*
+     * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
+     * - bit 11: COALESCE_FORCE: need to set to 0
+     * - bit 10: COALESCE_EN: need to set to 1
+     *   (but with limitations on some chipsets)
+     *
+     * On the Intel 5000, 5100, and 7300 chipsets, there is also:
+     * - bit 25:24: COALESCE_MODE, need to set to 0
+     */
+    devid = parent->device;
+    if (devid >= 0x25e2 && devid <= 0x25fa) {
+        u8 rev;
+
+        /* 5000 P/V/X/Z */
+        pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
+        if (rev <= 0xb2)
+            bits = 1U << 10;
+        else
+            bits = 7U << 10;
+        mask = (3U << 24) | (7U << 10);
+    } else if (devid >= 0x65e2 && devid <= 0x65fa) {
+        /* 5100 */
+        bits = 1U << 10;
+        mask = (3U << 24) | (7U << 10);
+    } else if (devid >= 0x4021 && devid <= 0x402e) {
+        /* 5400 */
+        bits = 7U << 10;
+        mask = 7U << 10;
+    } else if (devid >= 0x3604 && devid <= 0x360a) {
+        /* 7300 */
+        bits = 7U << 10;
+        mask = (3U << 24) | (7U << 10);
+    } else {
+        /* not one of the chipsets that we know about */
+        return 1;
+    }
+    pci_read_config_dword(parent, 0x48, &val);
+    val &= ~mask;
+    val |= bits;
+    r = pci_write_config_dword(parent, 0x48, val);
+    return 0;
+}
+
+/*
+ * BIOS may not set PCIe bus-utilization parameters for best performance.
+ * Check and optionally adjust them to maximize our throughput.
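Per the pcie_caps parameter description below, the low nibble carries the max-payload code and bits 4..7 the read-request code, each code k meaning 128 << k bytes. A worked decode of an invented parameter value:

    #include <stdio.h>

    int main(void)
    {
        int qib_pcie_caps = 0x51; /* hypothetical: payload 256B, readreq 4096B */
        int payload_code = qib_pcie_caps & 7;       /* low 3 bits */
        int readrq_code = (qib_pcie_caps >> 4) & 7; /* bits 4-6 */

        printf("max payload: %d bytes\n", 128 << payload_code);
        printf("max read request: %d bytes\n", 128 << readrq_code);
        return 0;
    }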
+ */ +static int qib_pcie_caps; +module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO); +MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)"); + +static int qib_tune_pcie_caps(struct qib_devdata *dd) +{ + int ret = 1; /* Assume the worst */ + struct pci_dev *parent; + int ppos, epos; + u16 pcaps, pctl, ecaps, ectl; + int rc_sup, ep_sup; + int rc_cur, ep_cur; + + /* Find out supported and configured values for parent (root) */ + parent = dd->pcidev->bus->self; + if (parent->bus->parent) { + qib_devinfo(dd->pcidev, "Parent not root\n"); + goto bail; + } + ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); + if (ppos) { + pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps); + pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl); + } else + goto bail; + /* Find out supported and configured values for endpoint (us) */ + epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP); + if (epos) { + pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps); + pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl); + } else + goto bail; + ret = 0; + /* Find max payload supported by root, endpoint */ + rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD); + ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD); + if (rc_sup > ep_sup) + rc_sup = ep_sup; + + rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD); + ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD); + + /* If Supported greater than limit in module param, limit it */ + if (rc_sup > (qib_pcie_caps & 7)) + rc_sup = qib_pcie_caps & 7; + /* If less than (allowed, supported), bump root payload */ + if (rc_sup > rc_cur) { + rc_cur = rc_sup; + pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) | + val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD); + pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl); + } + /* If less than (allowed, supported), bump endpoint payload */ + if (rc_sup > ep_cur) { + ep_cur = rc_sup; + ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) | + val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD); + pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl); + } + + /* + * Now the Read Request size. 
+ * No field for max supported, but PCIe spec limits it to 4096,
+ * which is code '5' (log2(4096) - 7)
+ */
+    rc_sup = 5;
+    if (rc_sup > ((qib_pcie_caps >> 4) & 7))
+        rc_sup = (qib_pcie_caps >> 4) & 7;
+    rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
+    ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
+
+    if (rc_sup > rc_cur) {
+        rc_cur = rc_sup;
+        pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
+            val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
+        pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+    }
+    if (rc_sup > ep_cur) {
+        ep_cur = rc_sup;
+        ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
+            val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
+        pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+    }
+bail:
+    return ret;
+}
+/* End of PCIe capability tuning */
+
+/*
+ * Everything from here through the qib_pci_err_handler definition is
+ * invoked via the PCI error-recovery infrastructure, registered via
+ * the pci_driver structure.
+ */
+static pci_ers_result_t
+qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+    struct qib_devdata *dd = pci_get_drvdata(pdev);
+    pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+    switch (state) {
+    case pci_channel_io_normal:
+        qib_devinfo(pdev, "State Normal, ignoring\n");
+        break;
+
+    case pci_channel_io_frozen:
+        qib_devinfo(pdev, "State Frozen, requesting reset\n");
+        pci_disable_device(pdev);
+        ret = PCI_ERS_RESULT_NEED_RESET;
+        break;
+
+    case pci_channel_io_perm_failure:
+        qib_devinfo(pdev, "State Permanent Failure, disabling\n");
+        if (dd) {
+            /* no more register accesses! */
+            dd->flags &= ~QIB_PRESENT;
+            qib_disable_after_error(dd);
+        }
+        /* else early, or other problem */
+        ret = PCI_ERS_RESULT_DISCONNECT;
+        break;
+
+    default: /* shouldn't happen */
+        qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
+                state);
+        break;
+    }
+    return ret;
+}
+
+static pci_ers_result_t
+qib_pci_mmio_enabled(struct pci_dev *pdev)
+{
+    u64 words = 0U;
+    struct qib_devdata *dd = pci_get_drvdata(pdev);
+    pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+    if (dd && dd->pport) {
+        words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
+        if (words == ~0ULL)
+            ret = PCI_ERS_RESULT_NEED_RESET;
+    }
+    qib_devinfo(pdev, "QIB mmio_enabled function called, "
+            "read wordscntr %Lx, returning %d\n", words, ret);
+    return ret;
+}
+
+static pci_ers_result_t
+qib_pci_slot_reset(struct pci_dev *pdev)
+{
+    qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
+    return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t
+qib_pci_link_reset(struct pci_dev *pdev)
+{
+    qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
+    return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void
+qib_pci_resume(struct pci_dev *pdev)
+{
+    struct qib_devdata *dd = pci_get_drvdata(pdev);
+    qib_devinfo(pdev, "QIB resume function called\n");
+    pci_cleanup_aer_uncorrect_error_status(pdev);
+    /*
+     * Running jobs will fail, since it's asynchronous
+     * unlike sysfs-requested reset. Better than
+     * doing nothing.
+     */
+    qib_init(dd, 1); /* same as re-init after reset */
+}
+
+struct pci_error_handlers qib_pci_err_handler = {
+    .error_detected = qib_pci_error_detected,
+    .mmio_enabled = qib_pci_mmio_enabled,
+    .link_reset = qib_pci_link_reset,
+    .slot_reset = qib_pci_slot_reset,
+    .resume = qib_pci_resume,
+};
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c
new file mode 100644
index 000000000000..10b8c444dd31
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pio_copy.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2009 QLogic Corporation. All rights reserved.
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "qib.h" + +/** + * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits + * @to: destination, in MMIO space (must be 64-bit aligned) + * @from: source (must be 64-bit aligned) + * @count: number of 32-bit quantities to copy + * + * Copy data from kernel space to MMIO space, in multiples of 32 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. + */ +void qib_pio_copy(void __iomem *to, const void *from, size_t count) +{ +#ifdef CONFIG_64BIT + u64 __iomem *dst = to; + const u64 *src = from; + const u64 *end = src + (count >> 1); + + while (src < end) + __raw_writeq(*src++, dst++); + if (count & 1) + __raw_writel(*(const u32 *)src, dst); +#else + u32 __iomem *dst = to; + const u32 *src = from; + const u32 *end = src + count; + + while (src < end) + __raw_writel(*src++, dst++); +#endif +} diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c new file mode 100644 index 000000000000..e0f65e39076b --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -0,0 +1,1255 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/err.h> +#include <linux/vmalloc.h> + +#include "qib.h" + +#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) +#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) + +static inline unsigned mk_qpn(struct qib_qpn_table *qpt, + struct qpn_map *map, unsigned off) +{ + return (map - qpt->map) * BITS_PER_PAGE + off; +} + +static inline unsigned find_next_offset(struct qib_qpn_table *qpt, + struct qpn_map *map, unsigned off, + unsigned r) +{ + if (qpt->mask) { + off++; + if ((off & qpt->mask) >> 1 != r) + off = ((off & qpt->mask) ? + (off | qpt->mask) + 1 : off) | (r << 1); + } else + off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); + return off; +} + +/* + * Convert the AETH credit code into the number of credits. + */ +static u32 credit_table[31] = { + 0, /* 0 */ + 1, /* 1 */ + 2, /* 2 */ + 3, /* 3 */ + 4, /* 4 */ + 6, /* 5 */ + 8, /* 6 */ + 12, /* 7 */ + 16, /* 8 */ + 24, /* 9 */ + 32, /* A */ + 48, /* B */ + 64, /* C */ + 96, /* D */ + 128, /* E */ + 192, /* F */ + 256, /* 10 */ + 384, /* 11 */ + 512, /* 12 */ + 768, /* 13 */ + 1024, /* 14 */ + 1536, /* 15 */ + 2048, /* 16 */ + 3072, /* 17 */ + 4096, /* 18 */ + 6144, /* 19 */ + 8192, /* 1A */ + 12288, /* 1B */ + 16384, /* 1C */ + 24576, /* 1D */ + 32768 /* 1E */ +}; + +static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map) +{ + unsigned long page = get_zeroed_page(GFP_KERNEL); + + /* + * Free the page if someone raced with us installing it. + */ + + spin_lock(&qpt->lock); + if (map->page) + free_page(page); + else + map->page = (void *)page; + spin_unlock(&qpt->lock); +} + +/* + * Allocate the next available QPN or + * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. + */ +static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, + enum ib_qp_type type, u8 port) +{ + u32 i, offset, max_scan, qpn; + struct qpn_map *map; + u32 ret; + int r; + + if (type == IB_QPT_SMI || type == IB_QPT_GSI) { + unsigned n; + + ret = type == IB_QPT_GSI; + n = 1 << (ret + 2 * (port - 1)); + spin_lock(&qpt->lock); + if (qpt->flags & n) + ret = -EINVAL; + else + qpt->flags |= n; + spin_unlock(&qpt->lock); + goto bail; + } + + r = smp_processor_id(); + if (r >= dd->n_krcv_queues) + r %= dd->n_krcv_queues; + qpn = qpt->last + 1; + if (qpn >= QPN_MAX) + qpn = 2; + if (qpt->mask && ((qpn & qpt->mask) >> 1) != r) + qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) | + (r << 1); + offset = qpn & BITS_PER_PAGE_MASK; + map = &qpt->map[qpn / BITS_PER_PAGE]; + max_scan = qpt->nmaps - !offset; + for (i = 0;;) { + if (unlikely(!map->page)) { + get_map_page(qpt, map); + if (unlikely(!map->page)) + break; + } + do { + if (!test_and_set_bit(offset, map->page)) { + qpt->last = qpn; + ret = qpn; + goto bail; + } + offset = find_next_offset(qpt, map, offset, r); + qpn = mk_qpn(qpt, map, offset); + /* + * This test differs from alloc_pidmap(). + * If find_next_offset() does find a zero + * bit, we don't need to check for QPN + * wrapping around past our starting QPN. + * We just need to be sure we don't loop + * forever. 
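Each qpn_map page in alloc_qpn() above is one page worth of allocation bits (BITS_PER_PAGE of them), so a QPN splits into a map index and a bit offset, and mk_qpn() rejoins them. A standalone check of that arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    #define BITS_PER_PAGE (4096 * 8) /* assumes PAGE_SIZE == 4096 */

    int main(void)
    {
        unsigned qpn = 70000; /* arbitrary sample QPN */
        unsigned page = qpn / BITS_PER_PAGE; /* index into qpt->map[] */
        unsigned off = qpn % BITS_PER_PAGE;  /* bit within map->page */

        printf("QPN %u -> map[%u], bit %u\n", qpn, page, off);
        /* mk_qpn() reverses the split */
        printf("mk_qpn: %u\n", page * BITS_PER_PAGE + off);
        return 0;
    }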
+			 */
+		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+		/*
+		 * In order to keep the number of pages allocated to a
+		 * minimum, we scan all the existing pages before increasing
+		 * the size of the bitmap table.
+		 */
+		if (++i > max_scan) {
+			if (qpt->nmaps == QPNMAP_ENTRIES)
+				break;
+			map = &qpt->map[qpt->nmaps++];
+			offset = qpt->mask ? (r << 1) : 0;
+		} else if (map < &qpt->map[qpt->nmaps]) {
+			++map;
+			offset = qpt->mask ? (r << 1) : 0;
+		} else {
+			map = &qpt->map[0];
+			offset = qpt->mask ? (r << 1) : 2;
+		}
+		qpn = mk_qpn(qpt, map, offset);
+	}
+
+	ret = -ENOMEM;
+
+bail:
+	return ret;
+}
+
+static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
+{
+	struct qpn_map *map;
+
+	map = qpt->map + qpn / BITS_PER_PAGE;
+	if (map->page)
+		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+}
+
+/*
+ * Put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->qpt_lock, flags);
+
+	if (qp->ibqp.qp_num == 0)
+		ibp->qp0 = qp;
+	else if (qp->ibqp.qp_num == 1)
+		ibp->qp1 = qp;
+	else {
+		qp->next = dev->qp_table[n];
+		dev->qp_table[n] = qp;
+	}
+	atomic_inc(&qp->refcount);
+
+	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/*
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive interrupt routine.
+ */
+static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	struct qib_qp *q, **qpp;
+	unsigned long flags;
+
+	qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
+
+	spin_lock_irqsave(&dev->qpt_lock, flags);
+
+	if (ibp->qp0 == qp) {
+		ibp->qp0 = NULL;
+		atomic_dec(&qp->refcount);
+	} else if (ibp->qp1 == qp) {
+		ibp->qp1 = NULL;
+		atomic_dec(&qp->refcount);
+	} else
+		for (; (q = *qpp) != NULL; qpp = &q->next)
+			if (q == qp) {
+				*qpp = qp->next;
+				qp->next = NULL;
+				atomic_dec(&qp->refcount);
+				break;
+			}
+
+	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/**
+ * qib_free_all_qps - check for QPs still in use
+ * @dd: the device data structure
+ *
+ * There should not be any QPs still in use.
+ * Returns the number of QPs found to still be in use.
+ */
+unsigned qib_free_all_qps(struct qib_devdata *dd)
+{
+	struct qib_ibdev *dev = &dd->verbs_dev;
+	unsigned long flags;
+	struct qib_qp *qp;
+	unsigned n, qp_inuse = 0;
+
+	for (n = 0; n < dd->num_pports; n++) {
+		struct qib_ibport *ibp = &dd->pport[n].ibport_data;
+
+		if (!qib_mcast_tree_empty(ibp))
+			qp_inuse++;
+		if (ibp->qp0)
+			qp_inuse++;
+		if (ibp->qp1)
+			qp_inuse++;
+	}
+
+	spin_lock_irqsave(&dev->qpt_lock, flags);
+	for (n = 0; n < dev->qp_table_size; n++) {
+		qp = dev->qp_table[n];
+		dev->qp_table[n] = NULL;
+
+		for (; qp; qp = qp->next)
+			qp_inuse++;
+	}
+	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+
+	return qp_inuse;
+}
+
+/**
+ * qib_lookup_qpn - return the QP with the given QPN
+ * @ibp: the IB port on which to look up the QPN
+ * @qpn: the QP number to look up
+ *
+ * The caller is responsible for decrementing the QP reference count
+ * when done.
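alloc_qpn() above treats qpt->map as pages of one large bitmap and claims a QPN with an atomic test-and-set. A single-threaded userspace sketch of the same idea, reduced to one page with no receive-queue interleaving; all names are illustrative:

#include <limits.h>
#include <stdio.h>

#define NBITS 4096
static unsigned long page[NBITS / (sizeof(unsigned long) * CHAR_BIT)];

/* Find, claim and return the first clear bit at or after 'from',
 * or -1 if the page is full (no wraparound, unlike the driver). */
static int claim_id(unsigned from)
{
	unsigned bpl = sizeof(unsigned long) * CHAR_BIT;
	unsigned i;

	for (i = from; i < NBITS; i++) {
		unsigned long mask = 1UL << (i % bpl);

		if (!(page[i / bpl] & mask)) {
			/* the driver uses test_and_set_bit() here */
			page[i / bpl] |= mask;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	printf("first: %d\n", claim_id(2));	/* QPNs 0/1 are reserved for SMI/GSI */
	printf("next:  %d\n", claim_id(2));
	return 0;
}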
+ */ +struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) +{ + struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; + unsigned long flags; + struct qib_qp *qp; + + spin_lock_irqsave(&dev->qpt_lock, flags); + + if (qpn == 0) + qp = ibp->qp0; + else if (qpn == 1) + qp = ibp->qp1; + else + for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp; + qp = qp->next) + if (qp->ibqp.qp_num == qpn) + break; + if (qp) + atomic_inc(&qp->refcount); + + spin_unlock_irqrestore(&dev->qpt_lock, flags); + return qp; +} + +/** + * qib_reset_qp - initialize the QP state to the reset state + * @qp: the QP to reset + * @type: the QP type + */ +static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type) +{ + qp->remote_qpn = 0; + qp->qkey = 0; + qp->qp_access_flags = 0; + atomic_set(&qp->s_dma_busy, 0); + qp->s_flags &= QIB_S_SIGNAL_REQ_WR; + qp->s_hdrwords = 0; + qp->s_wqe = NULL; + qp->s_draining = 0; + qp->s_next_psn = 0; + qp->s_last_psn = 0; + qp->s_sending_psn = 0; + qp->s_sending_hpsn = 0; + qp->s_psn = 0; + qp->r_psn = 0; + qp->r_msn = 0; + if (type == IB_QPT_RC) { + qp->s_state = IB_OPCODE_RC_SEND_LAST; + qp->r_state = IB_OPCODE_RC_SEND_LAST; + } else { + qp->s_state = IB_OPCODE_UC_SEND_LAST; + qp->r_state = IB_OPCODE_UC_SEND_LAST; + } + qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; + qp->r_nak_state = 0; + qp->r_aflags = 0; + qp->r_flags = 0; + qp->s_head = 0; + qp->s_tail = 0; + qp->s_cur = 0; + qp->s_acked = 0; + qp->s_last = 0; + qp->s_ssn = 1; + qp->s_lsn = 0; + qp->s_mig_state = IB_MIG_MIGRATED; + memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); + qp->r_head_ack_queue = 0; + qp->s_tail_ack_queue = 0; + qp->s_num_rd_atomic = 0; + if (qp->r_rq.wq) { + qp->r_rq.wq->head = 0; + qp->r_rq.wq->tail = 0; + } + qp->r_sge.num_sge = 0; +} + +static void clear_mr_refs(struct qib_qp *qp, int clr_sends) +{ + unsigned n; + + if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) + while (qp->s_rdma_read_sge.num_sge) { + atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); + if (--qp->s_rdma_read_sge.num_sge) + qp->s_rdma_read_sge.sge = + *qp->s_rdma_read_sge.sg_list++; + } + + while (qp->r_sge.num_sge) { + atomic_dec(&qp->r_sge.sge.mr->refcount); + if (--qp->r_sge.num_sge) + qp->r_sge.sge = *qp->r_sge.sg_list++; + } + + if (clr_sends) { + while (qp->s_last != qp->s_head) { + struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last); + unsigned i; + + for (i = 0; i < wqe->wr.num_sge; i++) { + struct qib_sge *sge = &wqe->sg_list[i]; + + atomic_dec(&sge->mr->refcount); + } + if (qp->ibqp.qp_type == IB_QPT_UD || + qp->ibqp.qp_type == IB_QPT_SMI || + qp->ibqp.qp_type == IB_QPT_GSI) + atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); + if (++qp->s_last >= qp->s_size) + qp->s_last = 0; + } + if (qp->s_rdma_mr) { + atomic_dec(&qp->s_rdma_mr->refcount); + qp->s_rdma_mr = NULL; + } + } + + if (qp->ibqp.qp_type != IB_QPT_RC) + return; + + for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { + struct qib_ack_entry *e = &qp->s_ack_queue[n]; + + if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && + e->rdma_sge.mr) { + atomic_dec(&e->rdma_sge.mr->refcount); + e->rdma_sge.mr = NULL; + } + } +} + +/** + * qib_error_qp - put a QP into the error state + * @qp: the QP to put into the error state + * @err: the receive completion error to signal if a RWQE is active + * + * Flushes both send and receive work queues. + * Returns true if last WQE event should be generated. + * The QP s_lock should be held and interrupts disabled. + * If we are already in error state, just return. 
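qib_lookup_qpn() resolves a QPN through a small modulo hash table of singly linked QPs, taking a reference under the table lock that the caller must later drop. A reduced userspace sketch of that pattern (plain counter instead of atomic_t, no locking):

#include <stdio.h>

#define TBL 16

struct qp {
	unsigned qpn;
	int refcount;
	struct qp *next;
};

static struct qp *table[TBL];

static struct qp *lookup(unsigned qpn)
{
	struct qp *qp;

	/* the driver does this walk under dev->qpt_lock */
	for (qp = table[qpn % TBL]; qp; qp = qp->next)
		if (qp->qpn == qpn)
			break;
	if (qp)
		qp->refcount++;	/* caller must drop this reference */
	return qp;
}

int main(void)
{
	struct qp a = { .qpn = 42 };
	struct qp *found;

	table[42 % TBL] = &a;
	found = lookup(42);
	printf("found qpn %u, refcount %d\n", found->qpn, found->refcount);
	return 0;
}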
+ */ +int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) +{ + struct qib_ibdev *dev = to_idev(qp->ibqp.device); + struct ib_wc wc; + int ret = 0; + + if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) + goto bail; + + qp->state = IB_QPS_ERR; + + if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { + qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); + del_timer(&qp->s_timer); + } + spin_lock(&dev->pending_lock); + if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { + qp->s_flags &= ~QIB_S_ANY_WAIT_IO; + list_del_init(&qp->iowait); + } + spin_unlock(&dev->pending_lock); + + if (!(qp->s_flags & QIB_S_BUSY)) { + qp->s_hdrwords = 0; + if (qp->s_rdma_mr) { + atomic_dec(&qp->s_rdma_mr->refcount); + qp->s_rdma_mr = NULL; + } + if (qp->s_tx) { + qib_put_txreq(qp->s_tx); + qp->s_tx = NULL; + } + } + + /* Schedule the sending tasklet to drain the send work queue. */ + if (qp->s_last != qp->s_head) + qib_schedule_send(qp); + + clear_mr_refs(qp, 0); + + memset(&wc, 0, sizeof(wc)); + wc.qp = &qp->ibqp; + wc.opcode = IB_WC_RECV; + + if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) { + wc.wr_id = qp->r_wr_id; + wc.status = err; + qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + } + wc.status = IB_WC_WR_FLUSH_ERR; + + if (qp->r_rq.wq) { + struct qib_rwq *wq; + u32 head; + u32 tail; + + spin_lock(&qp->r_rq.lock); + + /* sanity check pointers before trusting them */ + wq = qp->r_rq.wq; + head = wq->head; + if (head >= qp->r_rq.size) + head = 0; + tail = wq->tail; + if (tail >= qp->r_rq.size) + tail = 0; + while (tail != head) { + wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; + if (++tail >= qp->r_rq.size) + tail = 0; + qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + } + wq->tail = tail; + + spin_unlock(&qp->r_rq.lock); + } else if (qp->ibqp.event_handler) + ret = 1; + +bail: + return ret; +} + +/** + * qib_modify_qp - modify the attributes of a queue pair + * @ibqp: the queue pair who's attributes we're modifying + * @attr: the new attributes + * @attr_mask: the mask of attributes to modify + * @udata: user data for libibverbs.so + * + * Returns 0 on success, otherwise returns an errno. + */ +int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct qib_ibdev *dev = to_idev(ibqp->device); + struct qib_qp *qp = to_iqp(ibqp); + enum ib_qp_state cur_state, new_state; + struct ib_event ev; + int lastwqe = 0; + int mig = 0; + int ret; + u32 pmtu = 0; /* for gcc warning only */ + + spin_lock_irq(&qp->r_lock); + spin_lock(&qp->s_lock); + + cur_state = attr_mask & IB_QP_CUR_STATE ? + attr->cur_qp_state : qp->state; + new_state = attr_mask & IB_QP_STATE ? 
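qib_error_qp() drains the receive ring by walking tail toward head and emitting one flush completion per posted RWQE; head and tail are clamped first because the ring is shared with userspace and cannot be trusted. A standalone sketch of the drain loop, with an illustrative hook in place of qib_cq_enter():

#include <stdio.h>

#define RQ_SIZE 8

static unsigned wr_id[RQ_SIZE];

static void flush_one(unsigned id)
{
	printf("flush completion for wr_id %u\n", id);
}

/* Drain all entries between tail and head of a RQ_SIZE-slot ring. */
static void drain_rq(unsigned head, unsigned tail)
{
	/* sanity check indices before trusting them, as the driver does */
	if (head >= RQ_SIZE)
		head = 0;
	if (tail >= RQ_SIZE)
		tail = 0;
	while (tail != head) {
		flush_one(wr_id[tail]);
		if (++tail >= RQ_SIZE)
			tail = 0;
	}
}

int main(void)
{
	for (unsigned i = 0; i < RQ_SIZE; i++)
		wr_id[i] = 100 + i;
	drain_rq(2, 6);	/* flushes wr_ids 106, 107, 100, 101 */
	return 0;
}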
attr->qp_state : cur_state; + + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, + attr_mask)) + goto inval; + + if (attr_mask & IB_QP_AV) { + if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE) + goto inval; + if (qib_check_ah(qp->ibqp.device, &attr->ah_attr)) + goto inval; + } + + if (attr_mask & IB_QP_ALT_PATH) { + if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE) + goto inval; + if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) + goto inval; + if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev))) + goto inval; + } + + if (attr_mask & IB_QP_PKEY_INDEX) + if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev))) + goto inval; + + if (attr_mask & IB_QP_MIN_RNR_TIMER) + if (attr->min_rnr_timer > 31) + goto inval; + + if (attr_mask & IB_QP_PORT) + if (qp->ibqp.qp_type == IB_QPT_SMI || + qp->ibqp.qp_type == IB_QPT_GSI || + attr->port_num == 0 || + attr->port_num > ibqp->device->phys_port_cnt) + goto inval; + + if (attr_mask & IB_QP_DEST_QPN) + if (attr->dest_qp_num > QIB_QPN_MASK) + goto inval; + + if (attr_mask & IB_QP_RETRY_CNT) + if (attr->retry_cnt > 7) + goto inval; + + if (attr_mask & IB_QP_RNR_RETRY) + if (attr->rnr_retry > 7) + goto inval; + + /* + * Don't allow invalid path_mtu values. OK to set greater + * than the active mtu (or even the max_cap, if we have tuned + * that to a small mtu. We'll set qp->path_mtu + * to the lesser of requested attribute mtu and active, + * for packetizing messages. + * Note that the QP port has to be set in INIT and MTU in RTR. + */ + if (attr_mask & IB_QP_PATH_MTU) { + struct qib_devdata *dd = dd_from_dev(dev); + int mtu, pidx = qp->port_num - 1; + + mtu = ib_mtu_enum_to_int(attr->path_mtu); + if (mtu == -1) + goto inval; + if (mtu > dd->pport[pidx].ibmtu) { + switch (dd->pport[pidx].ibmtu) { + case 4096: + pmtu = IB_MTU_4096; + break; + case 2048: + pmtu = IB_MTU_2048; + break; + case 1024: + pmtu = IB_MTU_1024; + break; + case 512: + pmtu = IB_MTU_512; + break; + case 256: + pmtu = IB_MTU_256; + break; + default: + pmtu = IB_MTU_2048; + } + } else + pmtu = attr->path_mtu; + } + + if (attr_mask & IB_QP_PATH_MIG_STATE) { + if (attr->path_mig_state == IB_MIG_REARM) { + if (qp->s_mig_state == IB_MIG_ARMED) + goto inval; + if (new_state != IB_QPS_RTS) + goto inval; + } else if (attr->path_mig_state == IB_MIG_MIGRATED) { + if (qp->s_mig_state == IB_MIG_REARM) + goto inval; + if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD) + goto inval; + if (qp->s_mig_state == IB_MIG_ARMED) + mig = 1; + } else + goto inval; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC) + goto inval; + + switch (new_state) { + case IB_QPS_RESET: + if (qp->state != IB_QPS_RESET) { + qp->state = IB_QPS_RESET; + spin_lock(&dev->pending_lock); + if (!list_empty(&qp->iowait)) + list_del_init(&qp->iowait); + spin_unlock(&dev->pending_lock); + qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); + spin_unlock(&qp->s_lock); + spin_unlock_irq(&qp->r_lock); + /* Stop the sending work queue and retry timer */ + cancel_work_sync(&qp->s_work); + del_timer_sync(&qp->s_timer); + wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); + if (qp->s_tx) { + qib_put_txreq(qp->s_tx); + qp->s_tx = NULL; + } + remove_qp(dev, qp); + wait_event(qp->wait, !atomic_read(&qp->refcount)); + spin_lock_irq(&qp->r_lock); + spin_lock(&qp->s_lock); + clear_mr_refs(qp, 1); + qib_reset_qp(qp, ibqp->qp_type); + } + break; + + case IB_QPS_RTR: + /* Allow event to retrigger if QP set to RTR more than once */ + qp->r_flags &= 
~QIB_R_COMM_EST; + qp->state = new_state; + break; + + case IB_QPS_SQD: + qp->s_draining = qp->s_last != qp->s_cur; + qp->state = new_state; + break; + + case IB_QPS_SQE: + if (qp->ibqp.qp_type == IB_QPT_RC) + goto inval; + qp->state = new_state; + break; + + case IB_QPS_ERR: + lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + break; + + default: + qp->state = new_state; + break; + } + + if (attr_mask & IB_QP_PKEY_INDEX) + qp->s_pkey_index = attr->pkey_index; + + if (attr_mask & IB_QP_PORT) + qp->port_num = attr->port_num; + + if (attr_mask & IB_QP_DEST_QPN) + qp->remote_qpn = attr->dest_qp_num; + + if (attr_mask & IB_QP_SQ_PSN) { + qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK; + qp->s_psn = qp->s_next_psn; + qp->s_sending_psn = qp->s_next_psn; + qp->s_last_psn = qp->s_next_psn - 1; + qp->s_sending_hpsn = qp->s_last_psn; + } + + if (attr_mask & IB_QP_RQ_PSN) + qp->r_psn = attr->rq_psn & QIB_PSN_MASK; + + if (attr_mask & IB_QP_ACCESS_FLAGS) + qp->qp_access_flags = attr->qp_access_flags; + + if (attr_mask & IB_QP_AV) { + qp->remote_ah_attr = attr->ah_attr; + qp->s_srate = attr->ah_attr.static_rate; + } + + if (attr_mask & IB_QP_ALT_PATH) { + qp->alt_ah_attr = attr->alt_ah_attr; + qp->s_alt_pkey_index = attr->alt_pkey_index; + } + + if (attr_mask & IB_QP_PATH_MIG_STATE) { + qp->s_mig_state = attr->path_mig_state; + if (mig) { + qp->remote_ah_attr = qp->alt_ah_attr; + qp->port_num = qp->alt_ah_attr.port_num; + qp->s_pkey_index = qp->s_alt_pkey_index; + } + } + + if (attr_mask & IB_QP_PATH_MTU) + qp->path_mtu = pmtu; + + if (attr_mask & IB_QP_RETRY_CNT) { + qp->s_retry_cnt = attr->retry_cnt; + qp->s_retry = attr->retry_cnt; + } + + if (attr_mask & IB_QP_RNR_RETRY) { + qp->s_rnr_retry_cnt = attr->rnr_retry; + qp->s_rnr_retry = attr->rnr_retry; + } + + if (attr_mask & IB_QP_MIN_RNR_TIMER) + qp->r_min_rnr_timer = attr->min_rnr_timer; + + if (attr_mask & IB_QP_TIMEOUT) + qp->timeout = attr->timeout; + + if (attr_mask & IB_QP_QKEY) + qp->qkey = attr->qkey; + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + qp->r_max_rd_atomic = attr->max_dest_rd_atomic; + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) + qp->s_max_rd_atomic = attr->max_rd_atomic; + + spin_unlock(&qp->s_lock); + spin_unlock_irq(&qp->r_lock); + + if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) + insert_qp(dev, qp); + + if (lastwqe) { + ev.device = qp->ibqp.device; + ev.element.qp = &qp->ibqp; + ev.event = IB_EVENT_QP_LAST_WQE_REACHED; + qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); + } + if (mig) { + ev.device = qp->ibqp.device; + ev.element.qp = &qp->ibqp; + ev.event = IB_EVENT_PATH_MIG; + qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); + } + ret = 0; + goto bail; + +inval: + spin_unlock(&qp->s_lock); + spin_unlock_irq(&qp->r_lock); + ret = -EINVAL; + +bail: + return ret; +} + +int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_qp_init_attr *init_attr) +{ + struct qib_qp *qp = to_iqp(ibqp); + + attr->qp_state = qp->state; + attr->cur_qp_state = attr->qp_state; + attr->path_mtu = qp->path_mtu; + attr->path_mig_state = qp->s_mig_state; + attr->qkey = qp->qkey; + attr->rq_psn = qp->r_psn & QIB_PSN_MASK; + attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK; + attr->dest_qp_num = qp->remote_qpn; + attr->qp_access_flags = qp->qp_access_flags; + attr->cap.max_send_wr = qp->s_size - 1; + attr->cap.max_recv_wr = qp->ibqp.srq ? 
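PSNs are 24-bit values (hence the QIB_PSN_MASK arithmetic above), so ordering tests in this driver use a signed comparison of the masked difference (qib_cmp24(), defined elsewhere in the driver) rather than a plain less-than. A behaviorally equivalent standalone version, assuming two's-complement arithmetic:

#include <stdint.h>
#include <stdio.h>

#define PSN_MASK 0xFFFFFF

/* <0, 0, >0 as a is before, equal to, or after b in 24-bit
 * circular sequence space. */
static int cmp24(uint32_t a, uint32_t b)
{
	/* shift the 24-bit difference to the top of an s32 so the
	 * sign bit reflects circular order */
	return (int32_t)(((a - b) & PSN_MASK) << 8);
}

int main(void)
{
	printf("%d\n", cmp24(5, 3) > 0);	 /* 1: 5 is after 3 */
	printf("%d\n", cmp24(2, PSN_MASK) > 0);	 /* 1: 2 is after 0xFFFFFF (wrap) */
	return 0;
}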
0 : qp->r_rq.size - 1; + attr->cap.max_send_sge = qp->s_max_sge; + attr->cap.max_recv_sge = qp->r_rq.max_sge; + attr->cap.max_inline_data = 0; + attr->ah_attr = qp->remote_ah_attr; + attr->alt_ah_attr = qp->alt_ah_attr; + attr->pkey_index = qp->s_pkey_index; + attr->alt_pkey_index = qp->s_alt_pkey_index; + attr->en_sqd_async_notify = 0; + attr->sq_draining = qp->s_draining; + attr->max_rd_atomic = qp->s_max_rd_atomic; + attr->max_dest_rd_atomic = qp->r_max_rd_atomic; + attr->min_rnr_timer = qp->r_min_rnr_timer; + attr->port_num = qp->port_num; + attr->timeout = qp->timeout; + attr->retry_cnt = qp->s_retry_cnt; + attr->rnr_retry = qp->s_rnr_retry_cnt; + attr->alt_port_num = qp->alt_ah_attr.port_num; + attr->alt_timeout = qp->alt_timeout; + + init_attr->event_handler = qp->ibqp.event_handler; + init_attr->qp_context = qp->ibqp.qp_context; + init_attr->send_cq = qp->ibqp.send_cq; + init_attr->recv_cq = qp->ibqp.recv_cq; + init_attr->srq = qp->ibqp.srq; + init_attr->cap = attr->cap; + if (qp->s_flags & QIB_S_SIGNAL_REQ_WR) + init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; + else + init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; + init_attr->qp_type = qp->ibqp.qp_type; + init_attr->port_num = qp->port_num; + return 0; +} + +/** + * qib_compute_aeth - compute the AETH (syndrome + MSN) + * @qp: the queue pair to compute the AETH for + * + * Returns the AETH. + */ +__be32 qib_compute_aeth(struct qib_qp *qp) +{ + u32 aeth = qp->r_msn & QIB_MSN_MASK; + + if (qp->ibqp.srq) { + /* + * Shared receive queues don't generate credits. + * Set the credit field to the invalid value. + */ + aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT; + } else { + u32 min, max, x; + u32 credits; + struct qib_rwq *wq = qp->r_rq.wq; + u32 head; + u32 tail; + + /* sanity check pointers before trusting them */ + head = wq->head; + if (head >= qp->r_rq.size) + head = 0; + tail = wq->tail; + if (tail >= qp->r_rq.size) + tail = 0; + /* + * Compute the number of credits available (RWQEs). + * XXX Not holding the r_rq.lock here so there is a small + * chance that the pair of reads are not atomic. + */ + credits = head - tail; + if ((int)credits < 0) + credits += qp->r_rq.size; + /* + * Binary search the credit table to find the code to + * use. + */ + min = 0; + max = 31; + for (;;) { + x = (min + max) / 2; + if (credit_table[x] == credits) + break; + if (credit_table[x] > credits) + max = x; + else if (min == x) + break; + else + min = x; + } + aeth |= x << QIB_AETH_CREDIT_SHIFT; + } + return cpu_to_be32(aeth); +} + +/** + * qib_create_qp - create a queue pair for a device + * @ibpd: the protection domain who's device we create the queue pair for + * @init_attr: the attributes of the queue pair + * @udata: user data for libibverbs.so + * + * Returns the queue pair on success, otherwise returns an errno. + * + * Called by the ib_create_qp() core verbs function. + */ +struct ib_qp *qib_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct qib_qp *qp; + int err; + struct qib_swqe *swq = NULL; + struct qib_ibdev *dev; + struct qib_devdata *dd; + size_t sz; + size_t sg_list_sz; + struct ib_qp *ret; + + if (init_attr->cap.max_send_sge > ib_qib_max_sges || + init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) { + ret = ERR_PTR(-EINVAL); + goto bail; + } + + /* Check receive queue parameters if no SRQ is specified. 
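qib_compute_aeth() above never advertises more credits than are actually available: the binary search settles on the largest table entry that does not exceed the credit count. The same search in standalone form, over the same 31-entry table (names illustrative):

#include <stdint.h>
#include <stdio.h>

static const uint32_t tbl[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144,
	8192, 12288, 16384, 24576, 32768
};

/* Largest code whose credit value does not exceed 'credits'. */
static unsigned credits_to_code(uint32_t credits)
{
	unsigned min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (tbl[x] == credits)
			break;
		if (tbl[x] > credits)
			max = x;
		else if (min == x)	/* interval closed: take the floor */
			break;
		else
			min = x;
	}
	return x;
}

int main(void)
{
	unsigned code = credits_to_code(100);

	/* prints code 0xD (96): never over-advertises 100 credits */
	printf("100 credits -> code 0x%X (%u)\n", code, tbl[code]);
	return 0;
}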
*/ + if (!init_attr->srq) { + if (init_attr->cap.max_recv_sge > ib_qib_max_sges || + init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) { + ret = ERR_PTR(-EINVAL); + goto bail; + } + if (init_attr->cap.max_send_sge + + init_attr->cap.max_send_wr + + init_attr->cap.max_recv_sge + + init_attr->cap.max_recv_wr == 0) { + ret = ERR_PTR(-EINVAL); + goto bail; + } + } + + switch (init_attr->qp_type) { + case IB_QPT_SMI: + case IB_QPT_GSI: + if (init_attr->port_num == 0 || + init_attr->port_num > ibpd->device->phys_port_cnt) { + ret = ERR_PTR(-EINVAL); + goto bail; + } + case IB_QPT_UC: + case IB_QPT_RC: + case IB_QPT_UD: + sz = sizeof(struct qib_sge) * + init_attr->cap.max_send_sge + + sizeof(struct qib_swqe); + swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz); + if (swq == NULL) { + ret = ERR_PTR(-ENOMEM); + goto bail; + } + sz = sizeof(*qp); + sg_list_sz = 0; + if (init_attr->srq) { + struct qib_srq *srq = to_isrq(init_attr->srq); + + if (srq->rq.max_sge > 1) + sg_list_sz = sizeof(*qp->r_sg_list) * + (srq->rq.max_sge - 1); + } else if (init_attr->cap.max_recv_sge > 1) + sg_list_sz = sizeof(*qp->r_sg_list) * + (init_attr->cap.max_recv_sge - 1); + qp = kzalloc(sz + sg_list_sz, GFP_KERNEL); + if (!qp) { + ret = ERR_PTR(-ENOMEM); + goto bail_swq; + } + if (init_attr->srq) + sz = 0; + else { + qp->r_rq.size = init_attr->cap.max_recv_wr + 1; + qp->r_rq.max_sge = init_attr->cap.max_recv_sge; + sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + + sizeof(struct qib_rwqe); + qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) + + qp->r_rq.size * sz); + if (!qp->r_rq.wq) { + ret = ERR_PTR(-ENOMEM); + goto bail_qp; + } + } + + /* + * ib_create_qp() will initialize qp->ibqp + * except for qp->ibqp.qp_num. + */ + spin_lock_init(&qp->r_lock); + spin_lock_init(&qp->s_lock); + spin_lock_init(&qp->r_rq.lock); + atomic_set(&qp->refcount, 0); + init_waitqueue_head(&qp->wait); + init_waitqueue_head(&qp->wait_dma); + init_timer(&qp->s_timer); + qp->s_timer.data = (unsigned long)qp; + INIT_WORK(&qp->s_work, qib_do_send); + INIT_LIST_HEAD(&qp->iowait); + INIT_LIST_HEAD(&qp->rspwait); + qp->state = IB_QPS_RESET; + qp->s_wq = swq; + qp->s_size = init_attr->cap.max_send_wr + 1; + qp->s_max_sge = init_attr->cap.max_send_sge; + if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) + qp->s_flags = QIB_S_SIGNAL_REQ_WR; + dev = to_idev(ibpd->device); + dd = dd_from_dev(dev); + err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type, + init_attr->port_num); + if (err < 0) { + ret = ERR_PTR(err); + vfree(qp->r_rq.wq); + goto bail_qp; + } + qp->ibqp.qp_num = err; + qp->port_num = init_attr->port_num; + qp->processor_id = smp_processor_id(); + qib_reset_qp(qp, init_attr->qp_type); + break; + + default: + /* Don't support raw QPs */ + ret = ERR_PTR(-ENOSYS); + goto bail; + } + + init_attr->cap.max_inline_data = 0; + + /* + * Return the address of the RWQ as the offset to mmap. + * See qib_mmap() for details. 
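Each send-WQE slot allocated above is a fixed header followed by a per-slot SGE array, and the ring gets one extra slot so that head == tail can unambiguously mean empty. A sketch of the same sizing arithmetic with stand-in types (struct qib_swqe and struct qib_sge are larger in the driver):

#include <stdio.h>
#include <stdlib.h>

/* stand-ins for struct qib_swqe / struct qib_sge */
struct swqe_hdr { unsigned long wr_id; unsigned psn, lpsn, length; };
struct sge { void *addr; unsigned length, key; };

int main(void)
{
	unsigned max_send_wr = 64, max_send_sge = 4;

	/* per-slot size: header plus trailing SGE array */
	size_t slot = sizeof(struct swqe_hdr) +
		      sizeof(struct sge) * max_send_sge;
	/* one extra slot distinguishes a full ring from an empty one */
	void *ring = calloc(max_send_wr + 1, slot);

	printf("slot %zu bytes, ring %zu bytes\n",
	       slot, (max_send_wr + 1) * slot);
	free(ring);
	return 0;
}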
+ */ + if (udata && udata->outlen >= sizeof(__u64)) { + if (!qp->r_rq.wq) { + __u64 offset = 0; + + err = ib_copy_to_udata(udata, &offset, + sizeof(offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; + } + } else { + u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz; + + qp->ip = qib_create_mmap_info(dev, s, + ibpd->uobject->context, + qp->r_rq.wq); + if (!qp->ip) { + ret = ERR_PTR(-ENOMEM); + goto bail_ip; + } + + err = ib_copy_to_udata(udata, &(qp->ip->offset), + sizeof(qp->ip->offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; + } + } + } + + spin_lock(&dev->n_qps_lock); + if (dev->n_qps_allocated == ib_qib_max_qps) { + spin_unlock(&dev->n_qps_lock); + ret = ERR_PTR(-ENOMEM); + goto bail_ip; + } + + dev->n_qps_allocated++; + spin_unlock(&dev->n_qps_lock); + + if (qp->ip) { + spin_lock_irq(&dev->pending_lock); + list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + + ret = &qp->ibqp; + goto bail; + +bail_ip: + if (qp->ip) + kref_put(&qp->ip->ref, qib_release_mmap_info); + else + vfree(qp->r_rq.wq); + free_qpn(&dev->qpn_table, qp->ibqp.qp_num); +bail_qp: + kfree(qp); +bail_swq: + vfree(swq); +bail: + return ret; +} + +/** + * qib_destroy_qp - destroy a queue pair + * @ibqp: the queue pair to destroy + * + * Returns 0 on success. + * + * Note that this can be called while the QP is actively sending or + * receiving! + */ +int qib_destroy_qp(struct ib_qp *ibqp) +{ + struct qib_qp *qp = to_iqp(ibqp); + struct qib_ibdev *dev = to_idev(ibqp->device); + + /* Make sure HW and driver activity is stopped. */ + spin_lock_irq(&qp->s_lock); + if (qp->state != IB_QPS_RESET) { + qp->state = IB_QPS_RESET; + spin_lock(&dev->pending_lock); + if (!list_empty(&qp->iowait)) + list_del_init(&qp->iowait); + spin_unlock(&dev->pending_lock); + qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); + spin_unlock_irq(&qp->s_lock); + cancel_work_sync(&qp->s_work); + del_timer_sync(&qp->s_timer); + wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); + if (qp->s_tx) { + qib_put_txreq(qp->s_tx); + qp->s_tx = NULL; + } + remove_qp(dev, qp); + wait_event(qp->wait, !atomic_read(&qp->refcount)); + clear_mr_refs(qp, 1); + } else + spin_unlock_irq(&qp->s_lock); + + /* all user's cleaned up, mark it available */ + free_qpn(&dev->qpn_table, qp->ibqp.qp_num); + spin_lock(&dev->n_qps_lock); + dev->n_qps_allocated--; + spin_unlock(&dev->n_qps_lock); + + if (qp->ip) + kref_put(&qp->ip->ref, qib_release_mmap_info); + else + vfree(qp->r_rq.wq); + vfree(qp->s_wq); + kfree(qp); + return 0; +} + +/** + * qib_init_qpn_table - initialize the QP number table for a device + * @qpt: the QPN table + */ +void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt) +{ + spin_lock_init(&qpt->lock); + qpt->last = 1; /* start with QPN 2 */ + qpt->nmaps = 1; + qpt->mask = dd->qpn_mask; +} + +/** + * qib_free_qpn_table - free the QP number table for a device + * @qpt: the QPN table + */ +void qib_free_qpn_table(struct qib_qpn_table *qpt) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(qpt->map); i++) + if (qpt->map[i].page) + free_page((unsigned long) qpt->map[i].page); +} + +/** + * qib_get_credit - flush the send work queue of a QP + * @qp: the qp who's send work queue to flush + * @aeth: the Acknowledge Extended Transport Header + * + * The QP s_lock should be held. + */ +void qib_get_credit(struct qib_qp *qp, u32 aeth) +{ + u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK; + + /* + * If the credit is invalid, we can send + * as many packets as we like. 
Otherwise, we have to + * honor the credit field. + */ + if (credit == QIB_AETH_CREDIT_INVAL) { + if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { + qp->s_flags |= QIB_S_UNLIMITED_CREDIT; + if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { + qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; + qib_schedule_send(qp); + } + } + } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { + /* Compute new LSN (i.e., MSN + credit) */ + credit = (aeth + credit_table[credit]) & QIB_MSN_MASK; + if (qib_cmp24(credit, qp->s_lsn) > 0) { + qp->s_lsn = credit; + if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { + qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; + qib_schedule_send(qp); + } + } + } +} diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c new file mode 100644 index 000000000000..35b3604b691d --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_qsfp.c @@ -0,0 +1,564 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> + +#include "qib.h" +#include "qib_qsfp.h" + +/* + * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver + * in qib_twsi.c + */ +#define QSFP_MAX_RETRY 4 + +static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len) +{ + struct qib_devdata *dd = ppd->dd; + u32 out, mask; + int ret, cnt, pass = 0; + int stuck = 0; + u8 *buff = bp; + + ret = mutex_lock_interruptible(&dd->eep_lock); + if (ret) + goto no_unlock; + + if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) { + ret = -ENXIO; + goto bail; + } + + /* + * We presume, if we are called at all, that this board has + * QSFP. This is on the same i2c chain as the legacy parts, + * but only responds if the module is selected via GPIO pins. + * Further, there are very long setup and hold requirements + * on MODSEL. 
+ */ + mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE; + out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE; + if (ppd->hw_pidx) { + mask <<= QSFP_GPIO_PORT2_SHIFT; + out <<= QSFP_GPIO_PORT2_SHIFT; + } + + dd->f_gpio_mod(dd, out, mask, mask); + + /* + * Module could take up to 2 Msec to respond to MOD_SEL, and there + * is no way to tell if it is ready, so we must wait. + */ + msleep(2); + + /* Make sure TWSI bus is in sane state. */ + ret = qib_twsi_reset(dd); + if (ret) { + qib_dev_porterr(dd, ppd->port, + "QSFP interface Reset for read failed\n"); + ret = -EIO; + stuck = 1; + goto deselect; + } + + /* All QSFP modules are at A0 */ + + cnt = 0; + while (cnt < len) { + unsigned in_page; + int wlen = len - cnt; + in_page = addr % QSFP_PAGESIZE; + if ((in_page + wlen) > QSFP_PAGESIZE) + wlen = QSFP_PAGESIZE - in_page; + ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen); + /* Some QSFP's fail first try. Retry as experiment */ + if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY) + continue; + if (ret) { + /* qib_twsi_blk_rd() 1 for error, else 0 */ + ret = -EIO; + goto deselect; + } + addr += wlen; + cnt += wlen; + } + ret = cnt; + +deselect: + /* + * Module could take up to 10 uSec after transfer before + * ready to respond to MOD_SEL negation, and there is no way + * to tell if it is ready, so we must wait. + */ + udelay(10); + /* set QSFP MODSEL, RST. LP all high */ + dd->f_gpio_mod(dd, mask, mask, mask); + + /* + * Module could take up to 2 Msec to respond to MOD_SEL + * going away, and there is no way to tell if it is ready. + * so we must wait. + */ + if (stuck) + qib_dev_err(dd, "QSFP interface bus stuck non-idle\n"); + + if (pass >= QSFP_MAX_RETRY && ret) + qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n"); + else if (pass) + qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass); + + msleep(2); + +bail: + mutex_unlock(&dd->eep_lock); + +no_unlock: + return ret; +} + +/* + * qsfp_write + * We do not ordinarily write the QSFP, but this is needed to select + * the page on non-flat QSFPs, and possibly later unusual cases + */ +static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp, + int len) +{ + struct qib_devdata *dd = ppd->dd; + u32 out, mask; + int ret, cnt; + u8 *buff = bp; + + ret = mutex_lock_interruptible(&dd->eep_lock); + if (ret) + goto no_unlock; + + if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) { + ret = -ENXIO; + goto bail; + } + + /* + * We presume, if we are called at all, that this board has + * QSFP. This is on the same i2c chain as the legacy parts, + * but only responds if the module is selected via GPIO pins. + * Further, there are very long setup and hold requirements + * on MODSEL. + */ + mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE; + out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE; + if (ppd->hw_pidx) { + mask <<= QSFP_GPIO_PORT2_SHIFT; + out <<= QSFP_GPIO_PORT2_SHIFT; + } + dd->f_gpio_mod(dd, out, mask, mask); + + /* + * Module could take up to 2 Msec to respond to MOD_SEL, + * and there is no way to tell if it is ready, so we must wait. + */ + msleep(2); + + /* Make sure TWSI bus is in sane state. 
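The read loop above splits transfers so that none crosses a 128-byte QSFP page boundary: each chunk is the remaining length clamped to what is left of the current page. The clamping logic in isolation, with a stub in place of the TWSI transfer:

#include <stdio.h>

#define PAGESIZE 128

/* stand-in for qib_twsi_blk_rd(): report the chunk, pretend success */
static int xfer(int addr, int len)
{
	printf("read %3d bytes at %3d\n", len, addr);
	return 0;
}

static int chunked_read(int addr, int len)
{
	int cnt = 0;

	while (cnt < len) {
		int in_page = addr % PAGESIZE;
		int wlen = len - cnt;

		if (in_page + wlen > PAGESIZE)	/* clamp to page end */
			wlen = PAGESIZE - in_page;
		if (xfer(addr, wlen))
			return -1;
		addr += wlen;
		cnt += wlen;
	}
	return cnt;
}

int main(void)
{
	chunked_read(120, 20);	/* 8 bytes to the page end, then 12 */
	return 0;
}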
*/ + ret = qib_twsi_reset(dd); + if (ret) { + qib_dev_porterr(dd, ppd->port, + "QSFP interface Reset for write failed\n"); + ret = -EIO; + goto deselect; + } + + /* All QSFP modules are at A0 */ + + cnt = 0; + while (cnt < len) { + unsigned in_page; + int wlen = len - cnt; + in_page = addr % QSFP_PAGESIZE; + if ((in_page + wlen) > QSFP_PAGESIZE) + wlen = QSFP_PAGESIZE - in_page; + ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen); + if (ret) { + /* qib_twsi_blk_wr() 1 for error, else 0 */ + ret = -EIO; + goto deselect; + } + addr += wlen; + cnt += wlen; + } + ret = cnt; + +deselect: + /* + * Module could take up to 10 uSec after transfer before + * ready to respond to MOD_SEL negation, and there is no way + * to tell if it is ready, so we must wait. + */ + udelay(10); + /* set QSFP MODSEL, RST, LP high */ + dd->f_gpio_mod(dd, mask, mask, mask); + /* + * Module could take up to 2 Msec to respond to MOD_SEL + * going away, and there is no way to tell if it is ready. + * so we must wait. + */ + msleep(2); + +bail: + mutex_unlock(&dd->eep_lock); + +no_unlock: + return ret; +} + +/* + * For validation, we want to check the checksums, even of the + * fields we do not otherwise use. This function reads the bytes from + * <first> to <next-1> and returns the 8lsbs of the sum, or <0 for errors + */ +static int qsfp_cks(struct qib_pportdata *ppd, int first, int next) +{ + int ret; + u16 cks; + u8 bval; + + cks = 0; + while (first < next) { + ret = qsfp_read(ppd, first, &bval, 1); + if (ret < 0) + goto bail; + cks += bval; + ++first; + } + ret = cks & 0xFF; +bail: + return ret; + +} + +int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp) +{ + int ret; + int idx; + u16 cks; + u32 mask; + u8 peek[4]; + + /* ensure sane contents on invalid reads, for cable swaps */ + memset(cp, 0, sizeof(*cp)); + + mask = QSFP_GPIO_MOD_PRS_N; + if (ppd->hw_pidx) + mask <<= QSFP_GPIO_PORT2_SHIFT; + + ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0); + if (ret & mask) { + ret = -ENODEV; + goto bail; + } + + ret = qsfp_read(ppd, 0, peek, 3); + if (ret < 0) + goto bail; + if ((peek[0] & 0xFE) != 0x0C) + qib_dev_porterr(ppd->dd, ppd->port, + "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]); + + if ((peek[2] & 2) == 0) { + /* + * If cable is paged, rather than "flat memory", we need to + * set the page to zero, Even if it already appears to be zero. 
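Both QSFP check codes are just the low eight bits of a byte-wise sum over a fixed EEPROM range, which is why qsfp_cks() can return the running sum masked to 8 bits. A self-contained sketch of generating and verifying such a check code:

#include <stdint.h>
#include <stdio.h>

/* Low 8 bits of the sum of buf[first..next-1], like byte 191's CC. */
static unsigned cks8(const uint8_t *buf, int first, int next)
{
	unsigned sum = 0;

	while (first < next)
		sum += buf[first++];
	return sum & 0xFF;
}

int main(void)
{
	uint8_t eeprom[256] = { [128] = 0x0C, [129] = 0x40 };

	eeprom[191] = cks8(eeprom, 128, 191);	/* stored check code */
	printf("cks %s\n",
	       cks8(eeprom, 128, 191) == eeprom[191] ? "ok" : "BAD");
	return 0;
}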
+ */ + u8 poke = 0; + ret = qib_qsfp_write(ppd, 127, &poke, 1); + udelay(50); + if (ret != 1) { + qib_dev_porterr(ppd->dd, ppd->port, + "Failed QSFP Page set\n"); + goto bail; + } + } + + ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1); + if (ret < 0) + goto bail; + if ((cp->id & 0xFE) != 0x0C) + qib_dev_porterr(ppd->dd, ppd->port, + "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id); + cks = cp->id; + + ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1); + if (ret < 0) + goto bail; + cks += cp->pwr; + + ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS); + if (ret < 0) + goto bail; + cks += ret; + + ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1); + if (ret < 0) + goto bail; + cks += cp->len; + + ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1); + if (ret < 0) + goto bail; + cks += cp->tech; + + ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN); + if (ret < 0) + goto bail; + for (idx = 0; idx < QSFP_VEND_LEN; ++idx) + cks += cp->vendor[idx]; + + ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1); + if (ret < 0) + goto bail; + cks += cp->xt_xcv; + + ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN); + if (ret < 0) + goto bail; + for (idx = 0; idx < QSFP_VOUI_LEN; ++idx) + cks += cp->oui[idx]; + + ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN); + if (ret < 0) + goto bail; + for (idx = 0; idx < QSFP_PN_LEN; ++idx) + cks += cp->partnum[idx]; + + ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN); + if (ret < 0) + goto bail; + for (idx = 0; idx < QSFP_REV_LEN; ++idx) + cks += cp->rev[idx]; + + ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN); + if (ret < 0) + goto bail; + for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx) + cks += cp->atten[idx]; + + ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS); + if (ret < 0) + goto bail; + cks += ret; + + cks &= 0xFF; + ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1); + if (ret < 0) + goto bail; + if (cks != cp->cks1) + qib_dev_porterr(ppd->dd, ppd->port, + "QSFP cks1 is %02X, computed %02X\n", cp->cks1, + cks); + + /* Second checksum covers 192 to (serial, date, lot) */ + ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS); + if (ret < 0) + goto bail; + cks = ret; + + ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN); + if (ret < 0) + goto bail; + for (idx = 0; idx < QSFP_SN_LEN; ++idx) + cks += cp->serial[idx]; + + ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN); + if (ret < 0) + goto bail; + for (idx = 0; idx < QSFP_DATE_LEN; ++idx) + cks += cp->date[idx]; + + ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN); + if (ret < 0) + goto bail; + for (idx = 0; idx < QSFP_LOT_LEN; ++idx) + cks += cp->lot[idx]; + + ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS); + if (ret < 0) + goto bail; + cks += ret; + + ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1); + if (ret < 0) + goto bail; + cks &= 0xFF; + if (cks != cp->cks2) + qib_dev_porterr(ppd->dd, ppd->port, + "QSFP cks2 is %02X, computed %02X\n", cp->cks2, + cks); + return 0; + +bail: + cp->id = 0; + return ret; +} + +const char * const qib_qsfp_devtech[16] = { + "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP", + "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML", + "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq", + "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq" +}; + +#define QSFP_DUMP_CHUNK 16 /* Holds longest string */ +#define QSFP_DEFAULT_HDR_CNT 224 + +static const char *pwr_codes = "1.5W2.0W2.5W3.5W"; + +/* + * 
Initialize structures that control access to QSFP. Called once per port
+ * on cards that support QSFP.
+ */
+void qib_qsfp_init(struct qib_qsfp_data *qd,
+		   void (*fevent)(struct work_struct *))
+{
+	u32 mask, highs;
+	int pins;
+
+	struct qib_devdata *dd = qd->ppd->dd;
+
+	/* Initialize work struct for later QSFP events */
+	INIT_WORK(&qd->work, fevent);
+
+	/*
+	 * Later, we may want more validation. For now, just set up pins and
+	 * blip reset. If module is present, call qib_refresh_qsfp_cache(),
+	 * to do further init.
+	 */
+	mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+	highs = mask - QSFP_GPIO_MOD_RST_N;
+	if (qd->ppd->hw_pidx) {
+		mask <<= QSFP_GPIO_PORT2_SHIFT;
+		highs <<= QSFP_GPIO_PORT2_SHIFT;
+	}
+	dd->f_gpio_mod(dd, highs, mask, mask);
+	udelay(20); /* Generous RST dwell */
+
+	dd->f_gpio_mod(dd, mask, mask, mask);
+	/* Spec says module can take up to two seconds! */
+	mask = QSFP_GPIO_MOD_PRS_N;
+	if (qd->ppd->hw_pidx)
+		mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+	/* Do not try to wait here. Better to let the event handler do it */
+	pins = dd->f_gpio_mod(dd, 0, 0, 0);
+	if (pins & mask)
+		goto bail;
+	/* We see a module, but it may be unwise to look yet. Just schedule */
+	qd->t_insert = get_jiffies_64();
+	schedule_work(&qd->work);
+bail:
+	return;
+}
+
+void qib_qsfp_deinit(struct qib_qsfp_data *qd)
+{
+	/*
+	 * There is nothing to do here for now. Our
+	 * work is scheduled with schedule_work(), and
+	 * flush_scheduled_work() from remove_one will
+	 * block until all work set up with schedule_work()
+	 * completes.
+	 */
+}
+
+int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
+{
+	struct qib_qsfp_cache cd;
+	u8 bin_buff[QSFP_DUMP_CHUNK];
+	char lenstr[6];
+	int sofar, ret;
+	int bidx = 0;
+
+	sofar = 0;
+	ret = qib_refresh_qsfp_cache(ppd, &cd);
+	if (ret < 0)
+		goto bail;
+
+	lenstr[0] = ' ';
+	lenstr[1] = '\0';
+	if (QSFP_IS_CU(cd.tech))
+		sprintf(lenstr, "%dM ", cd.len);
+
+	sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
+			   (QSFP_PWR(cd.pwr) * 4));
+
+	sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
+			   qib_qsfp_devtech[cd.tech >> 4]);
+
+	sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
+			   QSFP_VEND_LEN, cd.vendor);
+
+	sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
+			   QSFP_OUI(cd.oui));
+
+	sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
+			   QSFP_PN_LEN, cd.partnum);
+	sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
+			   QSFP_REV_LEN, cd.rev);
+	if (QSFP_IS_CU(cd.tech))
+		sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
+				   QSFP_ATTEN_SDR(cd.atten),
+				   QSFP_ATTEN_DDR(cd.atten));
+	sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
+			   QSFP_SN_LEN, cd.serial);
+	sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
+			   QSFP_DATE_LEN, cd.date);
+	sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
+			   QSFP_LOT_LEN, cd.lot);
+
+	while (bidx < QSFP_DEFAULT_HDR_CNT) {
+		int iidx;
+		ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
+		if (ret < 0)
+			goto bail;
+		for (iidx = 0; iidx < ret; ++iidx) {
+			sofar += scnprintf(buf + sofar, len - sofar, " %02X",
+					   bin_buff[iidx]);
+		}
+		sofar += scnprintf(buf + sofar, len - sofar, "\n");
+		bidx += QSFP_DUMP_CHUNK;
+	}
+	ret = sofar;
+bail:
+	return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
new file mode 100644
index 000000000000..19b527bafd57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006,
2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +/* QSFP support common definitions, for ib_qib driver */ + +#define QSFP_DEV 0xA0 +#define QSFP_PWR_LAG_MSEC 2000 + +/* + * Below are masks for various QSFP signals, for Port 1. + * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT. + * _N means asserted low + */ +#define QSFP_GPIO_MOD_SEL_N (4) +#define QSFP_GPIO_MOD_PRS_N (8) +#define QSFP_GPIO_INT_N (0x10) +#define QSFP_GPIO_MOD_RST_N (0x20) +#define QSFP_GPIO_LP_MODE (0x40) +#define QSFP_GPIO_PORT2_SHIFT 5 + +#define QSFP_PAGESIZE 128 +/* Defined fields that QLogic requires of qualified cables */ +/* Byte 0 is Identifier, not checked */ +/* Byte 1 is reserved "status MSB" */ +/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */ +/* + * Rest of first 128 not used, although 127 is reserved for page select + * if module is not "Flat memory". + */ +/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */ +#define QSFP_MOD_ID_OFFS 128 +/* + * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class + * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W + */ +#define QSFP_MOD_PWR_OFFS 129 +/* Byte 130 is Connector type. Not QLogic req'd */ +/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */ +/* Byte 139 is encoding. code 0x01 is 8b10b. Not QLogic req'd */ +/* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not QLogic req'd */ +/* Byte 141 is Extended Rate Select. Not QLogic req'd */ +/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */ +/* Byte 146 is length for Copper. Units of 1 meter */ +#define QSFP_MOD_LEN_OFFS 146 +/* + * Byte 147 is Device technology. 
D0..3 not QLogic req'd
+ * D4..7 select from 15 choices, translated by table:
+ */
+#define QSFP_MOD_TECH_OFFS 147
+extern const char *const qib_qsfp_devtech[16];
+/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
+#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Attenuation should be valid for copper other than full/near Eq */
+#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
+/* Length is only valid if technology is "copper" */
+#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
+#define QSFP_TECH_1490 9
+
+#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
+			oui[2])
+#define QSFP_OUI_AMPHENOL 0x415048
+#define QSFP_OUI_FINISAR 0x009065
+#define QSFP_OUI_GORE 0x002177
+
+/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
+#define QSFP_VEND_OFFS 148
+#define QSFP_VEND_LEN 16
+/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
+#define QSFP_IBXCV_OFFS 164
+/* Bytes 165..167 are Vendor OUI number */
+#define QSFP_VOUI_OFFS 165
+#define QSFP_VOUI_LEN 3
+/* Bytes 168..183 are Vendor Part Number, string */
+#define QSFP_PN_OFFS 168
+#define QSFP_PN_LEN 16
+/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
+#define QSFP_REV_OFFS 184
+#define QSFP_REV_LEN 2
+/*
+ * Bytes 186,187 are Wavelength, if Optical. Not QLogic req'd
+ * If copper, they are attenuation in dB:
+ * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
+ */
+#define QSFP_ATTEN_OFFS 186
+#define QSFP_ATTEN_LEN 2
+/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
+/* Byte 190 is Max Case Temp. Not QLogic req'd */
+/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
+#define QSFP_CC_OFFS 191
+/* Bytes 192..195 are Options implemented in qsfp. Not QLogic req'd */
+/* Bytes 196..211 are Serial Number, String */
+#define QSFP_SN_OFFS 196
+#define QSFP_SN_LEN 16
+/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
+#define QSFP_DATE_OFFS 212
+#define QSFP_DATE_LEN 6
+/* Bytes 218,219 are optional lot-code, string */
+#define QSFP_LOT_OFFS 218
+#define QSFP_LOT_LEN 2
+/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
+/* Byte 223 is LSB of sum of bytes 192..222 */
+#define QSFP_CC_EXT_OFFS 223
+
+/*
+ * struct qib_qsfp_data encapsulates state of QSFP device for one port.
+ * It will be part of port-chip-specific data if a board supports QSFP.
+ *
+ * Since multiple board-types use QSFP, and their pport_data structs
+ * differ (in the chip-specific section), we need a pointer to its head.
+ *
+ * Avoiding premature optimization, we will have one work_struct per port,
+ * and let the (increasingly inaccurately named) eep_lock arbitrate
+ * access to common resources.
+ */
+
+/*
+ * Hold the parts of the onboard EEPROM that we care about, so we aren't
+ * constantly bit-banging the TWSI bus to re-read it
+ */
+struct qib_qsfp_cache {
+	u8 id;	/* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
+	u8 pwr; /* in D6,7 */
+	u8 len;	/* in meters, Cu only */
+	u8 tech;
+	char vendor[QSFP_VEND_LEN];
+	u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */
+	u8 oui[QSFP_VOUI_LEN];
+	u8 partnum[QSFP_PN_LEN];
+	u8 rev[QSFP_REV_LEN];
+	u8 atten[QSFP_ATTEN_LEN];
+	u8 cks1;	/* Checksum of bytes 128..190 */
+	u8 serial[QSFP_SN_LEN];
+	u8 date[QSFP_DATE_LEN];
+	u8 lot[QSFP_LOT_LEN];
+	u8 cks2;	/* Checksum of bytes 192..222 */
+};
+
+#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
+#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
+#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
+
+struct qib_qsfp_data {
+	/* Helps to find our way */
+	struct qib_pportdata *ppd;
+	struct work_struct work;
+	struct qib_qsfp_cache cache;
+	u64 t_insert;
+};
+
+extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
+				  struct qib_qsfp_cache *cp);
+extern void qib_qsfp_init(struct qib_qsfp_data *qd,
+			  void (*fevent)(struct work_struct *));
+extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
new file mode 100644
index 000000000000..40c0a373719c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -0,0 +1,2288 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/io.h>
+
+#include "qib.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_RC_##x
+
+static void rc_timeout(unsigned long arg);
+
+static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
+		       u32 psn, u32 pmtu)
+{
+	u32 len;
+
+	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+	ss->sge = wqe->sg_list[0];
+	ss->sg_list = wqe->sg_list + 1;
+	ss->num_sge = wqe->wr.num_sge;
+	ss->total_len = wqe->length;
+	qib_skip_sge(ss, len, 0);
+	return wqe->length - len;
+}
+
+static void start_timer(struct qib_qp *qp)
+{
+	qp->s_flags |= QIB_S_TIMER;
+	qp->s_timer.function = rc_timeout;
+	/* 4.096 usec.
* (1 << qp->timeout) */ + qp->s_timer.expires = jiffies + + usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL); + add_timer(&qp->s_timer); +} + +/** + * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read) + * @dev: the device for this QP + * @qp: a pointer to the QP + * @ohdr: a pointer to the IB header being constructed + * @pmtu: the path MTU + * + * Return 1 if constructed; otherwise, return 0. + * Note that we are in the responder's side of the QP context. + * Note the QP s_lock must be held. + */ +static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, + struct qib_other_headers *ohdr, u32 pmtu) +{ + struct qib_ack_entry *e; + u32 hwords; + u32 len; + u32 bth0; + u32 bth2; + + /* Don't send an ACK if we aren't supposed to. */ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto bail; + + /* header size in 32-bit words LRH+BTH = (8+12)/4. */ + hwords = 5; + + switch (qp->s_ack_state) { + case OP(RDMA_READ_RESPONSE_LAST): + case OP(RDMA_READ_RESPONSE_ONLY): + e = &qp->s_ack_queue[qp->s_tail_ack_queue]; + if (e->rdma_sge.mr) { + atomic_dec(&e->rdma_sge.mr->refcount); + e->rdma_sge.mr = NULL; + } + /* FALLTHROUGH */ + case OP(ATOMIC_ACKNOWLEDGE): + /* + * We can increment the tail pointer now that the last + * response has been sent instead of only being + * constructed. + */ + if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) + qp->s_tail_ack_queue = 0; + /* FALLTHROUGH */ + case OP(SEND_ONLY): + case OP(ACKNOWLEDGE): + /* Check for no next entry in the queue. */ + if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { + if (qp->s_flags & QIB_S_ACK_PENDING) + goto normal; + goto bail; + } + + e = &qp->s_ack_queue[qp->s_tail_ack_queue]; + if (e->opcode == OP(RDMA_READ_REQUEST)) { + /* + * If a RDMA read response is being resent and + * we haven't seen the duplicate request yet, + * then stop sending the remaining responses the + * responder has seen until the requester resends it. 
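The IBTA local ACK timeout field used by start_timer() further above encodes a delay of 4.096 microseconds times 2^timeout; the driver converts that product to jiffies with usecs_to_jiffies(). The raw arithmetic, shown standalone (integer division truncates, exactly as in the driver):

#include <stdio.h>

int main(void)
{
	/* IBTA: local ACK timeout = 4.096 us * 2^timeout (5-bit field) */
	for (unsigned timeout = 0; timeout <= 20; timeout += 5) {
		unsigned long usecs = (4096UL * (1UL << timeout)) / 1000UL;

		/* the driver then rounds this into jiffies */
		printf("timeout=%2u -> ~%lu.%03lu ms\n", timeout,
		       usecs / 1000, usecs % 1000);
	}
	return 0;
}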
+ */ + len = e->rdma_sge.sge_length; + if (len && !e->rdma_sge.mr) { + qp->s_tail_ack_queue = qp->r_head_ack_queue; + goto bail; + } + /* Copy SGE state in case we need to resend */ + qp->s_rdma_mr = e->rdma_sge.mr; + if (qp->s_rdma_mr) + atomic_inc(&qp->s_rdma_mr->refcount); + qp->s_ack_rdma_sge.sge = e->rdma_sge; + qp->s_ack_rdma_sge.num_sge = 1; + qp->s_cur_sge = &qp->s_ack_rdma_sge; + if (len > pmtu) { + len = pmtu; + qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); + } else { + qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); + e->sent = 1; + } + ohdr->u.aeth = qib_compute_aeth(qp); + hwords++; + qp->s_ack_rdma_psn = e->psn; + bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK; + } else { + /* COMPARE_SWAP or FETCH_ADD */ + qp->s_cur_sge = NULL; + len = 0; + qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); + ohdr->u.at.aeth = qib_compute_aeth(qp); + ohdr->u.at.atomic_ack_eth[0] = + cpu_to_be32(e->atomic_data >> 32); + ohdr->u.at.atomic_ack_eth[1] = + cpu_to_be32(e->atomic_data); + hwords += sizeof(ohdr->u.at) / sizeof(u32); + bth2 = e->psn & QIB_PSN_MASK; + e->sent = 1; + } + bth0 = qp->s_ack_state << 24; + break; + + case OP(RDMA_READ_RESPONSE_FIRST): + qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); + /* FALLTHROUGH */ + case OP(RDMA_READ_RESPONSE_MIDDLE): + qp->s_cur_sge = &qp->s_ack_rdma_sge; + qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; + if (qp->s_rdma_mr) + atomic_inc(&qp->s_rdma_mr->refcount); + len = qp->s_ack_rdma_sge.sge.sge_length; + if (len > pmtu) + len = pmtu; + else { + ohdr->u.aeth = qib_compute_aeth(qp); + hwords++; + qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); + e = &qp->s_ack_queue[qp->s_tail_ack_queue]; + e->sent = 1; + } + bth0 = qp->s_ack_state << 24; + bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK; + break; + + default: +normal: + /* + * Send a regular ACK. + * Set the s_ack_state so we wait until after sending + * the ACK before setting s_ack_state to ACKNOWLEDGE + * (see above). + */ + qp->s_ack_state = OP(SEND_ONLY); + qp->s_flags &= ~QIB_S_ACK_PENDING; + qp->s_cur_sge = NULL; + if (qp->s_nak_state) + ohdr->u.aeth = + cpu_to_be32((qp->r_msn & QIB_MSN_MASK) | + (qp->s_nak_state << + QIB_AETH_CREDIT_SHIFT)); + else + ohdr->u.aeth = qib_compute_aeth(qp); + hwords++; + len = 0; + bth0 = OP(ACKNOWLEDGE) << 24; + bth2 = qp->s_ack_psn & QIB_PSN_MASK; + } + qp->s_rdma_ack_cnt++; + qp->s_hdrwords = hwords; + qp->s_cur_size = len; + qib_make_ruc_header(qp, ohdr, bth0, bth2); + return 1; + +bail: + qp->s_ack_state = OP(ACKNOWLEDGE); + qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING); + return 0; +} + +/** + * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) + * @qp: a pointer to the QP + * + * Return 1 if constructed; otherwise, return 0. + */ +int qib_make_rc_req(struct qib_qp *qp) +{ + struct qib_ibdev *dev = to_idev(qp->ibqp.device); + struct qib_other_headers *ohdr; + struct qib_sge_state *ss; + struct qib_swqe *wqe; + u32 hwords; + u32 len; + u32 bth0; + u32 bth2; + u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); + char newreq; + unsigned long flags; + int ret = 0; + int delta; + + ohdr = &qp->s_hdr.u.oth; + if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) + ohdr = &qp->s_hdr.u.l.oth; + + /* + * The lock is needed to synchronize between the sending tasklet, + * the receive interrupt handler, and timeout resends. + */ + spin_lock_irqsave(&qp->s_lock, flags); + + /* Sending responses has higher priority over sending requests. 
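In the ATOMIC_ACKNOWLEDGE path above, the 64-bit atomic result travels as two big-endian 32-bit words, high half first. A userspace illustration of the split, with htonl() standing in for cpu_to_be32():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t atomic_data = 0x1122334455667788ULL;
	uint32_t wire[2];

	/* high 32 bits first, each word in network byte order */
	wire[0] = htonl((uint32_t)(atomic_data >> 32));
	wire[1] = htonl((uint32_t)atomic_data);
	printf("wire: %08X %08X\n", ntohl(wire[0]), ntohl(wire[1]));
	return 0;
}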
*/ + if ((qp->s_flags & QIB_S_RESP_PENDING) && + qib_make_rc_ack(dev, qp, ohdr, pmtu)) + goto done; + + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { + if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) + goto bail; + /* We are in the error state, flush the work request. */ + if (qp->s_last == qp->s_head) + goto bail; + /* If DMAs are in progress, we can't flush immediately. */ + if (atomic_read(&qp->s_dma_busy)) { + qp->s_flags |= QIB_S_WAIT_DMA; + goto bail; + } + wqe = get_swqe_ptr(qp, qp->s_last); + while (qp->s_last != qp->s_acked) { + qib_send_complete(qp, wqe, IB_WC_SUCCESS); + if (++qp->s_last >= qp->s_size) + qp->s_last = 0; + wqe = get_swqe_ptr(qp, qp->s_last); + } + qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); + goto done; + } + + if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK)) + goto bail; + + if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) { + if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { + qp->s_flags |= QIB_S_WAIT_PSN; + goto bail; + } + qp->s_sending_psn = qp->s_psn; + qp->s_sending_hpsn = qp->s_psn - 1; + } + + /* header size in 32-bit words LRH+BTH = (8+12)/4. */ + hwords = 5; + bth0 = 0; + + /* Send a request. */ + wqe = get_swqe_ptr(qp, qp->s_cur); + switch (qp->s_state) { + default: + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) + goto bail; + /* + * Resend an old request or start a new one. + * + * We keep track of the current SWQE so that + * we don't reset the "furthest progress" state + * if we need to back up. + */ + newreq = 0; + if (qp->s_cur == qp->s_tail) { + /* Check if send work queue is empty. */ + if (qp->s_tail == qp->s_head) + goto bail; + /* + * If a fence is requested, wait for previous + * RDMA read and atomic operations to finish. + */ + if ((wqe->wr.send_flags & IB_SEND_FENCE) && + qp->s_num_rd_atomic) { + qp->s_flags |= QIB_S_WAIT_FENCE; + goto bail; + } + wqe->psn = qp->s_next_psn; + newreq = 1; + } + /* + * Note that we have to be careful not to modify the + * original work request since we may need to resend + * it. + */ + len = wqe->length; + ss = &qp->s_sge; + bth2 = qp->s_psn & QIB_PSN_MASK; + switch (wqe->wr.opcode) { + case IB_WR_SEND: + case IB_WR_SEND_WITH_IMM: + /* If no credit, return. */ + if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && + qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { + qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; + goto bail; + } + wqe->lpsn = wqe->psn; + if (len > pmtu) { + wqe->lpsn += (len - 1) / pmtu; + qp->s_state = OP(SEND_FIRST); + len = pmtu; + break; + } + if (wqe->wr.opcode == IB_WR_SEND) + qp->s_state = OP(SEND_ONLY); + else { + qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); + /* Immediate data comes after the BTH */ + ohdr->u.imm_data = wqe->wr.ex.imm_data; + hwords += 1; + } + if (wqe->wr.send_flags & IB_SEND_SOLICITED) + bth0 |= IB_BTH_SOLICITED; + bth2 |= IB_BTH_REQ_ACK; + if (++qp->s_cur == qp->s_size) + qp->s_cur = 0; + break; + + case IB_WR_RDMA_WRITE: + if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) + qp->s_lsn++; + /* FALLTHROUGH */ + case IB_WR_RDMA_WRITE_WITH_IMM: + /* If no credit, return. 
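A message longer than the path MTU consumes one PSN per fragment, which is why the code above computes the last PSN as wqe->psn + (len - 1) / pmtu, i.e. ceil(len / pmtu) - 1 packets beyond the first. The same arithmetic standalone:

#include <stdio.h>

int main(void)
{
	unsigned pmtu = 2048;
	unsigned first_psn = 100;
	unsigned len = 5000;	/* message length in bytes */

	/* (len - 1) / pmtu == ceil(len / pmtu) - 1 for len > 0 */
	unsigned last_psn = first_psn + (len - 1) / pmtu;

	/* 5000 bytes at MTU 2048 -> 3 packets, PSNs 100..102 */
	printf("packets=%u, psn range %u..%u\n",
	       (len - 1) / pmtu + 1, first_psn, last_psn);
	return 0;
}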
*/ + if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && + qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { + qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; + goto bail; + } + ohdr->u.rc.reth.vaddr = + cpu_to_be64(wqe->wr.wr.rdma.remote_addr); + ohdr->u.rc.reth.rkey = + cpu_to_be32(wqe->wr.wr.rdma.rkey); + ohdr->u.rc.reth.length = cpu_to_be32(len); + hwords += sizeof(struct ib_reth) / sizeof(u32); + wqe->lpsn = wqe->psn; + if (len > pmtu) { + wqe->lpsn += (len - 1) / pmtu; + qp->s_state = OP(RDMA_WRITE_FIRST); + len = pmtu; + break; + } + if (wqe->wr.opcode == IB_WR_RDMA_WRITE) + qp->s_state = OP(RDMA_WRITE_ONLY); + else { + qp->s_state = + OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); + /* Immediate data comes after RETH */ + ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; + hwords += 1; + if (wqe->wr.send_flags & IB_SEND_SOLICITED) + bth0 |= IB_BTH_SOLICITED; + } + bth2 |= IB_BTH_REQ_ACK; + if (++qp->s_cur == qp->s_size) + qp->s_cur = 0; + break; + + case IB_WR_RDMA_READ: + /* + * Don't allow more operations to be started + * than the QP limits allow. + */ + if (newreq) { + if (qp->s_num_rd_atomic >= + qp->s_max_rd_atomic) { + qp->s_flags |= QIB_S_WAIT_RDMAR; + goto bail; + } + qp->s_num_rd_atomic++; + if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) + qp->s_lsn++; + /* + * Adjust s_next_psn to count the + * expected number of responses. + */ + if (len > pmtu) + qp->s_next_psn += (len - 1) / pmtu; + wqe->lpsn = qp->s_next_psn++; + } + ohdr->u.rc.reth.vaddr = + cpu_to_be64(wqe->wr.wr.rdma.remote_addr); + ohdr->u.rc.reth.rkey = + cpu_to_be32(wqe->wr.wr.rdma.rkey); + ohdr->u.rc.reth.length = cpu_to_be32(len); + qp->s_state = OP(RDMA_READ_REQUEST); + hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); + ss = NULL; + len = 0; + bth2 |= IB_BTH_REQ_ACK; + if (++qp->s_cur == qp->s_size) + qp->s_cur = 0; + break; + + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + /* + * Don't allow more operations to be started + * than the QP limits allow. 
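 */
/*
 * Illustrative sketch, not part of the patch: the wqe->lpsn and
 * qp->s_next_psn arithmetic above reserves one PSN per pmtu-sized packet
 * of payload, so a request of len bytes starting at first_psn ends at the
 * PSN below (24-bit PSN space assumed; a zero-length request still uses
 * one packet):
 */
#include <stdint.h>

static uint32_t last_psn_sketch(uint32_t first_psn, uint32_t len,
				uint32_t pmtu)
{
	uint32_t npkts = len ? (len + pmtu - 1) / pmtu : 1;

	return (first_psn + npkts - 1) & 0xffffff;
}
/*
 * Don't allow more operations to be started
 * than the QP limits allow.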
+ */
+ if (newreq) {
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ wqe->lpsn = wqe->psn;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ qp->s_state = OP(COMPARE_SWAP);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.swap);
+ ohdr->u.atomic_eth.compare_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ } else {
+ qp->s_state = OP(FETCH_ADD);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ ohdr->u.atomic_eth.compare_data = 0;
+ }
+ ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr >> 32);
+ ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr);
+ ohdr->u.atomic_eth.rkey = cpu_to_be32(
+ wqe->wr.wr.atomic.rkey);
+ hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ qp->s_len = wqe->length;
+ if (newreq) {
+ qp->s_tail++;
+ if (qp->s_tail >= qp->s_size)
+ qp->s_tail = 0;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ qp->s_psn = wqe->lpsn + 1;
+ else {
+ qp->s_psn++;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ }
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
+ * thread to indicate a SEND needs to be restarted from an
+ * earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ bth2 = qp->s_psn++ & QIB_PSN_MASK;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_LAST is used by the ACK processing
+ * thread to indicate a RDMA write needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ bth2 = qp->s_psn++ & QIB_PSN_MASK;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
+ * thread to indicate a RDMA read needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
+ qp->s_psn = wqe->lpsn + 1;
+ ss = NULL;
+ len = 0;
+ qp->s_cur++;
+ if (qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+ }
+ qp->s_sending_hpsn = bth2;
+ delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
+ if (delta && delta % QIB_PSN_CREDIT == 0)
+ bth2 |= IB_BTH_REQ_ACK;
+ if (qp->s_flags & QIB_S_SEND_ONE) {
+ qp->s_flags &= ~QIB_S_SEND_ONE;
+ qp->s_flags |= QIB_S_WAIT_ACK;
+ bth2 |= IB_BTH_REQ_ACK;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = ss;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * qib_send_rc_ack - Construct an ACK packet and send it
+ * @qp: a pointer to the QP
+ *
+ * This is called from qib_rc_rcv() and qib_kreceive().
+ * Note that RDMA reads and atomics are handled in the
+ * send side QP state and tasklet.
+ */
+void qib_send_rc_ack(struct qib_qp *qp)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 pbc;
+ u16 lrh0;
+ u32 bth0;
+ u32 hwords;
+ u32 pbufn;
+ u32 __iomem *piobuf;
+ struct qib_ib_header hdr;
+ struct qib_other_headers *ohdr;
+ u32 control;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto unlock;
+
+ /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
+ if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
+ goto queue_ack;
+
+ /* Construct the header with s_lock held so APM doesn't change it. */
+ ohdr = &hdr.u.oth;
+ lrh0 = QIB_LRH_BTH;
+ /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4.
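 */
/*
 * Illustrative sketch, not part of the patch: the AETH built for NAKs
 * above (and again in qib_send_rc_ack() below) packs a 24-bit MSN with an
 * 8-bit syndrome (NAK code or credit count) in the top byte.  The literal
 * mask and shift mirror the assumed values of QIB_MSN_MASK (0xffffff) and
 * QIB_AETH_CREDIT_SHIFT (24):
 */
#include <stdint.h>

static uint32_t build_aeth_sketch(uint32_t msn, uint32_t syndrome)
{
	return (msn & 0xffffff) | (syndrome << 24);
}
/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4.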
 */
+ hwords = 6;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ hwords += qib_make_grh(ibp, &hdr.u.l.grh,
+ &qp->remote_ah_attr.grh, hwords, 0);
+ ohdr = &hdr.u.l.oth;
+ lrh0 = QIB_LRH_GRH;
+ }
+ /* read pkey_index w/o lock (it's atomic) */
+ bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ if (qp->r_nak_state)
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ (qp->r_nak_state <<
+ QIB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
+ qp->remote_ah_attr.sl << 4;
+ hdr.lrh[0] = cpu_to_be16(lrh0);
+ hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+ hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Don't try to send ACKs if the link isn't ACTIVE */
+ if (!(ppd->lflags & QIBL_LINKACTIVE))
+ goto done;
+
+ control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
+ qp->s_srate, lrh0 >> 12);
+ /* length is + 1 for the control dword */
+ pbc = ((u64) control << 32) | (hwords + 1);
+
+ piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
+ if (!piobuf) {
+ /*
+ * We are out of PIO buffers at the moment.
+ * Pass responsibility for sending the ACK to the
+ * send tasklet so that when a PIO buffer becomes
+ * available, the ACK is sent ahead of other outgoing
+ * packets.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ goto queue_ack;
+ }
+
+ /*
+ * Write the pbc.
+ * We have to flush after the PBC for correctness
+ * on some cpus or the WC buffer can be written out of order.
+ */
+ writeq(pbc, piobuf);
+
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ u32 *hdrp = (u32 *) &hdr;
+
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
+ } else
+ qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
+
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pbufn);
+
+ ibp->n_unicast_xmit++;
+ goto done;
+
+queue_ack:
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ ibp->n_rc_qacks++;
+ qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+
+ /* Schedule the send tasklet. */
+ qib_schedule_send(qp);
+ }
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return;
+}
+
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from qib_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct qib_qp *qp, u32 psn)
+{
+ u32 n = qp->s_acked;
+ struct qib_swqe *wqe = get_swqe_ptr(qp, n);
+ u32 opcode;
+
+ qp->s_cur = n;
+
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (qib_cmp24(psn, wqe->psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+
+ /* Find the work request opcode corresponding to the given PSN.
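 */
/*
 * Illustrative sketch, not part of the patch: the PIO path above writes
 * the PBC, then flushes write-combining buffers before (and after) the
 * header copy so the chip sees the words in order.  In this userspace
 * sketch, __sync_synchronize() merely stands in for the real
 * qib_flush_wc() store fence:
 */
#include <stdint.h>

static void pio_write_sketch(volatile uint64_t *piobuf,
			     const uint32_t *hdr, unsigned int hwords)
{
	volatile uint32_t *buf32 = (volatile uint32_t *)piobuf;
	unsigned int i;

	piobuf[0] = 0;			/* PBC word goes first */
	__sync_synchronize();		/* flush before the header copy */
	for (i = 0; i < hwords; i++)
		buf32[2 + i] = hdr[i];
	__sync_synchronize();		/* drain WC buffers when done */
}
/* Find the work request opcode corresponding to the given PSN.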
 */
+ opcode = wqe->wr.opcode;
+ for (;;) {
+ int diff;
+
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ wqe = get_swqe_ptr(qp, n);
+ diff = qib_cmp24(psn, wqe->psn);
+ if (diff < 0)
+ break;
+ qp->s_cur = n;
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (diff == 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+ opcode = wqe->wr.opcode;
+ }
+
+ /*
+ * Set the state to restart in the middle of a request.
+ * Don't change the s_sge, s_cur_sge, or s_cur_size.
+ * See qib_make_rc_req().
+ */
+ switch (opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case IB_WR_RDMA_READ:
+ qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ break;
+
+ default:
+ /*
+ * This case shouldn't happen since there is only
+ * one PSN per request.
+ */
+ qp->s_state = OP(SEND_LAST);
+ }
+done:
+ qp->s_psn = psn;
+ /*
+ * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
+ * asynchronously before the send tasklet can get scheduled.
+ * Doing it in qib_make_rc_req() is too late.
+ */
+ if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
+ (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
+ qp->s_flags |= QIB_S_WAIT_PSN;
+}
+
+/*
+ * Back up requester to resend the last un-ACKed request.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
+{
+ struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+ struct qib_ibport *ibp;
+
+ if (qp->s_retry == 0) {
+ if (qp->s_mig_state == IB_MIG_ARMED) {
+ qib_migrate_qp(qp);
+ qp->s_retry = qp->s_retry_cnt;
+ } else if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ return;
+ } else /* XXX need to handle delayed completion */
+ return;
+ } else
+ qp->s_retry--;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ ibp->n_rc_resends++;
+ else
+ ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+
+ qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
+ QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
+ QIB_S_WAIT_ACK);
+ if (wait)
+ qp->s_flags |= QIB_S_SEND_ONE;
+ reset_psn(qp, psn);
+}
+
+/*
+ * This is called from s_timer for missing responses.
+ */
+static void rc_timeout(unsigned long arg)
+{
+ struct qib_qp *qp = (struct qib_qp *)arg;
+ struct qib_ibport *ibp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_TIMER) {
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ibp->n_rc_timeouts++;
+ qp->s_flags &= ~QIB_S_TIMER;
+ del_timer(&qp->s_timer);
+ qib_restart_rc(qp, qp->s_last_psn + 1, 1);
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * This is called from s_timer for RNR timeouts.
+ */
+void qib_rc_rnr_retry(unsigned long arg)
+{
+ struct qib_qp *qp = (struct qib_qp *)arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_RNR) {
+ qp->s_flags &= ~QIB_S_WAIT_RNR;
+ del_timer(&qp->s_timer);
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * Set qp->s_sending_psn to the next PSN after the given one.
+ * This would be psn+1 except when RDMA reads are present.
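 */
/*
 * Illustrative sketch, not part of the patch: an RDMA read occupies the
 * PSN range [psn, lpsn] on the responder, so "the next PSN after" a read
 * is lpsn + 1 rather than psn + 1 (24-bit PSN space assumed):
 */
#include <stdint.h>

static uint32_t next_sending_psn_sketch(uint32_t psn, uint32_t lpsn,
					int is_rdma_read)
{
	return ((is_rdma_read ? lpsn : psn) + 1) & 0xffffff;
}
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.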
+ */ +static void reset_sending_psn(struct qib_qp *qp, u32 psn) +{ + struct qib_swqe *wqe; + u32 n = qp->s_last; + + /* Find the work request corresponding to the given PSN. */ + for (;;) { + wqe = get_swqe_ptr(qp, n); + if (qib_cmp24(psn, wqe->lpsn) <= 0) { + if (wqe->wr.opcode == IB_WR_RDMA_READ) + qp->s_sending_psn = wqe->lpsn + 1; + else + qp->s_sending_psn = psn + 1; + break; + } + if (++n == qp->s_size) + n = 0; + if (n == qp->s_tail) + break; + } +} + +/* + * This should be called with the QP s_lock held and interrupts disabled. + */ +void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) +{ + struct qib_other_headers *ohdr; + struct qib_swqe *wqe; + struct ib_wc wc; + unsigned i; + u32 opcode; + u32 psn; + + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) + return; + + /* Find out where the BTH is */ + if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH) + ohdr = &hdr->u.oth; + else + ohdr = &hdr->u.l.oth; + + opcode = be32_to_cpu(ohdr->bth[0]) >> 24; + if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) && + opcode <= OP(ATOMIC_ACKNOWLEDGE)) { + WARN_ON(!qp->s_rdma_ack_cnt); + qp->s_rdma_ack_cnt--; + return; + } + + psn = be32_to_cpu(ohdr->bth[2]); + reset_sending_psn(qp, psn); + + /* + * Start timer after a packet requesting an ACK has been sent and + * there are still requests that haven't been acked. + */ + if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && + !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN))) + start_timer(qp); + + while (qp->s_last != qp->s_acked) { + wqe = get_swqe_ptr(qp, qp->s_last); + if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 && + qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) + break; + for (i = 0; i < wqe->wr.num_sge; i++) { + struct qib_sge *sge = &wqe->sg_list[i]; + + atomic_dec(&sge->mr->refcount); + } + /* Post a send completion queue entry if requested. */ + if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || + (wqe->wr.send_flags & IB_SEND_SIGNALED)) { + memset(&wc, 0, sizeof wc); + wc.wr_id = wqe->wr.wr_id; + wc.status = IB_WC_SUCCESS; + wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; + wc.byte_len = wqe->length; + wc.qp = &qp->ibqp; + qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); + } + if (++qp->s_last >= qp->s_size) + qp->s_last = 0; + } + /* + * If we were waiting for sends to complete before resending, + * and they are now complete, restart sending. + */ + if (qp->s_flags & QIB_S_WAIT_PSN && + qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { + qp->s_flags &= ~QIB_S_WAIT_PSN; + qp->s_sending_psn = qp->s_psn; + qp->s_sending_hpsn = qp->s_psn - 1; + qib_schedule_send(qp); + } +} + +static inline void update_last_psn(struct qib_qp *qp, u32 psn) +{ + qp->s_last_psn = psn; +} + +/* + * Generate a SWQE completion. + * This is similar to qib_send_complete but has to check to be sure + * that the SGEs are not being referenced if the SWQE is being resent. + */ +static struct qib_swqe *do_rc_completion(struct qib_qp *qp, + struct qib_swqe *wqe, + struct qib_ibport *ibp) +{ + struct ib_wc wc; + unsigned i; + + /* + * Don't decrement refcount and don't generate a + * completion if the SWQE is being resent until the send + * is finished. + */ + if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 || + qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { + for (i = 0; i < wqe->wr.num_sge; i++) { + struct qib_sge *sge = &wqe->sg_list[i]; + + atomic_dec(&sge->mr->refcount); + } + /* Post a send completion queue entry if requested. 
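 */
/*
 * Illustrative sketch, not part of the patch: s_last, s_cur and s_acked
 * are indices into a circular send queue of s_size entries, advanced
 * throughout this file as:
 */
#include <stdint.h>

static inline uint32_t ring_next_sketch(uint32_t idx, uint32_t size)
{
	return (idx + 1 >= size) ? 0 : idx + 1;
}
/* Post a send completion queue entry if requested.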
*/ + if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || + (wqe->wr.send_flags & IB_SEND_SIGNALED)) { + memset(&wc, 0, sizeof wc); + wc.wr_id = wqe->wr.wr_id; + wc.status = IB_WC_SUCCESS; + wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; + wc.byte_len = wqe->length; + wc.qp = &qp->ibqp; + qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); + } + if (++qp->s_last >= qp->s_size) + qp->s_last = 0; + } else + ibp->n_rc_delayed_comp++; + + qp->s_retry = qp->s_retry_cnt; + update_last_psn(qp, wqe->lpsn); + + /* + * If we are completing a request which is in the process of + * being resent, we can stop resending it since we know the + * responder has already seen it. + */ + if (qp->s_acked == qp->s_cur) { + if (++qp->s_cur >= qp->s_size) + qp->s_cur = 0; + qp->s_acked = qp->s_cur; + wqe = get_swqe_ptr(qp, qp->s_cur); + if (qp->s_acked != qp->s_tail) { + qp->s_state = OP(SEND_LAST); + qp->s_psn = wqe->psn; + } + } else { + if (++qp->s_acked >= qp->s_size) + qp->s_acked = 0; + if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) + qp->s_draining = 0; + wqe = get_swqe_ptr(qp, qp->s_acked); + } + return wqe; +} + +/** + * do_rc_ack - process an incoming RC ACK + * @qp: the QP the ACK came in on + * @psn: the packet sequence number of the ACK + * @opcode: the opcode of the request that resulted in the ACK + * + * This is called from qib_rc_rcv_resp() to process an incoming RC ACK + * for the given QP. + * Called at interrupt level with the QP s_lock held. + * Returns 1 if OK, 0 if current operation should be aborted (NAK). + */ +static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, + u64 val, struct qib_ctxtdata *rcd) +{ + struct qib_ibport *ibp; + enum ib_wc_status status; + struct qib_swqe *wqe; + int ret = 0; + u32 ack_psn; + int diff; + + /* Remove QP from retry timer */ + if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { + qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); + del_timer(&qp->s_timer); + } + + /* + * Note that NAKs implicitly ACK outstanding SEND and RDMA write + * requests and implicitly NAK RDMA read and atomic requests issued + * before the NAK'ed request. The MSN won't include the NAK'ed + * request but will include an ACK'ed request(s). + */ + ack_psn = psn; + if (aeth >> 29) + ack_psn--; + wqe = get_swqe_ptr(qp, qp->s_acked); + ibp = to_iport(qp->ibqp.device, qp->port_num); + + /* + * The MSN might be for a later WQE than the PSN indicates so + * only complete WQEs that the PSN finishes. + */ + while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) { + /* + * RDMA_READ_RESPONSE_ONLY is a special case since + * we want to generate completion events for everything + * before the RDMA read, copy the data, then generate + * the completion for the read. + */ + if (wqe->wr.opcode == IB_WR_RDMA_READ && + opcode == OP(RDMA_READ_RESPONSE_ONLY) && + diff == 0) { + ret = 1; + goto bail; + } + /* + * If this request is a RDMA read or atomic, and the ACK is + * for a later operation, this ACK NAKs the RDMA read or + * atomic. In other words, only a RDMA_READ_LAST or ONLY + * can ACK a RDMA read and likewise for atomic ops. Note + * that the NAK case can only happen if relaxed ordering is + * used and requests are sent after an RDMA read or atomic + * is sent but before the response is received. + */ + if ((wqe->wr.opcode == IB_WR_RDMA_READ && + (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) || + ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || + wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && + (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) { + /* Retry this request. 
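 */
/*
 * Illustrative sketch, not part of the patch: the aeth >> 29 tests in
 * do_rc_ack() select on the top three bits of the AETH, which encode the
 * ACK class handled by the switch further below:
 */
#include <stdint.h>

static const char *ack_class_sketch(uint32_t aeth)
{
	switch (aeth >> 29) {
	case 0:		return "ACK";
	case 1:		return "RNR NAK";
	case 3:		return "NAK";
	default:	return "reserved";
	}
}
/* Retry this request.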
*/ + if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) { + qp->r_flags |= QIB_R_RDMAR_SEQ; + qib_restart_rc(qp, qp->s_last_psn + 1, 0); + if (list_empty(&qp->rspwait)) { + qp->r_flags |= QIB_R_RSP_SEND; + atomic_inc(&qp->refcount); + list_add_tail(&qp->rspwait, + &rcd->qp_wait_list); + } + } + /* + * No need to process the ACK/NAK since we are + * restarting an earlier request. + */ + goto bail; + } + if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || + wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { + u64 *vaddr = wqe->sg_list[0].vaddr; + *vaddr = val; + } + if (qp->s_num_rd_atomic && + (wqe->wr.opcode == IB_WR_RDMA_READ || + wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || + wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { + qp->s_num_rd_atomic--; + /* Restart sending task if fence is complete */ + if ((qp->s_flags & QIB_S_WAIT_FENCE) && + !qp->s_num_rd_atomic) { + qp->s_flags &= ~(QIB_S_WAIT_FENCE | + QIB_S_WAIT_ACK); + qib_schedule_send(qp); + } else if (qp->s_flags & QIB_S_WAIT_RDMAR) { + qp->s_flags &= ~(QIB_S_WAIT_RDMAR | + QIB_S_WAIT_ACK); + qib_schedule_send(qp); + } + } + wqe = do_rc_completion(qp, wqe, ibp); + if (qp->s_acked == qp->s_tail) + break; + } + + switch (aeth >> 29) { + case 0: /* ACK */ + ibp->n_rc_acks++; + if (qp->s_acked != qp->s_tail) { + /* + * We are expecting more ACKs so + * reset the retransmit timer. + */ + start_timer(qp); + /* + * We can stop resending the earlier packets and + * continue with the next packet the receiver wants. + */ + if (qib_cmp24(qp->s_psn, psn) <= 0) + reset_psn(qp, psn + 1); + } else if (qib_cmp24(qp->s_psn, psn) <= 0) { + qp->s_state = OP(SEND_LAST); + qp->s_psn = psn + 1; + } + if (qp->s_flags & QIB_S_WAIT_ACK) { + qp->s_flags &= ~QIB_S_WAIT_ACK; + qib_schedule_send(qp); + } + qib_get_credit(qp, aeth); + qp->s_rnr_retry = qp->s_rnr_retry_cnt; + qp->s_retry = qp->s_retry_cnt; + update_last_psn(qp, psn); + ret = 1; + goto bail; + + case 1: /* RNR NAK */ + ibp->n_rnr_naks++; + if (qp->s_acked == qp->s_tail) + goto bail; + if (qp->s_flags & QIB_S_WAIT_RNR) + goto bail; + if (qp->s_rnr_retry == 0) { + status = IB_WC_RNR_RETRY_EXC_ERR; + goto class_b; + } + if (qp->s_rnr_retry_cnt < 7) + qp->s_rnr_retry--; + + /* The last valid PSN is the previous PSN. */ + update_last_psn(qp, psn - 1); + + ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; + + reset_psn(qp, psn); + + qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK); + qp->s_flags |= QIB_S_WAIT_RNR; + qp->s_timer.function = qib_rc_rnr_retry; + qp->s_timer.expires = jiffies + usecs_to_jiffies( + ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) & + QIB_AETH_CREDIT_MASK]); + add_timer(&qp->s_timer); + goto bail; + + case 3: /* NAK */ + if (qp->s_acked == qp->s_tail) + goto bail; + /* The last valid PSN is the previous PSN. */ + update_last_psn(qp, psn - 1); + switch ((aeth >> QIB_AETH_CREDIT_SHIFT) & + QIB_AETH_CREDIT_MASK) { + case 0: /* PSN sequence error */ + ibp->n_seq_naks++; + /* + * Back up to the responder's expected PSN. + * Note that we might get a NAK in the middle of an + * RDMA READ response which terminates the RDMA + * READ. 
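 */
/*
 * Illustrative sketch, not part of the patch: the RNR backoff armed above
 * waits ib_qib_rnr_table[code] microseconds, where the code is taken from
 * the AETH credit field (shift 24 and mask 0x1f assumed here for
 * QIB_AETH_CREDIT_SHIFT and QIB_AETH_CREDIT_MASK):
 */
#include <stdint.h>

static uint32_t rnr_wait_usecs_sketch(uint32_t aeth, const uint32_t table[32])
{
	return table[(aeth >> 24) & 0x1f];
}
/*
 * Back up to the responder's expected PSN.
 * Note that we might get a NAK in the middle of an
 * RDMA READ response which terminates the RDMA
 * READ.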
+ */ + qib_restart_rc(qp, psn, 0); + qib_schedule_send(qp); + break; + + case 1: /* Invalid Request */ + status = IB_WC_REM_INV_REQ_ERR; + ibp->n_other_naks++; + goto class_b; + + case 2: /* Remote Access Error */ + status = IB_WC_REM_ACCESS_ERR; + ibp->n_other_naks++; + goto class_b; + + case 3: /* Remote Operation Error */ + status = IB_WC_REM_OP_ERR; + ibp->n_other_naks++; +class_b: + if (qp->s_last == qp->s_acked) { + qib_send_complete(qp, wqe, status); + qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + } + break; + + default: + /* Ignore other reserved NAK error codes */ + goto reserved; + } + qp->s_retry = qp->s_retry_cnt; + qp->s_rnr_retry = qp->s_rnr_retry_cnt; + goto bail; + + default: /* 2: reserved */ +reserved: + /* Ignore reserved NAK codes. */ + goto bail; + } + +bail: + return ret; +} + +/* + * We have seen an out of sequence RDMA read middle or last packet. + * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE. + */ +static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, + struct qib_ctxtdata *rcd) +{ + struct qib_swqe *wqe; + + /* Remove QP from retry timer */ + if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { + qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); + del_timer(&qp->s_timer); + } + + wqe = get_swqe_ptr(qp, qp->s_acked); + + while (qib_cmp24(psn, wqe->lpsn) > 0) { + if (wqe->wr.opcode == IB_WR_RDMA_READ || + wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || + wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) + break; + wqe = do_rc_completion(qp, wqe, ibp); + } + + ibp->n_rdma_seq++; + qp->r_flags |= QIB_R_RDMAR_SEQ; + qib_restart_rc(qp, qp->s_last_psn + 1, 0); + if (list_empty(&qp->rspwait)) { + qp->r_flags |= QIB_R_RSP_SEND; + atomic_inc(&qp->refcount); + list_add_tail(&qp->rspwait, &rcd->qp_wait_list); + } +} + +/** + * qib_rc_rcv_resp - process an incoming RC response packet + * @ibp: the port this packet came in on + * @ohdr: the other headers for this packet + * @data: the packet data + * @tlen: the packet length + * @qp: the QP for this packet + * @opcode: the opcode for this packet + * @psn: the packet sequence number for this packet + * @hdrsize: the header length + * @pmtu: the path MTU + * + * This is called from qib_rc_rcv() to process an incoming RC response + * packet for the given QP. + * Called at interrupt level. + */ +static void qib_rc_rcv_resp(struct qib_ibport *ibp, + struct qib_other_headers *ohdr, + void *data, u32 tlen, + struct qib_qp *qp, + u32 opcode, + u32 psn, u32 hdrsize, u32 pmtu, + struct qib_ctxtdata *rcd) +{ + struct qib_swqe *wqe; + enum ib_wc_status status; + unsigned long flags; + int diff; + u32 pad; + u32 aeth; + u64 val; + + spin_lock_irqsave(&qp->s_lock, flags); + + /* Double check we can process this now that we hold the s_lock. */ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto ack_done; + + /* Ignore invalid responses. */ + if (qib_cmp24(psn, qp->s_next_psn) >= 0) + goto ack_done; + + /* Ignore duplicate responses. */ + diff = qib_cmp24(psn, qp->s_last_psn); + if (unlikely(diff <= 0)) { + /* Update credits for "ghost" ACKs */ + if (diff == 0 && opcode == OP(ACKNOWLEDGE)) { + aeth = be32_to_cpu(ohdr->u.aeth); + if ((aeth >> 29) == 0) + qib_get_credit(qp, aeth); + } + goto ack_done; + } + + /* + * Skip everything other than the PSN we expect, if we are waiting + * for a reply to a restarted RDMA read or atomic op. 
+ */ + if (qp->r_flags & QIB_R_RDMAR_SEQ) { + if (qib_cmp24(psn, qp->s_last_psn + 1) != 0) + goto ack_done; + qp->r_flags &= ~QIB_R_RDMAR_SEQ; + } + + if (unlikely(qp->s_acked == qp->s_tail)) + goto ack_done; + wqe = get_swqe_ptr(qp, qp->s_acked); + status = IB_WC_SUCCESS; + + switch (opcode) { + case OP(ACKNOWLEDGE): + case OP(ATOMIC_ACKNOWLEDGE): + case OP(RDMA_READ_RESPONSE_FIRST): + aeth = be32_to_cpu(ohdr->u.aeth); + if (opcode == OP(ATOMIC_ACKNOWLEDGE)) { + __be32 *p = ohdr->u.at.atomic_ack_eth; + + val = ((u64) be32_to_cpu(p[0]) << 32) | + be32_to_cpu(p[1]); + } else + val = 0; + if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || + opcode != OP(RDMA_READ_RESPONSE_FIRST)) + goto ack_done; + hdrsize += 4; + wqe = get_swqe_ptr(qp, qp->s_acked); + if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) + goto ack_op_err; + /* + * If this is a response to a resent RDMA read, we + * have to be careful to copy the data to the right + * location. + */ + qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, + wqe, psn, pmtu); + goto read_middle; + + case OP(RDMA_READ_RESPONSE_MIDDLE): + /* no AETH, no ACK */ + if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) + goto ack_seq_err; + if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) + goto ack_op_err; +read_middle: + if (unlikely(tlen != (hdrsize + pmtu + 4))) + goto ack_len_err; + if (unlikely(pmtu >= qp->s_rdma_read_len)) + goto ack_len_err; + + /* + * We got a response so update the timeout. + * 4.096 usec. * (1 << qp->timeout) + */ + qp->s_flags |= QIB_S_TIMER; + mod_timer(&qp->s_timer, jiffies + + usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / + 1000UL)); + if (qp->s_flags & QIB_S_WAIT_ACK) { + qp->s_flags &= ~QIB_S_WAIT_ACK; + qib_schedule_send(qp); + } + + if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE)) + qp->s_retry = qp->s_retry_cnt; + + /* + * Update the RDMA receive state but do the copy w/o + * holding the locks and blocking interrupts. + */ + qp->s_rdma_read_len -= pmtu; + update_last_psn(qp, psn); + spin_unlock_irqrestore(&qp->s_lock, flags); + qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0); + goto bail; + + case OP(RDMA_READ_RESPONSE_ONLY): + aeth = be32_to_cpu(ohdr->u.aeth); + if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) + goto ack_done; + /* Get the number of bytes the message was padded by. */ + pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; + /* + * Check that the data size is >= 0 && <= pmtu. + * Remember to account for the AETH header (4) and + * ICRC (4). + */ + if (unlikely(tlen < (hdrsize + pad + 8))) + goto ack_len_err; + /* + * If this is a response to a resent RDMA read, we + * have to be careful to copy the data to the right + * location. + */ + wqe = get_swqe_ptr(qp, qp->s_acked); + qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, + wqe, psn, pmtu); + goto read_last; + + case OP(RDMA_READ_RESPONSE_LAST): + /* ACKs READ req. */ + if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) + goto ack_seq_err; + if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) + goto ack_op_err; + /* Get the number of bytes the message was padded by. */ + pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; + /* + * Check that the data size is >= 1 && <= pmtu. + * Remember to account for the AETH header (4) and + * ICRC (4). 
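 */
/*
 * Illustrative sketch, not part of the patch: the mod_timer() above
 * re-arms the retransmit timer to 4.096 usec * 2^qp->timeout, computed in
 * 4096 ns units and converted to microseconds before usecs_to_jiffies():
 */
#include <stdint.h>

static uint64_t rc_timeout_usecs_sketch(unsigned int timeout /* 0..31 */)
{
	return (4096ULL << timeout) / 1000;
}
/*
 * Check that the data size is >= 1 && <= pmtu.
 * Remember to account for the AETH header (4) and
 * ICRC (4).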
+ */ + if (unlikely(tlen <= (hdrsize + pad + 8))) + goto ack_len_err; +read_last: + tlen -= hdrsize + pad + 8; + if (unlikely(tlen != qp->s_rdma_read_len)) + goto ack_len_err; + aeth = be32_to_cpu(ohdr->u.aeth); + qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0); + WARN_ON(qp->s_rdma_read_sge.num_sge); + (void) do_rc_ack(qp, aeth, psn, + OP(RDMA_READ_RESPONSE_LAST), 0, rcd); + goto ack_done; + } + +ack_op_err: + status = IB_WC_LOC_QP_OP_ERR; + goto ack_err; + +ack_seq_err: + rdma_seq_err(qp, ibp, psn, rcd); + goto ack_done; + +ack_len_err: + status = IB_WC_LOC_LEN_ERR; +ack_err: + if (qp->s_last == qp->s_acked) { + qib_send_complete(qp, wqe, status); + qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + } +ack_done: + spin_unlock_irqrestore(&qp->s_lock, flags); +bail: + return; +} + +/** + * qib_rc_rcv_error - process an incoming duplicate or error RC packet + * @ohdr: the other headers for this packet + * @data: the packet data + * @qp: the QP for this packet + * @opcode: the opcode for this packet + * @psn: the packet sequence number for this packet + * @diff: the difference between the PSN and the expected PSN + * + * This is called from qib_rc_rcv() to process an unexpected + * incoming RC packet for the given QP. + * Called at interrupt level. + * Return 1 if no more processing is needed; otherwise return 0 to + * schedule a response to be sent. + */ +static int qib_rc_rcv_error(struct qib_other_headers *ohdr, + void *data, + struct qib_qp *qp, + u32 opcode, + u32 psn, + int diff, + struct qib_ctxtdata *rcd) +{ + struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); + struct qib_ack_entry *e; + unsigned long flags; + u8 i, prev; + int old_req; + + if (diff > 0) { + /* + * Packet sequence error. + * A NAK will ACK earlier sends and RDMA writes. + * Don't queue the NAK if we already sent one. + */ + if (!qp->r_nak_state) { + ibp->n_rc_seqnak++; + qp->r_nak_state = IB_NAK_PSN_ERROR; + /* Use the expected PSN. */ + qp->r_ack_psn = qp->r_psn; + /* + * Wait to send the sequence NAK until all packets + * in the receive queue have been processed. + * Otherwise, we end up propagating congestion. + */ + if (list_empty(&qp->rspwait)) { + qp->r_flags |= QIB_R_RSP_NAK; + atomic_inc(&qp->refcount); + list_add_tail(&qp->rspwait, &rcd->qp_wait_list); + } + } + goto done; + } + + /* + * Handle a duplicate request. Don't re-execute SEND, RDMA + * write or atomic op. Don't NAK errors, just silently drop + * the duplicate request. Note that r_sge, r_len, and + * r_rcv_len may be in use so don't modify them. + * + * We are supposed to ACK the earliest duplicate PSN but we + * can coalesce an outstanding duplicate ACK. We have to + * send the earliest so that RDMA reads can be restarted at + * the requester's expected PSN. + * + * First, find where this duplicate PSN falls within the + * ACKs previously sent. + * old_req is true if there is an older response that is scheduled + * to be sent before sending this one. + */ + e = NULL; + old_req = 1; + ibp->n_rc_dupreq++; + + spin_lock_irqsave(&qp->s_lock, flags); + /* Double check we can process this now that we hold the s_lock. 
*/ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto unlock_done; + + for (i = qp->r_head_ack_queue; ; i = prev) { + if (i == qp->s_tail_ack_queue) + old_req = 0; + if (i) + prev = i - 1; + else + prev = QIB_MAX_RDMA_ATOMIC; + if (prev == qp->r_head_ack_queue) { + e = NULL; + break; + } + e = &qp->s_ack_queue[prev]; + if (!e->opcode) { + e = NULL; + break; + } + if (qib_cmp24(psn, e->psn) >= 0) { + if (prev == qp->s_tail_ack_queue && + qib_cmp24(psn, e->lpsn) <= 0) + old_req = 0; + break; + } + } + switch (opcode) { + case OP(RDMA_READ_REQUEST): { + struct ib_reth *reth; + u32 offset; + u32 len; + + /* + * If we didn't find the RDMA read request in the ack queue, + * we can ignore this request. + */ + if (!e || e->opcode != OP(RDMA_READ_REQUEST)) + goto unlock_done; + /* RETH comes after BTH */ + reth = &ohdr->u.rc.reth; + /* + * Address range must be a subset of the original + * request and start on pmtu boundaries. + * We reuse the old ack_queue slot since the requester + * should not back up and request an earlier PSN for the + * same request. + */ + offset = ((psn - e->psn) & QIB_PSN_MASK) * + ib_mtu_enum_to_int(qp->path_mtu); + len = be32_to_cpu(reth->length); + if (unlikely(offset + len != e->rdma_sge.sge_length)) + goto unlock_done; + if (e->rdma_sge.mr) { + atomic_dec(&e->rdma_sge.mr->refcount); + e->rdma_sge.mr = NULL; + } + if (len != 0) { + u32 rkey = be32_to_cpu(reth->rkey); + u64 vaddr = be64_to_cpu(reth->vaddr); + int ok; + + ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, + IB_ACCESS_REMOTE_READ); + if (unlikely(!ok)) + goto unlock_done; + } else { + e->rdma_sge.vaddr = NULL; + e->rdma_sge.length = 0; + e->rdma_sge.sge_length = 0; + } + e->psn = psn; + if (old_req) + goto unlock_done; + qp->s_tail_ack_queue = prev; + break; + } + + case OP(COMPARE_SWAP): + case OP(FETCH_ADD): { + /* + * If we didn't find the atomic request in the ack queue + * or the send tasklet is already backed up to send an + * earlier entry, we can ignore this request. + */ + if (!e || e->opcode != (u8) opcode || old_req) + goto unlock_done; + qp->s_tail_ack_queue = prev; + break; + } + + default: + /* + * Ignore this operation if it doesn't request an ACK + * or an earlier RDMA read or atomic is going to be resent. + */ + if (!(psn & IB_BTH_REQ_ACK) || old_req) + goto unlock_done; + /* + * Resend the most recent ACK if this request is + * after all the previous RDMA reads and atomics. + */ + if (i == qp->r_head_ack_queue) { + spin_unlock_irqrestore(&qp->s_lock, flags); + qp->r_nak_state = 0; + qp->r_ack_psn = qp->r_psn - 1; + goto send_ack; + } + /* + * Try to send a simple ACK to work around a Mellanox bug + * which doesn't accept a RDMA read response or atomic + * response as an ACK for earlier SENDs or RDMA writes. + */ + if (!(qp->s_flags & QIB_S_RESP_PENDING)) { + spin_unlock_irqrestore(&qp->s_lock, flags); + qp->r_nak_state = 0; + qp->r_ack_psn = qp->s_ack_queue[i].psn - 1; + goto send_ack; + } + /* + * Resend the RDMA read or atomic op which + * ACKs this duplicate request. 
+ */ + qp->s_tail_ack_queue = i; + break; + } + qp->s_ack_state = OP(ACKNOWLEDGE); + qp->s_flags |= QIB_S_RESP_PENDING; + qp->r_nak_state = 0; + qib_schedule_send(qp); + +unlock_done: + spin_unlock_irqrestore(&qp->s_lock, flags); +done: + return 1; + +send_ack: + return 0; +} + +void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err) +{ + unsigned long flags; + int lastwqe; + + spin_lock_irqsave(&qp->s_lock, flags); + lastwqe = qib_error_qp(qp, err); + spin_unlock_irqrestore(&qp->s_lock, flags); + + if (lastwqe) { + struct ib_event ev; + + ev.device = qp->ibqp.device; + ev.element.qp = &qp->ibqp; + ev.event = IB_EVENT_QP_LAST_WQE_REACHED; + qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); + } +} + +static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n) +{ + unsigned next; + + next = n + 1; + if (next > QIB_MAX_RDMA_ATOMIC) + next = 0; + qp->s_tail_ack_queue = next; + qp->s_ack_state = OP(ACKNOWLEDGE); +} + +/** + * qib_rc_rcv - process an incoming RC packet + * @rcd: the context pointer + * @hdr: the header of this packet + * @has_grh: true if the header has a GRH + * @data: the packet data + * @tlen: the packet length + * @qp: the QP for this packet + * + * This is called from qib_qp_rcv() to process an incoming RC packet + * for the given QP. + * Called at interrupt level. + */ +void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, + int has_grh, void *data, u32 tlen, struct qib_qp *qp) +{ + struct qib_ibport *ibp = &rcd->ppd->ibport_data; + struct qib_other_headers *ohdr; + u32 opcode; + u32 hdrsize; + u32 psn; + u32 pad; + struct ib_wc wc; + u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); + int diff; + struct ib_reth *reth; + unsigned long flags; + int ret; + + /* Check for GRH */ + if (!has_grh) { + ohdr = &hdr->u.oth; + hdrsize = 8 + 12; /* LRH + BTH */ + } else { + ohdr = &hdr->u.l.oth; + hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */ + } + + opcode = be32_to_cpu(ohdr->bth[0]); + spin_lock_irqsave(&qp->s_lock, flags); + if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) + goto sunlock; + spin_unlock_irqrestore(&qp->s_lock, flags); + + psn = be32_to_cpu(ohdr->bth[2]); + opcode >>= 24; + + /* Prevent simultaneous processing after APM on different CPUs */ + spin_lock(&qp->r_lock); + + /* + * Process responses (ACKs) before anything else. Note that the + * packet sequence number will be for something in the send work + * queue rather than the expected receive packet sequence number. + * In other words, this QP is the requester. + */ + if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) && + opcode <= OP(ATOMIC_ACKNOWLEDGE)) { + qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, + hdrsize, pmtu, rcd); + goto runlock; + } + + /* Compute 24 bits worth of difference. */ + diff = qib_cmp24(psn, qp->r_psn); + if (unlikely(diff)) { + if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) + goto runlock; + goto send_ack; + } + + /* Check for opcode sequence errors. 
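 */
/*
 * Illustrative sketch, not part of the patch: qib_rc_rcv() above pulls
 * the opcode out of the top byte of bth[0] and the 24-bit PSN out of
 * bth[2]; with the words already converted to host order that is just:
 */
#include <stdint.h>

static void parse_bth_sketch(const uint32_t bth[3], uint32_t *opcode,
			     uint32_t *psn)
{
	*opcode = bth[0] >> 24;
	*psn = bth[2] & 0xffffff;
}
/* Check for opcode sequence errors.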
*/ + switch (qp->r_state) { + case OP(SEND_FIRST): + case OP(SEND_MIDDLE): + if (opcode == OP(SEND_MIDDLE) || + opcode == OP(SEND_LAST) || + opcode == OP(SEND_LAST_WITH_IMMEDIATE)) + break; + goto nack_inv; + + case OP(RDMA_WRITE_FIRST): + case OP(RDMA_WRITE_MIDDLE): + if (opcode == OP(RDMA_WRITE_MIDDLE) || + opcode == OP(RDMA_WRITE_LAST) || + opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) + break; + goto nack_inv; + + default: + if (opcode == OP(SEND_MIDDLE) || + opcode == OP(SEND_LAST) || + opcode == OP(SEND_LAST_WITH_IMMEDIATE) || + opcode == OP(RDMA_WRITE_MIDDLE) || + opcode == OP(RDMA_WRITE_LAST) || + opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) + goto nack_inv; + /* + * Note that it is up to the requester to not send a new + * RDMA read or atomic operation before receiving an ACK + * for the previous operation. + */ + break; + } + + memset(&wc, 0, sizeof wc); + + if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { + qp->r_flags |= QIB_R_COMM_EST; + if (qp->ibqp.event_handler) { + struct ib_event ev; + + ev.device = qp->ibqp.device; + ev.element.qp = &qp->ibqp; + ev.event = IB_EVENT_COMM_EST; + qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); + } + } + + /* OK, process the packet. */ + switch (opcode) { + case OP(SEND_FIRST): + ret = qib_get_rwqe(qp, 0); + if (ret < 0) + goto nack_op_err; + if (!ret) + goto rnr_nak; + qp->r_rcv_len = 0; + /* FALLTHROUGH */ + case OP(SEND_MIDDLE): + case OP(RDMA_WRITE_MIDDLE): +send_middle: + /* Check for invalid length PMTU or posted rwqe len. */ + if (unlikely(tlen != (hdrsize + pmtu + 4))) + goto nack_inv; + qp->r_rcv_len += pmtu; + if (unlikely(qp->r_rcv_len > qp->r_len)) + goto nack_inv; + qib_copy_sge(&qp->r_sge, data, pmtu, 1); + break; + + case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): + /* consume RWQE */ + ret = qib_get_rwqe(qp, 1); + if (ret < 0) + goto nack_op_err; + if (!ret) + goto rnr_nak; + goto send_last_imm; + + case OP(SEND_ONLY): + case OP(SEND_ONLY_WITH_IMMEDIATE): + ret = qib_get_rwqe(qp, 0); + if (ret < 0) + goto nack_op_err; + if (!ret) + goto rnr_nak; + qp->r_rcv_len = 0; + if (opcode == OP(SEND_ONLY)) + goto send_last; + /* FALLTHROUGH */ + case OP(SEND_LAST_WITH_IMMEDIATE): +send_last_imm: + wc.ex.imm_data = ohdr->u.imm_data; + hdrsize += 4; + wc.wc_flags = IB_WC_WITH_IMM; + /* FALLTHROUGH */ + case OP(SEND_LAST): + case OP(RDMA_WRITE_LAST): +send_last: + /* Get the number of bytes the message was padded by. */ + pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; + /* Check for invalid length. */ + /* XXX LAST len should be >= 1 */ + if (unlikely(tlen < (hdrsize + pad + 4))) + goto nack_inv; + /* Don't count the CRC. */ + tlen -= (hdrsize + pad + 4); + wc.byte_len = tlen + qp->r_rcv_len; + if (unlikely(wc.byte_len > qp->r_len)) + goto nack_inv; + qib_copy_sge(&qp->r_sge, data, tlen, 1); + while (qp->r_sge.num_sge) { + atomic_dec(&qp->r_sge.sge.mr->refcount); + if (--qp->r_sge.num_sge) + qp->r_sge.sge = *qp->r_sge.sg_list++; + } + qp->r_msn++; + if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) + break; + wc.wr_id = qp->r_wr_id; + wc.status = IB_WC_SUCCESS; + if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) || + opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) + wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; + else + wc.opcode = IB_WC_RECV; + wc.qp = &qp->ibqp; + wc.src_qp = qp->remote_qpn; + wc.slid = qp->remote_ah_attr.dlid; + wc.sl = qp->remote_ah_attr.sl; + /* Signal completion event if the solicited bit is set. 
*/ + qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + (ohdr->bth[0] & + cpu_to_be32(IB_BTH_SOLICITED)) != 0); + break; + + case OP(RDMA_WRITE_FIRST): + case OP(RDMA_WRITE_ONLY): + case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) + goto nack_inv; + /* consume RWQE */ + reth = &ohdr->u.rc.reth; + hdrsize += sizeof(*reth); + qp->r_len = be32_to_cpu(reth->length); + qp->r_rcv_len = 0; + qp->r_sge.sg_list = NULL; + if (qp->r_len != 0) { + u32 rkey = be32_to_cpu(reth->rkey); + u64 vaddr = be64_to_cpu(reth->vaddr); + int ok; + + /* Check rkey & NAK */ + ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, + rkey, IB_ACCESS_REMOTE_WRITE); + if (unlikely(!ok)) + goto nack_acc; + qp->r_sge.num_sge = 1; + } else { + qp->r_sge.num_sge = 0; + qp->r_sge.sge.mr = NULL; + qp->r_sge.sge.vaddr = NULL; + qp->r_sge.sge.length = 0; + qp->r_sge.sge.sge_length = 0; + } + if (opcode == OP(RDMA_WRITE_FIRST)) + goto send_middle; + else if (opcode == OP(RDMA_WRITE_ONLY)) + goto send_last; + ret = qib_get_rwqe(qp, 1); + if (ret < 0) + goto nack_op_err; + if (!ret) + goto rnr_nak; + goto send_last_imm; + + case OP(RDMA_READ_REQUEST): { + struct qib_ack_entry *e; + u32 len; + u8 next; + + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) + goto nack_inv; + next = qp->r_head_ack_queue + 1; + /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */ + if (next > QIB_MAX_RDMA_ATOMIC) + next = 0; + spin_lock_irqsave(&qp->s_lock, flags); + /* Double check we can process this while holding the s_lock. */ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto srunlock; + if (unlikely(next == qp->s_tail_ack_queue)) { + if (!qp->s_ack_queue[next].sent) + goto nack_inv_unlck; + qib_update_ack_queue(qp, next); + } + e = &qp->s_ack_queue[qp->r_head_ack_queue]; + if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { + atomic_dec(&e->rdma_sge.mr->refcount); + e->rdma_sge.mr = NULL; + } + reth = &ohdr->u.rc.reth; + len = be32_to_cpu(reth->length); + if (len) { + u32 rkey = be32_to_cpu(reth->rkey); + u64 vaddr = be64_to_cpu(reth->vaddr); + int ok; + + /* Check rkey & NAK */ + ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, + rkey, IB_ACCESS_REMOTE_READ); + if (unlikely(!ok)) + goto nack_acc_unlck; + /* + * Update the next expected PSN. We add 1 later + * below, so only add the remainder here. + */ + if (len > pmtu) + qp->r_psn += (len - 1) / pmtu; + } else { + e->rdma_sge.mr = NULL; + e->rdma_sge.vaddr = NULL; + e->rdma_sge.length = 0; + e->rdma_sge.sge_length = 0; + } + e->opcode = opcode; + e->sent = 0; + e->psn = psn; + e->lpsn = qp->r_psn; + /* + * We need to increment the MSN here instead of when we + * finish sending the result since a duplicate request would + * increment it more than once. + */ + qp->r_msn++; + qp->r_psn++; + qp->r_state = opcode; + qp->r_nak_state = 0; + qp->r_head_ack_queue = next; + + /* Schedule the send tasklet. */ + qp->s_flags |= QIB_S_RESP_PENDING; + qib_schedule_send(qp); + + goto srunlock; + } + + case OP(COMPARE_SWAP): + case OP(FETCH_ADD): { + struct ib_atomic_eth *ateth; + struct qib_ack_entry *e; + u64 vaddr; + atomic64_t *maddr; + u64 sdata; + u32 rkey; + u8 next; + + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) + goto nack_inv; + next = qp->r_head_ack_queue + 1; + if (next > QIB_MAX_RDMA_ATOMIC) + next = 0; + spin_lock_irqsave(&qp->s_lock, flags); + /* Double check we can process this while holding the s_lock. 
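 */
/*
 * Illustrative sketch, not part of the patch: the responder-side atomics
 * executed below via cmpxchg()/atomic64_add_return() have the semantics
 * sketched here (single-threaded for clarity); the old value is what gets
 * returned in the ATOMIC ACK.  For FETCH_ADD the requester places the
 * addend in the swap_data field:
 */
#include <stdint.h>

static uint64_t rc_atomic_sketch(uint64_t *target, int is_fetch_add,
				 uint64_t swap, uint64_t compare)
{
	uint64_t old = *target;

	if (is_fetch_add)
		*target = old + swap;
	else if (old == compare)
		*target = swap;
	return old;
}
/* Double check we can process this while holding the s_lock.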
*/ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto srunlock; + if (unlikely(next == qp->s_tail_ack_queue)) { + if (!qp->s_ack_queue[next].sent) + goto nack_inv_unlck; + qib_update_ack_queue(qp, next); + } + e = &qp->s_ack_queue[qp->r_head_ack_queue]; + if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { + atomic_dec(&e->rdma_sge.mr->refcount); + e->rdma_sge.mr = NULL; + } + ateth = &ohdr->u.atomic_eth; + vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) | + be32_to_cpu(ateth->vaddr[1]); + if (unlikely(vaddr & (sizeof(u64) - 1))) + goto nack_inv_unlck; + rkey = be32_to_cpu(ateth->rkey); + /* Check rkey & NAK */ + if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), + vaddr, rkey, + IB_ACCESS_REMOTE_ATOMIC))) + goto nack_acc_unlck; + /* Perform atomic OP and save result. */ + maddr = (atomic64_t *) qp->r_sge.sge.vaddr; + sdata = be64_to_cpu(ateth->swap_data); + e->atomic_data = (opcode == OP(FETCH_ADD)) ? + (u64) atomic64_add_return(sdata, maddr) - sdata : + (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, + be64_to_cpu(ateth->compare_data), + sdata); + atomic_dec(&qp->r_sge.sge.mr->refcount); + qp->r_sge.num_sge = 0; + e->opcode = opcode; + e->sent = 0; + e->psn = psn; + e->lpsn = psn; + qp->r_msn++; + qp->r_psn++; + qp->r_state = opcode; + qp->r_nak_state = 0; + qp->r_head_ack_queue = next; + + /* Schedule the send tasklet. */ + qp->s_flags |= QIB_S_RESP_PENDING; + qib_schedule_send(qp); + + goto srunlock; + } + + default: + /* NAK unknown opcodes. */ + goto nack_inv; + } + qp->r_psn++; + qp->r_state = opcode; + qp->r_ack_psn = psn; + qp->r_nak_state = 0; + /* Send an ACK if requested or required. */ + if (psn & (1 << 31)) + goto send_ack; + goto runlock; + +rnr_nak: + qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; + qp->r_ack_psn = qp->r_psn; + /* Queue RNR NAK for later */ + if (list_empty(&qp->rspwait)) { + qp->r_flags |= QIB_R_RSP_NAK; + atomic_inc(&qp->refcount); + list_add_tail(&qp->rspwait, &rcd->qp_wait_list); + } + goto runlock; + +nack_op_err: + qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); + qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; + qp->r_ack_psn = qp->r_psn; + /* Queue NAK for later */ + if (list_empty(&qp->rspwait)) { + qp->r_flags |= QIB_R_RSP_NAK; + atomic_inc(&qp->refcount); + list_add_tail(&qp->rspwait, &rcd->qp_wait_list); + } + goto runlock; + +nack_inv_unlck: + spin_unlock_irqrestore(&qp->s_lock, flags); +nack_inv: + qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); + qp->r_nak_state = IB_NAK_INVALID_REQUEST; + qp->r_ack_psn = qp->r_psn; + /* Queue NAK for later */ + if (list_empty(&qp->rspwait)) { + qp->r_flags |= QIB_R_RSP_NAK; + atomic_inc(&qp->refcount); + list_add_tail(&qp->rspwait, &rcd->qp_wait_list); + } + goto runlock; + +nack_acc_unlck: + spin_unlock_irqrestore(&qp->s_lock, flags); +nack_acc: + qib_rc_error(qp, IB_WC_LOC_PROT_ERR); + qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; + qp->r_ack_psn = qp->r_psn; +send_ack: + qib_send_rc_ack(qp); +runlock: + spin_unlock(&qp->r_lock); + return; + +srunlock: + spin_unlock_irqrestore(&qp->s_lock, flags); + spin_unlock(&qp->r_lock); + return; + +sunlock: + spin_unlock_irqrestore(&qp->s_lock, flags); +} diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c new file mode 100644 index 000000000000..eb78d9367f06 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -0,0 +1,817 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/spinlock.h> + +#include "qib.h" +#include "qib_mad.h" + +/* + * Convert the AETH RNR timeout code into the number of microseconds. + */ +const u32 ib_qib_rnr_table[32] = { + 655360, /* 00: 655.36 */ + 10, /* 01: .01 */ + 20, /* 02 .02 */ + 30, /* 03: .03 */ + 40, /* 04: .04 */ + 60, /* 05: .06 */ + 80, /* 06: .08 */ + 120, /* 07: .12 */ + 160, /* 08: .16 */ + 240, /* 09: .24 */ + 320, /* 0A: .32 */ + 480, /* 0B: .48 */ + 640, /* 0C: .64 */ + 960, /* 0D: .96 */ + 1280, /* 0E: 1.28 */ + 1920, /* 0F: 1.92 */ + 2560, /* 10: 2.56 */ + 3840, /* 11: 3.84 */ + 5120, /* 12: 5.12 */ + 7680, /* 13: 7.68 */ + 10240, /* 14: 10.24 */ + 15360, /* 15: 15.36 */ + 20480, /* 16: 20.48 */ + 30720, /* 17: 30.72 */ + 40960, /* 18: 40.96 */ + 61440, /* 19: 61.44 */ + 81920, /* 1A: 81.92 */ + 122880, /* 1B: 122.88 */ + 163840, /* 1C: 163.84 */ + 245760, /* 1D: 245.76 */ + 327680, /* 1E: 327.68 */ + 491520 /* 1F: 491.52 */ +}; + +/* + * Validate a RWQE and fill in the SGE state. + * Return 1 if OK. + */ +static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) +{ + int i, j, ret; + struct ib_wc wc; + struct qib_lkey_table *rkt; + struct qib_pd *pd; + struct qib_sge_state *ss; + + rkt = &to_idev(qp->ibqp.device)->lk_table; + pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); + ss = &qp->r_sge; + ss->sg_list = qp->r_sg_list; + qp->r_len = 0; + for (i = j = 0; i < wqe->num_sge; i++) { + if (wqe->sg_list[i].length == 0) + continue; + /* Check LKEY */ + if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, + &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) + goto bad_lkey; + qp->r_len += wqe->sg_list[i].length; + j++; + } + ss->num_sge = j; + ss->total_len = qp->r_len; + ret = 1; + goto bail; + +bad_lkey: + while (j) { + struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; + + atomic_dec(&sge->mr->refcount); + } + ss->num_sge = 0; + memset(&wc, 0, sizeof(wc)); + wc.wr_id = wqe->wr_id; + wc.status = IB_WC_LOC_PROT_ERR; + wc.opcode = IB_WC_RECV; + wc.qp = &qp->ibqp; + /* Signal solicited completion event. 
*/ + qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + ret = 0; +bail: + return ret; +} + +/** + * qib_get_rwqe - copy the next RWQE into the QP's RWQE + * @qp: the QP + * @wr_id_only: update qp->r_wr_id only, not qp->r_sge + * + * Return -1 if there is a local error, 0 if no RWQE is available, + * otherwise return 1. + * + * Can be called from interrupt level. + */ +int qib_get_rwqe(struct qib_qp *qp, int wr_id_only) +{ + unsigned long flags; + struct qib_rq *rq; + struct qib_rwq *wq; + struct qib_srq *srq; + struct qib_rwqe *wqe; + void (*handler)(struct ib_event *, void *); + u32 tail; + int ret; + + if (qp->ibqp.srq) { + srq = to_isrq(qp->ibqp.srq); + handler = srq->ibsrq.event_handler; + rq = &srq->rq; + } else { + srq = NULL; + handler = NULL; + rq = &qp->r_rq; + } + + spin_lock_irqsave(&rq->lock, flags); + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { + ret = 0; + goto unlock; + } + + wq = rq->wq; + tail = wq->tail; + /* Validate tail before using it since it is user writable. */ + if (tail >= rq->size) + tail = 0; + if (unlikely(tail == wq->head)) { + ret = 0; + goto unlock; + } + /* Make sure entry is read after head index is read. */ + smp_rmb(); + wqe = get_rwqe_ptr(rq, tail); + /* + * Even though we update the tail index in memory, the verbs + * consumer is not supposed to post more entries until a + * completion is generated. + */ + if (++tail >= rq->size) + tail = 0; + wq->tail = tail; + if (!wr_id_only && !qib_init_sge(qp, wqe)) { + ret = -1; + goto unlock; + } + qp->r_wr_id = wqe->wr_id; + + ret = 1; + set_bit(QIB_R_WRID_VALID, &qp->r_aflags); + if (handler) { + u32 n; + + /* + * Validate head pointer value and compute + * the number of remaining WQEs. + */ + n = wq->head; + if (n >= rq->size) + n = 0; + if (n < tail) + n += rq->size - tail; + else + n -= tail; + if (n < srq->limit) { + struct ib_event ev; + + srq->limit = 0; + spin_unlock_irqrestore(&rq->lock, flags); + ev.device = qp->ibqp.device; + ev.element.srq = qp->ibqp.srq; + ev.event = IB_EVENT_SRQ_LIMIT_REACHED; + handler(&ev, srq->ibsrq.srq_context); + goto bail; + } + } +unlock: + spin_unlock_irqrestore(&rq->lock, flags); +bail: + return ret; +} + +/* + * Switch to alternate path. + * The QP s_lock should be held and interrupts disabled. + */ +void qib_migrate_qp(struct qib_qp *qp) +{ + struct ib_event ev; + + qp->s_mig_state = IB_MIG_MIGRATED; + qp->remote_ah_attr = qp->alt_ah_attr; + qp->port_num = qp->alt_ah_attr.port_num; + qp->s_pkey_index = qp->s_alt_pkey_index; + + ev.device = qp->ibqp.device; + ev.element.qp = &qp->ibqp; + ev.event = IB_EVENT_PATH_MIG; + qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); +} + +static __be64 get_sguid(struct qib_ibport *ibp, unsigned index) +{ + if (!index) { + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + + return ppd->guid; + } else + return ibp->guids[index - 1]; +} + +static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) +{ + return (gid->global.interface_id == id && + (gid->global.subnet_prefix == gid_prefix || + gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX)); +} + +/* + * + * This should be called with the QP s_lock held. 
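 */
/*
 * Illustrative sketch, not part of the patch: the SRQ limit test above
 * counts the RWQEs still posted in the circular receive queue, clamping
 * the user-writable head index first:
 */
#include <stdint.h>

static uint32_t rwqes_pending_sketch(uint32_t head, uint32_t tail,
				     uint32_t size)
{
	if (head >= size)
		head = 0;
	return (head >= tail) ? head - tail : head + size - tail;
}
/*
 * qib_ruc_check_hdr - validate the LRH/GRH/BTH fields against the QP.
 * This should be called with the QP s_lock held.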
+ */ +int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, + int has_grh, struct qib_qp *qp, u32 bth0) +{ + __be64 guid; + + if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { + if (!has_grh) { + if (qp->alt_ah_attr.ah_flags & IB_AH_GRH) + goto err; + } else { + if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH)) + goto err; + guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index); + if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) + goto err; + if (!gid_ok(&hdr->u.l.grh.sgid, + qp->alt_ah_attr.grh.dgid.global.subnet_prefix, + qp->alt_ah_attr.grh.dgid.global.interface_id)) + goto err; + } + if (!qib_pkey_ok((u16)bth0, + qib_get_pkey(ibp, qp->s_alt_pkey_index))) { + qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, + (u16)bth0, + (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, + 0, qp->ibqp.qp_num, + hdr->lrh[3], hdr->lrh[1]); + goto err; + } + /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */ + if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || + ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) + goto err; + qib_migrate_qp(qp); + } else { + if (!has_grh) { + if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) + goto err; + } else { + if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) + goto err; + guid = get_sguid(ibp, + qp->remote_ah_attr.grh.sgid_index); + if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) + goto err; + if (!gid_ok(&hdr->u.l.grh.sgid, + qp->remote_ah_attr.grh.dgid.global.subnet_prefix, + qp->remote_ah_attr.grh.dgid.global.interface_id)) + goto err; + } + if (!qib_pkey_ok((u16)bth0, + qib_get_pkey(ibp, qp->s_pkey_index))) { + qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, + (u16)bth0, + (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, + 0, qp->ibqp.qp_num, + hdr->lrh[3], hdr->lrh[1]); + goto err; + } + /* Validate the SLID. See Ch. 9.6.1.5 */ + if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid || + ppd_from_ibp(ibp)->port != qp->port_num) + goto err; + if (qp->s_mig_state == IB_MIG_REARM && + !(bth0 & IB_BTH_MIG_REQ)) + qp->s_mig_state = IB_MIG_ARMED; + } + + return 0; + +err: + return 1; +} + +/** + * qib_ruc_loopback - handle UC and RC loopback requests + * @sqp: the sending QP + * + * This is called from qib_do_send() to + * forward a WQE addressed to the same HCA. + * Note that although we are single threaded due to the tasklet, we still + * have to protect against post_send(). We don't have to worry about + * receive interrupts since this is a connected protocol and all packets + * will pass through here. + */ +static void qib_ruc_loopback(struct qib_qp *sqp) +{ + struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); + struct qib_qp *qp; + struct qib_swqe *wqe; + struct qib_sge *sge; + unsigned long flags; + struct ib_wc wc; + u64 sdata; + atomic64_t *maddr; + enum ib_wc_status send_status; + int release; + int ret; + + /* + * Note that we check the responder QP state after + * checking the requester's state. + */ + qp = qib_lookup_qpn(ibp, sqp->remote_qpn); + + spin_lock_irqsave(&sqp->s_lock, flags); + + /* Return if we are already busy processing a work request. */ + if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) || + !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND)) + goto unlock; + + sqp->s_flags |= QIB_S_BUSY; + +again: + if (sqp->s_last == sqp->s_head) + goto clr_busy; + wqe = get_swqe_ptr(sqp, sqp->s_last); + + /* Return if it is not OK to start a new work request. 
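+ * (Editor's note: QIB_PROCESS_NEXT_SEND_OK is clear once the QP is no
+ * longer in a send-capable state; if QIB_FLUSH_SEND is set instead, the
+ * code below completes the WQE with IB_WC_WR_FLUSH_ERR.)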
*/ + if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) { + if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND)) + goto clr_busy; + /* We are in the error state, flush the work request. */ + send_status = IB_WC_WR_FLUSH_ERR; + goto flush_send; + } + + /* + * We can rely on the entry not changing without the s_lock + * being held until we update s_last. + * We increment s_cur to indicate s_last is in progress. + */ + if (sqp->s_last == sqp->s_cur) { + if (++sqp->s_cur >= sqp->s_size) + sqp->s_cur = 0; + } + spin_unlock_irqrestore(&sqp->s_lock, flags); + + if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) || + qp->ibqp.qp_type != sqp->ibqp.qp_type) { + ibp->n_pkt_drops++; + /* + * For RC, the requester would timeout and retry so + * shortcut the timeouts and just signal too many retries. + */ + if (sqp->ibqp.qp_type == IB_QPT_RC) + send_status = IB_WC_RETRY_EXC_ERR; + else + send_status = IB_WC_SUCCESS; + goto serr; + } + + memset(&wc, 0, sizeof wc); + send_status = IB_WC_SUCCESS; + + release = 1; + sqp->s_sge.sge = wqe->sg_list[0]; + sqp->s_sge.sg_list = wqe->sg_list + 1; + sqp->s_sge.num_sge = wqe->wr.num_sge; + sqp->s_len = wqe->length; + switch (wqe->wr.opcode) { + case IB_WR_SEND_WITH_IMM: + wc.wc_flags = IB_WC_WITH_IMM; + wc.ex.imm_data = wqe->wr.ex.imm_data; + /* FALLTHROUGH */ + case IB_WR_SEND: + ret = qib_get_rwqe(qp, 0); + if (ret < 0) + goto op_err; + if (!ret) + goto rnr_nak; + break; + + case IB_WR_RDMA_WRITE_WITH_IMM: + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) + goto inv_err; + wc.wc_flags = IB_WC_WITH_IMM; + wc.ex.imm_data = wqe->wr.ex.imm_data; + ret = qib_get_rwqe(qp, 1); + if (ret < 0) + goto op_err; + if (!ret) + goto rnr_nak; + /* FALLTHROUGH */ + case IB_WR_RDMA_WRITE: + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) + goto inv_err; + if (wqe->length == 0) + break; + if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length, + wqe->wr.wr.rdma.remote_addr, + wqe->wr.wr.rdma.rkey, + IB_ACCESS_REMOTE_WRITE))) + goto acc_err; + qp->r_sge.sg_list = NULL; + qp->r_sge.num_sge = 1; + qp->r_sge.total_len = wqe->length; + break; + + case IB_WR_RDMA_READ: + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) + goto inv_err; + if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, + wqe->wr.wr.rdma.remote_addr, + wqe->wr.wr.rdma.rkey, + IB_ACCESS_REMOTE_READ))) + goto acc_err; + release = 0; + sqp->s_sge.sg_list = NULL; + sqp->s_sge.num_sge = 1; + qp->r_sge.sge = wqe->sg_list[0]; + qp->r_sge.sg_list = wqe->sg_list + 1; + qp->r_sge.num_sge = wqe->wr.num_sge; + qp->r_sge.total_len = wqe->length; + break; + + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) + goto inv_err; + if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), + wqe->wr.wr.atomic.remote_addr, + wqe->wr.wr.atomic.rkey, + IB_ACCESS_REMOTE_ATOMIC))) + goto acc_err; + /* Perform atomic OP and save result. */ + maddr = (atomic64_t *) qp->r_sge.sge.vaddr; + sdata = wqe->wr.wr.atomic.compare_add; + *(u64 *) sqp->s_sge.sge.vaddr = + (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 
+ (u64) atomic64_add_return(sdata, maddr) - sdata : + (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, + sdata, wqe->wr.wr.atomic.swap); + atomic_dec(&qp->r_sge.sge.mr->refcount); + qp->r_sge.num_sge = 0; + goto send_comp; + + default: + send_status = IB_WC_LOC_QP_OP_ERR; + goto serr; + } + + sge = &sqp->s_sge.sge; + while (sqp->s_len) { + u32 len = sqp->s_len; + + if (len > sge->length) + len = sge->length; + if (len > sge->sge_length) + len = sge->sge_length; + BUG_ON(len == 0); + qib_copy_sge(&qp->r_sge, sge->vaddr, len, release); + sge->vaddr += len; + sge->length -= len; + sge->sge_length -= len; + if (sge->sge_length == 0) { + if (!release) + atomic_dec(&sge->mr->refcount); + if (--sqp->s_sge.num_sge) + *sge = *sqp->s_sge.sg_list++; + } else if (sge->length == 0 && sge->mr->lkey) { + if (++sge->n >= QIB_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + break; + sge->n = 0; + } + sge->vaddr = + sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = + sge->mr->map[sge->m]->segs[sge->n].length; + } + sqp->s_len -= len; + } + if (release) + while (qp->r_sge.num_sge) { + atomic_dec(&qp->r_sge.sge.mr->refcount); + if (--qp->r_sge.num_sge) + qp->r_sge.sge = *qp->r_sge.sg_list++; + } + + if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) + goto send_comp; + + if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) + wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; + else + wc.opcode = IB_WC_RECV; + wc.wr_id = qp->r_wr_id; + wc.status = IB_WC_SUCCESS; + wc.byte_len = wqe->length; + wc.qp = &qp->ibqp; + wc.src_qp = qp->remote_qpn; + wc.slid = qp->remote_ah_attr.dlid; + wc.sl = qp->remote_ah_attr.sl; + wc.port_num = 1; + /* Signal completion event if the solicited bit is set. */ + qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + wqe->wr.send_flags & IB_SEND_SOLICITED); + +send_comp: + spin_lock_irqsave(&sqp->s_lock, flags); + ibp->n_loop_pkts++; +flush_send: + sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; + qib_send_complete(sqp, wqe, send_status); + goto again; + +rnr_nak: + /* Handle RNR NAK */ + if (qp->ibqp.qp_type == IB_QPT_UC) + goto send_comp; + ibp->n_rnr_naks++; + /* + * Note: we don't need the s_lock held since the BUSY flag + * makes this single threaded. 
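+ *
+ * Editor's worked example: the back-off below uses ib_qib_rnr_table[]
+ * to convert the responder's min-RNR code to microseconds; code 0x0E
+ * maps to 1280, so s_timer is armed at roughly
+ * jiffies + usecs_to_jiffies(1280), i.e. about 1.28 ms out.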
+ */ + if (sqp->s_rnr_retry == 0) { + send_status = IB_WC_RNR_RETRY_EXC_ERR; + goto serr; + } + if (sqp->s_rnr_retry_cnt < 7) + sqp->s_rnr_retry--; + spin_lock_irqsave(&sqp->s_lock, flags); + if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK)) + goto clr_busy; + sqp->s_flags |= QIB_S_WAIT_RNR; + sqp->s_timer.function = qib_rc_rnr_retry; + sqp->s_timer.expires = jiffies + + usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]); + add_timer(&sqp->s_timer); + goto clr_busy; + +op_err: + send_status = IB_WC_REM_OP_ERR; + wc.status = IB_WC_LOC_QP_OP_ERR; + goto err; + +inv_err: + send_status = IB_WC_REM_INV_REQ_ERR; + wc.status = IB_WC_LOC_QP_OP_ERR; + goto err; + +acc_err: + send_status = IB_WC_REM_ACCESS_ERR; + wc.status = IB_WC_LOC_PROT_ERR; +err: + /* responder goes to error state */ + qib_rc_error(qp, wc.status); + +serr: + spin_lock_irqsave(&sqp->s_lock, flags); + qib_send_complete(sqp, wqe, send_status); + if (sqp->ibqp.qp_type == IB_QPT_RC) { + int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + + sqp->s_flags &= ~QIB_S_BUSY; + spin_unlock_irqrestore(&sqp->s_lock, flags); + if (lastwqe) { + struct ib_event ev; + + ev.device = sqp->ibqp.device; + ev.element.qp = &sqp->ibqp; + ev.event = IB_EVENT_QP_LAST_WQE_REACHED; + sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context); + } + goto done; + } +clr_busy: + sqp->s_flags &= ~QIB_S_BUSY; +unlock: + spin_unlock_irqrestore(&sqp->s_lock, flags); +done: + if (qp && atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); +} + +/** + * qib_make_grh - construct a GRH header + * @ibp: a pointer to the IB port + * @hdr: a pointer to the GRH header being constructed + * @grh: the global route address to send to + * @hwords: the number of 32 bit words of header being sent + * @nwords: the number of 32 bit words of data being sent + * + * Return the size of the header in 32 bit words. + */ +u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr, + struct ib_global_route *grh, u32 hwords, u32 nwords) +{ + hdr->version_tclass_flow = + cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) | + (grh->traffic_class << IB_GRH_TCLASS_SHIFT) | + (grh->flow_label << IB_GRH_FLOW_SHIFT)); + hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2); + /* next_hdr is defined by C8-7 in ch. 8.4.1 */ + hdr->next_hdr = IB_GRH_NEXT_HDR; + hdr->hop_limit = grh->hop_limit; + /* The SGID is 32-bit aligned. */ + hdr->sgid.global.subnet_prefix = ibp->gid_prefix; + hdr->sgid.global.interface_id = grh->sgid_index ? + ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid; + hdr->dgid = grh->dgid; + + /* GRH header size in 32-bit words. */ + return sizeof(struct ib_grh) / sizeof(u32); +} + +void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, + u32 bth0, u32 bth2) +{ + struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); + u16 lrh0; + u32 nwords; + u32 extra_bytes; + + /* Construct the header. 
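+ * Editor's worked example for the padding math below: extra_bytes =
+ * -s_cur_size & 3 rounds the payload up to a 32-bit boundary, so a
+ * 13-byte payload gets 3 pad bytes and nwords = (13 + 3) >> 2 = 4.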
*/ + extra_bytes = -qp->s_cur_size & 3; + nwords = (qp->s_cur_size + extra_bytes) >> 2; + lrh0 = QIB_LRH_BTH; + if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { + qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh, + &qp->remote_ah_attr.grh, + qp->s_hdrwords, nwords); + lrh0 = QIB_LRH_GRH; + } + lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | + qp->remote_ah_attr.sl << 4; + qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); + qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); + qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); + qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | + qp->remote_ah_attr.src_path_bits); + bth0 |= qib_get_pkey(ibp, qp->s_pkey_index); + bth0 |= extra_bytes << 20; + if (qp->s_mig_state == IB_MIG_MIGRATED) + bth0 |= IB_BTH_MIG_REQ; + ohdr->bth[0] = cpu_to_be32(bth0); + ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); + ohdr->bth[2] = cpu_to_be32(bth2); +} + +/** + * qib_do_send - perform a send on a QP + * @work: contains a pointer to the QP + * + * Process entries in the send work queue until credit or queue is + * exhausted. Only allow one CPU to send a packet per QP (tasklet). + * Otherwise, two threads could send packets out of order. + */ +void qib_do_send(struct work_struct *work) +{ + struct qib_qp *qp = container_of(work, struct qib_qp, s_work); + struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + int (*make_req)(struct qib_qp *qp); + unsigned long flags; + + if ((qp->ibqp.qp_type == IB_QPT_RC || + qp->ibqp.qp_type == IB_QPT_UC) && + (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) { + qib_ruc_loopback(qp); + return; + } + + if (qp->ibqp.qp_type == IB_QPT_RC) + make_req = qib_make_rc_req; + else if (qp->ibqp.qp_type == IB_QPT_UC) + make_req = qib_make_uc_req; + else + make_req = qib_make_ud_req; + + spin_lock_irqsave(&qp->s_lock, flags); + + /* Return if we are already busy processing a work request. */ + if (!qib_send_ok(qp)) { + spin_unlock_irqrestore(&qp->s_lock, flags); + return; + } + + qp->s_flags |= QIB_S_BUSY; + + spin_unlock_irqrestore(&qp->s_lock, flags); + + do { + /* Check for a constructed packet to be sent. */ + if (qp->s_hdrwords != 0) { + /* + * If the packet cannot be sent now, return and + * the send tasklet will be woken up later. + */ + if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords, + qp->s_cur_sge, qp->s_cur_size)) + break; + /* Record that s_hdr is empty. */ + qp->s_hdrwords = 0; + } + } while (make_req(qp)); +} + +/* + * This should be called with s_lock held. + */ +void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, + enum ib_wc_status status) +{ + u32 old_last, last; + unsigned i; + + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) + return; + + for (i = 0; i < wqe->wr.num_sge; i++) { + struct qib_sge *sge = &wqe->sg_list[i]; + + atomic_dec(&sge->mr->refcount); + } + if (qp->ibqp.qp_type == IB_QPT_UD || + qp->ibqp.qp_type == IB_QPT_SMI || + qp->ibqp.qp_type == IB_QPT_GSI) + atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); + + /* See ch. 
11.2.4.1 and 10.7.3.1 */ + if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || + (wqe->wr.send_flags & IB_SEND_SIGNALED) || + status != IB_WC_SUCCESS) { + struct ib_wc wc; + + memset(&wc, 0, sizeof wc); + wc.wr_id = wqe->wr.wr_id; + wc.status = status; + wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; + wc.qp = &qp->ibqp; + if (status == IB_WC_SUCCESS) + wc.byte_len = wqe->length; + qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, + status != IB_WC_SUCCESS); + } + + last = qp->s_last; + old_last = last; + if (++last >= qp->s_size) + last = 0; + qp->s_last = last; + if (qp->s_acked == old_last) + qp->s_acked = last; + if (qp->s_cur == old_last) + qp->s_cur = last; + if (qp->s_tail == old_last) + qp->s_tail = last; + if (qp->state == IB_QPS_SQD && last == qp->s_cur) + qp->s_draining = 0; +} diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 2a68d9f624dd..0aeed0e74cb6 100644 --- a/drivers/infiniband/hw/ipath/ipath_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -32,22 +32,40 @@ */ /* * This file contains all of the code that is specific to the SerDes - * on the InfiniPath 7220 chip. + * on the QLogic_IB 7220 chip. */ #include <linux/pci.h> #include <linux/delay.h> -#include "ipath_kernel.h" -#include "ipath_registers.h" -#include "ipath_7220.h" +#include "qib.h" +#include "qib_7220.h" + +/* + * Same as in qib_iba7220.c, but just the registers needed here. + * Could move whole set to qib_7220.h, but decided better to keep + * local. + */ +#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64)) +#define kr_hwerrclear KREG_IDX(HwErrClear) +#define kr_hwerrmask KREG_IDX(HwErrMask) +#define kr_hwerrstatus KREG_IDX(HwErrStatus) +#define kr_ibcstatus KREG_IDX(IBCStatus) +#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl) +#define kr_scratch KREG_IDX(Scratch) +#define kr_xgxs_cfg KREG_IDX(XGXSCfg) +/* these are used only here, not in qib_iba7220.c */ +#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl) +#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg) +#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg) +#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl) +#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0) /* * The IBSerDesMappTable is a memory that holds values to be stored in - * various SerDes registers by IBC. It is not part of the normal kregs - * map and is used in exactly one place, hence the #define below. + * various SerDes registers by IBC. */ -#define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t))) +#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable) /* * Below used for sdnum parameter, selecting one of the two sections @@ -71,42 +89,37 @@ #define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8)) /* Forward declarations. 
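* (Editor's note: these are grouped here because the EPB helpers and
* the init/monitor routines below call one another ahead of their
* definitions.)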
*/ -static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, - u32 data, u32 mask); -static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, +static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc, + u32 data, u32 mask); +static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, int mask); -static int ipath_sd_trimdone_poll(struct ipath_devdata *dd); -static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, - const char *where); -static int ipath_sd_setvals(struct ipath_devdata *dd); -static int ipath_sd_early(struct ipath_devdata *dd); -static int ipath_sd_dactrim(struct ipath_devdata *dd); -/* Set the registers that IBC may muck with to their default "preset" values */ -int ipath_sd7220_presets(struct ipath_devdata *dd); -static int ipath_internal_presets(struct ipath_devdata *dd); +static int qib_sd_trimdone_poll(struct qib_devdata *dd); +static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where); +static int qib_sd_setvals(struct qib_devdata *dd); +static int qib_sd_early(struct qib_devdata *dd); +static int qib_sd_dactrim(struct qib_devdata *dd); +static int qib_internal_presets(struct qib_devdata *dd); /* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */ -static int ipath_sd_trimself(struct ipath_devdata *dd, int val); -static int epb_access(struct ipath_devdata *dd, int sdnum, int claim); - -void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup); +static int qib_sd_trimself(struct qib_devdata *dd, int val); +static int epb_access(struct qib_devdata *dd, int sdnum, int claim); /* * Below keeps track of whether the "once per power-on" initialization has * been done, because uC code Version 1.32.17 or higher allows the uC to * be reset at will, and Automatic Equalization may require it. So the - * state of the reset "pin", as reflected in was_reset parameter to - * ipath_sd7220_init() is no longer valid. Instead, we check for the + * state of the reset "pin", is no longer valid. Instead, we check for the * actual uC code having been loaded. */ -static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd) +static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd) { - if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0)) - dd->serdes_first_init_done = 1; - return dd->serdes_first_init_done; + struct qib_devdata *dd = ppd->dd; + if (!dd->cspec->serdes_first_init_done && (qib_sd7220_ib_vfy(dd) > 0)) + dd->cspec->serdes_first_init_done = 1; + return dd->cspec->serdes_first_init_done; } -/* repeat #define for local use. "Real" #define is in ipath_iba7220.c */ -#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL +/* repeat #define for local use. 
"Real" #define is in qib_iba7220.c */ +#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL #define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF)) #define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF)) #define UC_PAR_CLR_D 8 @@ -114,25 +127,25 @@ static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd) #define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS) #define START_EQ1(chan) EPB_LOC(chan, 7, 0x27) -void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd) +void qib_sd7220_clr_ibpar(struct qib_devdata *dd) { int ret; /* clear, then re-enable parity errs */ - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, UC_PAR_CLR_D, UC_PAR_CLR_M); if (ret < 0) { - ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n"); + qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n"); goto bail; } - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0, + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0, UC_PAR_CLR_M); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); + qib_read_kreg32(dd, kr_scratch); udelay(4); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, - INFINIPATH_HWE_IB_UC_MEMORYPARITYERR); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); + qib_write_kreg(dd, kr_hwerrclear, + QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); + qib_read_kreg32(dd, kr_scratch); bail: return; } @@ -146,7 +159,7 @@ bail: #define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS) #define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS) -static int ipath_resync_ibepb(struct ipath_devdata *dd) +static int qib_resync_ibepb(struct qib_devdata *dd) { int ret, pat, tries, chn; u32 loc; @@ -155,43 +168,42 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd) chn = 0; for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) { loc = IB_PGUDP(chn); - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); if (ret < 0) { - ipath_dev_err(dd, "Failed read in resync\n"); + qib_dev_err(dd, "Failed read in resync\n"); continue; } if (ret != 0xF0 && ret != 0x55 && tries == 0) - ipath_dev_err(dd, "unexpected pattern in resync\n"); + qib_dev_err(dd, "unexpected pattern in resync\n"); pat = ret ^ 0xA5; /* alternate F0 and 55 */ - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF); + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF); if (ret < 0) { - ipath_dev_err(dd, "Failed write in resync\n"); + qib_dev_err(dd, "Failed write in resync\n"); continue; } - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); if (ret < 0) { - ipath_dev_err(dd, "Failed re-read in resync\n"); + qib_dev_err(dd, "Failed re-read in resync\n"); continue; } if (ret != pat) { - ipath_dev_err(dd, "Failed compare1 in resync\n"); + qib_dev_err(dd, "Failed compare1 in resync\n"); continue; } loc = IB_CMUDONE(chn); - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); if (ret < 0) { - ipath_dev_err(dd, "Failed CMUDONE rd in resync\n"); + qib_dev_err(dd, "Failed CMUDONE rd in resync\n"); continue; } if ((ret & 0x70) != ((chn << 4) | 0x40)) { - ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n", - ret, chn); + qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n", + ret, chn); continue; } if (++chn == 4) break; /* Success */ } - ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries); return (ret > 0) ? 
0 : ret; } @@ -199,32 +211,32 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd) * Localize the stuff that should be done to change IB uC reset * returns <0 for errors. */ -static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst) +static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst) { u64 rst_val; int ret = 0; unsigned long flags; - rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl); + rst_val = qib_read_kreg64(dd, kr_ibserdesctrl); if (assert_rst) { /* * Vendor recommends "interrupting" uC before reset, to * minimize possible glitches. */ - spin_lock_irqsave(&dd->ipath_sdepb_lock, flags); + spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); epb_access(dd, IB_7220_SERDES, 1); rst_val |= 1ULL; /* Squelch possible parity error from _asserting_ reset */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, - dd->ipath_hwerrmask & - ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR); - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val); + qib_write_kreg(dd, kr_hwerrmask, + dd->cspec->hwerrmask & + ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); + qib_write_kreg(dd, kr_ibserdesctrl, rst_val); /* flush write, delay to ensure it took effect */ - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); + qib_read_kreg32(dd, kr_scratch); udelay(2); /* once it's reset, can remove interrupt */ epb_access(dd, IB_7220_SERDES, -1); - spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); + spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); } else { /* * Before we de-assert reset, we need to deal with @@ -235,46 +247,46 @@ static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst) */ u64 val; rst_val &= ~(1ULL); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, - dd->ipath_hwerrmask & - ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR); + qib_write_kreg(dd, kr_hwerrmask, + dd->cspec->hwerrmask & + ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); - ret = ipath_resync_ibepb(dd); + ret = qib_resync_ibepb(dd); if (ret < 0) - ipath_dev_err(dd, "unable to re-sync IB EPB\n"); + qib_dev_err(dd, "unable to re-sync IB EPB\n"); /* set uC control regs to suppress parity errs */ - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1); + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1); if (ret < 0) goto bail; /* IB uC code past Version 1.32.17 allow suppression of wdog */ - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80); if (ret < 0) { - ipath_dev_err(dd, "Failed to set WDOG disable\n"); + qib_dev_err(dd, "Failed to set WDOG disable\n"); goto bail; } - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val); + qib_write_kreg(dd, kr_ibserdesctrl, rst_val); /* flush write, delay for startup */ - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); + qib_read_kreg32(dd, kr_scratch); udelay(1); /* clear, then re-enable parity errs */ - ipath_sd7220_clr_ibpar(dd); - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); - if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) { - ipath_dev_err(dd, "IBUC Parity still set after RST\n"); - dd->ipath_hwerrmask &= - ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR; + qib_sd7220_clr_ibpar(dd); + val = qib_read_kreg64(dd, kr_hwerrstatus); + if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) { + qib_dev_err(dd, "IBUC Parity still set after RST\n"); + dd->cspec->hwerrmask &= + ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR; } - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, - dd->ipath_hwerrmask); + qib_write_kreg(dd, kr_hwerrmask, + 
dd->cspec->hwerrmask); } bail: return ret; } -static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, +static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where) { int ret, chn, baduns; @@ -286,69 +298,71 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, /* give time for reset to settle out in EPB */ udelay(2); - ret = ipath_resync_ibepb(dd); + ret = qib_resync_ibepb(dd); if (ret < 0) - ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where); + qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where); /* Do "sacrificial read" to get EPB in sane state after reset */ - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0); + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0); if (ret < 0) - ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where); + qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where); /* Check/show "summary" Trim-done bit in IBCStatus */ - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); - if (val & (1ULL << 11)) - ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where); - else - ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where); - + val = qib_read_kreg64(dd, kr_ibcstatus); + if (!(val & (1ULL << 11))) + qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where); + /* + * Do "dummy read/mod/wr" to get EPB in sane state after reset + * The default value for MPREG6 is 0. + */ udelay(2); - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80); + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80); if (ret < 0) - ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where); + qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where); udelay(10); baduns = 0; for (chn = 3; chn >= 0; --chn) { /* Read CTRL reg for each channel to check TRIMDONE */ - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(chn), 0, 0); if (ret < 0) - ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d" - " (%s)\n", chn, where); + qib_dev_err(dd, "Failed checking TRIMDONE, chn %d" + " (%s)\n", chn, where); if (!(ret & 0x10)) { int probe; + baduns |= (1 << chn); - ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." + qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." 
" (%s)\n", chn, ret, where); - probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, + probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_PGUDP(0), 0, 0); - ipath_dev_err(dd, "probe is %d (%02X)\n", + qib_dev_err(dd, "probe is %d (%02X)\n", probe, probe); - probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, + probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(chn), 0, 0); - ipath_dev_err(dd, "re-read: %d (%02X)\n", + qib_dev_err(dd, "re-read: %d (%02X)\n", probe, probe); - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(chn), 0x10, 0x10); if (ret < 0) - ipath_dev_err(dd, + qib_dev_err(dd, "Err on TRIMDONE rewrite1\n"); } } for (chn = 3; chn >= 0; --chn) { /* Read CTRL reg for each channel to check TRIMDONE */ if (baduns & (1 << chn)) { - ipath_dev_err(dd, + qib_dev_err(dd, "Reseting TRIMDONE on chn %d (%s)\n", chn, where); - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(chn), 0x10, 0x10); if (ret < 0) - ipath_dev_err(dd, "Failed re-setting " + qib_dev_err(dd, "Failed re-setting " "TRIMDONE, chn %d (%s)\n", chn, where); } @@ -361,96 +375,86 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, * Post IB uC code version 1.32.17, was_reset being 1 is not really * informative, so we double-check. */ -int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset) +int qib_sd7220_init(struct qib_devdata *dd) { int ret = 1; /* default to failure */ - int first_reset; - int val_stat; + int first_reset, was_reset; + /* SERDES MPU reset recorded in D0 */ + was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1); if (!was_reset) { /* entered with reset not asserted, we need to do it */ - ipath_ibsd_reset(dd, 1); - ipath_sd_trimdone_monitor(dd, "Driver-reload"); + qib_ibsd_reset(dd, 1); + qib_sd_trimdone_monitor(dd, "Driver-reload"); } - /* Substitute our deduced value for was_reset */ - ret = ipath_ibsd_ucode_loaded(dd); - if (ret < 0) { - ret = 1; - goto done; - } - first_reset = !ret; /* First reset if IBSD uCode not yet loaded */ + ret = qib_ibsd_ucode_loaded(dd->pport); + if (ret < 0) + goto bail; + first_reset = !ret; /* First reset if IBSD uCode not yet loaded */ /* * Alter some regs per vendor latest doc, reset-defaults * are not right for IB. */ - ret = ipath_sd_early(dd); + ret = qib_sd_early(dd); if (ret < 0) { - ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n"); - ret = 1; - goto done; + qib_dev_err(dd, "Failed to set IB SERDES early defaults\n"); + goto bail; } - /* * Set DAC manual trim IB. * We only do this once after chip has been reset (usually * same as once per system boot). */ if (first_reset) { - ret = ipath_sd_dactrim(dd); + ret = qib_sd_dactrim(dd); if (ret < 0) { - ipath_dev_err(dd, "Failed IB SERDES DAC trim\n"); - ret = 1; - goto done; + qib_dev_err(dd, "Failed IB SERDES DAC trim\n"); + goto bail; } } - /* * Set various registers (DDS and RXEQ) that will be * controlled by IBC (in 1.2 mode) to reasonable preset values * Calling the "internal" version avoids the "check for needed" * and "trimdone monitor" that might be counter-productive. 
*/ - ret = ipath_internal_presets(dd); + ret = qib_internal_presets(dd); if (ret < 0) { - ipath_dev_err(dd, "Failed to set IB SERDES presets\n"); - ret = 1; - goto done; + qib_dev_err(dd, "Failed to set IB SERDES presets\n"); + goto bail; } - ret = ipath_sd_trimself(dd, 0x80); + ret = qib_sd_trimself(dd, 0x80); if (ret < 0) { - ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n"); - ret = 1; - goto done; + qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n"); + goto bail; } /* Load image, then try to verify */ - ret = 0; /* Assume success */ + ret = 0; /* Assume success */ if (first_reset) { int vfy; int trim_done; - ipath_dbg("SerDes uC was reset, reloading PRAM\n"); - ret = ipath_sd7220_ib_load(dd); + + ret = qib_sd7220_ib_load(dd); if (ret < 0) { - ipath_dev_err(dd, "Failed to load IB SERDES image\n"); - ret = 1; - goto done; - } + qib_dev_err(dd, "Failed to load IB SERDES image\n"); + goto bail; + } else { + /* Loaded image, try to verify */ + vfy = qib_sd7220_ib_vfy(dd); + if (vfy != ret) { + qib_dev_err(dd, "SERDES PRAM VFY failed\n"); + goto bail; + } /* end if verified */ + } /* end if loaded */ - /* Loaded image, try to verify */ - vfy = ipath_sd7220_ib_vfy(dd); - if (vfy != ret) { - ipath_dev_err(dd, "SERDES PRAM VFY failed\n"); - ret = 1; - goto done; - } /* * Loaded and verified. Almost good... * hold "success" in ret */ ret = 0; - /* * Prev steps all worked, continue bringup * De-assert RESET to uC, only in first reset, to allow @@ -461,45 +465,47 @@ int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset) */ ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38); if (ret < 0) { - ipath_dev_err(dd, "Failed clearing START_EQ1\n"); - ret = 1; - goto done; + qib_dev_err(dd, "Failed clearing START_EQ1\n"); + goto bail; } - ipath_ibsd_reset(dd, 0); + qib_ibsd_reset(dd, 0); /* * If this is not the first reset, trimdone should be set - * already. + * already. We may need to check about this. */ - trim_done = ipath_sd_trimdone_poll(dd); + trim_done = qib_sd_trimdone_poll(dd); /* * Whether or not trimdone succeeded, we need to put the * uC back into reset to avoid a possible fight with the * IBC state-machine. */ - ipath_ibsd_reset(dd, 1); + qib_ibsd_reset(dd, 1); if (!trim_done) { - ipath_dev_err(dd, "No TRIMDONE seen\n"); - ret = 1; - goto done; + qib_dev_err(dd, "No TRIMDONE seen\n"); + goto bail; } - - ipath_sd_trimdone_monitor(dd, "First-reset"); + /* + * DEBUG: check each time we reset if trimdone bits have + * gotten cleared, and re-set them. + */ + qib_sd_trimdone_monitor(dd, "First-reset"); /* Remember so we do not re-do the load, dactrim, etc. */ - dd->serdes_first_init_done = 1; + dd->cspec->serdes_first_init_done = 1; } /* - * Setup for channel training and load values for + * setup for channel training and load values for * RxEq and DDS in tables used by IBC in IB1.2 mode */ - - val_stat = ipath_sd_setvals(dd); - if (val_stat < 0) - ret = 1; + ret = 0; + if (qib_sd_setvals(dd) >= 0) + goto done; +bail: + ret = 1; done: /* start relock timer regardless, but start at 1 second */ - ipath_set_relock_poll(dd, -1); + set_7220_relock_poll(dd, -1); return ret; } @@ -517,7 +523,7 @@ done: * the "claim" parameter is >0 to claim, <0 to release, 0 to query. * Returns <0 for errors, >0 if we had ownership, else 0. 
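*
* A typical guarded sequence (editor's sketch, mirroring the callers
* below) is:
*
*	if (epb_access(dd, IB_7220_SERDES, 1) < 0)
*		return -1;
*	...EPB transactions...
*	epb_access(dd, IB_7220_SERDES, -1);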
*/ -static int epb_access(struct ipath_devdata *dd, int sdnum, int claim) +static int epb_access(struct qib_devdata *dd, int sdnum, int claim) { u16 acc; u64 accval; @@ -525,28 +531,30 @@ u64 oct_sel = 0; switch (sdnum) { - case IB_7220_SERDES : + case IB_7220_SERDES: /* * The IB SERDES "ownership" is fairly simple. A single * request/grant each. */ - acc = dd->ipath_kregs->kr_ib_epbacc; + acc = kr_ibsd_epb_access_ctrl; break; - case PCIE_SERDES0 : - case PCIE_SERDES1 : + + case PCIE_SERDES0: + case PCIE_SERDES1: /* PCIe SERDES has two "octants", need to select which */ - acc = dd->ipath_kregs->kr_pcie_epbacc; + acc = kr_pciesd_epb_access_ctrl; oct_sel = (2 << (sdnum - PCIE_SERDES0)); break; - default : + + default: return 0; } /* Make sure any outstanding transaction was seen */ - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); + qib_read_kreg32(dd, kr_scratch); udelay(15); - accval = ipath_read_kreg32(dd, acc); + accval = qib_read_kreg32(dd, acc); owned = !!(accval & EPB_ACC_GNT); if (claim < 0) { @@ -557,22 +565,22 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim) * Both should be clear */ u64 newval = 0; - ipath_write_kreg(dd, acc, newval); + qib_write_kreg(dd, acc, newval); /* First read after write is not trustworthy */ - pollval = ipath_read_kreg32(dd, acc); + pollval = qib_read_kreg32(dd, acc); udelay(5); - pollval = ipath_read_kreg32(dd, acc); + pollval = qib_read_kreg32(dd, acc); if (pollval & EPB_ACC_GNT) owned = -1; } else if (claim > 0) { /* Need to claim */ u64 pollval; u64 newval = EPB_ACC_REQ | oct_sel; - ipath_write_kreg(dd, acc, newval); + qib_write_kreg(dd, acc, newval); /* First read after write is not trustworthy */ - pollval = ipath_read_kreg32(dd, acc); + pollval = qib_read_kreg32(dd, acc); udelay(5); - pollval = ipath_read_kreg32(dd, acc); + pollval = qib_read_kreg32(dd, acc); if (!(pollval & EPB_ACC_GNT)) owned = -1; } @@ -582,18 +590,17 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim) /* * Lemma to deal with race condition of write..read to epb regs */ -static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) +static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) { int tries; u64 transval; - - ipath_write_kreg(dd, reg, i_val); + qib_write_kreg(dd, reg, i_val); /* Throw away first read, as RDY bit may be stale */ - transval = ipath_read_kreg64(dd, reg); + transval = qib_read_kreg64(dd, reg); for (tries = EPB_TRANS_TRIES; tries; --tries) { - transval = ipath_read_kreg32(dd, reg); + transval = qib_read_kreg32(dd, reg); if (transval & EPB_TRANS_RDY) break; udelay(5); @@ -606,21 +613,20 @@ static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) } /** - * - * ipath_sd7220_reg_mod - modify SERDES register - * @dd: the infinipath device + * qib_sd7220_reg_mod - modify SERDES register + * @dd: the qlogic_ib device * @sdnum: which SERDES to access * @loc: location - channel, element, register, as packed by EPB_LOC() macro. * @wd: Write Data - value to set in register * @mask: ones where data should be spliced into reg. * - * Basic register read/modify/write, with un-needed accesses elided. That is, + * Basic register read/modify/write, with un-needed accesses elided. That is, * a mask of zero will prevent write, while a mask of 0xFF will prevent read. * returns current (presumed, if a write was done) contents of selected * register, or <0 if errors. 
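*
* Editor's illustration of the mask convention, matching the callers
* in this file:
*
*	qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);	pure read
*	qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);	pure write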
*/ -static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, - u32 wd, u32 mask) +static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc, + u32 wd, u32 mask) { u16 trans; u64 transval; @@ -629,14 +635,16 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, unsigned long flags; switch (sdnum) { - case IB_7220_SERDES : - trans = dd->ipath_kregs->kr_ib_epbtrans; + case IB_7220_SERDES: + trans = kr_ibsd_epb_transaction_reg; break; - case PCIE_SERDES0 : - case PCIE_SERDES1 : - trans = dd->ipath_kregs->kr_pcie_epbtrans; + + case PCIE_SERDES0: + case PCIE_SERDES1: + trans = kr_pciesd_epb_transaction_reg; break; - default : + + default: return -1; } @@ -644,23 +652,23 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, * All access is locked in software (vs other host threads) and * hardware (vs uC access). */ - spin_lock_irqsave(&dd->ipath_sdepb_lock, flags); + spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); owned = epb_access(dd, sdnum, 1); if (owned < 0) { - spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); + spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); return -1; } ret = 0; for (tries = EPB_TRANS_TRIES; tries; --tries) { - transval = ipath_read_kreg32(dd, trans); + transval = qib_read_kreg32(dd, trans); if (transval & EPB_TRANS_RDY) break; udelay(5); } if (tries > 0) { - tries = 1; /* to make read-skip work */ + tries = 1; /* to make read-skip work */ if (mask != 0xFF) { /* * Not a pure write, so need to read. @@ -688,7 +696,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, else ret = transval & EPB_DATA_MASK; - spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); + spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); if (tries <= 0) ret = -1; return ret; @@ -707,7 +715,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, #define EPB_RAMDATA EPB_LOC(6, 0, 5) /* Transfer date to/from uC Program RAM of IB or PCIe SerDes */ -static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, +static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc, u8 *buf, int cnt, int rd_notwr) { u16 trans; @@ -723,29 +731,28 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, /* Pick appropriate transaction reg and "Chip select" for this serdes */ switch (sdnum) { - case IB_7220_SERDES : + case IB_7220_SERDES: csbit = 1ULL << EPB_IB_UC_CS_SHF; - trans = dd->ipath_kregs->kr_ib_epbtrans; + trans = kr_ibsd_epb_transaction_reg; break; - case PCIE_SERDES0 : - case PCIE_SERDES1 : + + case PCIE_SERDES0: + case PCIE_SERDES1: /* PCIe SERDES has uC "chip select" in different bit, too */ csbit = 1ULL << EPB_PCIE_UC_CS_SHF; - trans = dd->ipath_kregs->kr_pcie_epbtrans; + trans = kr_pciesd_epb_transaction_reg; break; - default : + + default: return -1; } op = rd_notwr ? "Rd" : "Wr"; - spin_lock_irqsave(&dd->ipath_sdepb_lock, flags); + spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); owned = epb_access(dd, sdnum, 1); if (owned < 0) { - spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); - ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n", - op, (sdnum == IB_7220_SERDES) ? 
"IB" : "PCIe", - owned, loc); + spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); return -1; } @@ -758,16 +765,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, */ addr = loc & 0x1FFF; for (tries = EPB_TRANS_TRIES; tries; --tries) { - transval = ipath_read_kreg32(dd, trans); + transval = qib_read_kreg32(dd, trans); if (transval & EPB_TRANS_RDY) break; udelay(5); } sofar = 0; - if (tries <= 0) - ipath_dbg("No initial RDY on EPB access request\n"); - else { + if (tries > 0) { /* * Every "memory" access is doubly-indirect. * We set two bytes of address, then read/write @@ -778,8 +783,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, transval = csbit | EPB_UC_CTL | (rd_notwr ? EPB_ROM_R : EPB_ROM_W); tries = epb_trans(dd, trans, transval, &transval); - if (tries <= 0) - ipath_dbg("No EPB response to uC %s cmd\n", op); while (tries > 0 && sofar < cnt) { if (!sofar) { /* Only set address at start of chunk */ @@ -787,18 +790,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, transval = csbit | EPB_MADDRH | addrbyte; tries = epb_trans(dd, trans, transval, &transval); - if (tries <= 0) { - ipath_dbg("No EPB response ADDRH\n"); + if (tries <= 0) break; - } addrbyte = (addr + sofar) & 0xFF; transval = csbit | EPB_MADDRL | addrbyte; tries = epb_trans(dd, trans, transval, &transval); - if (tries <= 0) { - ipath_dbg("No EPB response ADDRL\n"); + if (tries <= 0) break; - } } if (rd_notwr) @@ -806,10 +805,8 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, else transval = csbit | EPB_ROMDATA | buf[sofar]; tries = epb_trans(dd, trans, transval, &transval); - if (tries <= 0) { - ipath_dbg("No EPB response DATA\n"); + if (tries <= 0) break; - } if (rd_notwr) buf[sofar] = transval & EPB_DATA_MASK; ++sofar; @@ -817,8 +814,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, /* Finally, clear control-bit for Read or Write */ transval = csbit | EPB_UC_CTL; tries = epb_trans(dd, trans, transval, &transval); - if (tries <= 0) - ipath_dbg("No EPB response to drop of uC %s cmd\n", op); } ret = sofar; @@ -826,18 +821,16 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, if (epb_access(dd, sdnum, -1) < 0) ret = -1; - spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); - if (tries <= 0) { - ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar); + spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); + if (tries <= 0) ret = -1; - } return ret; } #define PROG_CHUNK 64 -int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, - u8 *img, int len, int offset) +int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, + u8 *img, int len, int offset) { int cnt, sofar, req; @@ -846,7 +839,7 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, req = len - sofar; if (req > PROG_CHUNK) req = PROG_CHUNK; - cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar, + cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar, img + sofar, req, 0); if (cnt < req) { sofar = -1; @@ -860,8 +853,8 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, #define VFY_CHUNK 64 #define SD_PRAM_ERROR_LIMIT 42 -int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, - const u8 *img, int len, int offset) +int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, + const u8 *img, int len, int offset) { int cnt, sofar, req, idx, errors; unsigned char readback[VFY_CHUNK]; @@ -872,7 +865,7 @@ int 
ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, req = len - sofar; if (req > VFY_CHUNK) req = VFY_CHUNK; - cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset, + cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset, readback, req, 1); if (cnt < req) { /* failed in read itself */ @@ -888,11 +881,13 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, return errors ? -errors : sofar; } -/* IRQ not set up at this point in init, so we poll. */ +/* + * IRQ not set up at this point in init, so we poll. + */ #define IB_SERDES_TRIM_DONE (1ULL << 11) #define TRIM_TMO (30) -static int ipath_sd_trimdone_poll(struct ipath_devdata *dd) +static int qib_sd_trimdone_poll(struct qib_devdata *dd) { int trim_tmo, ret; uint64_t val; @@ -903,16 +898,15 @@ static int ipath_sd_trimdone_poll(struct ipath_devdata *dd) */ ret = 0; for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) { - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); + val = qib_read_kreg64(dd, kr_ibcstatus); if (val & IB_SERDES_TRIM_DONE) { - ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo); ret = 1; break; } msleep(10); } if (trim_tmo >= TRIM_TMO) { - ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); + qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); ret = 0; } return ret; @@ -964,8 +958,7 @@ static struct dds_init { }; /* - * Next, values related to Receive Equalization. - * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR + * Now the RXEQ section of the table. */ /* Hardware packs an element number and register address thus: */ #define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4)) @@ -981,23 +974,23 @@ static struct dds_init { #define RXEQ_SDR_ZCNT 23 static struct rxeq_init { - u16 rdesc; /* in form used in SerDesDDSRXEQ */ + u16 rdesc; /* in form used in SerDesDDSRXEQ */ u8 rdata[4]; } rxeq_init_vals[] = { /* Set Rcv Eq. 
to Preset node */ RXEQ_VAL_ALL(7, 0x27, 0x10), /* Set DFELTHFDR/HDR thresholds */ - RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */ + RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */ RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */ - /* Set TLTHFDR/HDR threshold */ - RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */ - RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */ + /* Set TLTHFDR/HDR threshold */ + RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */ + RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */ /* Set Preamp setting 2 (ZFR/ZCNT) */ - RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */ - RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */ + RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */ + RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */ /* Set Preamp DC gain and Setting 1 (GFR/GHR) */ - RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */ - RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */ + RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */ + RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */ /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */ RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */ RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */ @@ -1007,27 +1000,27 @@ #define DDS_ROWS (16) #define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals) -static int ipath_sd_setvals(struct ipath_devdata *dd) +static int qib_sd_setvals(struct qib_devdata *dd) { int idx, midx; - int min_idx; /* Minimum index for this portion of table */ + int min_idx; /* Minimum index for this portion of table */ uint32_t dds_reg_map; u64 __iomem *taddr, *iaddr; uint64_t data; uint64_t sdctl; - taddr = dd->ipath_kregbase + KR_IBSerDesMappTable; - iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq; + taddr = dd->kregbase + kr_serdes_maptable; + iaddr = dd->kregbase + kr_serdes_ddsrxeq0; /* * Init the DDS section of the table. * Each "row" of the table provokes NUM_DDS_REG writes, to the * registers indicated in DDS_REG_MAP. */ - sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl); + sdctl = qib_read_kreg64(dd, kr_ibserdesctrl); sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8); sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13); - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl); + qib_write_kreg(dd, kr_ibserdesctrl, sdctl); /* * Iterate down table within loop for each register to store. @@ -1037,21 +1030,21 @@ static int ipath_sd_setvals(struct ipath_devdata *dd) data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT; writeq(data, iaddr + idx); mmiowb(); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); + qib_read_kreg32(dd, kr_scratch); dds_reg_map >>= 4; for (midx = 0; midx < DDS_ROWS; ++midx) { u64 __iomem *daddr = taddr + ((midx << 4) + idx); data = dds_init_vals[midx].reg_vals[idx]; writeq(data, daddr); mmiowb(); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); + qib_read_kreg32(dd, kr_scratch); } /* End inner for (vals for this reg, each row) */ } /* end outer for (regs to be stored) */ /* - * Init the RXEQ section of the table. As explained above the table - * rxeq_init_vals[], this runs in a different order, as the pattern - * of register references is more complex, but there are only + * Init the RXEQ section of the table. + * This runs in a different order, as the pattern of + * register references is more complex, but there are only * four "data" values per register. 
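* (Editor's note: as the loop below shows, each rxeq_init_vals[] row
* first has its rdesc written to the SerDes_DDSRXEQ0 register, after
* which its four rdata bytes are stored at taddr + (vidx << 6) + idx
* for vidx = 0..3.)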
*/ min_idx = idx; /* RXEQ indices pick up where DDS left off */ @@ -1066,13 +1059,13 @@ static int ipath_sd_setvals(struct ipath_devdata *dd) /* Store the next RXEQ register address */ writeq(rxeq_init_vals[idx].rdesc, iaddr + didx); mmiowb(); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); + qib_read_kreg32(dd, kr_scratch); /* Iterate through RXEQ values */ for (vidx = 0; vidx < 4; vidx++) { data = rxeq_init_vals[idx].rdata[vidx]; writeq(data, taddr + (vidx << 6) + idx); mmiowb(); - ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); + qib_read_kreg32(dd, kr_scratch); } } /* end outer for (Reg-writes for RXEQ) */ return 0; @@ -1085,33 +1078,18 @@ static int ipath_sd_setvals(struct ipath_devdata *dd) #define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8) #define START_EQ2(chan) EPB_LOC(chan, 7, 0x28) -static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask) -{ - int ret = -1; - int sloc; /* shifted loc, for messages */ - - loc |= (1U << EPB_IB_QUAD0_CS_SHF); - sloc = loc >> EPB_ADDR_SHF; - - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask); - if (ret < 0) - ipath_dev_err(dd, "Write failed: elt %d," - " addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n", - (sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7, - val & 0xFF, mask & 0xFF); - return ret; -} - /* * Repeat a "store" across all channels of the IB SerDes. * Although nominally it inherits the "read value" of the last * channel it modified, the only really useful return is <0 for * failure, >= 0 for success. The parameter 'loc' is assumed to - * be the location for the channel-0 copy of the register to - * be modified. + * be the location in some channel of the register to be modified. + * The caller can specify use of the "gang write" option of EPB, + * in which case we use the specified channel data for any fields + * not explicitly written. 
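+ *
+ * Example (editor's sketch, matching qib_sd_early() below): set
+ * START_EQ1 to 0x10 on all four channels with a single gang write,
+ *
+ *	ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);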
*/ -static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, - int mask) +static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, + int mask) { int ret = -1; int chnl; @@ -1126,24 +1104,27 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, loc |= (1U << EPB_IB_QUAD0_CS_SHF); chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7; if (mask != 0xFF) { - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, - loc & ~EPB_GLOBAL_WR, 0, 0); + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, + loc & ~EPB_GLOBAL_WR, 0, 0); if (ret < 0) { int sloc = loc >> EPB_ADDR_SHF; - ipath_dev_err(dd, "pre-read failed: elt %d," - " addr 0x%X, chnl %d\n", (sloc & 0xF), - (sloc >> 9) & 0x3f, chnl); + + qib_dev_err(dd, "pre-read failed: elt %d," + " addr 0x%X, chnl %d\n", + (sloc & 0xF), + (sloc >> 9) & 0x3f, chnl); return ret; } val = (ret & ~mask) | (val & mask); } loc &= ~(7 << (4+EPB_ADDR_SHF)); - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); if (ret < 0) { int sloc = loc >> EPB_ADDR_SHF; - ipath_dev_err(dd, "Global WR failed: elt %d," - " addr 0x%X, val %02X\n", - (sloc & 0xF), (sloc >> 9) & 0x3f, val); + + qib_dev_err(dd, "Global WR failed: elt %d," + " addr 0x%X, val %02X\n", + (sloc & 0xF), (sloc >> 9) & 0x3f, val); } return ret; } @@ -1151,16 +1132,17 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, loc &= ~(7 << (4+EPB_ADDR_SHF)); loc |= (1U << EPB_IB_QUAD0_CS_SHF); for (chnl = 0; chnl < 4; ++chnl) { - int cloc; - cloc = loc | (chnl << (4+EPB_ADDR_SHF)); - ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask); + int cloc = loc | (chnl << (4+EPB_ADDR_SHF)); + + ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask); if (ret < 0) { int sloc = loc >> EPB_ADDR_SHF; - ipath_dev_err(dd, "Write failed: elt %d," - " addr 0x%X, chnl %d, val 0x%02X," - " mask 0x%02X\n", - (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, - val & 0xFF, mask & 0xFF); + + qib_dev_err(dd, "Write failed: elt %d," + " addr 0x%X, chnl %d, val 0x%02X," + " mask 0x%02X\n", + (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, + val & 0xFF, mask & 0xFF); break; } } @@ -1171,7 +1153,7 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, * Set the Tx values normally modified by IBC in IB1.2 mode to default * values, as gotten from first row of init table. */ -static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi) +static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi) { int ret; int idx, reg, data; @@ -1194,7 +1176,7 @@ static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi) * Set the Rx values normally modified by IBC in IB1.2 mode to default * values, as gotten from selected column of init table. */ -static int set_rxeq_vals(struct ipath_devdata *dd, int vsel) +static int set_rxeq_vals(struct qib_devdata *dd, int vsel) { int ret; int ridx; @@ -1202,6 +1184,7 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel) for (ridx = 0; ridx < cnt; ++ridx) { int elt, reg, val, loc; + elt = rxeq_init_vals[ridx].rdesc & 0xF; reg = rxeq_init_vals[ridx].rdesc >> 4; loc = EPB_LOC(0, elt, reg); @@ -1217,83 +1200,66 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel) /* * Set the default values (row 0) for DDR Driver Demphasis. * we do this initially and whenever we turn off IB-1.2 + * * The "default" values for Rx equalization are also stored to * SerDes registers. Formerly (and still default), we used set 2. 
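* (Editor's note: a "set" presumably selects one of the four rdata[]
* columns of rxeq_init_vals[] via set_rxeq_vals().)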
* For experimenting with cables and link-partners, we allow changing * that via a module parameter. */ -static unsigned ipath_rxeq_set = 2; -module_param_named(rxeq_default_set, ipath_rxeq_set, uint, - S_IWUSR | S_IRUGO); +static unsigned qib_rxeq_set = 2; +module_param_named(rxeq_default_set, qib_rxeq_set, uint, + S_IWUSR | S_IRUGO); MODULE_PARM_DESC(rxeq_default_set, - "Which set [0..3] of Rx Equalization values is default"); + "Which set [0..3] of Rx Equalization values is default"); -static int ipath_internal_presets(struct ipath_devdata *dd) +static int qib_internal_presets(struct qib_devdata *dd) { int ret = 0; ret = set_dds_vals(dd, dds_init_vals + DDS_3M); if (ret < 0) - ipath_dev_err(dd, "Failed to set default DDS values\n"); - ret = set_rxeq_vals(dd, ipath_rxeq_set & 3); + qib_dev_err(dd, "Failed to set default DDS values\n"); + ret = set_rxeq_vals(dd, qib_rxeq_set & 3); if (ret < 0) - ipath_dev_err(dd, "Failed to set default RXEQ values\n"); + qib_dev_err(dd, "Failed to set default RXEQ values\n"); return ret; } -int ipath_sd7220_presets(struct ipath_devdata *dd) +int qib_sd7220_presets(struct qib_devdata *dd) { int ret = 0; - if (!dd->ipath_presets_needed) + if (!dd->cspec->presets_needed) return ret; - dd->ipath_presets_needed = 0; + dd->cspec->presets_needed = 0; /* Assert uC reset, so we don't clash with it. */ - ipath_ibsd_reset(dd, 1); + qib_ibsd_reset(dd, 1); udelay(2); - ipath_sd_trimdone_monitor(dd, "link-down"); + qib_sd_trimdone_monitor(dd, "link-down"); - ret = ipath_internal_presets(dd); -return ret; + ret = qib_internal_presets(dd); + return ret; } -static int ipath_sd_trimself(struct ipath_devdata *dd, int val) +static int qib_sd_trimself(struct qib_devdata *dd, int val) { - return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF); + int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF); + + return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); } -static int ipath_sd_early(struct ipath_devdata *dd) +static int qib_sd_early(struct qib_devdata *dd) { - int ret = -1; /* Default failed */ - int chnl; + int ret; - for (chnl = 0; chnl < 4; ++chnl) { - ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF); - if (ret < 0) - goto bail; - } - for (chnl = 0; chnl < 4; ++chnl) { - ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF); - if (ret < 0) - goto bail; - } - /* more fine-tuning of what will be default */ - for (chnl = 0; chnl < 4; ++chnl) { - ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF); - if (ret < 0) - goto bail; - } - for (chnl = 0; chnl < 4; ++chnl) { - ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF); - if (ret < 0) - goto bail; - } - for (chnl = 0; chnl < 4; ++chnl) { - ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF); - if (ret < 0) - goto bail; - } + ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF); + if (ret < 0) + goto bail; + ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF); + if (ret < 0) + goto bail; + ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF); bail: return ret; } @@ -1302,50 +1268,53 @@ bail: #define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6) #define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF) -static int ipath_sd_dactrim(struct ipath_devdata *dd) +static int qib_sd_dactrim(struct qib_devdata *dd) { - int ret = -1; /* Default failed */ - int chnl; + int ret; + + ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF); + if (ret < 0) + goto bail; + + /* more fine-tuning of what will be default */ + ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF); + if (ret < 0) + goto 
bail; + + ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF); + if (ret < 0) + goto bail; + + ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF); + if (ret < 0) + goto bail; + + ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF); + if (ret < 0) + goto bail; - for (chnl = 0; chnl < 4; ++chnl) { - ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF); - if (ret < 0) - goto bail; - } - for (chnl = 0; chnl < 4; ++chnl) { - ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF); - if (ret < 0) - goto bail; - } - for (chnl = 0; chnl < 4; ++chnl) { - ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF); - if (ret < 0) - goto bail; - } /* - * delay for max possible number of steps, with slop. + * Delay for max possible number of steps, with slop. * Each step is about 4usec. */ udelay(415); - for (chnl = 0; chnl < 4; ++chnl) { - ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF); - if (ret < 0) - goto bail; - } + + ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF); + bail: return ret; } #define RELOCK_FIRST_MS 3 #define RXLSPPM(chan) EPB_LOC(chan, 0, 2) -void ipath_toggle_rclkrls(struct ipath_devdata *dd) +void toggle_7220_rclkrls(struct qib_devdata *dd) { int loc = RXLSPPM(0) | EPB_GLOBAL_WR; int ret; ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); if (ret < 0) - ipath_dev_err(dd, "RCLKRLS failed to clear D7\n"); + qib_dev_err(dd, "RCLKRLS failed to clear D7\n"); else { udelay(1); ibsd_mod_allchnls(dd, loc, 0x80, 0x80); @@ -1354,109 +1323,91 @@ void ipath_toggle_rclkrls(struct ipath_devdata *dd) udelay(1); ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); if (ret < 0) - ipath_dev_err(dd, "RCLKRLS failed to clear D7\n"); + qib_dev_err(dd, "RCLKRLS failed to clear D7\n"); else { udelay(1); ibsd_mod_allchnls(dd, loc, 0x80, 0x80); } /* Now reset xgxs and IBC to complete the recovery */ - dd->ipath_f_xgxs_reset(dd); + dd->f_xgxs_reset(dd->pport); } /* * Shut down the timer that polls for relock occasions, if needed - * this is "hooked" from ipath_7220_quiet_serdes(), which is called - * just before ipath_shutdown_device() in ipath_driver.c shuts down all + * this is "hooked" from qib_7220_quiet_serdes(), which is called + * just before qib_shutdown_device() in qib_driver.c shuts down all * the other timers */ -void ipath_shutdown_relock_poll(struct ipath_devdata *dd) +void shutdown_7220_relock_poll(struct qib_devdata *dd) { - struct ipath_relock *irp = &dd->ipath_relock_singleton; - if (atomic_read(&irp->ipath_relock_timer_active)) { - del_timer_sync(&irp->ipath_relock_timer); - atomic_set(&irp->ipath_relock_timer_active, 0); - } + if (dd->cspec->relock_timer_active) + del_timer_sync(&dd->cspec->relock_timer); } -static unsigned ipath_relock_by_timer = 1; -module_param_named(relock_by_timer, ipath_relock_by_timer, uint, - S_IWUSR | S_IRUGO); +static unsigned qib_relock_by_timer = 1; +module_param_named(relock_by_timer, qib_relock_by_timer, uint, + S_IWUSR | S_IRUGO); MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up"); -static void ipath_run_relock(unsigned long opaque) +static void qib_run_relock(unsigned long opaque) { - struct ipath_devdata *dd = (struct ipath_devdata *)opaque; - struct ipath_relock *irp = &dd->ipath_relock_singleton; - u64 val, ltstate; - - if (!(dd->ipath_flags & IPATH_INITTED)) { - /* Not yet up, just reenable the timer for later */ - irp->ipath_relock_interval = HZ; - mod_timer(&irp->ipath_relock_timer, jiffies + HZ); - return; - } + struct qib_devdata *dd = (struct qib_devdata *)opaque; + 
struct qib_pportdata *ppd = dd->pport; + struct qib_chip_specific *cs = dd->cspec; + int timeoff; /* - * Check link-training state for "stuck" state. + * Check link-training state for "stuck" state, when down. * if found, try relock and schedule another try at * exponentially growing delay, maxed at one second. * if not stuck, our work is done. */ - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); - ltstate = ipath_ib_linktrstate(dd, val); - - if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT - && ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) { - int timeoff; - /* Not up yet. Try again, if allowed by module-param */ - if (ipath_relock_by_timer) { - if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) - ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n"); - else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) { - ipath_cdbg(VERBOSE, "RELOCK\n"); - ipath_toggle_rclkrls(dd); - } + if ((dd->flags & QIB_INITTED) && !(ppd->lflags & + (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED | + QIBL_LINKACTIVE))) { + if (qib_relock_by_timer) { + if (!(ppd->lflags & QIBL_IB_LINK_DISABLED)) + toggle_7220_rclkrls(dd); } /* re-set timer for next check */ - timeoff = irp->ipath_relock_interval << 1; + timeoff = cs->relock_interval << 1; if (timeoff > HZ) timeoff = HZ; - irp->ipath_relock_interval = timeoff; - - mod_timer(&irp->ipath_relock_timer, jiffies + timeoff); - } else { - /* Up, so no more need to check so often */ - mod_timer(&irp->ipath_relock_timer, jiffies + HZ); - } + cs->relock_interval = timeoff; + } else + timeoff = HZ; + mod_timer(&cs->relock_timer, jiffies + timeoff); } -void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup) +void set_7220_relock_poll(struct qib_devdata *dd, int ibup) { - struct ipath_relock *irp = &dd->ipath_relock_singleton; + struct qib_chip_specific *cs = dd->cspec; - if (ibup > 0) { - /* we are now up, so relax timer to 1 second interval */ - if (atomic_read(&irp->ipath_relock_timer_active)) - mod_timer(&irp->ipath_relock_timer, jiffies + HZ); + if (ibup) { + /* We are now up, relax timer to 1 second interval */ + if (cs->relock_timer_active) { + cs->relock_interval = HZ; + mod_timer(&cs->relock_timer, jiffies + HZ); + } } else { /* Transition to down, (re-)set timer to short interval. */ - int timeout; - timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000; + unsigned int timeout; + + timeout = msecs_to_jiffies(RELOCK_FIRST_MS); if (timeout == 0) timeout = 1; /* If timer has not yet been started, do so. 
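 * (First use goes through init_timer()/add_timer(); later link-down
 * transitions just mod_timer() the existing timer, and qib_run_relock()
 * keeps re-arming it from then on.)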
*/ - if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) { - init_timer(&irp->ipath_relock_timer); - irp->ipath_relock_timer.function = ipath_run_relock; - irp->ipath_relock_timer.data = (unsigned long) dd; - irp->ipath_relock_interval = timeout; - irp->ipath_relock_timer.expires = jiffies + timeout; - add_timer(&irp->ipath_relock_timer); + if (!cs->relock_timer_active) { + cs->relock_timer_active = 1; + init_timer(&cs->relock_timer); + cs->relock_timer.function = qib_run_relock; + cs->relock_timer.data = (unsigned long) dd; + cs->relock_interval = timeout; + cs->relock_timer.expires = jiffies + timeout; + add_timer(&cs->relock_timer); } else { - irp->ipath_relock_interval = timeout; - mod_timer(&irp->ipath_relock_timer, jiffies + timeout); - atomic_dec(&irp->ipath_relock_timer_active); + cs->relock_interval = timeout; + mod_timer(&cs->relock_timer, jiffies + timeout); } } } - diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/qib/qib_sd7220_img.c index 5ef59da9270a..a1118fbd2370 100644 --- a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c +++ b/drivers/infiniband/hw/qib/qib_sd7220_img.c @@ -38,11 +38,10 @@ #include <linux/pci.h> #include <linux/delay.h> -#include "ipath_kernel.h" -#include "ipath_registers.h" -#include "ipath_7220.h" +#include "qib.h" +#include "qib_7220.h" -static unsigned char ipath_sd7220_ib_img[] = { +static unsigned char qib_sd7220_ib_img[] = { /*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6, 0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, /*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01, @@ -1069,14 +1068,14 @@ static unsigned char ipath_sd7220_ib_img[] = { 0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81 }; -int ipath_sd7220_ib_load(struct ipath_devdata *dd) +int qib_sd7220_ib_load(struct qib_devdata *dd) { - return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img, - sizeof(ipath_sd7220_ib_img), 0); + return qib_sd7220_prog_ld(dd, IB_7220_SERDES, qib_sd7220_ib_img, + sizeof(qib_sd7220_ib_img), 0); } -int ipath_sd7220_ib_vfy(struct ipath_devdata *dd) +int qib_sd7220_ib_vfy(struct qib_devdata *dd) { - return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img, - sizeof(ipath_sd7220_ib_img), 0); + return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, qib_sd7220_ib_img, + sizeof(qib_sd7220_ib_img), 0); } diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c new file mode 100644 index 000000000000..b8456881f7f6 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -0,0 +1,973 @@ +/* + * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/spinlock.h> +#include <linux/netdevice.h> + +#include "qib.h" +#include "qib_common.h" + +/* default pio off, sdma on */ +static ushort sdma_descq_cnt = 256; +module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO); +MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries"); + +/* + * Bits defined in the send DMA descriptor. + */ +#define SDMA_DESC_LAST (1ULL << 11) +#define SDMA_DESC_FIRST (1ULL << 12) +#define SDMA_DESC_DMA_HEAD (1ULL << 13) +#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14) +#define SDMA_DESC_INTR (1ULL << 15) +#define SDMA_DESC_COUNT_LSB 16 +#define SDMA_DESC_GEN_LSB 30 + +char *qib_sdma_state_names[] = { + [qib_sdma_state_s00_hw_down] = "s00_HwDown", + [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait", + [qib_sdma_state_s20_idle] = "s20_Idle", + [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait", + [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait", + [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait", + [qib_sdma_state_s99_running] = "s99_Running", +}; + +char *qib_sdma_event_names[] = { + [qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown", + [qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart", + [qib_sdma_event_e20_hw_started] = "e20_HwStarted", + [qib_sdma_event_e30_go_running] = "e30_GoRunning", + [qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned", + [qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned", + [qib_sdma_event_e60_hw_halted] = "e60_HwHalted", + [qib_sdma_event_e70_go_idle] = "e70_GoIdle", + [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted", + [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted", + [qib_sdma_event_e90_timer_tick] = "e90_TimerTick", +}; + +/* declare all statics here rather than keep sorting */ +static int alloc_sdma(struct qib_pportdata *); +static void sdma_complete(struct kref *); +static void sdma_finalput(struct qib_sdma_state *); +static void sdma_get(struct qib_sdma_state *); +static void sdma_put(struct qib_sdma_state *); +static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states); +static void sdma_start_sw_clean_up(struct qib_pportdata *); +static void sdma_sw_clean_up_task(unsigned long); +static void unmap_desc(struct qib_pportdata *, unsigned); + +static void sdma_get(struct qib_sdma_state *ss) +{ + kref_get(&ss->kref); +} + +static void sdma_complete(struct kref *kref) +{ + struct qib_sdma_state *ss = + container_of(kref, struct qib_sdma_state, kref); + + complete(&ss->comp); +} + +static void sdma_put(struct qib_sdma_state *ss) +{ + kref_put(&ss->kref, sdma_complete); +} + +static void sdma_finalput(struct qib_sdma_state *ss) +{ + sdma_put(ss); + wait_for_completion(&ss->comp); +} + +/* + * Complete all the sdma requests on the active list, in the correct + * order, and with appropriate processing. Called when cleaning up + * after sdma shutdown, and when new sdma requests are submitted for + * a link that is down. This matches what is done for requests + * that complete normally, it's just the full list. 
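+ * Each request's callback is invoked exactly once, here with
+ * QIB_SDMA_TXREQ_S_ABORTED rather than the QIB_SDMA_TXREQ_S_OK used
+ * on the normal completion path in qib_sdma_make_progress(). A
+ * consumer therefore sees (sketch, hypothetical callback name):
+ *	void my_tx_done(struct qib_sdma_txreq *txp, int status);
+ * with status telling it which path retired the request.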
+ * + * Must be called with sdma_lock held + */ +static void clear_sdma_activelist(struct qib_pportdata *ppd) +{ + struct qib_sdma_txreq *txp, *txp_next; + + list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) { + list_del_init(&txp->list); + if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) { + unsigned idx; + + idx = txp->start_idx; + while (idx != txp->next_descq_idx) { + unmap_desc(ppd, idx); + if (++idx == ppd->sdma_descq_cnt) + idx = 0; + } + } + if (txp->callback) + (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED); + } +} + +static void sdma_sw_clean_up_task(unsigned long opaque) +{ + struct qib_pportdata *ppd = (struct qib_pportdata *) opaque; + unsigned long flags; + + spin_lock_irqsave(&ppd->sdma_lock, flags); + + /* + * At this point, the following should always be true: + * - We are halted, so no more descriptors are getting retired. + * - We are not running, so no one is submitting new work. + * - Only we can send the e40_sw_cleaned, so we can't start + * running again until we say so. So, the active list and + * descq are ours to play with. + */ + + /* Process all retired requests. */ + qib_sdma_make_progress(ppd); + + clear_sdma_activelist(ppd); + + /* + * Resync count of added and removed. It is VERY important that + * sdma_descq_removed NEVER decrement - user_sdma depends on it. + */ + ppd->sdma_descq_removed = ppd->sdma_descq_added; + + /* + * Reset our notion of head and tail. + * Note that the HW registers will be reset when switching states + * due to calling __qib_sdma_process_event() below. + */ + ppd->sdma_descq_tail = 0; + ppd->sdma_descq_head = 0; + ppd->sdma_head_dma[0] = 0; + ppd->sdma_generation = 0; + + __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned); + + spin_unlock_irqrestore(&ppd->sdma_lock, flags); +} + +/* + * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait + * as a result of send buffer errors or send DMA descriptor errors. + * We want to disarm the buffers in these cases. + */ +static void sdma_hw_start_up(struct qib_pportdata *ppd) +{ + struct qib_sdma_state *ss = &ppd->sdma_state; + unsigned bufno; + + for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno) + ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno)); + + ppd->dd->f_sdma_hw_start_up(ppd); +} + +static void sdma_sw_tear_down(struct qib_pportdata *ppd) +{ + struct qib_sdma_state *ss = &ppd->sdma_state; + + /* Releasing this reference means the state machine has stopped. 
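+ * Reference lifecycle (sketch of the kref pairing in this file):
+ *	kref_init()     in qib_setup_sdma()
+ *	sdma_get()      when e10_go_hw_start starts the machine
+ *	sdma_put()      here, when it stops
+ *	sdma_finalput() in qib_teardown_sdma(), which then waits.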
*/ + sdma_put(ss); +} + +static void sdma_start_sw_clean_up(struct qib_pportdata *ppd) +{ + tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task); +} + +static void sdma_set_state(struct qib_pportdata *ppd, + enum qib_sdma_states next_state) +{ + struct qib_sdma_state *ss = &ppd->sdma_state; + struct sdma_set_state_action *action = ss->set_state_action; + unsigned op = 0; + + /* debugging bookkeeping */ + ss->previous_state = ss->current_state; + ss->previous_op = ss->current_op; + + ss->current_state = next_state; + + if (action[next_state].op_enable) + op |= QIB_SDMA_SENDCTRL_OP_ENABLE; + + if (action[next_state].op_intenable) + op |= QIB_SDMA_SENDCTRL_OP_INTENABLE; + + if (action[next_state].op_halt) + op |= QIB_SDMA_SENDCTRL_OP_HALT; + + if (action[next_state].op_drain) + op |= QIB_SDMA_SENDCTRL_OP_DRAIN; + + if (action[next_state].go_s99_running_tofalse) + ss->go_s99_running = 0; + + if (action[next_state].go_s99_running_totrue) + ss->go_s99_running = 1; + + ss->current_op = op; + + ppd->dd->f_sdma_sendctrl(ppd, ss->current_op); +} + +static void unmap_desc(struct qib_pportdata *ppd, unsigned head) +{ + __le64 *descqp = &ppd->sdma_descq[head].qw[0]; + u64 desc[2]; + dma_addr_t addr; + size_t len; + + desc[0] = le64_to_cpu(descqp[0]); + desc[1] = le64_to_cpu(descqp[1]); + + addr = (desc[1] << 32) | (desc[0] >> 32); + len = (desc[0] >> 14) & (0x7ffULL << 2); + dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE); +} + +static int alloc_sdma(struct qib_pportdata *ppd) +{ + ppd->sdma_descq_cnt = sdma_descq_cnt; + if (!ppd->sdma_descq_cnt) + ppd->sdma_descq_cnt = 256; + + /* Allocate memory for SendDMA descriptor FIFO */ + ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev, + ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys, + GFP_KERNEL); + + if (!ppd->sdma_descq) { + qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor " + "FIFO memory\n"); + goto bail; + } + + /* Allocate memory for DMA of head register to memory */ + ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev, + PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL); + if (!ppd->sdma_head_dma) { + qib_dev_err(ppd->dd, "failed to allocate SendDMA " + "head memory\n"); + goto cleanup_descq; + } + ppd->sdma_head_dma[0] = 0; + return 0; + +cleanup_descq: + dma_free_coherent(&ppd->dd->pcidev->dev, + ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq, + ppd->sdma_descq_phys); + ppd->sdma_descq = NULL; + ppd->sdma_descq_phys = 0; +bail: + ppd->sdma_descq_cnt = 0; + return -ENOMEM; +} + +static void free_sdma(struct qib_pportdata *ppd) +{ + struct qib_devdata *dd = ppd->dd; + + if (ppd->sdma_head_dma) { + dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, + (void *)ppd->sdma_head_dma, + ppd->sdma_head_phys); + ppd->sdma_head_dma = NULL; + ppd->sdma_head_phys = 0; + } + + if (ppd->sdma_descq) { + dma_free_coherent(&dd->pcidev->dev, + ppd->sdma_descq_cnt * sizeof(u64[2]), + ppd->sdma_descq, ppd->sdma_descq_phys); + ppd->sdma_descq = NULL; + ppd->sdma_descq_phys = 0; + } +} + +static inline void make_sdma_desc(struct qib_pportdata *ppd, + u64 *sdmadesc, u64 addr, u64 dwlen, + u64 dwoffset) +{ + + WARN_ON(addr & 3); + /* SDmaPhyAddr[47:32] */ + sdmadesc[1] = addr >> 32; + /* SDmaPhyAddr[31:0] */ + sdmadesc[0] = (addr & 0xfffffffcULL) << 32; + /* SDmaGeneration[1:0] */ + sdmadesc[0] |= (ppd->sdma_generation & 3ULL) << + SDMA_DESC_GEN_LSB; + /* SDmaDwordCount[10:0] */ + sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB; + /* SDmaBufOffset[12:2] */ + sdmadesc[0] |= dwoffset & 0x7ffULL; +} + +/* 
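+ * A worked example of the packing done by make_sdma_desc() above
+ * (hypothetical values): addr 0x12345678, dwlen 0x10, generation 1,
+ * dwoffset 0x8 gives
+ *	desc[0] = 0x1234567840100008	(addr[31:0] | gen<<30 | cnt<<16 | off)
+ *	desc[1] = 0x0000000000000000	(addr[47:32])
+ *
+ * For qib_sdma_make_progress() below: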
sdma_lock must be held */ +int qib_sdma_make_progress(struct qib_pportdata *ppd) +{ + struct list_head *lp = NULL; + struct qib_sdma_txreq *txp = NULL; + struct qib_devdata *dd = ppd->dd; + int progress = 0; + u16 hwhead; + u16 idx = 0; + + hwhead = dd->f_sdma_gethead(ppd); + + /* The reason for some of the complexity of this code is that + * not all descriptors have corresponding txps. So, we have to + * be able to skip over descs until we wander into the range of + * the next txp on the list. + */ + + if (!list_empty(&ppd->sdma_activelist)) { + lp = ppd->sdma_activelist.next; + txp = list_entry(lp, struct qib_sdma_txreq, list); + idx = txp->start_idx; + } + + while (ppd->sdma_descq_head != hwhead) { + /* if desc is part of this txp, unmap if needed */ + if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) && + (idx == ppd->sdma_descq_head)) { + unmap_desc(ppd, ppd->sdma_descq_head); + if (++idx == ppd->sdma_descq_cnt) + idx = 0; + } + + /* increment dequed desc count */ + ppd->sdma_descq_removed++; + + /* advance head, wrap if needed */ + if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt) + ppd->sdma_descq_head = 0; + + /* if now past this txp's descs, do the callback */ + if (txp && txp->next_descq_idx == ppd->sdma_descq_head) { + /* remove from active list */ + list_del_init(&txp->list); + if (txp->callback) + (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK); + /* see if there is another txp */ + if (list_empty(&ppd->sdma_activelist)) + txp = NULL; + else { + lp = ppd->sdma_activelist.next; + txp = list_entry(lp, struct qib_sdma_txreq, + list); + idx = txp->start_idx; + } + } + progress = 1; + } + if (progress) + qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd)); + return progress; +} + +/* + * This is called from interrupt context. + */ +void qib_sdma_intr(struct qib_pportdata *ppd) +{ + unsigned long flags; + + spin_lock_irqsave(&ppd->sdma_lock, flags); + + __qib_sdma_intr(ppd); + + spin_unlock_irqrestore(&ppd->sdma_lock, flags); +} + +void __qib_sdma_intr(struct qib_pportdata *ppd) +{ + if (__qib_sdma_running(ppd)) + qib_sdma_make_progress(ppd); +} + +int qib_setup_sdma(struct qib_pportdata *ppd) +{ + struct qib_devdata *dd = ppd->dd; + unsigned long flags; + int ret = 0; + + ret = alloc_sdma(ppd); + if (ret) + goto bail; + + /* set consistent sdma state */ + ppd->dd->f_sdma_init_early(ppd); + spin_lock_irqsave(&ppd->sdma_lock, flags); + sdma_set_state(ppd, qib_sdma_state_s00_hw_down); + spin_unlock_irqrestore(&ppd->sdma_lock, flags); + + /* set up reference counting */ + kref_init(&ppd->sdma_state.kref); + init_completion(&ppd->sdma_state.comp); + + ppd->sdma_generation = 0; + ppd->sdma_descq_head = 0; + ppd->sdma_descq_removed = 0; + ppd->sdma_descq_added = 0; + + INIT_LIST_HEAD(&ppd->sdma_activelist); + + tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task, + (unsigned long)ppd); + + ret = dd->f_init_sdma_regs(ppd); + if (ret) + goto bail_alloc; + + qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start); + + return 0; + +bail_alloc: + qib_teardown_sdma(ppd); +bail: + return ret; +} + +void qib_teardown_sdma(struct qib_pportdata *ppd) +{ + qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down); + + /* + * This waits for the state machine to exit so it is not + * necessary to kill the sdma_sw_clean_up_task to make sure + * it is not running. 
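+ * sdma_finalput() drops this caller's reference and then blocks in
+ * wait_for_completion() until the final sdma_put() fires
+ * sdma_complete().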
+ */ + sdma_finalput(&ppd->sdma_state); + + free_sdma(ppd); +} + +int qib_sdma_running(struct qib_pportdata *ppd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&ppd->sdma_lock, flags); + ret = __qib_sdma_running(ppd); + spin_unlock_irqrestore(&ppd->sdma_lock, flags); + + return ret; +} + +/* + * Complete a request when sdma is not running; likely only one request, + * but to simplify the code, always queue it, then process the full + * activelist. We process the entire list to ensure that this particular + * request does get its callback, but in the correct order. + * Must be called with sdma_lock held + */ +static void complete_sdma_err_req(struct qib_pportdata *ppd, + struct qib_verbs_txreq *tx) +{ + atomic_inc(&tx->qp->s_dma_busy); + /* no sdma descriptors, so no unmap_desc */ + tx->txreq.start_idx = 0; + tx->txreq.next_descq_idx = 0; + list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); + clear_sdma_activelist(ppd); +} + +/* + * This function queues one IB packet onto the send DMA queue per call. + * The caller is responsible for checking: + * 1) The number of send DMA descriptor entries is less than the size of + * the descriptor queue. + * 2) The IB SGE addresses and lengths are 32-bit aligned + * (except possibly the last SGE's length) + * 3) The SGE addresses are suitable for passing to dma_map_single(). + */ +int qib_sdma_verbs_send(struct qib_pportdata *ppd, + struct qib_sge_state *ss, u32 dwords, + struct qib_verbs_txreq *tx) +{ + unsigned long flags; + struct qib_sge *sge; + struct qib_qp *qp; + int ret = 0; + u16 tail; + __le64 *descqp; + u64 sdmadesc[2]; + u32 dwoffset; + dma_addr_t addr; + + spin_lock_irqsave(&ppd->sdma_lock, flags); + +retry: + if (unlikely(!__qib_sdma_running(ppd))) { + complete_sdma_err_req(ppd, tx); + goto unlock; + } + + if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) { + if (qib_sdma_make_progress(ppd)) + goto retry; + if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT) + ppd->dd->f_sdma_set_desc_cnt(ppd, + ppd->sdma_descq_cnt / 2); + goto busy; + } + + dwoffset = tx->hdr_dwords; + make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0); + + sdmadesc[0] |= SDMA_DESC_FIRST; + if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) + sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF; + + /* write to the descq */ + tail = ppd->sdma_descq_tail; + descqp = &ppd->sdma_descq[tail].qw[0]; + *descqp++ = cpu_to_le64(sdmadesc[0]); + *descqp++ = cpu_to_le64(sdmadesc[1]); + + /* increment the tail */ + if (++tail == ppd->sdma_descq_cnt) { + tail = 0; + descqp = &ppd->sdma_descq[0].qw[0]; + ++ppd->sdma_generation; + } + + tx->txreq.start_idx = tail; + + sge = &ss->sge; + while (dwords) { + u32 dw; + u32 len; + + len = dwords << 2; + if (len > sge->length) + len = sge->length; + if (len > sge->sge_length) + len = sge->sge_length; + BUG_ON(len == 0); + dw = (len + 3) >> 2; + addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr, + dw << 2, DMA_TO_DEVICE); + if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) + goto unmap; + sdmadesc[0] = 0; + make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset); + /* SDmaUseLargeBuf has to be set in every descriptor */ + if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) + sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF; + /* write to the descq */ + *descqp++ = cpu_to_le64(sdmadesc[0]); + *descqp++ = cpu_to_le64(sdmadesc[1]); + + /* increment the tail */ + if (++tail == ppd->sdma_descq_cnt) { + tail = 0; + descqp = &ppd->sdma_descq[0].qw[0]; + ++ppd->sdma_generation; + } + sge->vaddr += len; + sge->length -= len; + sge->sge_length -=
len; + if (sge->sge_length == 0) { + if (--ss->num_sge) + *sge = *ss->sg_list++; + } else if (sge->length == 0 && sge->mr->lkey) { + if (++sge->n >= QIB_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + break; + sge->n = 0; + } + sge->vaddr = + sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = + sge->mr->map[sge->m]->segs[sge->n].length; + } + + dwoffset += dw; + dwords -= dw; + } + + if (!tail) + descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0]; + descqp -= 2; + descqp[0] |= cpu_to_le64(SDMA_DESC_LAST); + if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST) + descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD); + if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ) + descqp[0] |= cpu_to_le64(SDMA_DESC_INTR); + + atomic_inc(&tx->qp->s_dma_busy); + tx->txreq.next_descq_idx = tail; + ppd->dd->f_sdma_update_tail(ppd, tail); + ppd->sdma_descq_added += tx->txreq.sg_count; + list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); + goto unlock; + +unmap: + for (;;) { + if (!tail) + tail = ppd->sdma_descq_cnt - 1; + else + tail--; + if (tail == ppd->sdma_descq_tail) + break; + unmap_desc(ppd, tail); + } + qp = tx->qp; + qib_put_txreq(tx); + spin_lock(&qp->s_lock); + if (qp->ibqp.qp_type == IB_QPT_RC) { + /* XXX what about error sending RDMA read responses? */ + if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) + qib_error_qp(qp, IB_WC_GENERAL_ERR); + } else if (qp->s_wqe) + qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); + spin_unlock(&qp->s_lock); + /* return zero to process the next send work request */ + goto unlock; + +busy: + qp = tx->qp; + spin_lock(&qp->s_lock); + if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { + struct qib_ibdev *dev; + + /* + * If we couldn't queue the DMA request, save the info + * and try again later rather than destroying the + * buffer and undoing the side effects of the copy. + */ + tx->ss = ss; + tx->dwords = dwords; + qp->s_tx = tx; + dev = &ppd->dd->verbs_dev; + spin_lock(&dev->pending_lock); + if (list_empty(&qp->iowait)) { + struct qib_ibport *ibp; + + ibp = &ppd->ibport_data; + ibp->n_dmawait++; + qp->s_flags |= QIB_S_WAIT_DMA_DESC; + list_add_tail(&qp->iowait, &dev->dmawait); + } + spin_unlock(&dev->pending_lock); + qp->s_flags &= ~QIB_S_BUSY; + spin_unlock(&qp->s_lock); + ret = -EBUSY; + } else { + spin_unlock(&qp->s_lock); + qib_put_txreq(tx); + } +unlock: + spin_unlock_irqrestore(&ppd->sdma_lock, flags); + return ret; +} + +void qib_sdma_process_event(struct qib_pportdata *ppd, + enum qib_sdma_events event) +{ + unsigned long flags; + + spin_lock_irqsave(&ppd->sdma_lock, flags); + + __qib_sdma_process_event(ppd, event); + + if (ppd->sdma_state.current_state == qib_sdma_state_s99_running) + qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd)); + + spin_unlock_irqrestore(&ppd->sdma_lock, flags); +} + +void __qib_sdma_process_event(struct qib_pportdata *ppd, + enum qib_sdma_events event) +{ + struct qib_sdma_state *ss = &ppd->sdma_state; + + switch (ss->current_state) { + case qib_sdma_state_s00_hw_down: + switch (event) { + case qib_sdma_event_e00_go_hw_down: + break; + case qib_sdma_event_e30_go_running: + /* + * If down, but running requested (usually result + * of link up, then we need to start up. + * This can happen when hw down is requested while + * bringing the link up with traffic active on + * 7220, e.g. 
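+ *
+ * For orientation, a condensed map of the transitions implemented by
+ * the cases below (a sketch derived from this switch, not a spec):
+ *	s00 -e10-> s10 -e20-> s20 (or s99 if go_s99_running)
+ *	s20 -e30-> s99
+ *	s99 -e60/e7220-> s30;  s99 -e70/e7322-> s50
+ *	s50 -e60-> s40 -e50-> s30 -e40-> s10
+ *	any state -e00-> s00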
*/ + ss->go_s99_running = 1; + /* fall through and start dma engine */ + case qib_sdma_event_e10_go_hw_start: + /* This reference means the state machine is started */ + sdma_get(&ppd->sdma_state); + sdma_set_state(ppd, + qib_sdma_state_s10_hw_start_up_wait); + break; + case qib_sdma_event_e20_hw_started: + break; + case qib_sdma_event_e40_sw_cleaned: + sdma_sw_tear_down(ppd); + break; + case qib_sdma_event_e50_hw_cleaned: + break; + case qib_sdma_event_e60_hw_halted: + break; + case qib_sdma_event_e70_go_idle: + break; + case qib_sdma_event_e7220_err_halted: + break; + case qib_sdma_event_e7322_err_halted: + break; + case qib_sdma_event_e90_timer_tick: + break; + } + break; + + case qib_sdma_state_s10_hw_start_up_wait: + switch (event) { + case qib_sdma_event_e00_go_hw_down: + sdma_set_state(ppd, qib_sdma_state_s00_hw_down); + sdma_sw_tear_down(ppd); + break; + case qib_sdma_event_e10_go_hw_start: + break; + case qib_sdma_event_e20_hw_started: + sdma_set_state(ppd, ss->go_s99_running ? + qib_sdma_state_s99_running : + qib_sdma_state_s20_idle); + break; + case qib_sdma_event_e30_go_running: + ss->go_s99_running = 1; + break; + case qib_sdma_event_e40_sw_cleaned: + break; + case qib_sdma_event_e50_hw_cleaned: + break; + case qib_sdma_event_e60_hw_halted: + break; + case qib_sdma_event_e70_go_idle: + ss->go_s99_running = 0; + break; + case qib_sdma_event_e7220_err_halted: + break; + case qib_sdma_event_e7322_err_halted: + break; + case qib_sdma_event_e90_timer_tick: + break; + } + break; + + case qib_sdma_state_s20_idle: + switch (event) { + case qib_sdma_event_e00_go_hw_down: + sdma_set_state(ppd, qib_sdma_state_s00_hw_down); + sdma_sw_tear_down(ppd); + break; + case qib_sdma_event_e10_go_hw_start: + break; + case qib_sdma_event_e20_hw_started: + break; + case qib_sdma_event_e30_go_running: + sdma_set_state(ppd, qib_sdma_state_s99_running); + ss->go_s99_running = 1; + break; + case qib_sdma_event_e40_sw_cleaned: + break; + case qib_sdma_event_e50_hw_cleaned: + break; + case qib_sdma_event_e60_hw_halted: + break; + case qib_sdma_event_e70_go_idle: + break; + case qib_sdma_event_e7220_err_halted: + break; + case qib_sdma_event_e7322_err_halted: + break; + case qib_sdma_event_e90_timer_tick: + break; + } + break; + + case qib_sdma_state_s30_sw_clean_up_wait: + switch (event) { + case qib_sdma_event_e00_go_hw_down: + sdma_set_state(ppd, qib_sdma_state_s00_hw_down); + break; + case qib_sdma_event_e10_go_hw_start: + break; + case qib_sdma_event_e20_hw_started: + break; + case qib_sdma_event_e30_go_running: + ss->go_s99_running = 1; + break; + case qib_sdma_event_e40_sw_cleaned: + sdma_set_state(ppd, + qib_sdma_state_s10_hw_start_up_wait); + sdma_hw_start_up(ppd); + break; + case qib_sdma_event_e50_hw_cleaned: + break; + case qib_sdma_event_e60_hw_halted: + break; + case qib_sdma_event_e70_go_idle: + ss->go_s99_running = 0; + break; + case qib_sdma_event_e7220_err_halted: + break; + case qib_sdma_event_e7322_err_halted: + break; + case qib_sdma_event_e90_timer_tick: + break; + } + break; + + case qib_sdma_state_s40_hw_clean_up_wait: + switch (event) { + case qib_sdma_event_e00_go_hw_down: + sdma_set_state(ppd, qib_sdma_state_s00_hw_down); + sdma_start_sw_clean_up(ppd); + break; + case qib_sdma_event_e10_go_hw_start: + break; + case qib_sdma_event_e20_hw_started: + break; + case qib_sdma_event_e30_go_running: + ss->go_s99_running = 1; + break; + case qib_sdma_event_e40_sw_cleaned: + break; + case qib_sdma_event_e50_hw_cleaned: + sdma_set_state(ppd, + qib_sdma_state_s30_sw_clean_up_wait); + 
sdma_start_sw_clean_up(ppd); + break; + case qib_sdma_event_e60_hw_halted: + break; + case qib_sdma_event_e70_go_idle: + ss->go_s99_running = 0; + break; + case qib_sdma_event_e7220_err_halted: + break; + case qib_sdma_event_e7322_err_halted: + break; + case qib_sdma_event_e90_timer_tick: + break; + } + break; + + case qib_sdma_state_s50_hw_halt_wait: + switch (event) { + case qib_sdma_event_e00_go_hw_down: + sdma_set_state(ppd, qib_sdma_state_s00_hw_down); + sdma_start_sw_clean_up(ppd); + break; + case qib_sdma_event_e10_go_hw_start: + break; + case qib_sdma_event_e20_hw_started: + break; + case qib_sdma_event_e30_go_running: + ss->go_s99_running = 1; + break; + case qib_sdma_event_e40_sw_cleaned: + break; + case qib_sdma_event_e50_hw_cleaned: + break; + case qib_sdma_event_e60_hw_halted: + sdma_set_state(ppd, + qib_sdma_state_s40_hw_clean_up_wait); + ppd->dd->f_sdma_hw_clean_up(ppd); + break; + case qib_sdma_event_e70_go_idle: + ss->go_s99_running = 0; + break; + case qib_sdma_event_e7220_err_halted: + break; + case qib_sdma_event_e7322_err_halted: + break; + case qib_sdma_event_e90_timer_tick: + break; + } + break; + + case qib_sdma_state_s99_running: + switch (event) { + case qib_sdma_event_e00_go_hw_down: + sdma_set_state(ppd, qib_sdma_state_s00_hw_down); + sdma_start_sw_clean_up(ppd); + break; + case qib_sdma_event_e10_go_hw_start: + break; + case qib_sdma_event_e20_hw_started: + break; + case qib_sdma_event_e30_go_running: + break; + case qib_sdma_event_e40_sw_cleaned: + break; + case qib_sdma_event_e50_hw_cleaned: + break; + case qib_sdma_event_e60_hw_halted: + sdma_set_state(ppd, + qib_sdma_state_s30_sw_clean_up_wait); + sdma_start_sw_clean_up(ppd); + break; + case qib_sdma_event_e70_go_idle: + sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait); + ss->go_s99_running = 0; + break; + case qib_sdma_event_e7220_err_halted: + sdma_set_state(ppd, + qib_sdma_state_s30_sw_clean_up_wait); + sdma_start_sw_clean_up(ppd); + break; + case qib_sdma_event_e7322_err_halted: + sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait); + break; + case qib_sdma_event_e90_timer_tick: + break; + } + break; + } + + ss->last_event = event; +} diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c new file mode 100644 index 000000000000..c3ec8efc2ed8 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_srq.c @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include "qib_verbs.h" + +/** + * qib_post_srq_receive - post a receive on a shared receive queue + * @ibsrq: the SRQ to post the receive on + * @wr: the list of work requests to post + * @bad_wr: A pointer to the first WR to cause a problem is put here + * + * This may be called from interrupt context. + */ +int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct qib_srq *srq = to_isrq(ibsrq); + struct qib_rwq *wq; + unsigned long flags; + int ret; + + for (; wr; wr = wr->next) { + struct qib_rwqe *wqe; + u32 next; + int i; + + if ((unsigned) wr->num_sge > srq->rq.max_sge) { + *bad_wr = wr; + ret = -EINVAL; + goto bail; + } + + spin_lock_irqsave(&srq->rq.lock, flags); + wq = srq->rq.wq; + next = wq->head + 1; + if (next >= srq->rq.size) + next = 0; + if (next == wq->tail) { + spin_unlock_irqrestore(&srq->rq.lock, flags); + *bad_wr = wr; + ret = -ENOMEM; + goto bail; + } + + wqe = get_rwqe_ptr(&srq->rq, wq->head); + wqe->wr_id = wr->wr_id; + wqe->num_sge = wr->num_sge; + for (i = 0; i < wr->num_sge; i++) + wqe->sg_list[i] = wr->sg_list[i]; + /* Make sure queue entry is written before the head index. */ + smp_wmb(); + wq->head = next; + spin_unlock_irqrestore(&srq->rq.lock, flags); + } + ret = 0; + +bail: + return ret; +} + +/** + * qib_create_srq - create a shared receive queue + * @ibpd: the protection domain of the SRQ to create + * @srq_init_attr: the attributes of the SRQ + * @udata: data from libibverbs when creating a user SRQ + */ +struct ib_srq *qib_create_srq(struct ib_pd *ibpd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_udata *udata) +{ + struct qib_ibdev *dev = to_idev(ibpd->device); + struct qib_srq *srq; + u32 sz; + struct ib_srq *ret; + + if (srq_init_attr->attr.max_sge == 0 || + srq_init_attr->attr.max_sge > ib_qib_max_srq_sges || + srq_init_attr->attr.max_wr == 0 || + srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) { + ret = ERR_PTR(-EINVAL); + goto done; + } + + srq = kmalloc(sizeof(*srq), GFP_KERNEL); + if (!srq) { + ret = ERR_PTR(-ENOMEM); + goto done; + } + + /* + * Need to use vmalloc() if we want to support large #s of entries. + */ + srq->rq.size = srq_init_attr->attr.max_wr + 1; + srq->rq.max_sge = srq_init_attr->attr.max_sge; + sz = sizeof(struct ib_sge) * srq->rq.max_sge + + sizeof(struct qib_rwqe); + srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz); + if (!srq->rq.wq) { + ret = ERR_PTR(-ENOMEM); + goto bail_srq; + } + + /* + * Return the address of the RWQ as the offset to mmap. + * See qib_mmap() for details. 
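+ * Roughly, a user library that receives this offset would then do
+ * (sketch, not the actual libibverbs code):
+ *	wq = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		  cmd_fd, (off_t) offset);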
+ */ + if (udata && udata->outlen >= sizeof(__u64)) { + int err; + u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz; + + srq->ip = + qib_create_mmap_info(dev, s, ibpd->uobject->context, + srq->rq.wq); + if (!srq->ip) { + ret = ERR_PTR(-ENOMEM); + goto bail_wq; + } + + err = ib_copy_to_udata(udata, &srq->ip->offset, + sizeof(srq->ip->offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_ip; + } + } else + srq->ip = NULL; + + /* + * ib_create_srq() will initialize srq->ibsrq. + */ + spin_lock_init(&srq->rq.lock); + srq->rq.wq->head = 0; + srq->rq.wq->tail = 0; + srq->limit = srq_init_attr->attr.srq_limit; + + spin_lock(&dev->n_srqs_lock); + if (dev->n_srqs_allocated == ib_qib_max_srqs) { + spin_unlock(&dev->n_srqs_lock); + ret = ERR_PTR(-ENOMEM); + goto bail_ip; + } + + dev->n_srqs_allocated++; + spin_unlock(&dev->n_srqs_lock); + + if (srq->ip) { + spin_lock_irq(&dev->pending_lock); + list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + + ret = &srq->ibsrq; + goto done; + +bail_ip: + kfree(srq->ip); +bail_wq: + vfree(srq->rq.wq); +bail_srq: + kfree(srq); +done: + return ret; +} + +/** + * qib_modify_srq - modify a shared receive queue + * @ibsrq: the SRQ to modify + * @attr: the new attributes of the SRQ + * @attr_mask: indicates which attributes to modify + * @udata: user data for libibverbs.so + */ +int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, + struct ib_udata *udata) +{ + struct qib_srq *srq = to_isrq(ibsrq); + struct qib_rwq *wq; + int ret = 0; + + if (attr_mask & IB_SRQ_MAX_WR) { + struct qib_rwq *owq; + struct qib_rwqe *p; + u32 sz, size, n, head, tail; + + /* Check that the requested sizes are below the limits. */ + if ((attr->max_wr > ib_qib_max_srq_wrs) || + ((attr_mask & IB_SRQ_LIMIT) ? + attr->srq_limit : srq->limit) > attr->max_wr) { + ret = -EINVAL; + goto bail; + } + + sz = sizeof(struct qib_rwqe) + + srq->rq.max_sge * sizeof(struct ib_sge); + size = attr->max_wr + 1; + wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz); + if (!wq) { + ret = -ENOMEM; + goto bail; + } + + /* Check that we can write the offset to mmap. */ + if (udata && udata->inlen >= sizeof(__u64)) { + __u64 offset_addr; + __u64 offset = 0; + + ret = ib_copy_from_udata(&offset_addr, udata, + sizeof(offset_addr)); + if (ret) + goto bail_free; + udata->outbuf = + (void __user *) (unsigned long) offset_addr; + ret = ib_copy_to_udata(udata, &offset, + sizeof(offset)); + if (ret) + goto bail_free; + } + + spin_lock_irq(&srq->rq.lock); + /* + * validate head and tail pointer values and compute + * the number of remaining WQEs. 
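+ * For example (hypothetical values): with the old ring of size 8,
+ * head 2 and tail 6, n = 2 + (8 - 6) = 4 WQEs are still queued, so
+ * the new ring's size (max_wr + 1) must be greater than 4.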
+ */ + owq = srq->rq.wq; + head = owq->head; + tail = owq->tail; + if (head >= srq->rq.size || tail >= srq->rq.size) { + ret = -EINVAL; + goto bail_unlock; + } + n = head; + if (n < tail) + n += srq->rq.size - tail; + else + n -= tail; + if (size <= n) { + ret = -EINVAL; + goto bail_unlock; + } + n = 0; + p = wq->wq; + while (tail != head) { + struct qib_rwqe *wqe; + int i; + + wqe = get_rwqe_ptr(&srq->rq, tail); + p->wr_id = wqe->wr_id; + p->num_sge = wqe->num_sge; + for (i = 0; i < wqe->num_sge; i++) + p->sg_list[i] = wqe->sg_list[i]; + n++; + p = (struct qib_rwqe *)((char *) p + sz); + if (++tail >= srq->rq.size) + tail = 0; + } + srq->rq.wq = wq; + srq->rq.size = size; + wq->head = n; + wq->tail = 0; + if (attr_mask & IB_SRQ_LIMIT) + srq->limit = attr->srq_limit; + spin_unlock_irq(&srq->rq.lock); + + vfree(owq); + + if (srq->ip) { + struct qib_mmap_info *ip = srq->ip; + struct qib_ibdev *dev = to_idev(srq->ibsrq.device); + u32 s = sizeof(struct qib_rwq) + size * sz; + + qib_update_mmap_info(dev, ip, s, wq); + + /* + * Return the offset to mmap. + * See qib_mmap() for details. + */ + if (udata && udata->inlen >= sizeof(__u64)) { + ret = ib_copy_to_udata(udata, &ip->offset, + sizeof(ip->offset)); + if (ret) + goto bail; + } + + /* + * Put user mapping info onto the pending list + * unless it already is on the list. + */ + spin_lock_irq(&dev->pending_lock); + if (list_empty(&ip->pending_mmaps)) + list_add(&ip->pending_mmaps, + &dev->pending_mmaps); + spin_unlock_irq(&dev->pending_lock); + } + } else if (attr_mask & IB_SRQ_LIMIT) { + spin_lock_irq(&srq->rq.lock); + if (attr->srq_limit >= srq->rq.size) + ret = -EINVAL; + else + srq->limit = attr->srq_limit; + spin_unlock_irq(&srq->rq.lock); + } + goto bail; + +bail_unlock: + spin_unlock_irq(&srq->rq.lock); +bail_free: + vfree(wq); +bail: + return ret; +} + +int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) +{ + struct qib_srq *srq = to_isrq(ibsrq); + + attr->max_wr = srq->rq.size - 1; + attr->max_sge = srq->rq.max_sge; + attr->srq_limit = srq->limit; + return 0; +} + +/** + * qib_destroy_srq - destroy a shared receive queue + * @ibsrq: the SRQ to destroy + */ +int qib_destroy_srq(struct ib_srq *ibsrq) +{ + struct qib_srq *srq = to_isrq(ibsrq); + struct qib_ibdev *dev = to_idev(ibsrq->device); + + spin_lock(&dev->n_srqs_lock); + dev->n_srqs_allocated--; + spin_unlock(&dev->n_srqs_lock); + if (srq->ip) + kref_put(&srq->ip->ref, qib_release_mmap_info); + else + vfree(srq->rq.wq); + kfree(srq); + + return 0; +} diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c new file mode 100644 index 000000000000..dab4d9f4a2cc --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -0,0 +1,691 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/ctype.h> + +#include "qib.h" + +/** + * qib_parse_ushort - parse an unsigned short value in an arbitrary base + * @str: the string containing the number + * @valp: where to put the result + * + * Returns the number of bytes consumed, or negative value on error. + */ +static int qib_parse_ushort(const char *str, unsigned short *valp) +{ + unsigned long val; + char *end; + int ret; + + if (!isdigit(str[0])) { + ret = -EINVAL; + goto bail; + } + + val = simple_strtoul(str, &end, 0); + + if (val > 0xffff) { + ret = -EINVAL; + goto bail; + } + + *valp = val; + + ret = end + 1 - str; + if (ret == 0) + ret = -EINVAL; + +bail: + return ret; +} + +/* start of per-port functions */ +/* + * Get/Set heartbeat enable. OR of 1=enabled, 2=auto + */ +static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf) +{ + struct qib_devdata *dd = ppd->dd; + int ret; + + ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT); + ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); + return ret; +} + +static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf, + size_t count) +{ + struct qib_devdata *dd = ppd->dd; + int ret; + u16 val; + + ret = qib_parse_ushort(buf, &val); + + /* + * Set the "intentional" heartbeat enable per either of + * "Enable" and "Auto", as these are normally set together. + * This bit is consulted when leaving loopback mode, + * because entering loopback mode overrides it and automatically + * disables heartbeat. + */ + if (ret >= 0) + ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val); + if (ret < 0) + qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n"); + return ret < 0 ? ret : count; +} + +static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf, + size_t count) +{ + struct qib_devdata *dd = ppd->dd; + int ret = count, r; + + r = dd->f_set_ib_loopback(ppd, buf); + if (r < 0) + ret = r; + + return ret; +} + +static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf, + size_t count) +{ + struct qib_devdata *dd = ppd->dd; + int ret; + u16 val; + + ret = qib_parse_ushort(buf, &val); + if (ret > 0) + qib_set_led_override(ppd, val); + else + qib_dev_err(dd, "attempt to set invalid LED override\n"); + return ret < 0 ? ret : count; +} + +static ssize_t show_status(struct qib_pportdata *ppd, char *buf) +{ + ssize_t ret; + + if (!ppd->statusp) + ret = -EINVAL; + else + ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n", + (unsigned long long) *(ppd->statusp)); + return ret; +} + +/* + * For userland compatibility, these offsets must remain fixed. 
+ * They are strings for QIB_STATUS_* + */ +static const char *qib_status_str[] = { + "Initted", + "", + "", + "", + "", + "Present", + "IB_link_up", + "IB_configured", + "", + "Fatal_Hardware_Error", + NULL, +}; + +static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf) +{ + int i, any; + u64 s; + ssize_t ret; + + if (!ppd->statusp) { + ret = -EINVAL; + goto bail; + } + + s = *(ppd->statusp); + *buf = '\0'; + for (any = i = 0; s && qib_status_str[i]; i++) { + if (s & 1) { + /* if overflow */ + if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) + break; + if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >= + PAGE_SIZE) + break; + any = 1; + } + s >>= 1; + } + if (any) + strlcat(buf, "\n", PAGE_SIZE); + + ret = strlen(buf); + +bail: + return ret; +} + +/* end of per-port functions */ + +/* + * Start of per-port file structures and support code + * Because we are fitting into other infrastructure, we have to supply the + * full set of kobject/sysfs_ops structures and routines. + */ +#define QIB_PORT_ATTR(name, mode, show, store) \ + static struct qib_port_attr qib_port_attr_##name = \ + __ATTR(name, mode, show, store) + +struct qib_port_attr { + struct attribute attr; + ssize_t (*show)(struct qib_pportdata *, char *); + ssize_t (*store)(struct qib_pportdata *, const char *, size_t); +}; + +QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback); +QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override); +QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb, + store_hrtbt_enb); +QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL); +QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL); + +static struct attribute *port_default_attributes[] = { + &qib_port_attr_loopback.attr, + &qib_port_attr_led_override.attr, + &qib_port_attr_hrtbt_enable.attr, + &qib_port_attr_status.attr, + &qib_port_attr_status_str.attr, + NULL +}; + +static ssize_t qib_portattr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct qib_port_attr *pattr = + container_of(attr, struct qib_port_attr, attr); + struct qib_pportdata *ppd = + container_of(kobj, struct qib_pportdata, pport_kobj); + + return pattr->show(ppd, buf); +} + +static ssize_t qib_portattr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t len) +{ + struct qib_port_attr *pattr = + container_of(attr, struct qib_port_attr, attr); + struct qib_pportdata *ppd = + container_of(kobj, struct qib_pportdata, pport_kobj); + + return pattr->store(ppd, buf, len); +} + +static void qib_port_release(struct kobject *kobj) +{ + /* nothing to do since memory is freed by qib_free_devdata() */ +} + +static const struct sysfs_ops qib_port_ops = { + .show = qib_portattr_show, + .store = qib_portattr_store, +}; + +static struct kobj_type qib_port_ktype = { + .release = qib_port_release, + .sysfs_ops = &qib_port_ops, + .default_attrs = port_default_attributes +}; + +/* Start sl2vl */ + +#define QIB_SL2VL_ATTR(N) \ + static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \ + .attr = { .name = __stringify(N), .mode = 0444 }, \ + .sl = N \ + } + +struct qib_sl2vl_attr { + struct attribute attr; + int sl; +}; + +QIB_SL2VL_ATTR(0); +QIB_SL2VL_ATTR(1); +QIB_SL2VL_ATTR(2); +QIB_SL2VL_ATTR(3); +QIB_SL2VL_ATTR(4); +QIB_SL2VL_ATTR(5); +QIB_SL2VL_ATTR(6); +QIB_SL2VL_ATTR(7); +QIB_SL2VL_ATTR(8); +QIB_SL2VL_ATTR(9); +QIB_SL2VL_ATTR(10); +QIB_SL2VL_ATTR(11); +QIB_SL2VL_ATTR(12); +QIB_SL2VL_ATTR(13); +QIB_SL2VL_ATTR(14); +QIB_SL2VL_ATTR(15); + +static struct attribute *sl2vl_default_attributes[] = { + 
&qib_sl2vl_attr_0.attr, + &qib_sl2vl_attr_1.attr, + &qib_sl2vl_attr_2.attr, + &qib_sl2vl_attr_3.attr, + &qib_sl2vl_attr_4.attr, + &qib_sl2vl_attr_5.attr, + &qib_sl2vl_attr_6.attr, + &qib_sl2vl_attr_7.attr, + &qib_sl2vl_attr_8.attr, + &qib_sl2vl_attr_9.attr, + &qib_sl2vl_attr_10.attr, + &qib_sl2vl_attr_11.attr, + &qib_sl2vl_attr_12.attr, + &qib_sl2vl_attr_13.attr, + &qib_sl2vl_attr_14.attr, + &qib_sl2vl_attr_15.attr, + NULL +}; + +static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct qib_sl2vl_attr *sattr = + container_of(attr, struct qib_sl2vl_attr, attr); + struct qib_pportdata *ppd = + container_of(kobj, struct qib_pportdata, sl2vl_kobj); + struct qib_ibport *qibp = &ppd->ibport_data; + + return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]); +} + +static const struct sysfs_ops qib_sl2vl_ops = { + .show = sl2vl_attr_show, +}; + +static struct kobj_type qib_sl2vl_ktype = { + .release = qib_port_release, + .sysfs_ops = &qib_sl2vl_ops, + .default_attrs = sl2vl_default_attributes +}; + +/* End sl2vl */ + +/* Start diag_counters */ + +#define QIB_DIAGC_ATTR(N) \ + static struct qib_diagc_attr qib_diagc_attr_##N = { \ + .attr = { .name = __stringify(N), .mode = 0444 }, \ + .counter = offsetof(struct qib_ibport, n_##N) \ + } + +struct qib_diagc_attr { + struct attribute attr; + size_t counter; +}; + +QIB_DIAGC_ATTR(rc_resends); +QIB_DIAGC_ATTR(rc_acks); +QIB_DIAGC_ATTR(rc_qacks); +QIB_DIAGC_ATTR(rc_delayed_comp); +QIB_DIAGC_ATTR(seq_naks); +QIB_DIAGC_ATTR(rdma_seq); +QIB_DIAGC_ATTR(rnr_naks); +QIB_DIAGC_ATTR(other_naks); +QIB_DIAGC_ATTR(rc_timeouts); +QIB_DIAGC_ATTR(loop_pkts); +QIB_DIAGC_ATTR(pkt_drops); +QIB_DIAGC_ATTR(dmawait); +QIB_DIAGC_ATTR(unaligned); +QIB_DIAGC_ATTR(rc_dupreq); +QIB_DIAGC_ATTR(rc_seqnak); + +static struct attribute *diagc_default_attributes[] = { + &qib_diagc_attr_rc_resends.attr, + &qib_diagc_attr_rc_acks.attr, + &qib_diagc_attr_rc_qacks.attr, + &qib_diagc_attr_rc_delayed_comp.attr, + &qib_diagc_attr_seq_naks.attr, + &qib_diagc_attr_rdma_seq.attr, + &qib_diagc_attr_rnr_naks.attr, + &qib_diagc_attr_other_naks.attr, + &qib_diagc_attr_rc_timeouts.attr, + &qib_diagc_attr_loop_pkts.attr, + &qib_diagc_attr_pkt_drops.attr, + &qib_diagc_attr_dmawait.attr, + &qib_diagc_attr_unaligned.attr, + &qib_diagc_attr_rc_dupreq.attr, + &qib_diagc_attr_rc_seqnak.attr, + NULL +}; + +static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct qib_diagc_attr *dattr = + container_of(attr, struct qib_diagc_attr, attr); + struct qib_pportdata *ppd = + container_of(kobj, struct qib_pportdata, diagc_kobj); + struct qib_ibport *qibp = &ppd->ibport_data; + + return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter)); +} + +static const struct sysfs_ops qib_diagc_ops = { + .show = diagc_attr_show, +}; + +static struct kobj_type qib_diagc_ktype = { + .release = qib_port_release, + .sysfs_ops = &qib_diagc_ops, + .default_attrs = diagc_default_attributes +}; + +/* End diag_counters */ + +/* end of per-port file structures and support code */ + +/* + * Start of per-unit (or driver, in some cases, but replicated + * per unit) functions (these get a device *) + */ +static ssize_t show_rev(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct qib_ibdev *dev = + container_of(device, struct qib_ibdev, ibdev.dev); + + return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev); +} + +static ssize_t show_hca(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct 
qib_ibdev *dev = + container_of(device, struct qib_ibdev, ibdev.dev); + struct qib_devdata *dd = dd_from_dev(dev); + int ret; + + if (!dd->boardname) + ret = -EINVAL; + else + ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname); + return ret; +} + +static ssize_t show_version(struct device *device, + struct device_attribute *attr, char *buf) +{ + /* The string printed here is already newline-terminated. */ + return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version); +} + +static ssize_t show_boardversion(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct qib_ibdev *dev = + container_of(device, struct qib_ibdev, ibdev.dev); + struct qib_devdata *dd = dd_from_dev(dev); + + /* The string printed here is already newline-terminated. */ + return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion); +} + + +static ssize_t show_localbus_info(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct qib_ibdev *dev = + container_of(device, struct qib_ibdev, ibdev.dev); + struct qib_devdata *dd = dd_from_dev(dev); + + /* The string printed here is already newline-terminated. */ + return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info); +} + + +static ssize_t show_nctxts(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct qib_ibdev *dev = + container_of(device, struct qib_ibdev, ibdev.dev); + struct qib_devdata *dd = dd_from_dev(dev); + + /* Return the number of user ports (contexts) available. */ + return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts - + dd->first_user_ctxt); +} + +static ssize_t show_serial(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct qib_ibdev *dev = + container_of(device, struct qib_ibdev, ibdev.dev); + struct qib_devdata *dd = dd_from_dev(dev); + + buf[sizeof dd->serial] = '\0'; + memcpy(buf, dd->serial, sizeof dd->serial); + strcat(buf, "\n"); + return strlen(buf); +} + +static ssize_t store_chip_reset(struct device *device, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct qib_ibdev *dev = + container_of(device, struct qib_ibdev, ibdev.dev); + struct qib_devdata *dd = dd_from_dev(dev); + int ret; + + if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) { + ret = -EINVAL; + goto bail; + } + + ret = qib_reset_device(dd->unit); +bail: + return ret < 0 ? ret : count; +} + +static ssize_t show_logged_errs(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct qib_ibdev *dev = + container_of(device, struct qib_ibdev, ibdev.dev); + struct qib_devdata *dd = dd_from_dev(dev); + int idx, count; + + /* force consistency with actual EEPROM */ + if (qib_update_eeprom_log(dd) != 0) + return -ENXIO; + + count = 0; + for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { + count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", + dd->eep_st_errs[idx], + idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' '); + } + + return count; +} + +/* + * Dump tempsense regs. in decimal, to ease shell-scripts. 
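+ * (Per the format string below, regs 2 and 3 are printed as %02X hex;
+ * the remaining values are printed as signed decimal.)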
+ */ +static ssize_t show_tempsense(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct qib_ibdev *dev = + container_of(device, struct qib_ibdev, ibdev.dev); + struct qib_devdata *dd = dd_from_dev(dev); + int ret; + int idx; + u8 regvals[8]; + + ret = -ENXIO; + for (idx = 0; idx < 8; ++idx) { + if (idx == 6) + continue; + ret = dd->f_tempsense_rd(dd, idx); + if (ret < 0) + break; + regvals[idx] = ret; + } + if (idx == 8) + ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n", + *(signed char *)(regvals), + *(signed char *)(regvals + 1), + regvals[2], regvals[3], + *(signed char *)(regvals + 5), + *(signed char *)(regvals + 7)); + return ret; +} + +/* + * end of per-unit (or driver, in some cases, but replicated + * per unit) functions + */ + +/* start of per-unit file structures and support code */ +static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL); +static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); +static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL); +static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); +static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); +static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); +static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); +static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); +static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); + +static struct device_attribute *qib_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_hca_type, + &dev_attr_board_id, + &dev_attr_version, + &dev_attr_nctxts, + &dev_attr_serial, + &dev_attr_boardversion, + &dev_attr_logged_errors, + &dev_attr_tempsense, + &dev_attr_localbus_info, + &dev_attr_chip_reset, +}; + +int qib_create_port_files(struct ib_device *ibdev, u8 port_num, + struct kobject *kobj) +{ + struct qib_pportdata *ppd; + struct qib_devdata *dd = dd_from_ibdev(ibdev); + int ret; + + if (!port_num || port_num > dd->num_pports) { + qib_dev_err(dd, "Skipping infiniband class with " + "invalid port %u\n", port_num); + ret = -ENODEV; + goto bail; + } + ppd = &dd->pport[port_num - 1]; + + ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj, + "linkcontrol"); + if (ret) { + qib_dev_err(dd, "Skipping linkcontrol sysfs info, " + "(err %d) port %u\n", ret, port_num); + goto bail; + } + kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); + + ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj, + "sl2vl"); + if (ret) { + qib_dev_err(dd, "Skipping sl2vl sysfs info, " + "(err %d) port %u\n", ret, port_num); + goto bail_sl; + } + kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); + + ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj, + "diag_counters"); + if (ret) { + qib_dev_err(dd, "Skipping diag_counters sysfs info, " + "(err %d) port %u\n", ret, port_num); + goto bail_diagc; + } + kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); + + return 0; + +bail_diagc: + kobject_put(&ppd->sl2vl_kobj); +bail_sl: + kobject_put(&ppd->pport_kobj); +bail: + return ret; +} + +/* + * Register and create our files in /sys/class/infiniband. + */ +int qib_verbs_register_sysfs(struct qib_devdata *dd) +{ + struct ib_device *dev = &dd->verbs_dev.ibdev; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) { + ret = device_create_file(&dev->dev, qib_attributes[i]); + if (ret) + return ret; + } + + return 0; +} + +/* + * Unregister and remove our files in /sys/class/infiniband. 
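+ * This drops the per-port kobject references taken in
+ * qib_create_port_files().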
+ */
+void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
+{
+	struct qib_pportdata *ppd;
+	int i;
+
+	for (i = 0; i < dd->num_pports; i++) {
+		ppd = &dd->pport[i];
+		kobject_put(&ppd->pport_kobj);
+		kobject_put(&ppd->sl2vl_kobj);
+		kobject_put(&ppd->diagc_kobj);
+	}
+}
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
new file mode 100644
index 000000000000..6f31ca5039db
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * QLogic_IB "Two Wire Serial Interface" driver.
+ * Originally written for a not-quite-i2c serial eeprom, which is
+ * still used on some supported boards.  Later boards have added a
+ * variety of other uses, most board-specific, so the bit-banging
+ * part has been split off to this file, while the other parts
+ * have been moved to chip-specific files.
+ *
+ * We have also dropped all pretense of a fully generic (e.g. pretend
+ * we don't know whether '1' is the higher voltage) interface, as
+ * the restrictions of the generic i2c interface (e.g. no access from
+ * the driver itself) make it unsuitable for this use.
+ */
+
+#define READ_CMD 1
+#define WRITE_CMD 0
+
+/**
+ * i2c_wait_for_writes - wait for a write
+ * @dd: the qlogic_ib device
+ *
+ * We use this instead of udelay directly, so we can make sure
+ * that previous register writes have been flushed all the way
+ * to the chip.  Since we are delaying anyway, the cost doesn't
+ * hurt, and makes the bit twiddling more regular
+ */
+static void i2c_wait_for_writes(struct qib_devdata *dd)
+{
+	/*
+	 * implicit read of EXTStatus is as good as explicit
+	 * read of scratch, if all we want to do is flush
+	 * writes.
+	 */
+	dd->f_gpio_mod(dd, 0, 0, 0);
+	rmb(); /* inlined, so prevent compiler reordering */
+}
+
+/*
+ * QSFP modules are allowed to hold SCL low for 500uSec.
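+ * ("Clock stretching": the slave holds SCL low until it is ready, so
+ * the master must wait for SCL to actually rise before sampling.)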
+ * Allow twice that for "almost compliant" modules.
+ */
+#define SCL_WAIT_USEC 1000
+
+/* BUF_WAIT is the time the bus must be free between a STOP or ACK and
+ * the next START.  Should be 20, but some chips need more.
+ */
+#define TWSI_BUF_WAIT_USEC 60
+
+static void scl_out(struct qib_devdata *dd, u8 bit)
+{
+	u32 mask;
+
+	udelay(1);
+
+	mask = 1UL << dd->gpio_scl_num;
+
+	/* SCL is meant to be open-drain, so never set "OUT", just DIR */
+	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+	/*
+	 * Allow for slow slaves by simply delaying for the falling edge,
+	 * and sampling on the rise.
+	 */
+	if (!bit)
+		udelay(2);
+	else {
+		int rise_usec;
+		for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
+			if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
+				break;
+			udelay(2);
+		}
+		if (rise_usec <= 0)
+			qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
+				    SCL_WAIT_USEC);
+	}
+	i2c_wait_for_writes(dd);
+}
+
+static void sda_out(struct qib_devdata *dd, u8 bit)
+{
+	u32 mask;
+
+	mask = 1UL << dd->gpio_sda_num;
+
+	/* SDA is meant to be open-drain, so never set "OUT", just DIR */
+	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+	i2c_wait_for_writes(dd);
+	udelay(2);
+}
+
+static u8 sda_in(struct qib_devdata *dd, int wait)
+{
+	int bnum;
+	u32 read_val, mask;
+
+	bnum = dd->gpio_sda_num;
+	mask = (1UL << bnum);
+	/* SDA is meant to be open-drain, so never set "OUT", just DIR */
+	dd->f_gpio_mod(dd, 0, 0, mask);
+	read_val = dd->f_gpio_mod(dd, 0, 0, 0);
+	if (wait)
+		i2c_wait_for_writes(dd);
+	return (read_val & mask) >> bnum;
+}
+
+/**
+ * i2c_ackrcv - see if ack following write is true
+ * @dd: the qlogic_ib device
+ */
+static int i2c_ackrcv(struct qib_devdata *dd)
+{
+	u8 ack_received;
+
+	/* AT ENTRY SCL = LOW */
+	/* change direction, ignore data */
+	ack_received = sda_in(dd, 1);
+	scl_out(dd, 1);
+	ack_received = sda_in(dd, 1) == 0;
+	scl_out(dd, 0);
+	return ack_received;
+}
+
+static void stop_cmd(struct qib_devdata *dd);
+
+/**
+ * rd_byte - read a byte, sending STOP on last, else ACK
+ * @dd: the qlogic_ib device
+ *
+ * Returns byte shifted out of device
+ */
+static int rd_byte(struct qib_devdata *dd, int last)
+{
+	int bit_cntr, data;
+
+	data = 0;
+
+	for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
+		data <<= 1;
+		scl_out(dd, 1);
+		data |= sda_in(dd, 0);
+		scl_out(dd, 0);
+	}
+	if (last) {
+		scl_out(dd, 1);
+		stop_cmd(dd);
+	} else {
+		sda_out(dd, 0);
+		scl_out(dd, 1);
+		scl_out(dd, 0);
+		sda_out(dd, 1);
+	}
+	return data;
+}
+
+/**
+ * wr_byte - write a byte, one bit at a time
+ * @dd: the qlogic_ib device
+ * @data: the byte to write
+ *
+ * Returns 0 if we got the following ack, otherwise 1
+ */
+static int wr_byte(struct qib_devdata *dd, u8 data)
+{
+	int bit_cntr;
+	u8 bit;
+
+	for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
+		bit = (data >> bit_cntr) & 1;
+		sda_out(dd, bit);
+		scl_out(dd, 1);
+		scl_out(dd, 0);
+	}
+	return (!i2c_ackrcv(dd)) ? 1 : 0;
+}
+
+/*
+ * issue TWSI start sequence:
+ * (both clock/data high, clock high, data low while clock is high)
+ */
+static void start_seq(struct qib_devdata *dd)
+{
+	sda_out(dd, 1);
+	scl_out(dd, 1);
+	sda_out(dd, 0);
+	udelay(1);
+	scl_out(dd, 0);
+}
+
+/**
+ * stop_seq - transmit the stop sequence
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_seq(struct qib_devdata *dd)
+{
+	scl_out(dd, 0);
+	sda_out(dd, 0);
+	scl_out(dd, 1);
+	sda_out(dd, 1);
+}
+
+/**
+ * stop_cmd - transmit the stop condition
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_cmd(struct qib_devdata *dd)
+{
+	stop_seq(dd);
+	udelay(TWSI_BUF_WAIT_USEC);
+}
+
+/**
+ * qib_twsi_reset - reset I2C communication
+ * @dd: the qlogic_ib device
+ */
+
+int qib_twsi_reset(struct qib_devdata *dd)
+{
+	int clock_cycles_left = 9;
+	int was_high = 0;
+	u32 pins, mask;
+
+	/* Both SCL and SDA should be high.  If not, there
+	 * is something wrong.
+	 */
+	mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
+
+	/*
+	 * Force pins to desired innocuous state.
+	 * This is the default power-on state with out=0 and dir=0,
+	 * so tri-stated and should be floating high (barring HW problems).
+	 */
+	dd->f_gpio_mod(dd, 0, 0, mask);
+
+	/*
+	 * Clock nine times to get all listeners into a sane state.
+	 * If SDA does not go high at any point, we are wedged.
+	 * One vendor recommends then issuing START followed by STOP.
+	 * We cannot use our "normal" functions to do that, because
+	 * if SCL drops between them, another vendor's part will
+	 * wedge, dropping SDA and keeping it low forever, at the end of
+	 * the next transaction (even if it was not the device addressed).
+	 * So our START and STOP take place with SCL held high.
+	 */
+	while (clock_cycles_left--) {
+		scl_out(dd, 0);
+		scl_out(dd, 1);
+		/* Note if SDA is high, but keep clocking to sync slave */
+		was_high |= sda_in(dd, 0);
+	}
+
+	if (was_high) {
+		/*
+		 * We saw a high, which we hope means the slave is sync'd.
+		 * Issue START, STOP, pause for T_BUF.
+		 */
+
+		pins = dd->f_gpio_mod(dd, 0, 0, 0);
+		if ((pins & mask) != mask)
+			qib_dev_err(dd, "GPIO pins not at rest: %d\n",
+				    pins & mask);
+		/* Drop SDA to issue START */
+		udelay(1); /* Guarantee .6 uSec setup */
+		sda_out(dd, 0);
+		udelay(1); /* Guarantee .6 uSec hold */
+		/* At this point, SCL is high, SDA low.  Raise SDA for STOP */
+		sda_out(dd, 1);
+		udelay(TWSI_BUF_WAIT_USEC);
+	}
+
+	return !was_high;
+}
+
+#define QIB_TWSI_START 0x100
+#define QIB_TWSI_STOP 0x200
+
+/* Write byte to TWSI, optionally prefixed with START or suffixed with
+ * STOP.
+ * Returns 0 if OK (ACK received), else != 0
+ */
+static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
+{
+	int ret = 1;
+	if (flags & QIB_TWSI_START)
+		start_seq(dd);
+
+	ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
+
+	if (flags & QIB_TWSI_STOP)
+		stop_cmd(dd);
+	return ret;
+}
+
+/* Added functionality for IBA7220-based cards */
+#define QIB_TEMP_DEV 0x98
+
+/*
+ * qib_twsi_blk_rd
+ * Formerly called qib_eeprom_internal_read, and only used for eeprom,
+ * but now the general interface for data transfer from twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as
+ * address within device.
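+ * (For the legacy part the device code thus doubles as the address,
+ * hence the (addr << 1) | READ_CMD encoding in the code below.)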
+ * On all other devices found on boards handled by this driver, the
+ * device is followed by a one-byte "address" which selects the
+ * "register" or "offset" within the device from which data should
+ * be read.
+ */
+int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
+		    void *buffer, int len)
+{
+	int ret;
+	u8 *bp = buffer;
+
+	ret = 1;
+
+	if (dev == QIB_TWSI_NO_DEV) {
+		/* legacy not-really-I2C */
+		addr = (addr << 1) | READ_CMD;
+		ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
+	} else {
+		/* Actual I2C */
+		ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
+		if (ret) {
+			stop_cmd(dd);
+			ret = 1;
+			goto bail;
+		}
+		/*
+		 * SFF spec claims we do _not_ stop after the addr
+		 * but simply issue a start with the "read" dev-addr.
+		 * Since we are implicitly waiting for ACK here,
+		 * we need t_buf (nominally 20uSec) before that start,
+		 * and cannot rely on the delay built into the STOP
+		 */
+		ret = qib_twsi_wr(dd, addr, 0);
+		udelay(TWSI_BUF_WAIT_USEC);
+
+		if (ret) {
+			qib_dev_err(dd,
+				"Failed to write interface read addr %02X\n",
+				addr);
+			ret = 1;
+			goto bail;
+		}
+		ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
+	}
+	if (ret) {
+		stop_cmd(dd);
+		ret = 1;
+		goto bail;
+	}
+
+	/*
+	 * Block devices keep clocking data out as long as we ack,
+	 * automatically incrementing the address.  Some have "pages"
+	 * whose boundaries will not be crossed, but the handling
+	 * of these is left to the caller, who is in a better
+	 * position to know.
+	 */
+	while (len-- > 0) {
+		/*
+		 * Get and store data, sending ACK if length remaining,
+		 * else STOP
+		 */
+		*bp++ = rd_byte(dd, !len);
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/*
+ * qib_twsi_blk_wr
+ * Formerly called qib_eeprom_internal_write, and only used for eeprom,
+ * but now the general interface for data transfer to twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as
+ * address within device.  On all other devices found on boards handled
+ * by this driver, the device is followed by a one-byte "address" which
+ * selects the "register" or "offset" within the device to which data
+ * should be written.
+ */
+int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+		    const void *buffer, int len)
+{
+	int sub_len;
+	const u8 *bp = buffer;
+	int max_wait_time, i;
+	int ret;
+	ret = 1;
+
+	while (len > 0) {
+		if (dev == QIB_TWSI_NO_DEV) {
+			if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
+					QIB_TWSI_START)) {
+				goto failed_write;
+			}
+		} else {
+			/* Real I2C */
+			if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
+				goto failed_write;
+			ret = qib_twsi_wr(dd, addr, 0);
+			if (ret) {
+				qib_dev_err(dd, "Failed to write interface"
+					    " write addr %02X\n", addr);
+				goto failed_write;
+			}
+		}
+
+		sub_len = min(len, 4);
+		addr += sub_len;
+		len -= sub_len;
+
+		for (i = 0; i < sub_len; i++)
+			if (qib_twsi_wr(dd, *bp++, 0))
+				goto failed_write;
+
+		stop_cmd(dd);
+
+		/*
+		 * Wait for write complete by waiting for a successful
+		 * read (the chip replies with a zero after the write
+		 * cmd completes, and before it writes to the eeprom).
+		 * The startcmd for the read will fail the ack until
+		 * the writes have completed.  We do this inline to avoid
+		 * the debug prints that are in the real read routine
+		 * if the startcmd fails.
+		 * We also use the proper device address, so it doesn't matter
+		 * whether we have real eeprom_dev.  Legacy likes any address.
+ */ + max_wait_time = 100; + while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) { + stop_cmd(dd); + if (!--max_wait_time) + goto failed_write; + } + /* now read (and ignore) the resulting byte */ + rd_byte(dd, 1); + } + + ret = 0; + goto bail; + +failed_write: + stop_cmd(dd); + ret = 1; + +bail: + return ret; +} diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c new file mode 100644 index 000000000000..f7eb1ddff5f3 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_tx.c @@ -0,0 +1,557 @@ +/* + * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/spinlock.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> + +#include "qib.h" + +static unsigned qib_hol_timeout_ms = 3000; +module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO); +MODULE_PARM_DESC(hol_timeout_ms, + "duration of user app suspension after link failure"); + +unsigned qib_sdma_fetch_arb = 1; +module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO); +MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration"); + +/** + * qib_disarm_piobufs - cancel a range of PIO buffers + * @dd: the qlogic_ib device + * @first: the first PIO buffer to cancel + * @cnt: the number of PIO buffers to cancel + * + * Cancel a range of PIO buffers. Used at user process close, + * in case it died while writing to a PIO buffer. + */ +void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt) +{ + unsigned long flags; + unsigned i; + unsigned last; + + last = first + cnt; + spin_lock_irqsave(&dd->pioavail_lock, flags); + for (i = first; i < last; i++) { + __clear_bit(i, dd->pio_need_disarm); + dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)); + } + spin_unlock_irqrestore(&dd->pioavail_lock, flags); +} + +/* + * This is called by a user process when it sees the DISARM_BUFS event + * bit is set. 
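+ * It clears the event bit for every subcontext, then disarms any of the
+ * context's send buffers that were flagged in pio_need_disarm by the
+ * error interrupt path.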
+ */ +int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd) +{ + struct qib_devdata *dd = rcd->dd; + unsigned i; + unsigned last; + unsigned n = 0; + + last = rcd->pio_base + rcd->piocnt; + /* + * Don't need uctxt_lock here, since user has called in to us. + * Clear at start in case more interrupts set bits while we + * are disarming + */ + if (rcd->user_event_mask) { + /* + * subctxt_cnt is 0 if not shared, so do base + * separately, first, then remaining subctxt, if any + */ + clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]); + for (i = 1; i < rcd->subctxt_cnt; i++) + clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, + &rcd->user_event_mask[i]); + } + spin_lock_irq(&dd->pioavail_lock); + for (i = rcd->pio_base; i < last; i++) { + if (__test_and_clear_bit(i, dd->pio_need_disarm)) { + n++; + dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i)); + } + } + spin_unlock_irq(&dd->pioavail_lock); + return 0; +} + +static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i) +{ + struct qib_pportdata *ppd; + unsigned pidx; + + for (pidx = 0; pidx < dd->num_pports; pidx++) { + ppd = dd->pport + pidx; + if (i >= ppd->sdma_state.first_sendbuf && + i < ppd->sdma_state.last_sendbuf) + return ppd; + } + return NULL; +} + +/* + * Return true if send buffer is being used by a user context. + * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect + */ +static int find_ctxt(struct qib_devdata *dd, unsigned bufn) +{ + struct qib_ctxtdata *rcd; + unsigned ctxt; + int ret = 0; + + spin_lock(&dd->uctxt_lock); + for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { + rcd = dd->rcd[ctxt]; + if (!rcd || bufn < rcd->pio_base || + bufn >= rcd->pio_base + rcd->piocnt) + continue; + if (rcd->user_event_mask) { + int i; + /* + * subctxt_cnt is 0 if not shared, so do base + * separately, first, then remaining subctxt, if any + */ + set_bit(_QIB_EVENT_DISARM_BUFS_BIT, + &rcd->user_event_mask[0]); + for (i = 1; i < rcd->subctxt_cnt; i++) + set_bit(_QIB_EVENT_DISARM_BUFS_BIT, + &rcd->user_event_mask[i]); + } + ret = 1; + break; + } + spin_unlock(&dd->uctxt_lock); + + return ret; +} + +/* + * Disarm a set of send buffers. If the buffer might be actively being + * written to, mark the buffer to be disarmed later when it is not being + * written to. + * + * This should only be called from the IRQ error handler. + */ +void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask, + unsigned cnt) +{ + struct qib_pportdata *ppd, *pppd[dd->num_pports]; + unsigned i; + unsigned long flags; + + for (i = 0; i < dd->num_pports; i++) + pppd[i] = NULL; + + for (i = 0; i < cnt; i++) { + int which; + if (!test_bit(i, mask)) + continue; + /* + * If the buffer is owned by the DMA hardware, + * reset the DMA engine. + */ + ppd = is_sdma_buf(dd, i); + if (ppd) { + pppd[ppd->port] = ppd; + continue; + } + /* + * If the kernel is writing the buffer or the buffer is + * owned by a user process, we can't clear it yet. 
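+		 * Flag it in pio_need_disarm instead; the disarm then happens
+		 * in qib_sendbuf_done() when the kernel write finishes, or in
+		 * qib_disarm_piobufs_ifneeded() when the user calls back in.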
+		 */
+		spin_lock_irqsave(&dd->pioavail_lock, flags);
+		if (test_bit(i, dd->pio_writing) ||
+		    (!test_bit(i << 1, dd->pioavailkernel) &&
+		     find_ctxt(dd, i))) {
+			__set_bit(i, dd->pio_need_disarm);
+			which = 0;
+		} else {
+			which = 1;
+			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
+		}
+		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+	}
+
+	/* do cancel_sends once per port that had sdma piobufs in error */
+	for (i = 0; i < dd->num_pports; i++)
+		if (pppd[i])
+			qib_cancel_sends(pppd[i]);
+}
+
+/**
+ * update_send_bufs - update shadow copy of the PIO availability map
+ * @dd: the qlogic_ib device
+ *
+ * called whenever our local copy indicates we have run out of send buffers
+ */
+static void update_send_bufs(struct qib_devdata *dd)
+{
+	unsigned long flags;
+	unsigned i;
+	const unsigned piobregs = dd->pioavregs;
+
+	/*
+	 * If the generation (check) bits have changed, then we update the
+	 * busy bit for the corresponding PIO buffer.  This algorithm will
+	 * modify positions to the value they already have in some cases
+	 * (i.e., no change), but it's faster than changing only the bits
+	 * that have changed.
+	 *
+	 * We would like to do this atomically, to avoid spinlocks in the
+	 * critical send path, but that's not really possible, given the
+	 * type of changes, and that this routine could be called on
+	 * multiple CPUs simultaneously, so we lock in this routine only,
+	 * to avoid conflicting updates; all we change is the shadow, and
+	 * it's a single 64 bit memory location, so by definition the update
+	 * is atomic in terms of what other CPUs can see in testing the
+	 * bits.  The spin_lock overhead isn't too bad, since it only
+	 * happens when all buffers are in use, so only cpu overhead, not
+	 * latency or bandwidth is affected.
+	 */
+	if (!dd->pioavailregs_dma)
+		return;
+	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	for (i = 0; i < piobregs; i++) {
+		u64 pchbusy, pchg, piov, pnew;
+
+		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
+		pchg = dd->pioavailkernel[i] &
+			~(dd->pioavailshadow[i] ^ piov);
+		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
+		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
+			pnew = dd->pioavailshadow[i] & ~pchbusy;
+			pnew |= piov & pchbusy;
+			dd->pioavailshadow[i] = pnew;
+		}
+	}
+	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/*
+ * Debugging code and stats updates if no pio buffers available.
+ */
+static noinline void no_send_bufs(struct qib_devdata *dd)
+{
+	dd->upd_pio_shadow = 1;
+
+	/* not atomic, but if we lose a stat count in a while, that's OK */
+	qib_stats.sps_nopiobufs++;
+}
+
+/*
+ * Common code for normal driver send buffer allocation, and reserved
+ * allocation.
+ *
+ * Do appropriate marking as busy, etc.
+ * Returns buffer pointer if one is found, otherwise NULL.
+ */
+u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
+				  u32 first, u32 last)
+{
+	unsigned i, j, updated = 0;
+	unsigned nbufs;
+	unsigned long flags;
+	unsigned long *shadow = dd->pioavailshadow;
+	u32 __iomem *buf;
+
+	if (!(dd->flags & QIB_PRESENT))
+		return NULL;
+
+	nbufs = last - first + 1; /* number in range to check */
+	if (dd->upd_pio_shadow) {
+		/*
+		 * Minor optimization.  If we had no buffers on last call,
+		 * start out by doing the update; continue and do the scan
+		 * even if no buffers were updated, to be paranoid.
+		 */
+		update_send_bufs(dd);
+		updated++;
+	}
+	i = first;
+rescan:
+	/*
+	 * While test_and_set_bit() is atomic, we do that and then the
+	 * change_bit(), and the pair is not.
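+	 * (Here both are the non-atomic __ variants, and both happen under
+	 * pioavail_lock, so only lock-free readers of the shadow can race.)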
+	 * See if this is the cause of the remaining armlaunch errors.
+	 */
+	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	for (j = 0; j < nbufs; j++, i++) {
+		if (i > last)
+			i = first;
+		if (__test_and_set_bit((2 * i) + 1, shadow))
+			continue;
+		/* flip generation bit */
+		__change_bit(2 * i, shadow);
+		/* remember that the buffer can be written to now */
+		__set_bit(i, dd->pio_writing);
+		break;
+	}
+	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+
+	if (j == nbufs) {
+		if (!updated) {
+			/*
+			 * First time through; shadow exhausted, but there may
+			 * be buffers available, try an update and then rescan.
+			 */
+			update_send_bufs(dd);
+			updated++;
+			i = first;
+			goto rescan;
+		}
+		no_send_bufs(dd);
+		buf = NULL;
+	} else {
+		if (i < dd->piobcnt2k)
+			buf = (u32 __iomem *)(dd->pio2kbase +
+				i * dd->palign);
+		else
+			buf = (u32 __iomem *)(dd->pio4kbase +
+				(i - dd->piobcnt2k) * dd->align4k);
+		if (pbufnum)
+			*pbufnum = i;
+		dd->upd_pio_shadow = 0;
+	}
+
+	return buf;
+}
+
+/*
+ * Record that the caller is finished writing to the buffer so we don't
+ * disarm it while it is being written, and disarm it now if needed.
+ */
+void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	__clear_bit(n, dd->pio_writing);
+	if (__test_and_clear_bit(n, dd->pio_need_disarm))
+		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
+	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/**
+ * qib_chg_pioavailkernel - change which send buffers are available for kernel
+ * @dd: the qlogic_ib device
+ * @start: the starting send buffer number
+ * @len: the number of send buffers
+ * @avail: true if the buffers are available for kernel use, false otherwise
+ * @rcd: the context, passed through to f_txchk_change()
+ */
+void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
+			    unsigned len, u32 avail, struct qib_ctxtdata *rcd)
+{
+	unsigned long flags;
+	unsigned end;
+	unsigned ostart = start;
+
+	/* There are two bits per send buffer (busy and generation) */
+	start *= 2;
+	end = start + len * 2;
+
+	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	/* Set or clear the busy bit in the shadow. */
+	while (start < end) {
+		if (avail) {
+			unsigned long dma;
+			int i;
+
+			/*
+			 * The BUSY bit will never be set, because we disarm
+			 * the user buffers before we hand them back to the
+			 * kernel.  We do have to make sure the generation
+			 * bit is set correctly in shadow, since it could
+			 * have changed many times while allocated to user.
+			 * We can't use the bitmap functions on the full
+			 * dma array because it is always little-endian, so
+			 * we have to flip to host-order first.
+			 * BITS_PER_LONG is slightly wrong, since it's
+			 * always 64 bits per register in chip...
+			 * We only work on 64 bit kernels, so that's OK.
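+			 * (Per buffer n, the generation/check bit lives at
+			 * 2*n and the busy bit at 2*n + 1, matching the
+			 * shadow layout used in qib_getsendbuf_range().)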
+ */ + i = start / BITS_PER_LONG; + __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start, + dd->pioavailshadow); + dma = (unsigned long) + le64_to_cpu(dd->pioavailregs_dma[i]); + if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT + + start) % BITS_PER_LONG, &dma)) + __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT + + start, dd->pioavailshadow); + else + __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT + + start, dd->pioavailshadow); + __set_bit(start, dd->pioavailkernel); + } else { + __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT, + dd->pioavailshadow); + __clear_bit(start, dd->pioavailkernel); + } + start += 2; + } + + spin_unlock_irqrestore(&dd->pioavail_lock, flags); + + dd->f_txchk_change(dd, ostart, len, avail, rcd); +} + +/* + * Flush all sends that might be in the ready to send state, as well as any + * that are in the process of being sent. Used whenever we need to be + * sure the send side is idle. Cleans up all buffer state by canceling + * all pio buffers, and issuing an abort, which cleans up anything in the + * launch fifo. The cancel is superfluous on some chip versions, but + * it's safer to always do it. + * PIOAvail bits are updated by the chip as if a normal send had happened. + */ +void qib_cancel_sends(struct qib_pportdata *ppd) +{ + struct qib_devdata *dd = ppd->dd; + struct qib_ctxtdata *rcd; + unsigned long flags; + unsigned ctxt; + unsigned i; + unsigned last; + + /* + * Tell PSM to disarm buffers again before trying to reuse them. + * We need to be sure the rcd doesn't change out from under us + * while we do so. We hold the two locks sequentially. We might + * needlessly set some need_disarm bits as a result, if the + * context is closed after we release the uctxt_lock, but that's + * fairly benign, and safer than nesting the locks. + */ + for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { + spin_lock_irqsave(&dd->uctxt_lock, flags); + rcd = dd->rcd[ctxt]; + if (rcd && rcd->ppd == ppd) { + last = rcd->pio_base + rcd->piocnt; + if (rcd->user_event_mask) { + /* + * subctxt_cnt is 0 if not shared, so do base + * separately, first, then remaining subctxt, + * if any + */ + set_bit(_QIB_EVENT_DISARM_BUFS_BIT, + &rcd->user_event_mask[0]); + for (i = 1; i < rcd->subctxt_cnt; i++) + set_bit(_QIB_EVENT_DISARM_BUFS_BIT, + &rcd->user_event_mask[i]); + } + i = rcd->pio_base; + spin_unlock_irqrestore(&dd->uctxt_lock, flags); + spin_lock_irqsave(&dd->pioavail_lock, flags); + for (; i < last; i++) + __set_bit(i, dd->pio_need_disarm); + spin_unlock_irqrestore(&dd->pioavail_lock, flags); + } else + spin_unlock_irqrestore(&dd->uctxt_lock, flags); + } + + if (!(dd->flags & QIB_HAS_SEND_DMA)) + dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL | + QIB_SENDCTRL_FLUSH); +} + +/* + * Force an update of in-memory copy of the pioavail registers, when + * needed for any of a variety of reasons. + * If already off, this routine is a nop, on the assumption that the + * caller (or set of callers) will "do the right thing". + * This is a per-device operation, so just the first port. + */ +void qib_force_pio_avail_update(struct qib_devdata *dd) +{ + dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); +} + +void qib_hol_down(struct qib_pportdata *ppd) +{ + /* + * Cancel sends when the link goes DOWN so that we aren't doing it + * at INIT when we might be trying to send SMI packets. + */ + if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) + qib_cancel_sends(ppd); +} + +/* + * Link is at INIT. + * We start the HoL timer so we can detect stuck packets blocking SMP replies. 
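+ * ("HoL" is head-of-line blocking: one stuck packet at the head of the
+ * send queue blocks everything behind it, including the SMP replies
+ * needed to bring the link fully up.)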
+ * Timer may already be running, so use mod_timer, not add_timer. + */ +void qib_hol_init(struct qib_pportdata *ppd) +{ + if (ppd->hol_state != QIB_HOL_INIT) { + ppd->hol_state = QIB_HOL_INIT; + mod_timer(&ppd->hol_timer, + jiffies + msecs_to_jiffies(qib_hol_timeout_ms)); + } +} + +/* + * Link is up, continue any user processes, and ensure timer + * is a nop, if running. Let timer keep running, if set; it + * will nop when it sees the link is up. + */ +void qib_hol_up(struct qib_pportdata *ppd) +{ + ppd->hol_state = QIB_HOL_UP; +} + +/* + * This is only called via the timer. + */ +void qib_hol_event(unsigned long opaque) +{ + struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; + + /* If hardware error, etc, skip. */ + if (!(ppd->dd->flags & QIB_INITTED)) + return; + + if (ppd->hol_state != QIB_HOL_UP) { + /* + * Try to flush sends in case a stuck packet is blocking + * SMP replies. + */ + qib_hol_down(ppd); + mod_timer(&ppd->hol_timer, + jiffies + msecs_to_jiffies(qib_hol_timeout_ms)); + } +} diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c new file mode 100644 index 000000000000..6c7fe78cca64 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -0,0 +1,555 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "qib.h" + +/* cut down ridiculously long IB macro names */ +#define OP(x) IB_OPCODE_UC_##x + +/** + * qib_make_uc_req - construct a request packet (SEND, RDMA write) + * @qp: a pointer to the QP + * + * Return 1 if constructed; otherwise, return 0. + */ +int qib_make_uc_req(struct qib_qp *qp) +{ + struct qib_other_headers *ohdr; + struct qib_swqe *wqe; + unsigned long flags; + u32 hwords; + u32 bth0; + u32 len; + u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); + int ret = 0; + + spin_lock_irqsave(&qp->s_lock, flags); + + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { + if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) + goto bail; + /* We are in the error state, flush the work request. */ + if (qp->s_last == qp->s_head) + goto bail; + /* If DMAs are in progress, we can't flush immediately. 
*/ + if (atomic_read(&qp->s_dma_busy)) { + qp->s_flags |= QIB_S_WAIT_DMA; + goto bail; + } + wqe = get_swqe_ptr(qp, qp->s_last); + qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); + goto done; + } + + ohdr = &qp->s_hdr.u.oth; + if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) + ohdr = &qp->s_hdr.u.l.oth; + + /* header size in 32-bit words LRH+BTH = (8+12)/4. */ + hwords = 5; + bth0 = 0; + + /* Get the next send request. */ + wqe = get_swqe_ptr(qp, qp->s_cur); + qp->s_wqe = NULL; + switch (qp->s_state) { + default: + if (!(ib_qib_state_ops[qp->state] & + QIB_PROCESS_NEXT_SEND_OK)) + goto bail; + /* Check if send work queue is empty. */ + if (qp->s_cur == qp->s_head) + goto bail; + /* + * Start a new request. + */ + wqe->psn = qp->s_next_psn; + qp->s_psn = qp->s_next_psn; + qp->s_sge.sge = wqe->sg_list[0]; + qp->s_sge.sg_list = wqe->sg_list + 1; + qp->s_sge.num_sge = wqe->wr.num_sge; + qp->s_sge.total_len = wqe->length; + len = wqe->length; + qp->s_len = len; + switch (wqe->wr.opcode) { + case IB_WR_SEND: + case IB_WR_SEND_WITH_IMM: + if (len > pmtu) { + qp->s_state = OP(SEND_FIRST); + len = pmtu; + break; + } + if (wqe->wr.opcode == IB_WR_SEND) + qp->s_state = OP(SEND_ONLY); + else { + qp->s_state = + OP(SEND_ONLY_WITH_IMMEDIATE); + /* Immediate data comes after the BTH */ + ohdr->u.imm_data = wqe->wr.ex.imm_data; + hwords += 1; + } + if (wqe->wr.send_flags & IB_SEND_SOLICITED) + bth0 |= IB_BTH_SOLICITED; + qp->s_wqe = wqe; + if (++qp->s_cur >= qp->s_size) + qp->s_cur = 0; + break; + + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + ohdr->u.rc.reth.vaddr = + cpu_to_be64(wqe->wr.wr.rdma.remote_addr); + ohdr->u.rc.reth.rkey = + cpu_to_be32(wqe->wr.wr.rdma.rkey); + ohdr->u.rc.reth.length = cpu_to_be32(len); + hwords += sizeof(struct ib_reth) / 4; + if (len > pmtu) { + qp->s_state = OP(RDMA_WRITE_FIRST); + len = pmtu; + break; + } + if (wqe->wr.opcode == IB_WR_RDMA_WRITE) + qp->s_state = OP(RDMA_WRITE_ONLY); + else { + qp->s_state = + OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); + /* Immediate data comes after the RETH */ + ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; + hwords += 1; + if (wqe->wr.send_flags & IB_SEND_SOLICITED) + bth0 |= IB_BTH_SOLICITED; + } + qp->s_wqe = wqe; + if (++qp->s_cur >= qp->s_size) + qp->s_cur = 0; + break; + + default: + goto bail; + } + break; + + case OP(SEND_FIRST): + qp->s_state = OP(SEND_MIDDLE); + /* FALLTHROUGH */ + case OP(SEND_MIDDLE): + len = qp->s_len; + if (len > pmtu) { + len = pmtu; + break; + } + if (wqe->wr.opcode == IB_WR_SEND) + qp->s_state = OP(SEND_LAST); + else { + qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); + /* Immediate data comes after the BTH */ + ohdr->u.imm_data = wqe->wr.ex.imm_data; + hwords += 1; + } + if (wqe->wr.send_flags & IB_SEND_SOLICITED) + bth0 |= IB_BTH_SOLICITED; + qp->s_wqe = wqe; + if (++qp->s_cur >= qp->s_size) + qp->s_cur = 0; + break; + + case OP(RDMA_WRITE_FIRST): + qp->s_state = OP(RDMA_WRITE_MIDDLE); + /* FALLTHROUGH */ + case OP(RDMA_WRITE_MIDDLE): + len = qp->s_len; + if (len > pmtu) { + len = pmtu; + break; + } + if (wqe->wr.opcode == IB_WR_RDMA_WRITE) + qp->s_state = OP(RDMA_WRITE_LAST); + else { + qp->s_state = + OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); + /* Immediate data comes after the BTH */ + ohdr->u.imm_data = wqe->wr.ex.imm_data; + hwords += 1; + if (wqe->wr.send_flags & IB_SEND_SOLICITED) + bth0 |= IB_BTH_SOLICITED; + } + qp->s_wqe = wqe; + if (++qp->s_cur >= qp->s_size) + qp->s_cur = 0; + break; + } + qp->s_len -= len; + qp->s_hdrwords = hwords; + qp->s_cur_sge = &qp->s_sge; + qp->s_cur_size = len; + 
+	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
+			    qp->s_next_psn++ & QIB_PSN_MASK);
+done:
+	ret = 1;
+	goto unlock;
+
+bail:
+	qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+	return ret;
+}
+
+/**
+ * qib_uc_rcv - handle an incoming UC packet
+ * @ibp: the port the packet came in on
+ * @hdr: the header of the packet
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the length of the packet
+ * @qp: the QP for this packet.
+ *
+ * This is called from qib_qp_rcv() to process an incoming UC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+	struct qib_other_headers *ohdr;
+	unsigned long flags;
+	u32 opcode;
+	u32 hdrsize;
+	u32 psn;
+	u32 pad;
+	struct ib_wc wc;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+	struct ib_reth *reth;
+	int ret;
+
+	/* Check for GRH */
+	if (!has_grh) {
+		ohdr = &hdr->u.oth;
+		hdrsize = 8 + 12;       /* LRH + BTH */
+	} else {
+		ohdr = &hdr->u.l.oth;
+		hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
+	}
+
+	opcode = be32_to_cpu(ohdr->bth[0]);
+	spin_lock_irqsave(&qp->s_lock, flags);
+	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+		goto sunlock;
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	psn = be32_to_cpu(ohdr->bth[2]);
+	opcode >>= 24;
+	memset(&wc, 0, sizeof wc);
+
+	/* Prevent simultaneous processing after APM on different CPUs */
+	spin_lock(&qp->r_lock);
+
+	/* Compare the PSN versus the expected PSN. */
+	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
+		/*
+		 * Handle a sequence error.
+		 * Silently drop any current message.
+		 */
+		qp->r_psn = psn;
+inv:
+		if (qp->r_state == OP(SEND_FIRST) ||
+		    qp->r_state == OP(SEND_MIDDLE)) {
+			set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+			qp->r_sge.num_sge = 0;
+		} else
+			while (qp->r_sge.num_sge) {
+				atomic_dec(&qp->r_sge.sge.mr->refcount);
+				if (--qp->r_sge.num_sge)
+					qp->r_sge.sge = *qp->r_sge.sg_list++;
+			}
+		qp->r_state = OP(SEND_LAST);
+		switch (opcode) {
+		case OP(SEND_FIRST):
+		case OP(SEND_ONLY):
+		case OP(SEND_ONLY_WITH_IMMEDIATE):
+			goto send_first;
+
+		case OP(RDMA_WRITE_FIRST):
+		case OP(RDMA_WRITE_ONLY):
+		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+			goto rdma_first;
+
+		default:
+			goto drop;
+		}
+	}
+
+	/* Check for opcode sequence errors. */
+	switch (qp->r_state) {
+	case OP(SEND_FIRST):
+	case OP(SEND_MIDDLE):
+		if (opcode == OP(SEND_MIDDLE) ||
+		    opcode == OP(SEND_LAST) ||
+		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+			break;
+		goto inv;
+
+	case OP(RDMA_WRITE_FIRST):
+	case OP(RDMA_WRITE_MIDDLE):
+		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+		    opcode == OP(RDMA_WRITE_LAST) ||
+		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+			break;
+		goto inv;
+
+	default:
+		if (opcode == OP(SEND_FIRST) ||
+		    opcode == OP(SEND_ONLY) ||
+		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
+		    opcode == OP(RDMA_WRITE_FIRST) ||
+		    opcode == OP(RDMA_WRITE_ONLY) ||
+		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+			break;
+		goto inv;
+	}
+
+	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
+		qp->r_flags |= QIB_R_COMM_EST;
+		if (qp->ibqp.event_handler) {
+			struct ib_event ev;
+
+			ev.device = qp->ibqp.device;
+			ev.element.qp = &qp->ibqp;
+			ev.event = IB_EVENT_COMM_EST;
+			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+		}
+	}
+
+	/* OK, process the packet.
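+	 * Dispatch on the (already validated) opcode; each arm copies or
+	 * maps data via r_sge, and the common exit below advances
+	 * r_psn/r_state.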
*/ + switch (opcode) { + case OP(SEND_FIRST): + case OP(SEND_ONLY): + case OP(SEND_ONLY_WITH_IMMEDIATE): +send_first: + if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) + qp->r_sge = qp->s_rdma_read_sge; + else { + ret = qib_get_rwqe(qp, 0); + if (ret < 0) + goto op_err; + if (!ret) + goto drop; + /* + * qp->s_rdma_read_sge will be the owner + * of the mr references. + */ + qp->s_rdma_read_sge = qp->r_sge; + } + qp->r_rcv_len = 0; + if (opcode == OP(SEND_ONLY)) + goto send_last; + else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) + goto send_last_imm; + /* FALLTHROUGH */ + case OP(SEND_MIDDLE): + /* Check for invalid length PMTU or posted rwqe len. */ + if (unlikely(tlen != (hdrsize + pmtu + 4))) + goto rewind; + qp->r_rcv_len += pmtu; + if (unlikely(qp->r_rcv_len > qp->r_len)) + goto rewind; + qib_copy_sge(&qp->r_sge, data, pmtu, 0); + break; + + case OP(SEND_LAST_WITH_IMMEDIATE): +send_last_imm: + wc.ex.imm_data = ohdr->u.imm_data; + hdrsize += 4; + wc.wc_flags = IB_WC_WITH_IMM; + /* FALLTHROUGH */ + case OP(SEND_LAST): +send_last: + /* Get the number of bytes the message was padded by. */ + pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; + /* Check for invalid length. */ + /* XXX LAST len should be >= 1 */ + if (unlikely(tlen < (hdrsize + pad + 4))) + goto rewind; + /* Don't count the CRC. */ + tlen -= (hdrsize + pad + 4); + wc.byte_len = tlen + qp->r_rcv_len; + if (unlikely(wc.byte_len > qp->r_len)) + goto rewind; + wc.opcode = IB_WC_RECV; +last_imm: + qib_copy_sge(&qp->r_sge, data, tlen, 0); + while (qp->s_rdma_read_sge.num_sge) { + atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); + if (--qp->s_rdma_read_sge.num_sge) + qp->s_rdma_read_sge.sge = + *qp->s_rdma_read_sge.sg_list++; + } + wc.wr_id = qp->r_wr_id; + wc.status = IB_WC_SUCCESS; + wc.qp = &qp->ibqp; + wc.src_qp = qp->remote_qpn; + wc.slid = qp->remote_ah_attr.dlid; + wc.sl = qp->remote_ah_attr.sl; + /* Signal completion event if the solicited bit is set. */ + qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + (ohdr->bth[0] & + cpu_to_be32(IB_BTH_SOLICITED)) != 0); + break; + + case OP(RDMA_WRITE_FIRST): + case OP(RDMA_WRITE_ONLY): + case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */ +rdma_first: + if (unlikely(!(qp->qp_access_flags & + IB_ACCESS_REMOTE_WRITE))) { + goto drop; + } + reth = &ohdr->u.rc.reth; + hdrsize += sizeof(*reth); + qp->r_len = be32_to_cpu(reth->length); + qp->r_rcv_len = 0; + qp->r_sge.sg_list = NULL; + if (qp->r_len != 0) { + u32 rkey = be32_to_cpu(reth->rkey); + u64 vaddr = be64_to_cpu(reth->vaddr); + int ok; + + /* Check rkey */ + ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, + vaddr, rkey, IB_ACCESS_REMOTE_WRITE); + if (unlikely(!ok)) + goto drop; + qp->r_sge.num_sge = 1; + } else { + qp->r_sge.num_sge = 0; + qp->r_sge.sge.mr = NULL; + qp->r_sge.sge.vaddr = NULL; + qp->r_sge.sge.length = 0; + qp->r_sge.sge.sge_length = 0; + } + if (opcode == OP(RDMA_WRITE_ONLY)) + goto rdma_last; + else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) + goto rdma_last_imm; + /* FALLTHROUGH */ + case OP(RDMA_WRITE_MIDDLE): + /* Check for invalid length PMTU or posted rwqe len. */ + if (unlikely(tlen != (hdrsize + pmtu + 4))) + goto drop; + qp->r_rcv_len += pmtu; + if (unlikely(qp->r_rcv_len > qp->r_len)) + goto drop; + qib_copy_sge(&qp->r_sge, data, pmtu, 1); + break; + + case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): +rdma_last_imm: + wc.ex.imm_data = ohdr->u.imm_data; + hdrsize += 4; + wc.wc_flags = IB_WC_WITH_IMM; + + /* Get the number of bytes the message was padded by. 
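+		 * (The 2-bit PadCnt field, bits 21:20 of bth[0]; payloads
+		 * are padded out to a 4-byte boundary.)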
*/ + pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; + /* Check for invalid length. */ + /* XXX LAST len should be >= 1 */ + if (unlikely(tlen < (hdrsize + pad + 4))) + goto drop; + /* Don't count the CRC. */ + tlen -= (hdrsize + pad + 4); + if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) + goto drop; + if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) + while (qp->s_rdma_read_sge.num_sge) { + atomic_dec(&qp->s_rdma_read_sge.sge.mr-> + refcount); + if (--qp->s_rdma_read_sge.num_sge) + qp->s_rdma_read_sge.sge = + *qp->s_rdma_read_sge.sg_list++; + } + else { + ret = qib_get_rwqe(qp, 1); + if (ret < 0) + goto op_err; + if (!ret) + goto drop; + } + wc.byte_len = qp->r_len; + wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; + goto last_imm; + + case OP(RDMA_WRITE_LAST): +rdma_last: + /* Get the number of bytes the message was padded by. */ + pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; + /* Check for invalid length. */ + /* XXX LAST len should be >= 1 */ + if (unlikely(tlen < (hdrsize + pad + 4))) + goto drop; + /* Don't count the CRC. */ + tlen -= (hdrsize + pad + 4); + if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) + goto drop; + qib_copy_sge(&qp->r_sge, data, tlen, 1); + while (qp->r_sge.num_sge) { + atomic_dec(&qp->r_sge.sge.mr->refcount); + if (--qp->r_sge.num_sge) + qp->r_sge.sge = *qp->r_sge.sg_list++; + } + break; + + default: + /* Drop packet for unknown opcodes. */ + goto drop; + } + qp->r_psn++; + qp->r_state = opcode; + spin_unlock(&qp->r_lock); + return; + +rewind: + set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); + qp->r_sge.num_sge = 0; +drop: + ibp->n_pkt_drops++; + spin_unlock(&qp->r_lock); + return; + +op_err: + qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); + spin_unlock(&qp->r_lock); + return; + +sunlock: + spin_unlock_irqrestore(&qp->s_lock, flags); +} diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c new file mode 100644 index 000000000000..c838cda73347 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -0,0 +1,607 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+/**
+ * qib_ud_loopback - handle send on loopback QPs
+ * @sqp: the sending QP
+ * @swqe: the send work request
+ *
+ * This is called from qib_make_ud_req() to forward a WQE addressed
+ * to the same HCA.
+ * Note that the receive interrupt handler may be calling qib_ud_rcv()
+ * while this is being called.
+ */
+static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+{
+	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+	struct qib_pportdata *ppd;
+	struct qib_qp *qp;
+	struct ib_ah_attr *ah_attr;
+	unsigned long flags;
+	struct qib_sge_state ssge;
+	struct qib_sge *sge;
+	struct ib_wc wc;
+	u32 length;
+
+	qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
+	if (!qp) {
+		ibp->n_pkt_drops++;
+		return;
+	}
+	if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+	    !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+		ibp->n_pkt_drops++;
+		goto drop;
+	}
+
+	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
+	ppd = ppd_from_ibp(ibp);
+
+	if (qp->ibqp.qp_num > 1) {
+		u16 pkey1;
+		u16 pkey2;
+		u16 lid;
+
+		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
+		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
+		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
+			lid = ppd->lid | (ah_attr->src_path_bits &
+					  ((1 << ppd->lmc) - 1));
+			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
+				      ah_attr->sl,
+				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
+				      cpu_to_be16(lid),
+				      cpu_to_be16(ah_attr->dlid));
+			goto drop;
+		}
+	}
+
+	/*
+	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
+	 * Qkeys with the high order bit set mean use the
+	 * qkey from the QP context instead of the WR (see 10.2.5).
+	 */
+	if (qp->ibqp.qp_num) {
+		u32 qkey;
+
+		qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
+			sqp->qkey : swqe->wr.wr.ud.remote_qkey;
+		if (unlikely(qkey != qp->qkey)) {
+			u16 lid;
+
+			lid = ppd->lid | (ah_attr->src_path_bits &
+					  ((1 << ppd->lmc) - 1));
+			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+				      ah_attr->sl,
+				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
+				      cpu_to_be16(lid),
+				      cpu_to_be16(ah_attr->dlid));
+			goto drop;
+		}
+	}
+
+	/*
+	 * A GRH is expected to precede the data even if not
+	 * present on the wire.
+	 */
+	length = swqe->length;
+	memset(&wc, 0, sizeof wc);
+	wc.byte_len = length + sizeof(struct ib_grh);
+
+	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+		wc.wc_flags = IB_WC_WITH_IMM;
+		wc.ex.imm_data = swqe->wr.ex.imm_data;
+	}
+
+	spin_lock_irqsave(&qp->r_lock, flags);
+
+	/*
+	 * Get the next work request entry to find where to put the data.
+	 */
+	if (qp->r_flags & QIB_R_REUSE_SGE)
+		qp->r_flags &= ~QIB_R_REUSE_SGE;
+	else {
+		int ret;
+
+		ret = qib_get_rwqe(qp, 0);
+		if (ret < 0) {
+			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+			goto bail_unlock;
+		}
+		if (!ret) {
+			if (qp->ibqp.qp_num == 0)
+				ibp->n_vl15_dropped++;
+			goto bail_unlock;
+		}
+	}
+	/* Silently drop packets which are too big.
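+	 * Keep the SGE for reuse (QIB_R_REUSE_SGE) so the receive work
+	 * request is not consumed by the dropped packet.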
*/ + if (unlikely(wc.byte_len > qp->r_len)) { + qp->r_flags |= QIB_R_REUSE_SGE; + ibp->n_pkt_drops++; + goto bail_unlock; + } + + if (ah_attr->ah_flags & IB_AH_GRH) { + qib_copy_sge(&qp->r_sge, &ah_attr->grh, + sizeof(struct ib_grh), 1); + wc.wc_flags |= IB_WC_GRH; + } else + qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); + ssge.sg_list = swqe->sg_list + 1; + ssge.sge = *swqe->sg_list; + ssge.num_sge = swqe->wr.num_sge; + sge = &ssge.sge; + while (length) { + u32 len = sge->length; + + if (len > length) + len = length; + if (len > sge->sge_length) + len = sge->sge_length; + BUG_ON(len == 0); + qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1); + sge->vaddr += len; + sge->length -= len; + sge->sge_length -= len; + if (sge->sge_length == 0) { + if (--ssge.num_sge) + *sge = *ssge.sg_list++; + } else if (sge->length == 0 && sge->mr->lkey) { + if (++sge->n >= QIB_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + break; + sge->n = 0; + } + sge->vaddr = + sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = + sge->mr->map[sge->m]->segs[sge->n].length; + } + length -= len; + } + while (qp->r_sge.num_sge) { + atomic_dec(&qp->r_sge.sge.mr->refcount); + if (--qp->r_sge.num_sge) + qp->r_sge.sge = *qp->r_sge.sg_list++; + } + if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) + goto bail_unlock; + wc.wr_id = qp->r_wr_id; + wc.status = IB_WC_SUCCESS; + wc.opcode = IB_WC_RECV; + wc.qp = &qp->ibqp; + wc.src_qp = sqp->ibqp.qp_num; + wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? + swqe->wr.wr.ud.pkey_index : 0; + wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1)); + wc.sl = ah_attr->sl; + wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); + wc.port_num = qp->port_num; + /* Signal completion event if the solicited bit is set. */ + qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + swqe->wr.send_flags & IB_SEND_SOLICITED); + ibp->n_loop_pkts++; +bail_unlock: + spin_unlock_irqrestore(&qp->r_lock, flags); +drop: + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); +} + +/** + * qib_make_ud_req - construct a UD request packet + * @qp: the QP + * + * Return 1 if constructed; otherwise, return 0. + */ +int qib_make_ud_req(struct qib_qp *qp) +{ + struct qib_other_headers *ohdr; + struct ib_ah_attr *ah_attr; + struct qib_pportdata *ppd; + struct qib_ibport *ibp; + struct qib_swqe *wqe; + unsigned long flags; + u32 nwords; + u32 extra_bytes; + u32 bth0; + u16 lrh0; + u16 lid; + int ret = 0; + int next_cur; + + spin_lock_irqsave(&qp->s_lock, flags); + + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) { + if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) + goto bail; + /* We are in the error state, flush the work request. */ + if (qp->s_last == qp->s_head) + goto bail; + /* If DMAs are in progress, we can't flush immediately. */ + if (atomic_read(&qp->s_dma_busy)) { + qp->s_flags |= QIB_S_WAIT_DMA; + goto bail; + } + wqe = get_swqe_ptr(qp, qp->s_last); + qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); + goto done; + } + + if (qp->s_cur == qp->s_head) + goto bail; + + wqe = get_swqe_ptr(qp, qp->s_cur); + next_cur = qp->s_cur + 1; + if (next_cur >= qp->s_size) + next_cur = 0; + + /* Construct the header. 
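+	 * (LRH + BTH + DETH, plus an optional GRH; qp->s_hdrwords counts
+	 * the header length in 32-bit words, as set below.)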
*/ + ibp = to_iport(qp->ibqp.device, qp->port_num); + ppd = ppd_from_ibp(ibp); + ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; + if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { + if (ah_attr->dlid != QIB_PERMISSIVE_LID) + ibp->n_multicast_xmit++; + else + ibp->n_unicast_xmit++; + } else { + ibp->n_unicast_xmit++; + lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); + if (unlikely(lid == ppd->lid)) { + /* + * If DMAs are in progress, we can't generate + * a completion for the loopback packet since + * it would be out of order. + * XXX Instead of waiting, we could queue a + * zero length descriptor so we get a callback. + */ + if (atomic_read(&qp->s_dma_busy)) { + qp->s_flags |= QIB_S_WAIT_DMA; + goto bail; + } + qp->s_cur = next_cur; + spin_unlock_irqrestore(&qp->s_lock, flags); + qib_ud_loopback(qp, wqe); + spin_lock_irqsave(&qp->s_lock, flags); + qib_send_complete(qp, wqe, IB_WC_SUCCESS); + goto done; + } + } + + qp->s_cur = next_cur; + extra_bytes = -wqe->length & 3; + nwords = (wqe->length + extra_bytes) >> 2; + + /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */ + qp->s_hdrwords = 7; + qp->s_cur_size = wqe->length; + qp->s_cur_sge = &qp->s_sge; + qp->s_srate = ah_attr->static_rate; + qp->s_wqe = wqe; + qp->s_sge.sge = wqe->sg_list[0]; + qp->s_sge.sg_list = wqe->sg_list + 1; + qp->s_sge.num_sge = wqe->wr.num_sge; + qp->s_sge.total_len = wqe->length; + + if (ah_attr->ah_flags & IB_AH_GRH) { + /* Header size in 32-bit words. */ + qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh, + &ah_attr->grh, + qp->s_hdrwords, nwords); + lrh0 = QIB_LRH_GRH; + ohdr = &qp->s_hdr.u.l.oth; + /* + * Don't worry about sending to locally attached multicast + * QPs. It is unspecified by the spec. what happens. + */ + } else { + /* Header size in 32-bit words. */ + lrh0 = QIB_LRH_BTH; + ohdr = &qp->s_hdr.u.oth; + } + if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { + qp->s_hdrwords++; + ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; + bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; + } else + bth0 = IB_OPCODE_UD_SEND_ONLY << 24; + lrh0 |= ah_attr->sl << 4; + if (qp->ibqp.qp_type == IB_QPT_SMI) + lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */ + else + lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12; + qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); + qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ + qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); + lid = ppd->lid; + if (lid) { + lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1); + qp->s_hdr.lrh[3] = cpu_to_be16(lid); + } else + qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE; + if (wqe->wr.send_flags & IB_SEND_SOLICITED) + bth0 |= IB_BTH_SOLICITED; + bth0 |= extra_bytes << 20; + bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY : + qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ? + wqe->wr.wr.ud.pkey_index : qp->s_pkey_index); + ohdr->bth[0] = cpu_to_be32(bth0); + /* + * Use the multicast QP if the destination LID is a multicast LID. + */ + ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE && + ah_attr->dlid != QIB_PERMISSIVE_LID ? + cpu_to_be32(QIB_MULTICAST_QPN) : + cpu_to_be32(wqe->wr.wr.ud.remote_qpn); + ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK); + /* + * Qkeys with the high order bit set mean use the + * qkey from the QP context instead of the WR (see 10.2.5). + */ + ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ? 
+ qp->qkey : wqe->wr.wr.ud.remote_qkey); + ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); + +done: + ret = 1; + goto unlock; + +bail: + qp->s_flags &= ~QIB_S_BUSY; +unlock: + spin_unlock_irqrestore(&qp->s_lock, flags); + return ret; +} + +static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey) +{ + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_devdata *dd = ppd->dd; + unsigned ctxt = ppd->hw_pidx; + unsigned i; + + pkey &= 0x7fff; /* remove limited/full membership bit */ + + for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i) + if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey) + return i; + + /* + * Should not get here, this means hardware failed to validate pkeys. + * Punt and return index 0. + */ + return 0; +} + +/** + * qib_ud_rcv - receive an incoming UD packet + * @ibp: the port the packet came in on + * @hdr: the packet header + * @has_grh: true if the packet has a GRH + * @data: the packet data + * @tlen: the packet length + * @qp: the QP the packet came on + * + * This is called from qib_qp_rcv() to process an incoming UD packet + * for the given QP. + * Called at interrupt level. + */ +void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, + int has_grh, void *data, u32 tlen, struct qib_qp *qp) +{ + struct qib_other_headers *ohdr; + int opcode; + u32 hdrsize; + u32 pad; + struct ib_wc wc; + u32 qkey; + u32 src_qp; + u16 dlid; + + /* Check for GRH */ + if (!has_grh) { + ohdr = &hdr->u.oth; + hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */ + } else { + ohdr = &hdr->u.l.oth; + hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */ + } + qkey = be32_to_cpu(ohdr->u.ud.deth[0]); + src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; + + /* Get the number of bytes the message was padded by. */ + pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; + if (unlikely(tlen < (hdrsize + pad + 4))) { + /* Drop incomplete packets. */ + ibp->n_pkt_drops++; + goto bail; + } + tlen -= hdrsize + pad + 4; + + /* + * Check that the permissive LID is only used on QP0 + * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1). + */ + if (qp->ibqp.qp_num) { + if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || + hdr->lrh[3] == IB_LID_PERMISSIVE)) { + ibp->n_pkt_drops++; + goto bail; + } + if (qp->ibqp.qp_num > 1) { + u16 pkey1, pkey2; + + pkey1 = be32_to_cpu(ohdr->bth[0]); + pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); + if (unlikely(!qib_pkey_ok(pkey1, pkey2))) { + qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, + pkey1, + (be16_to_cpu(hdr->lrh[0]) >> 4) & + 0xF, + src_qp, qp->ibqp.qp_num, + hdr->lrh[3], hdr->lrh[1]); + goto bail; + } + } + if (unlikely(qkey != qp->qkey)) { + qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey, + (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, + src_qp, qp->ibqp.qp_num, + hdr->lrh[3], hdr->lrh[1]); + goto bail; + } + /* Drop invalid MAD packets (see 13.5.3.1). */ + if (unlikely(qp->ibqp.qp_num == 1 && + (tlen != 256 || + (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) { + ibp->n_pkt_drops++; + goto bail; + } + } else { + struct ib_smp *smp; + + /* Drop invalid MAD packets (see 13.5.3.1). */ + if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) { + ibp->n_pkt_drops++; + goto bail; + } + smp = (struct ib_smp *) data; + if ((hdr->lrh[1] == IB_LID_PERMISSIVE || + hdr->lrh[3] == IB_LID_PERMISSIVE) && + smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + ibp->n_pkt_drops++; + goto bail; + } + } + + /* + * The opcode is in the low byte when its in network order + * (top byte when in host order). 
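+ * For instance (hypothetical word): a bth[0] of 0x64000000 yields
+ * opcode 0x64 after the ">> 24" below, which is
+ * IB_OPCODE_UD_SEND_ONLY; 0x65 would be the SEND_ONLY_WITH_IMMEDIATE
+ * form handled first.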
+ */ + opcode = be32_to_cpu(ohdr->bth[0]) >> 24; + if (qp->ibqp.qp_num > 1 && + opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { + wc.ex.imm_data = ohdr->u.ud.imm_data; + wc.wc_flags = IB_WC_WITH_IMM; + hdrsize += sizeof(u32); + } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { + wc.ex.imm_data = 0; + wc.wc_flags = 0; + } else { + ibp->n_pkt_drops++; + goto bail; + } + + /* + * A GRH is expected to preceed the data even if not + * present on the wire. + */ + wc.byte_len = tlen + sizeof(struct ib_grh); + + /* + * We need to serialize getting a receive work queue entry and + * generating a completion for it against QPs sending to this QP + * locally. + */ + spin_lock(&qp->r_lock); + + /* + * Get the next work request entry to find where to put the data. + */ + if (qp->r_flags & QIB_R_REUSE_SGE) + qp->r_flags &= ~QIB_R_REUSE_SGE; + else { + int ret; + + ret = qib_get_rwqe(qp, 0); + if (ret < 0) { + qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); + goto bail_unlock; + } + if (!ret) { + if (qp->ibqp.qp_num == 0) + ibp->n_vl15_dropped++; + goto bail_unlock; + } + } + /* Silently drop packets which are too big. */ + if (unlikely(wc.byte_len > qp->r_len)) { + qp->r_flags |= QIB_R_REUSE_SGE; + ibp->n_pkt_drops++; + goto bail_unlock; + } + if (has_grh) { + qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, + sizeof(struct ib_grh), 1); + wc.wc_flags |= IB_WC_GRH; + } else + qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); + qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); + while (qp->r_sge.num_sge) { + atomic_dec(&qp->r_sge.sge.mr->refcount); + if (--qp->r_sge.num_sge) + qp->r_sge.sge = *qp->r_sge.sg_list++; + } + if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) + goto bail_unlock; + wc.wr_id = qp->r_wr_id; + wc.status = IB_WC_SUCCESS; + wc.opcode = IB_WC_RECV; + wc.vendor_err = 0; + wc.qp = &qp->ibqp; + wc.src_qp = src_qp; + wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? + qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0; + wc.slid = be16_to_cpu(hdr->lrh[3]); + wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF; + dlid = be16_to_cpu(hdr->lrh[1]); + /* + * Save the LMC lower bits if the destination LID is a unicast LID. + */ + wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 : + dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1); + wc.port_num = qp->port_num; + /* Signal completion event if the solicited bit is set. */ + qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + (ohdr->bth[0] & + cpu_to_be32(IB_BTH_SOLICITED)) != 0); +bail_unlock: + spin_unlock(&qp->r_lock); +bail:; +} diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c new file mode 100644 index 000000000000..d7a26c1d4f37 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/mm.h> +#include <linux/device.h> + +#include "qib.h" + +static void __qib_release_user_pages(struct page **p, size_t num_pages, + int dirty) +{ + size_t i; + + for (i = 0; i < num_pages; i++) { + if (dirty) + set_page_dirty_lock(p[i]); + put_page(p[i]); + } +} + +/* + * Call with current->mm->mmap_sem held. + */ +static int __get_user_pages(unsigned long start_page, size_t num_pages, + struct page **p, struct vm_area_struct **vma) +{ + unsigned long lock_limit; + size_t got; + int ret; + + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + + if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) { + ret = -ENOMEM; + goto bail; + } + + for (got = 0; got < num_pages; got += ret) { + ret = get_user_pages(current, current->mm, + start_page + got * PAGE_SIZE, + num_pages - got, 1, 1, + p + got, vma); + if (ret < 0) + goto bail_release; + } + + current->mm->locked_vm += num_pages; + + ret = 0; + goto bail; + +bail_release: + __qib_release_user_pages(p, got, 0); +bail: + return ret; +} + +/** + * qib_map_page - a safety wrapper around pci_map_page() + * + * A dma_addr of all 0's is interpreted by the chip as "disabled". + * Unfortunately, it can also be a valid dma_addr returned on some + * architectures. + * + * The powerpc iommu assigns dma_addrs in ascending order, so we don't + * have to bother with retries or mapping a dummy page to insure we + * don't just get the same mapping again. + * + * I'm sure we won't be so lucky with other iommu's, so FIXME. + */ +dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page, + unsigned long offset, size_t size, int direction) +{ + dma_addr_t phys; + + phys = pci_map_page(hwdev, page, offset, size, direction); + + if (phys == 0) { + pci_unmap_page(hwdev, phys, size, direction); + phys = pci_map_page(hwdev, page, offset, size, direction); + /* + * FIXME: If we get 0 again, we should keep this page, + * map another, then free the 0 page. + */ + } + + return phys; +} + +/** + * qib_get_user_pages - lock user pages into memory + * @start_page: the start page + * @num_pages: the number of pages + * @p: the output page structures + * + * This function takes a given start page (page aligned user virtual + * address) and pins it and the following specified number of pages. For + * now, num_pages is always 1, but that will probably change at some point + * (because caller is doing expected sends on a single virtually contiguous + * buffer, so we can do all pages at once). 
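+ * The pin is bounded by RLIMIT_MEMLOCK in __get_user_pages() above;
+ * as a rough example (hypothetical limit), a 64 MB memlock limit with
+ * 4 KB pages gives lock_limit = (64 << 20) >> 12 = 16384 pages, and
+ * any larger request fails with -ENOMEM unless the caller has
+ * CAP_IPC_LOCK.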
*/
+int qib_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p)
+{
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+
+ ret = __get_user_pages(start_page, num_pages, p, NULL);
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+void qib_release_user_pages(struct page **p, size_t num_pages)
+{
+ if (current->mm) /* during close after signal, mm can be NULL */
+ down_write(&current->mm->mmap_sem);
+
+ __qib_release_user_pages(p, num_pages, 1);
+
+ if (current->mm) {
+ current->mm->locked_vm -= num_pages;
+ up_write(&current->mm->mmap_sem);
+ }
+} diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c new file mode 100644 index 000000000000..4c19e06b5e85 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_user_sdma.h"
+
+/* minimum size of header */
+#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
+/* expected size of headers (for dma_pool) */
+#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
+/* attempt to drain the queue for 5secs */
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+
+struct qib_user_sdma_pkt {
+ u8 naddr; /* dimension of addr (1..3) ... */
+ u32 counter; /* sdma pkts queued counter for this entry */
+ u64 added; /* global descq number of entries */
+
+ struct {
+ u32 offset; /* offset for kvaddr, addr */
+ u32 length; /* length in page */
+ u8 put_page; /* should we put_page? */
+ u8 dma_mapped; /* is page dma_mapped? */
+ struct page *page; /* may be NULL (coherent mem) */
+ void *kvaddr; /* FIXME: only for pio hack */
+ dma_addr_t addr;
+ } addr[4]; /* max pages, any more and we coalesce */
+ struct list_head list; /* list element */
+};
+
+struct qib_user_sdma_queue {
+ /*
+ * pkts sent to dma engine are queued on this
+ * list head.
the type of the elements of this + * list are struct qib_user_sdma_pkt... + */ + struct list_head sent; + + /* headers with expected length are allocated from here... */ + char header_cache_name[64]; + struct dma_pool *header_cache; + + /* packets are allocated from the slab cache... */ + char pkt_slab_name[64]; + struct kmem_cache *pkt_slab; + + /* as packets go on the queued queue, they are counted... */ + u32 counter; + u32 sent_counter; + + /* dma page table */ + struct rb_root dma_pages_root; + + /* protect everything above... */ + struct mutex lock; +}; + +struct qib_user_sdma_queue * +qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) +{ + struct qib_user_sdma_queue *pq = + kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL); + + if (!pq) + goto done; + + pq->counter = 0; + pq->sent_counter = 0; + INIT_LIST_HEAD(&pq->sent); + + mutex_init(&pq->lock); + + snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name), + "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt); + pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name, + sizeof(struct qib_user_sdma_pkt), + 0, 0, NULL); + + if (!pq->pkt_slab) + goto err_kfree; + + snprintf(pq->header_cache_name, sizeof(pq->header_cache_name), + "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt); + pq->header_cache = dma_pool_create(pq->header_cache_name, + dev, + QIB_USER_SDMA_EXP_HEADER_LENGTH, + 4, 0); + if (!pq->header_cache) + goto err_slab; + + pq->dma_pages_root = RB_ROOT; + + goto done; + +err_slab: + kmem_cache_destroy(pq->pkt_slab); +err_kfree: + kfree(pq); + pq = NULL; + +done: + return pq; +} + +static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt, + int i, size_t offset, size_t len, + int put_page, int dma_mapped, + struct page *page, + void *kvaddr, dma_addr_t dma_addr) +{ + pkt->addr[i].offset = offset; + pkt->addr[i].length = len; + pkt->addr[i].put_page = put_page; + pkt->addr[i].dma_mapped = dma_mapped; + pkt->addr[i].page = page; + pkt->addr[i].kvaddr = kvaddr; + pkt->addr[i].addr = dma_addr; +} + +static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt, + u32 counter, size_t offset, + size_t len, int dma_mapped, + struct page *page, + void *kvaddr, dma_addr_t dma_addr) +{ + pkt->naddr = 1; + pkt->counter = counter; + qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page, + kvaddr, dma_addr); +} + +/* we've too many pages in the iovec, coalesce to a single page */ +static int qib_user_sdma_coalesce(const struct qib_devdata *dd, + struct qib_user_sdma_pkt *pkt, + const struct iovec *iov, + unsigned long niov) +{ + int ret = 0; + struct page *page = alloc_page(GFP_KERNEL); + void *mpage_save; + char *mpage; + int i; + int len = 0; + dma_addr_t dma_addr; + + if (!page) { + ret = -ENOMEM; + goto done; + } + + mpage = kmap(page); + mpage_save = mpage; + for (i = 0; i < niov; i++) { + int cfur; + + cfur = copy_from_user(mpage, + iov[i].iov_base, iov[i].iov_len); + if (cfur) { + ret = -EFAULT; + goto free_unmap; + } + + mpage += iov[i].iov_len; + len += iov[i].iov_len; + } + + dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { + ret = -ENOMEM; + goto free_unmap; + } + + qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save, + dma_addr); + pkt->naddr = 2; + + goto done; + +free_unmap: + kunmap(page); + __free_page(page); +done: + return ret; +} + +/* + * How many pages in this iovec element? 
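+ * E.g. (hypothetical addresses, 4 KB pages): iov_base = 0x1ff0 with
+ * iov_len = 0x20 spans two pages, since spage = 0x1000 and
+ * epage = 0x2000 give 1 + ((0x2000 - 0x1000) >> 12) = 2, while the
+ * same length starting at 0x1000 stays on one page.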
+ */ +static int qib_user_sdma_num_pages(const struct iovec *iov) +{ + const unsigned long addr = (unsigned long) iov->iov_base; + const unsigned long len = iov->iov_len; + const unsigned long spage = addr & PAGE_MASK; + const unsigned long epage = (addr + len - 1) & PAGE_MASK; + + return 1 + ((epage - spage) >> PAGE_SHIFT); +} + +/* + * Truncate length to page boundry. + */ +static int qib_user_sdma_page_length(unsigned long addr, unsigned long len) +{ + const unsigned long offset = addr & ~PAGE_MASK; + + return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len; +} + +static void qib_user_sdma_free_pkt_frag(struct device *dev, + struct qib_user_sdma_queue *pq, + struct qib_user_sdma_pkt *pkt, + int frag) +{ + const int i = frag; + + if (pkt->addr[i].page) { + if (pkt->addr[i].dma_mapped) + dma_unmap_page(dev, + pkt->addr[i].addr, + pkt->addr[i].length, + DMA_TO_DEVICE); + + if (pkt->addr[i].kvaddr) + kunmap(pkt->addr[i].page); + + if (pkt->addr[i].put_page) + put_page(pkt->addr[i].page); + else + __free_page(pkt->addr[i].page); + } else if (pkt->addr[i].kvaddr) + /* free coherent mem from cache... */ + dma_pool_free(pq->header_cache, + pkt->addr[i].kvaddr, pkt->addr[i].addr); +} + +/* return number of pages pinned... */ +static int qib_user_sdma_pin_pages(const struct qib_devdata *dd, + struct qib_user_sdma_pkt *pkt, + unsigned long addr, int tlen, int npages) +{ + struct page *pages[2]; + int j; + int ret; + + ret = get_user_pages(current, current->mm, addr, + npages, 0, 1, pages, NULL); + + if (ret != npages) { + int i; + + for (i = 0; i < ret; i++) + put_page(pages[i]); + + ret = -ENOMEM; + goto done; + } + + for (j = 0; j < npages; j++) { + /* map the pages... */ + const int flen = qib_user_sdma_page_length(addr, tlen); + dma_addr_t dma_addr = + dma_map_page(&dd->pcidev->dev, + pages[j], 0, flen, DMA_TO_DEVICE); + unsigned long fofs = addr & ~PAGE_MASK; + + if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { + ret = -ENOMEM; + goto done; + } + + qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1, + pages[j], kmap(pages[j]), dma_addr); + + pkt->naddr++; + addr += flen; + tlen -= flen; + } + +done: + return ret; +} + +static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd, + struct qib_user_sdma_queue *pq, + struct qib_user_sdma_pkt *pkt, + const struct iovec *iov, + unsigned long niov) +{ + int ret = 0; + unsigned long idx; + + for (idx = 0; idx < niov; idx++) { + const int npages = qib_user_sdma_num_pages(iov + idx); + const unsigned long addr = (unsigned long) iov[idx].iov_base; + + ret = qib_user_sdma_pin_pages(dd, pkt, addr, + iov[idx].iov_len, npages); + if (ret < 0) + goto free_pkt; + } + + goto done; + +free_pkt: + for (idx = 0; idx < pkt->naddr; idx++) + qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); + +done: + return ret; +} + +static int qib_user_sdma_init_payload(const struct qib_devdata *dd, + struct qib_user_sdma_queue *pq, + struct qib_user_sdma_pkt *pkt, + const struct iovec *iov, + unsigned long niov, int npages) +{ + int ret = 0; + + if (npages >= ARRAY_SIZE(pkt->addr)) + ret = qib_user_sdma_coalesce(dd, pkt, iov, niov); + else + ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); + + return ret; +} + +/* free a packet list -- return counter value of last packet */ +static void qib_user_sdma_free_pkt_list(struct device *dev, + struct qib_user_sdma_queue *pq, + struct list_head *list) +{ + struct qib_user_sdma_pkt *pkt, *pkt_next; + + list_for_each_entry_safe(pkt, pkt_next, list, list) { + int i; + + for (i = 0; i < pkt->naddr; i++) 
+ qib_user_sdma_free_pkt_frag(dev, pq, pkt, i); + + kmem_cache_free(pq->pkt_slab, pkt); + } +} + +/* + * copy headers, coalesce etc -- pq->lock must be held + * + * we queue all the packets to list, returning the + * number of bytes total. list must be empty initially, + * as, if there is an error we clean it... + */ +static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, + struct qib_user_sdma_queue *pq, + struct list_head *list, + const struct iovec *iov, + unsigned long niov, + int maxpkts) +{ + unsigned long idx = 0; + int ret = 0; + int npkts = 0; + struct page *page = NULL; + __le32 *pbc; + dma_addr_t dma_addr; + struct qib_user_sdma_pkt *pkt = NULL; + size_t len; + size_t nw; + u32 counter = pq->counter; + int dma_mapped = 0; + + while (idx < niov && npkts < maxpkts) { + const unsigned long addr = (unsigned long) iov[idx].iov_base; + const unsigned long idx_save = idx; + unsigned pktnw; + unsigned pktnwc; + int nfrags = 0; + int npages = 0; + int cfur; + + dma_mapped = 0; + len = iov[idx].iov_len; + nw = len >> 2; + page = NULL; + + pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); + if (!pkt) { + ret = -ENOMEM; + goto free_list; + } + + if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH || + len > PAGE_SIZE || len & 3 || addr & 3) { + ret = -EINVAL; + goto free_pkt; + } + + if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH) + pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL, + &dma_addr); + else + pbc = NULL; + + if (!pbc) { + page = alloc_page(GFP_KERNEL); + if (!page) { + ret = -ENOMEM; + goto free_pkt; + } + pbc = kmap(page); + } + + cfur = copy_from_user(pbc, iov[idx].iov_base, len); + if (cfur) { + ret = -EFAULT; + goto free_pbc; + } + + /* + * This assignment is a bit strange. it's because the + * the pbc counts the number of 32 bit words in the full + * packet _except_ the first word of the pbc itself... + */ + pktnwc = nw - 1; + + /* + * pktnw computation yields the number of 32 bit words + * that the caller has indicated in the PBC. note that + * this is one less than the total number of words that + * goes to the send DMA engine as the first 32 bit word + * of the PBC itself is not counted. Armed with this count, + * we can verify that the packet is consistent with the + * iovec lengths. 
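+ * Worked example (hypothetical sizes): a 64-byte header iovec gives
+ * nw = 16 and pktnwc = 15, so a PBC length field of pktnw = 31 means
+ * the loop below must accumulate exactly 16 more payload dwords
+ * (64 bytes) from the following iovec elements; any mismatch is
+ * rejected with -EINVAL.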
+ */ + pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK; + if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) { + ret = -EINVAL; + goto free_pbc; + } + + idx++; + while (pktnwc < pktnw && idx < niov) { + const size_t slen = iov[idx].iov_len; + const unsigned long faddr = + (unsigned long) iov[idx].iov_base; + + if (slen & 3 || faddr & 3 || !slen || + slen > PAGE_SIZE) { + ret = -EINVAL; + goto free_pbc; + } + + npages++; + if ((faddr & PAGE_MASK) != + ((faddr + slen - 1) & PAGE_MASK)) + npages++; + + pktnwc += slen >> 2; + idx++; + nfrags++; + } + + if (pktnwc != pktnw) { + ret = -EINVAL; + goto free_pbc; + } + + if (page) { + dma_addr = dma_map_page(&dd->pcidev->dev, + page, 0, len, DMA_TO_DEVICE); + if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { + ret = -ENOMEM; + goto free_pbc; + } + + dma_mapped = 1; + } + + qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped, + page, pbc, dma_addr); + + if (nfrags) { + ret = qib_user_sdma_init_payload(dd, pq, pkt, + iov + idx_save + 1, + nfrags, npages); + if (ret < 0) + goto free_pbc_dma; + } + + counter++; + npkts++; + + list_add_tail(&pkt->list, list); + } + + ret = idx; + goto done; + +free_pbc_dma: + if (dma_mapped) + dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE); +free_pbc: + if (page) { + kunmap(page); + __free_page(page); + } else + dma_pool_free(pq->header_cache, pbc, dma_addr); +free_pkt: + kmem_cache_free(pq->pkt_slab, pkt); +free_list: + qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list); +done: + return ret; +} + +static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq, + u32 c) +{ + pq->sent_counter = c; +} + +/* try to clean out queue -- needs pq->lock */ +static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd, + struct qib_user_sdma_queue *pq) +{ + struct qib_devdata *dd = ppd->dd; + struct list_head free_list; + struct qib_user_sdma_pkt *pkt; + struct qib_user_sdma_pkt *pkt_prev; + int ret = 0; + + INIT_LIST_HEAD(&free_list); + + list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { + s64 descd = ppd->sdma_descq_removed - pkt->added; + + if (descd < 0) + break; + + list_move_tail(&pkt->list, &free_list); + + /* one more packet cleaned */ + ret++; + } + + if (!list_empty(&free_list)) { + u32 counter; + + pkt = list_entry(free_list.prev, + struct qib_user_sdma_pkt, list); + counter = pkt->counter; + + qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); + qib_user_sdma_set_complete_counter(pq, counter); + } + + return ret; +} + +void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq) +{ + if (!pq) + return; + + kmem_cache_destroy(pq->pkt_slab); + dma_pool_destroy(pq->header_cache); + kfree(pq); +} + +/* clean descriptor queue, returns > 0 if some elements cleaned */ +static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&ppd->sdma_lock, flags); + ret = qib_sdma_make_progress(ppd); + spin_unlock_irqrestore(&ppd->sdma_lock, flags); + + return ret; +} + +/* we're in close, drain packets so that we can cleanup successfully... 
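+ * (The loop below retries QIB_USER_SDMA_DRAIN_TIMEOUT = 500 times
+ * with msleep(10) between attempts, i.e. roughly 500 * 10 ms = 5 s,
+ * matching the "drain ... for 5secs" note at the top of this file,
+ * before force-freeing whatever is still on the sent list.)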
*/ +void qib_user_sdma_queue_drain(struct qib_pportdata *ppd, + struct qib_user_sdma_queue *pq) +{ + struct qib_devdata *dd = ppd->dd; + int i; + + if (!pq) + return; + + for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) { + mutex_lock(&pq->lock); + if (list_empty(&pq->sent)) { + mutex_unlock(&pq->lock); + break; + } + qib_user_sdma_hwqueue_clean(ppd); + qib_user_sdma_queue_clean(ppd, pq); + mutex_unlock(&pq->lock); + msleep(10); + } + + if (!list_empty(&pq->sent)) { + struct list_head free_list; + + qib_dev_err(dd, "user sdma lists not empty: forcing!\n"); + INIT_LIST_HEAD(&free_list); + mutex_lock(&pq->lock); + list_splice_init(&pq->sent, &free_list); + qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); + mutex_unlock(&pq->lock); + } +} + +static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd, + u64 addr, u64 dwlen, u64 dwoffset) +{ + u8 tmpgen; + + tmpgen = ppd->sdma_generation; + + return cpu_to_le64(/* SDmaPhyAddr[31:0] */ + ((addr & 0xfffffffcULL) << 32) | + /* SDmaGeneration[1:0] */ + ((tmpgen & 3ULL) << 30) | + /* SDmaDwordCount[10:0] */ + ((dwlen & 0x7ffULL) << 16) | + /* SDmaBufOffset[12:2] */ + (dwoffset & 0x7ffULL)); +} + +static inline __le64 qib_sdma_make_first_desc0(__le64 descq) +{ + return descq | cpu_to_le64(1ULL << 12); +} + +static inline __le64 qib_sdma_make_last_desc0(__le64 descq) +{ + /* last */ /* dma head */ + return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13); +} + +static inline __le64 qib_sdma_make_desc1(u64 addr) +{ + /* SDmaPhyAddr[47:32] */ + return cpu_to_le64(addr >> 32); +} + +static void qib_user_sdma_send_frag(struct qib_pportdata *ppd, + struct qib_user_sdma_pkt *pkt, int idx, + unsigned ofs, u16 tail) +{ + const u64 addr = (u64) pkt->addr[idx].addr + + (u64) pkt->addr[idx].offset; + const u64 dwlen = (u64) pkt->addr[idx].length / 4; + __le64 *descqp; + __le64 descq0; + + descqp = &ppd->sdma_descq[tail].qw[0]; + + descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs); + if (idx == 0) + descq0 = qib_sdma_make_first_desc0(descq0); + if (idx == pkt->naddr - 1) + descq0 = qib_sdma_make_last_desc0(descq0); + + descqp[0] = descq0; + descqp[1] = qib_sdma_make_desc1(addr); +} + +/* pq->lock must be held, get packets on the wire... 
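+ * For reference, the descriptor qword built by qib_sdma_make_desc0()
+ * above packs, per the shifts in that helper:
+ *
+ *   [63:32]  SDmaPhyAddr[31:0]     (addr & ~3)
+ *   [31:30]  SDmaGeneration[1:0]
+ *   [26:16]  SDmaDwordCount[10:0]
+ *   [10:0]   SDmaBufOffset[12:2]
+ *
+ * Bit 12 marks the first descriptor of a packet, bits 11 and 13 the
+ * last/dma-head descriptor, and bit 14 (set in the function below)
+ * flags packets that need the large PIO buffers.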
*/
+static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *pktlist)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = 0;
+ unsigned long flags;
+ u16 tail;
+ u8 generation;
+ u64 descq_added;
+
+ if (list_empty(pktlist))
+ return 0;
+
+ if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
+ return -ECOMM;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ /* keep a copy for restoring purposes in case of problems */
+ generation = ppd->sdma_generation;
+ descq_added = ppd->sdma_descq_added;
+
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ ret = -ECOMM;
+ goto unlock;
+ }
+
+ tail = ppd->sdma_descq_tail;
+ while (!list_empty(pktlist)) {
+ struct qib_user_sdma_pkt *pkt =
+ list_entry(pktlist->next, struct qib_user_sdma_pkt,
+ list);
+ int i;
+ unsigned ofs = 0;
+ u16 dtail = tail;
+
+ if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
+ goto unlock_check_tail;
+
+ for (i = 0; i < pkt->naddr; i++) {
+ qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
+ ofs += pkt->addr[i].length >> 2;
+
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ ++ppd->sdma_generation;
+ }
+ }
+
+ if ((ofs << 2) > ppd->ibmaxlen) {
+ ret = -EMSGSIZE;
+ goto unlock;
+ }
+
+ /*
+ * If the packet is >= 2KB mtu equivalent, we have to use
+ * the large buffers, and have to mark each descriptor as
+ * part of a large buffer packet.
+ */
+ if (ofs > dd->piosize2kmax_dwords) {
+ for (i = 0; i < pkt->naddr; i++) {
+ ppd->sdma_descq[dtail].qw[0] |=
+ cpu_to_le64(1ULL << 14);
+ if (++dtail == ppd->sdma_descq_cnt)
+ dtail = 0;
+ }
+ }
+
+ ppd->sdma_descq_added += pkt->naddr;
+ pkt->added = ppd->sdma_descq_added;
+ list_move_tail(&pkt->list, &pq->sent);
+ ret++;
+ }
+
+unlock_check_tail:
+ /* advance the tail on the chip if necessary */
+ if (ppd->sdma_descq_tail != tail)
+ dd->f_sdma_update_tail(ppd, tail);
+
+unlock:
+ if (unlikely(ret < 0)) {
+ ppd->sdma_generation = generation;
+ ppd->sdma_descq_added = descq_added;
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
+ struct qib_user_sdma_queue *pq,
+ const struct iovec *iov,
+ unsigned long dim)
+{
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ int ret = 0;
+ struct list_head list;
+ int npkts = 0;
+
+ INIT_LIST_HEAD(&list);
+
+ mutex_lock(&pq->lock);
+
+ /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
+ if (!qib_sdma_running(ppd))
+ goto done_unlock;
+
+ if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ }
+
+ while (dim) {
+ const int mxp = 8;
+
+ down_write(&current->mm->mmap_sem);
+ ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+ up_write(&current->mm->mmap_sem);
+
+ if (ret <= 0)
+ goto done_unlock;
+ else {
+ dim -= ret;
+ iov += ret;
+ }
+
+ /* force packets onto the sdma hw queue... */
+ if (!list_empty(&list)) {
+ /*
+ * Lazily clean hw queue. the 4 is a guess of about
+ * how many sdma descriptors a packet will take (it
+ * doesn't have to be perfect).
+ */
+ if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ }
+
+ ret = qib_user_sdma_push_pkts(ppd, pq, &list);
+ if (ret < 0)
+ goto done_unlock;
+ else {
+ npkts += ret;
+ pq->counter += ret;
+
+ if (!list_empty(&list))
+ goto done_unlock;
+ }
+ }
+ }
+
+done_unlock:
+ if (!list_empty(&list))
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
+ mutex_unlock(&pq->lock);
+
+ return (ret < 0) ? 
ret : npkts; +} + +int qib_user_sdma_make_progress(struct qib_pportdata *ppd, + struct qib_user_sdma_queue *pq) +{ + int ret = 0; + + mutex_lock(&pq->lock); + qib_user_sdma_hwqueue_clean(ppd); + ret = qib_user_sdma_queue_clean(ppd, pq); + mutex_unlock(&pq->lock); + + return ret; +} + +u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq) +{ + return pq ? pq->sent_counter : 0; +} + +u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq) +{ + return pq ? pq->counter : 0; +} diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h new file mode 100644 index 000000000000..ce8cbaf6a5c2 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_user_sdma.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/device.h> + +struct qib_user_sdma_queue; + +struct qib_user_sdma_queue * +qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport); +void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq); + +int qib_user_sdma_writev(struct qib_ctxtdata *pd, + struct qib_user_sdma_queue *pq, + const struct iovec *iov, + unsigned long dim); + +int qib_user_sdma_make_progress(struct qib_pportdata *ppd, + struct qib_user_sdma_queue *pq); + +void qib_user_sdma_queue_drain(struct qib_pportdata *ppd, + struct qib_user_sdma_queue *pq); + +u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq); +u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq); diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c new file mode 100644 index 000000000000..cda8f4173d23 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -0,0 +1,2248 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <rdma/ib_mad.h> +#include <rdma/ib_user_verbs.h> +#include <linux/io.h> +#include <linux/utsname.h> +#include <linux/rculist.h> +#include <linux/mm.h> + +#include "qib.h" +#include "qib_common.h" + +static unsigned int ib_qib_qp_table_size = 251; +module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); +MODULE_PARM_DESC(qp_table_size, "QP table size"); + +unsigned int ib_qib_lkey_table_size = 16; +module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint, + S_IRUGO); +MODULE_PARM_DESC(lkey_table_size, + "LKEY table size in bits (2^n, 1 <= n <= 23)"); + +static unsigned int ib_qib_max_pds = 0xFFFF; +module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO); +MODULE_PARM_DESC(max_pds, + "Maximum number of protection domains to support"); + +static unsigned int ib_qib_max_ahs = 0xFFFF; +module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO); +MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); + +unsigned int ib_qib_max_cqes = 0x2FFFF; +module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO); +MODULE_PARM_DESC(max_cqes, + "Maximum number of completion queue entries to support"); + +unsigned int ib_qib_max_cqs = 0x1FFFF; +module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO); +MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); + +unsigned int ib_qib_max_qp_wrs = 0x3FFF; +module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO); +MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); + +unsigned int ib_qib_max_qps = 16384; +module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO); +MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); + +unsigned int ib_qib_max_sges = 0x60; +module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO); +MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); + +unsigned int ib_qib_max_mcast_grps = 16384; +module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO); +MODULE_PARM_DESC(max_mcast_grps, + "Maximum number of multicast groups to support"); + +unsigned int ib_qib_max_mcast_qp_attached = 16; +module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached, + uint, S_IRUGO); +MODULE_PARM_DESC(max_mcast_qp_attached, + "Maximum number of attached QPs to support"); + 
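+/*
+ * All of the limits in this block are read-only module parameters
+ * (S_IRUGO), so they can only be chosen at load time; a hypothetical
+ * invocation, assuming the driver is built as the ib_qib module,
+ * might look like:
+ *
+ *   modprobe ib_qib qp_table_size=512 max_qps=8192 lkey_table_size=17
+ *
+ * disable_sma (below) is the exception: it is also S_IWUSR, so it can
+ * be toggled at run time through
+ * /sys/module/ib_qib/parameters/disable_sma.
+ */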
+unsigned int ib_qib_max_srqs = 1024; +module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO); +MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); + +unsigned int ib_qib_max_srq_sges = 128; +module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO); +MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); + +unsigned int ib_qib_max_srq_wrs = 0x1FFFF; +module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO); +MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); + +static unsigned int ib_qib_disable_sma; +module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(disable_sma, "Disable the SMA"); + +/* + * Note that it is OK to post send work requests in the SQE and ERR + * states; qib_do_send() will process them and generate error + * completions as per IB 1.2 C10-96. + */ +const int ib_qib_state_ops[IB_QPS_ERR + 1] = { + [IB_QPS_RESET] = 0, + [IB_QPS_INIT] = QIB_POST_RECV_OK, + [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK, + [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | + QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK | + QIB_PROCESS_NEXT_SEND_OK, + [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | + QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK, + [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | + QIB_POST_SEND_OK | QIB_FLUSH_SEND, + [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV | + QIB_POST_SEND_OK | QIB_FLUSH_SEND, +}; + +struct qib_ucontext { + struct ib_ucontext ibucontext; +}; + +static inline struct qib_ucontext *to_iucontext(struct ib_ucontext + *ibucontext) +{ + return container_of(ibucontext, struct qib_ucontext, ibucontext); +} + +/* + * Translate ib_wr_opcode into ib_wc_opcode. + */ +const enum ib_wc_opcode ib_qib_wc_opcode[] = { + [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, + [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, + [IB_WR_SEND] = IB_WC_SEND, + [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, + [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, + [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, + [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD +}; + +/* + * System image GUID. 
+ */ +__be64 ib_qib_sys_image_guid; + +/** + * qib_copy_sge - copy data to SGE memory + * @ss: the SGE state + * @data: the data to copy + * @length: the length of the data + */ +void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) +{ + struct qib_sge *sge = &ss->sge; + + while (length) { + u32 len = sge->length; + + if (len > length) + len = length; + if (len > sge->sge_length) + len = sge->sge_length; + BUG_ON(len == 0); + memcpy(sge->vaddr, data, len); + sge->vaddr += len; + sge->length -= len; + sge->sge_length -= len; + if (sge->sge_length == 0) { + if (release) + atomic_dec(&sge->mr->refcount); + if (--ss->num_sge) + *sge = *ss->sg_list++; + } else if (sge->length == 0 && sge->mr->lkey) { + if (++sge->n >= QIB_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + break; + sge->n = 0; + } + sge->vaddr = + sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = + sge->mr->map[sge->m]->segs[sge->n].length; + } + data += len; + length -= len; + } +} + +/** + * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func + * @ss: the SGE state + * @length: the number of bytes to skip + */ +void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) +{ + struct qib_sge *sge = &ss->sge; + + while (length) { + u32 len = sge->length; + + if (len > length) + len = length; + if (len > sge->sge_length) + len = sge->sge_length; + BUG_ON(len == 0); + sge->vaddr += len; + sge->length -= len; + sge->sge_length -= len; + if (sge->sge_length == 0) { + if (release) + atomic_dec(&sge->mr->refcount); + if (--ss->num_sge) + *sge = *ss->sg_list++; + } else if (sge->length == 0 && sge->mr->lkey) { + if (++sge->n >= QIB_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + break; + sge->n = 0; + } + sge->vaddr = + sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = + sge->mr->map[sge->m]->segs[sge->n].length; + } + length -= len; + } +} + +/* + * Count the number of DMA descriptors needed to send length bytes of data. + * Don't modify the qib_sge_state to get the count. + * Return zero if any of the segments is not aligned. + */ +static u32 qib_count_sge(struct qib_sge_state *ss, u32 length) +{ + struct qib_sge *sg_list = ss->sg_list; + struct qib_sge sge = ss->sge; + u8 num_sge = ss->num_sge; + u32 ndesc = 1; /* count the header */ + + while (length) { + u32 len = sge.length; + + if (len > length) + len = length; + if (len > sge.sge_length) + len = sge.sge_length; + BUG_ON(len == 0); + if (((long) sge.vaddr & (sizeof(u32) - 1)) || + (len != length && (len & (sizeof(u32) - 1)))) { + ndesc = 0; + break; + } + ndesc++; + sge.vaddr += len; + sge.length -= len; + sge.sge_length -= len; + if (sge.sge_length == 0) { + if (--num_sge) + sge = *sg_list++; + } else if (sge.length == 0 && sge.mr->lkey) { + if (++sge.n >= QIB_SEGSZ) { + if (++sge.m >= sge.mr->mapsz) + break; + sge.n = 0; + } + sge.vaddr = + sge.mr->map[sge.m]->segs[sge.n].vaddr; + sge.length = + sge.mr->map[sge.m]->segs[sge.n].length; + } + length -= len; + } + return ndesc; +} + +/* + * Copy from the SGEs to the data buffer. 
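+ * This is the same three-level walk used by qib_copy_sge(),
+ * qib_skip_sge() and qib_count_sge() above, roughly:
+ *
+ *   len = min(sge->length, sge->sge_length, remaining);
+ *   <consume len bytes>;
+ *   if (sge->sge_length == 0)
+ *           advance to the next sg_list entry;
+ *   else if (sge->length == 0 && sge->mr->lkey)
+ *           advance to the next MR segment (map[m]->segs[n],
+ *           QIB_SEGSZ entries per map);
+ *
+ * only the per-chunk action (copy direction, skip or count) differs.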
+ */ +static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) +{ + struct qib_sge *sge = &ss->sge; + + while (length) { + u32 len = sge->length; + + if (len > length) + len = length; + if (len > sge->sge_length) + len = sge->sge_length; + BUG_ON(len == 0); + memcpy(data, sge->vaddr, len); + sge->vaddr += len; + sge->length -= len; + sge->sge_length -= len; + if (sge->sge_length == 0) { + if (--ss->num_sge) + *sge = *ss->sg_list++; + } else if (sge->length == 0 && sge->mr->lkey) { + if (++sge->n >= QIB_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + break; + sge->n = 0; + } + sge->vaddr = + sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = + sge->mr->map[sge->m]->segs[sge->n].length; + } + data += len; + length -= len; + } +} + +/** + * qib_post_one_send - post one RC, UC, or UD send work request + * @qp: the QP to post on + * @wr: the work request to send + */ +static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr) +{ + struct qib_swqe *wqe; + u32 next; + int i; + int j; + int acc; + int ret; + unsigned long flags; + struct qib_lkey_table *rkt; + struct qib_pd *pd; + + spin_lock_irqsave(&qp->s_lock, flags); + + /* Check that state is OK to post send. */ + if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) + goto bail_inval; + + /* IB spec says that num_sge == 0 is OK. */ + if (wr->num_sge > qp->s_max_sge) + goto bail_inval; + + /* + * Don't allow RDMA reads or atomic operations on UC or + * undefined operations. + * Make sure buffer is large enough to hold the result for atomics. + */ + if (wr->opcode == IB_WR_FAST_REG_MR) { + if (qib_fast_reg_mr(qp, wr)) + goto bail_inval; + } else if (qp->ibqp.qp_type == IB_QPT_UC) { + if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) + goto bail_inval; + } else if (qp->ibqp.qp_type != IB_QPT_RC) { + /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */ + if (wr->opcode != IB_WR_SEND && + wr->opcode != IB_WR_SEND_WITH_IMM) + goto bail_inval; + /* Check UD destination address PD */ + if (qp->ibqp.pd != wr->wr.ud.ah->pd) + goto bail_inval; + } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) + goto bail_inval; + else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && + (wr->num_sge == 0 || + wr->sg_list[0].length < sizeof(u64) || + wr->sg_list[0].addr & (sizeof(u64) - 1))) + goto bail_inval; + else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) + goto bail_inval; + + next = qp->s_head + 1; + if (next >= qp->s_size) + next = 0; + if (next == qp->s_last) { + ret = -ENOMEM; + goto bail; + } + + rkt = &to_idev(qp->ibqp.device)->lk_table; + pd = to_ipd(qp->ibqp.pd); + wqe = get_swqe_ptr(qp, qp->s_head); + wqe->wr = *wr; + wqe->length = 0; + j = 0; + if (wr->num_sge) { + acc = wr->opcode >= IB_WR_RDMA_READ ? 
+ IB_ACCESS_LOCAL_WRITE : 0; + for (i = 0; i < wr->num_sge; i++) { + u32 length = wr->sg_list[i].length; + int ok; + + if (length == 0) + continue; + ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j], + &wr->sg_list[i], acc); + if (!ok) + goto bail_inval_free; + wqe->length += length; + j++; + } + wqe->wr.num_sge = j; + } + if (qp->ibqp.qp_type == IB_QPT_UC || + qp->ibqp.qp_type == IB_QPT_RC) { + if (wqe->length > 0x80000000U) + goto bail_inval_free; + } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + + qp->port_num - 1)->ibmtu) + goto bail_inval_free; + else + atomic_inc(&to_iah(wr->wr.ud.ah)->refcount); + wqe->ssn = qp->s_ssn++; + qp->s_head = next; + + ret = 0; + goto bail; + +bail_inval_free: + while (j) { + struct qib_sge *sge = &wqe->sg_list[--j]; + + atomic_dec(&sge->mr->refcount); + } +bail_inval: + ret = -EINVAL; +bail: + spin_unlock_irqrestore(&qp->s_lock, flags); + return ret; +} + +/** + * qib_post_send - post a send on a QP + * @ibqp: the QP to post the send on + * @wr: the list of work requests to post + * @bad_wr: the first bad WR is put here + * + * This may be called from interrupt context. + */ +static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct qib_qp *qp = to_iqp(ibqp); + int err = 0; + + for (; wr; wr = wr->next) { + err = qib_post_one_send(qp, wr); + if (err) { + *bad_wr = wr; + goto bail; + } + } + + /* Try to do the send work in the caller's context. */ + qib_do_send(&qp->s_work); + +bail: + return err; +} + +/** + * qib_post_receive - post a receive on a QP + * @ibqp: the QP to post the receive on + * @wr: the WR to post + * @bad_wr: the first bad WR is put here + * + * This may be called from interrupt context. + */ +static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct qib_qp *qp = to_iqp(ibqp); + struct qib_rwq *wq = qp->r_rq.wq; + unsigned long flags; + int ret; + + /* Check that state is OK to post receive. */ + if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) { + *bad_wr = wr; + ret = -EINVAL; + goto bail; + } + + for (; wr; wr = wr->next) { + struct qib_rwqe *wqe; + u32 next; + int i; + + if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { + *bad_wr = wr; + ret = -EINVAL; + goto bail; + } + + spin_lock_irqsave(&qp->r_rq.lock, flags); + next = wq->head + 1; + if (next >= qp->r_rq.size) + next = 0; + if (next == wq->tail) { + spin_unlock_irqrestore(&qp->r_rq.lock, flags); + *bad_wr = wr; + ret = -ENOMEM; + goto bail; + } + + wqe = get_rwqe_ptr(&qp->r_rq, wq->head); + wqe->wr_id = wr->wr_id; + wqe->num_sge = wr->num_sge; + for (i = 0; i < wr->num_sge; i++) + wqe->sg_list[i] = wr->sg_list[i]; + /* Make sure queue entry is written before the head index. */ + smp_wmb(); + wq->head = next; + spin_unlock_irqrestore(&qp->r_rq.lock, flags); + } + ret = 0; + +bail: + return ret; +} + +/** + * qib_qp_rcv - processing an incoming packet on a QP + * @rcd: the context pointer + * @hdr: the packet header + * @has_grh: true if the packet has a GRH + * @data: the packet data + * @tlen: the packet length + * @qp: the QP the packet came on + * + * This is called from qib_ib_rcv() to process an incoming packet + * for the given QP. + * Called at interrupt level. + */ +static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, + int has_grh, void *data, u32 tlen, struct qib_qp *qp) +{ + struct qib_ibport *ibp = &rcd->ppd->ibport_data; + + /* Check for valid receive state. 
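+ * Per the ib_qib_state_ops[] table above, QIB_PROCESS_RECV_OK is set
+ * only in the RTR, RTS, SQD and SQE states; a QP still in IB_QPS_INIT
+ * may have receives posted (QIB_POST_RECV_OK) but any packet that
+ * arrives for it is counted in n_pkt_drops and discarded here.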
*/ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { + ibp->n_pkt_drops++; + return; + } + + switch (qp->ibqp.qp_type) { + case IB_QPT_SMI: + case IB_QPT_GSI: + if (ib_qib_disable_sma) + break; + /* FALLTHROUGH */ + case IB_QPT_UD: + qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); + break; + + case IB_QPT_RC: + qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp); + break; + + case IB_QPT_UC: + qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp); + break; + + default: + break; + } +} + +/** + * qib_ib_rcv - process an incoming packet + * @rcd: the context pointer + * @rhdr: the header of the packet + * @data: the packet payload + * @tlen: the packet length + * + * This is called from qib_kreceive() to process an incoming packet at + * interrupt level. Tlen is the length of the header + data + CRC in bytes. + */ +void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) +{ + struct qib_pportdata *ppd = rcd->ppd; + struct qib_ibport *ibp = &ppd->ibport_data; + struct qib_ib_header *hdr = rhdr; + struct qib_other_headers *ohdr; + struct qib_qp *qp; + u32 qp_num; + int lnh; + u8 opcode; + u16 lid; + + /* 24 == LRH+BTH+CRC */ + if (unlikely(tlen < 24)) + goto drop; + + /* Check for a valid destination LID (see ch. 7.11.1). */ + lid = be16_to_cpu(hdr->lrh[1]); + if (lid < QIB_MULTICAST_LID_BASE) { + lid &= ~((1 << ppd->lmc) - 1); + if (unlikely(lid != ppd->lid)) + goto drop; + } + + /* Check for GRH */ + lnh = be16_to_cpu(hdr->lrh[0]) & 3; + if (lnh == QIB_LRH_BTH) + ohdr = &hdr->u.oth; + else if (lnh == QIB_LRH_GRH) { + u32 vtf; + + ohdr = &hdr->u.l.oth; + if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) + goto drop; + vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); + if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) + goto drop; + } else + goto drop; + + opcode = be32_to_cpu(ohdr->bth[0]) >> 24; + ibp->opstats[opcode & 0x7f].n_bytes += tlen; + ibp->opstats[opcode & 0x7f].n_packets++; + + /* Get the destination QP number. */ + qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; + if (qp_num == QIB_MULTICAST_QPN) { + struct qib_mcast *mcast; + struct qib_mcast_qp *p; + + if (lnh != QIB_LRH_GRH) + goto drop; + mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid); + if (mcast == NULL) + goto drop; + ibp->n_multicast_rcv++; + list_for_each_entry_rcu(p, &mcast->qp_list, list) + qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); + /* + * Notify qib_multicast_detach() if it is waiting for us + * to finish. + */ + if (atomic_dec_return(&mcast->refcount) <= 1) + wake_up(&mcast->wait); + } else { + qp = qib_lookup_qpn(ibp, qp_num); + if (!qp) + goto drop; + ibp->n_unicast_rcv++; + qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); + /* + * Notify qib_destroy_qp() if it is waiting + * for us to finish. + */ + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); + } + return; + +drop: + ibp->n_pkt_drops++; +} + +/* + * This is called from a timer to check for QPs + * which need kernel memory in order to send a packet. 
+ */ +static void mem_timer(unsigned long data) +{ + struct qib_ibdev *dev = (struct qib_ibdev *) data; + struct list_head *list = &dev->memwait; + struct qib_qp *qp = NULL; + unsigned long flags; + + spin_lock_irqsave(&dev->pending_lock, flags); + if (!list_empty(list)) { + qp = list_entry(list->next, struct qib_qp, iowait); + list_del_init(&qp->iowait); + atomic_inc(&qp->refcount); + if (!list_empty(list)) + mod_timer(&dev->mem_timer, jiffies + 1); + } + spin_unlock_irqrestore(&dev->pending_lock, flags); + + if (qp) { + spin_lock_irqsave(&qp->s_lock, flags); + if (qp->s_flags & QIB_S_WAIT_KMEM) { + qp->s_flags &= ~QIB_S_WAIT_KMEM; + qib_schedule_send(qp); + } + spin_unlock_irqrestore(&qp->s_lock, flags); + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); + } +} + +static void update_sge(struct qib_sge_state *ss, u32 length) +{ + struct qib_sge *sge = &ss->sge; + + sge->vaddr += length; + sge->length -= length; + sge->sge_length -= length; + if (sge->sge_length == 0) { + if (--ss->num_sge) + *sge = *ss->sg_list++; + } else if (sge->length == 0 && sge->mr->lkey) { + if (++sge->n >= QIB_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + return; + sge->n = 0; + } + sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = sge->mr->map[sge->m]->segs[sge->n].length; + } +} + +#ifdef __LITTLE_ENDIAN +static inline u32 get_upper_bits(u32 data, u32 shift) +{ + return data >> shift; +} + +static inline u32 set_upper_bits(u32 data, u32 shift) +{ + return data << shift; +} + +static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) +{ + data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); + data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); + return data; +} +#else +static inline u32 get_upper_bits(u32 data, u32 shift) +{ + return data << shift; +} + +static inline u32 set_upper_bits(u32 data, u32 shift) +{ + return data >> shift; +} + +static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) +{ + data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); + data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); + return data; +} +#endif + +static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss, + u32 length, unsigned flush_wc) +{ + u32 extra = 0; + u32 data = 0; + u32 last; + + while (1) { + u32 len = ss->sge.length; + u32 off; + + if (len > length) + len = length; + if (len > ss->sge.sge_length) + len = ss->sge.sge_length; + BUG_ON(len == 0); + /* If the source address is not aligned, try to align it. */ + off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); + if (off) { + u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & + ~(sizeof(u32) - 1)); + u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); + u32 y; + + y = sizeof(u32) - off; + if (len > y) + len = y; + if (len + extra >= sizeof(u32)) { + data |= set_upper_bits(v, extra * + BITS_PER_BYTE); + len = sizeof(u32) - extra; + if (len == length) { + last = data; + break; + } + __raw_writel(data, piobuf); + piobuf++; + extra = 0; + data = 0; + } else { + /* Clear unused upper bytes */ + data |= clear_upper_bytes(v, len, extra); + if (len == length) { + last = data; + break; + } + extra += len; + } + } else if (extra) { + /* Source address is aligned. */ + u32 *addr = (u32 *) ss->sge.vaddr; + int shift = extra * BITS_PER_BYTE; + int ushift = 32 - shift; + u32 l = len; + + while (l >= sizeof(u32)) { + u32 v = *addr; + + data |= set_upper_bits(v, shift); + __raw_writel(data, piobuf); + data = get_upper_bits(v, ushift); + piobuf++; + addr++; + l -= sizeof(u32); + } + /* + * We still have 'extra' number of bytes leftover. 
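+ * (As an illustration of the carry, on little endian per the helpers
+ * above: with extra = 2 each aligned source word v was split by the
+ * loop as data |= v << 16 to finish the buffered dword and
+ * data = v >> 16 to start the next, so two pending bytes ride between
+ * successive __raw_writel() calls and land here when fewer than four
+ * bytes remain.)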
+ */ + if (l) { + u32 v = *addr; + + if (l + extra >= sizeof(u32)) { + data |= set_upper_bits(v, shift); + len -= l + extra - sizeof(u32); + if (len == length) { + last = data; + break; + } + __raw_writel(data, piobuf); + piobuf++; + extra = 0; + data = 0; + } else { + /* Clear unused upper bytes */ + data |= clear_upper_bytes(v, l, extra); + if (len == length) { + last = data; + break; + } + extra += l; + } + } else if (len == length) { + last = data; + break; + } + } else if (len == length) { + u32 w; + + /* + * Need to round up for the last dword in the + * packet. + */ + w = (len + 3) >> 2; + qib_pio_copy(piobuf, ss->sge.vaddr, w - 1); + piobuf += w - 1; + last = ((u32 *) ss->sge.vaddr)[w - 1]; + break; + } else { + u32 w = len >> 2; + + qib_pio_copy(piobuf, ss->sge.vaddr, w); + piobuf += w; + + extra = len & (sizeof(u32) - 1); + if (extra) { + u32 v = ((u32 *) ss->sge.vaddr)[w]; + + /* Clear unused upper bytes */ + data = clear_upper_bytes(v, extra, 0); + } + } + update_sge(ss, len); + length -= len; + } + /* Update address before sending packet. */ + update_sge(ss, length); + if (flush_wc) { + /* must flush early everything before trigger word */ + qib_flush_wc(); + __raw_writel(last, piobuf); + /* be sure trigger word is written */ + qib_flush_wc(); + } else + __raw_writel(last, piobuf); +} + +static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev, + struct qib_qp *qp, int *retp) +{ + struct qib_verbs_txreq *tx; + unsigned long flags; + + spin_lock_irqsave(&qp->s_lock, flags); + spin_lock(&dev->pending_lock); + + if (!list_empty(&dev->txreq_free)) { + struct list_head *l = dev->txreq_free.next; + + list_del(l); + tx = list_entry(l, struct qib_verbs_txreq, txreq.list); + *retp = 0; + } else { + if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK && + list_empty(&qp->iowait)) { + dev->n_txwait++; + qp->s_flags |= QIB_S_WAIT_TX; + list_add_tail(&qp->iowait, &dev->txwait); + } + tx = NULL; + qp->s_flags &= ~QIB_S_BUSY; + *retp = -EBUSY; + } + + spin_unlock(&dev->pending_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); + + return tx; +} + +void qib_put_txreq(struct qib_verbs_txreq *tx) +{ + struct qib_ibdev *dev; + struct qib_qp *qp; + unsigned long flags; + + qp = tx->qp; + dev = to_idev(qp->ibqp.device); + + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); + if (tx->mr) { + atomic_dec(&tx->mr->refcount); + tx->mr = NULL; + } + if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { + tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF; + dma_unmap_single(&dd_from_dev(dev)->pcidev->dev, + tx->txreq.addr, tx->hdr_dwords << 2, + DMA_TO_DEVICE); + kfree(tx->align_buf); + } + + spin_lock_irqsave(&dev->pending_lock, flags); + + /* Put struct back on free list */ + list_add(&tx->txreq.list, &dev->txreq_free); + + if (!list_empty(&dev->txwait)) { + /* Wake up first QP wanting a free struct */ + qp = list_entry(dev->txwait.next, struct qib_qp, iowait); + list_del_init(&qp->iowait); + atomic_inc(&qp->refcount); + spin_unlock_irqrestore(&dev->pending_lock, flags); + + spin_lock_irqsave(&qp->s_lock, flags); + if (qp->s_flags & QIB_S_WAIT_TX) { + qp->s_flags &= ~QIB_S_WAIT_TX; + qib_schedule_send(qp); + } + spin_unlock_irqrestore(&qp->s_lock, flags); + + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); + } else + spin_unlock_irqrestore(&dev->pending_lock, flags); +} + +/* + * This is called when there are send DMA descriptors that might be + * available. + * + * This is called with ppd->sdma_lock held. 
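+ * Up to ARRAY_SIZE(qps) waiters are taken off the dmawait list in + * FIFO order, stopping early once the available descriptor count is + * used up; each QP is then rescheduled outside the pending_lock.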
+ */ +void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail) +{ + struct qib_qp *qp, *nqp; + struct qib_qp *qps[20]; + struct qib_ibdev *dev; + unsigned i, n; + + n = 0; + dev = &ppd->dd->verbs_dev; + spin_lock(&dev->pending_lock); + + /* Search wait list for first QP wanting DMA descriptors. */ + list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) { + if (qp->port_num != ppd->port) + continue; + if (n == ARRAY_SIZE(qps)) + break; + if (qp->s_tx->txreq.sg_count > avail) + break; + avail -= qp->s_tx->txreq.sg_count; + list_del_init(&qp->iowait); + atomic_inc(&qp->refcount); + qps[n++] = qp; + } + + spin_unlock(&dev->pending_lock); + + for (i = 0; i < n; i++) { + qp = qps[i]; + spin_lock(&qp->s_lock); + if (qp->s_flags & QIB_S_WAIT_DMA_DESC) { + qp->s_flags &= ~QIB_S_WAIT_DMA_DESC; + qib_schedule_send(qp); + } + spin_unlock(&qp->s_lock); + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); + } +} + +/* + * This is called with ppd->sdma_lock held. + */ +static void sdma_complete(struct qib_sdma_txreq *cookie, int status) +{ + struct qib_verbs_txreq *tx = + container_of(cookie, struct qib_verbs_txreq, txreq); + struct qib_qp *qp = tx->qp; + + spin_lock(&qp->s_lock); + if (tx->wqe) + qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS); + else if (qp->ibqp.qp_type == IB_QPT_RC) { + struct qib_ib_header *hdr; + + if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) + hdr = &tx->align_buf->hdr; + else { + struct qib_ibdev *dev = to_idev(qp->ibqp.device); + + hdr = &dev->pio_hdrs[tx->hdr_inx].hdr; + } + qib_rc_send_complete(qp, hdr); + } + if (atomic_dec_and_test(&qp->s_dma_busy)) { + if (qp->state == IB_QPS_RESET) + wake_up(&qp->wait_dma); + else if (qp->s_flags & QIB_S_WAIT_DMA) { + qp->s_flags &= ~QIB_S_WAIT_DMA; + qib_schedule_send(qp); + } + } + spin_unlock(&qp->s_lock); + + qib_put_txreq(tx); +} + +static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&qp->s_lock, flags); + if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { + spin_lock(&dev->pending_lock); + if (list_empty(&qp->iowait)) { + if (list_empty(&dev->memwait)) + mod_timer(&dev->mem_timer, jiffies + 1); + qp->s_flags |= QIB_S_WAIT_KMEM; + list_add_tail(&qp->iowait, &dev->memwait); + } + spin_unlock(&dev->pending_lock); + qp->s_flags &= ~QIB_S_BUSY; + ret = -EBUSY; + } + spin_unlock_irqrestore(&qp->s_lock, flags); + + return ret; +} + +static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, + u32 hdrwords, struct qib_sge_state *ss, u32 len, + u32 plen, u32 dwords) +{ + struct qib_ibdev *dev = to_idev(qp->ibqp.device); + struct qib_devdata *dd = dd_from_dev(dev); + struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_verbs_txreq *tx; + struct qib_pio_header *phdr; + u32 control; + u32 ndesc; + int ret; + + tx = qp->s_tx; + if (tx) { + qp->s_tx = NULL; + /* resend previously constructed packet */ + ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx); + goto bail; + } + + tx = get_txreq(dev, qp, &ret); + if (!tx) + goto bail; + + control = dd->f_setpbc_control(ppd, plen, qp->s_srate, + be16_to_cpu(hdr->lrh[0]) >> 12); + tx->qp = qp; + atomic_inc(&qp->refcount); + tx->wqe = qp->s_wqe; + tx->mr = qp->s_rdma_mr; + if (qp->s_rdma_mr) + qp->s_rdma_mr = NULL; + tx->txreq.callback = sdma_complete; + if (dd->flags & QIB_HAS_SDMA_TIMEOUT) + tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST; + else + tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ; + if (plen 
+ 1 > dd->piosize2kmax_dwords) + tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF; + + if (len) { + /* + * Don't try to DMA if it takes more descriptors than + * the queue holds. + */ + ndesc = qib_count_sge(ss, len); + if (ndesc >= ppd->sdma_descq_cnt) + ndesc = 0; + } else + ndesc = 1; + if (ndesc) { + phdr = &dev->pio_hdrs[tx->hdr_inx]; + phdr->pbc[0] = cpu_to_le32(plen); + phdr->pbc[1] = cpu_to_le32(control); + memcpy(&phdr->hdr, hdr, hdrwords << 2); + tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC; + tx->txreq.sg_count = ndesc; + tx->txreq.addr = dev->pio_hdrs_phys + + tx->hdr_inx * sizeof(struct qib_pio_header); + tx->hdr_dwords = hdrwords + 2; /* add PBC length */ + ret = qib_sdma_verbs_send(ppd, ss, dwords, tx); + goto bail; + } + + /* Allocate a buffer and copy the header and payload to it. */ + tx->hdr_dwords = plen + 1; + phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC); + if (!phdr) + goto err_tx; + phdr->pbc[0] = cpu_to_le32(plen); + phdr->pbc[1] = cpu_to_le32(control); + memcpy(&phdr->hdr, hdr, hdrwords << 2); + qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len); + + tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr, + tx->hdr_dwords << 2, DMA_TO_DEVICE); + if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr)) + goto map_err; + tx->align_buf = phdr; + tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF; + tx->txreq.sg_count = 1; + ret = qib_sdma_verbs_send(ppd, NULL, 0, tx); + goto unaligned; + +map_err: + kfree(phdr); +err_tx: + qib_put_txreq(tx); + ret = wait_kmem(dev, qp); +unaligned: + ibp->n_unaligned++; +bail: + return ret; +} + +/* + * If we are now in the error state, return zero to flush the + * send work request. + */ +static int no_bufs_available(struct qib_qp *qp) +{ + struct qib_ibdev *dev = to_idev(qp->ibqp.device); + struct qib_devdata *dd; + unsigned long flags; + int ret = 0; + + /* + * Note that as soon as want_buffer() is called and + * possibly before it returns, qib_ib_piobufavail() + * could be called. Therefore, put QP on the I/O wait list before + * enabling the PIO avail interrupt. + */ + spin_lock_irqsave(&qp->s_lock, flags); + if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { + spin_lock(&dev->pending_lock); + if (list_empty(&qp->iowait)) { + dev->n_piowait++; + qp->s_flags |= QIB_S_WAIT_PIO; + list_add_tail(&qp->iowait, &dev->piowait); + dd = dd_from_dev(dev); + dd->f_wantpiobuf_intr(dd, 1); + } + spin_unlock(&dev->pending_lock); + qp->s_flags &= ~QIB_S_BUSY; + ret = -EBUSY; + } + spin_unlock_irqrestore(&qp->s_lock, flags); + return ret; +} + +static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, + u32 hdrwords, struct qib_sge_state *ss, u32 len, + u32 plen, u32 dwords) +{ + struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct qib_pportdata *ppd = dd->pport + qp->port_num - 1; + u32 *hdr = (u32 *) ibhdr; + u32 __iomem *piobuf_orig; + u32 __iomem *piobuf; + u64 pbc; + unsigned long flags; + unsigned flush_wc; + u32 control; + u32 pbufn; + + control = dd->f_setpbc_control(ppd, plen, qp->s_srate, + be16_to_cpu(ibhdr->lrh[0]) >> 12); + pbc = ((u64) control << 32) | plen; + piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn); + if (unlikely(piobuf == NULL)) + return no_bufs_available(qp); + + /* + * Write the pbc. + * We have to flush after the PBC for correctness on some cpus + * or WC buffer can be written out of order. 
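+ * The PBC is written as a single 64-bit value; the header and any + * payload then start two 32-bit words in (piobuf + 2).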
+ */ + writeq(pbc, piobuf); + piobuf_orig = piobuf; + piobuf += 2; + + flush_wc = dd->flags & QIB_PIO_FLUSH_WC; + if (len == 0) { + /* + * If there is just the header portion, must flush before + * writing last word of header for correctness, and after + * the last header word (trigger word). + */ + if (flush_wc) { + qib_flush_wc(); + qib_pio_copy(piobuf, hdr, hdrwords - 1); + qib_flush_wc(); + __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); + qib_flush_wc(); + } else + qib_pio_copy(piobuf, hdr, hdrwords); + goto done; + } + + if (flush_wc) + qib_flush_wc(); + qib_pio_copy(piobuf, hdr, hdrwords); + piobuf += hdrwords; + + /* The common case is aligned and contained in one segment. */ + if (likely(ss->num_sge == 1 && len <= ss->sge.length && + !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { + u32 *addr = (u32 *) ss->sge.vaddr; + + /* Update address before sending packet. */ + update_sge(ss, len); + if (flush_wc) { + qib_pio_copy(piobuf, addr, dwords - 1); + /* must flush early everything before trigger word */ + qib_flush_wc(); + __raw_writel(addr[dwords - 1], piobuf + dwords - 1); + /* be sure trigger word is written */ + qib_flush_wc(); + } else + qib_pio_copy(piobuf, addr, dwords); + goto done; + } + copy_io(piobuf, ss, len, flush_wc); +done: + if (dd->flags & QIB_USE_SPCL_TRIG) { + u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; + qib_flush_wc(); + __raw_writel(0xaebecede, piobuf_orig + spcl_off); + } + qib_sendbuf_done(dd, pbufn); + if (qp->s_rdma_mr) { + atomic_dec(&qp->s_rdma_mr->refcount); + qp->s_rdma_mr = NULL; + } + if (qp->s_wqe) { + spin_lock_irqsave(&qp->s_lock, flags); + qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); + spin_unlock_irqrestore(&qp->s_lock, flags); + } else if (qp->ibqp.qp_type == IB_QPT_RC) { + spin_lock_irqsave(&qp->s_lock, flags); + qib_rc_send_complete(qp, ibhdr); + spin_unlock_irqrestore(&qp->s_lock, flags); + } + return 0; +} + +/** + * qib_verbs_send - send a packet + * @qp: the QP to send on + * @hdr: the packet header + * @hdrwords: the number of 32-bit words in the header + * @ss: the SGE to send + * @len: the length of the packet in bytes + * + * Return zero if packet is sent or queued OK. + * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise. + */ +int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, + u32 hdrwords, struct qib_sge_state *ss, u32 len) +{ + struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); + u32 plen; + int ret; + u32 dwords = (len + 3) >> 2; + + /* + * Calculate the send buffer trigger address. + * The +1 counts for the pbc control dword following the pbc length. + */ + plen = hdrwords + dwords + 1; + + /* + * VL15 packets (IB_QPT_SMI) will always use PIO, so we + * can defer SDMA restart until link goes ACTIVE without + * worrying about just how we got there. + */ + if (qp->ibqp.qp_type == IB_QPT_SMI || + !(dd->flags & QIB_HAS_SEND_DMA)) + ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len, + plen, dwords); + else + ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len, + plen, dwords); + + return ret; +} + +int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, + u64 *rwords, u64 *spkts, u64 *rpkts, + u64 *xmit_wait) +{ + int ret; + struct qib_devdata *dd = ppd->dd; + + if (!(dd->flags & QIB_PRESENT)) { + /* no hardware, freeze, etc. 
*/ + ret = -EINVAL; + goto bail; + } + *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND); + *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV); + *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND); + *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV); + *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL); + + ret = 0; + +bail: + return ret; +} + +/** + * qib_get_counters - get various chip counters + * @ppd: the physical port data + * @cntrs: counters are placed here + * + * Return the counters needed by recv_pma_get_portcounters(). + */ +int qib_get_counters(struct qib_pportdata *ppd, + struct qib_verbs_counters *cntrs) +{ + int ret; + + if (!(ppd->dd->flags & QIB_PRESENT)) { + /* no hardware, freeze, etc. */ + ret = -EINVAL; + goto bail; + } + cntrs->symbol_error_counter = + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR); + cntrs->link_error_recovery_counter = + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV); + /* + * The link downed counter counts when the other side downs the + * connection. We add in the number of times we downed the link + * due to local link integrity errors to compensate. + */ + cntrs->link_downed_counter = + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN); + cntrs->port_rcv_errors = + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) + + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) + + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) + + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) + + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) + + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) + + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) + + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) + + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT); + cntrs->port_rcv_errors += + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR); + cntrs->port_rcv_errors += + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR); + cntrs->port_rcv_remphys_errors = + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP); + cntrs->port_xmit_discards = + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL); + cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd, + QIBPORTCNTR_WORDSEND); + cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd, + QIBPORTCNTR_WORDRCV); + cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd, + QIBPORTCNTR_PKTSEND); + cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd, + QIBPORTCNTR_PKTRCV); + cntrs->local_link_integrity_errors = + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI); + cntrs->excessive_buffer_overrun_errors = + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL); + cntrs->vl15_dropped = + ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP); + + ret = 0; + +bail: + return ret; +} + +/** + * qib_ib_piobufavail - callback when a PIO buffer is available + * @dd: the device pointer + * + * This is called from qib_intr() at interrupt level when a PIO buffer is + * available after qib_verbs_send() returned an error that no buffers were + * available. Disable the interrupt if there are no more QPs waiting. + */ +void qib_ib_piobufavail(struct qib_devdata *dd) +{ + struct qib_ibdev *dev = &dd->verbs_dev; + struct list_head *list; + struct qib_qp *qps[5]; + struct qib_qp *qp; + unsigned long flags; + unsigned i, n; + + list = &dev->piowait; + n = 0; + + /* + * Note: checking that the piowait list is empty and clearing + * the buffer available interrupt needs to be atomic or we + * could end up with QPs on the wait list with the interrupt + * disabled.
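+ * Both are therefore done under dev->pending_lock. At most + * ARRAY_SIZE(qps) QPs are drained per callback; if the list cannot + * be emptied, the interrupt is left enabled and we try again later.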
+ */ + spin_lock_irqsave(&dev->pending_lock, flags); + while (!list_empty(list)) { + if (n == ARRAY_SIZE(qps)) + goto full; + qp = list_entry(list->next, struct qib_qp, iowait); + list_del_init(&qp->iowait); + atomic_inc(&qp->refcount); + qps[n++] = qp; + } + dd->f_wantpiobuf_intr(dd, 0); +full: + spin_unlock_irqrestore(&dev->pending_lock, flags); + + for (i = 0; i < n; i++) { + qp = qps[i]; + + spin_lock_irqsave(&qp->s_lock, flags); + if (qp->s_flags & QIB_S_WAIT_PIO) { + qp->s_flags &= ~QIB_S_WAIT_PIO; + qib_schedule_send(qp); + } + spin_unlock_irqrestore(&qp->s_lock, flags); + + /* Notify qib_destroy_qp() if it is waiting. */ + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); + } +} + +static int qib_query_device(struct ib_device *ibdev, + struct ib_device_attr *props) +{ + struct qib_devdata *dd = dd_from_ibdev(ibdev); + struct qib_ibdev *dev = to_idev(ibdev); + + memset(props, 0, sizeof(*props)); + + props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | + IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | + IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | + IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE; + props->page_size_cap = PAGE_SIZE; + props->vendor_id = + QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3; + props->vendor_part_id = dd->deviceid; + props->hw_ver = dd->minrev; + props->sys_image_guid = ib_qib_sys_image_guid; + props->max_mr_size = ~0ULL; + props->max_qp = ib_qib_max_qps; + props->max_qp_wr = ib_qib_max_qp_wrs; + props->max_sge = ib_qib_max_sges; + props->max_cq = ib_qib_max_cqs; + props->max_ah = ib_qib_max_ahs; + props->max_cqe = ib_qib_max_cqes; + props->max_mr = dev->lk_table.max; + props->max_fmr = dev->lk_table.max; + props->max_map_per_fmr = 32767; + props->max_pd = ib_qib_max_pds; + props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; + props->max_qp_init_rd_atom = 255; + /* props->max_res_rd_atom */ + props->max_srq = ib_qib_max_srqs; + props->max_srq_wr = ib_qib_max_srq_wrs; + props->max_srq_sge = ib_qib_max_srq_sges; + /* props->local_ca_ack_delay */ + props->atomic_cap = IB_ATOMIC_GLOB; + props->max_pkeys = qib_get_npkeys(dd); + props->max_mcast_grp = ib_qib_max_mcast_grps; + props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached; + props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * + props->max_mcast_grp; + + return 0; +} + +static int qib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct qib_devdata *dd = dd_from_ibdev(ibdev); + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + enum ib_mtu mtu; + u16 lid = ppd->lid; + + memset(props, 0, sizeof(*props)); + props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); + props->lmc = ppd->lmc; + props->sm_lid = ibp->sm_lid; + props->sm_sl = ibp->sm_sl; + props->state = dd->f_iblink_state(ppd->lastibcstat); + props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat); + props->port_cap_flags = ibp->port_cap_flags; + props->gid_tbl_len = QIB_GUIDS_PER_PORT; + props->max_msg_sz = 0x80000000; + props->pkey_tbl_len = qib_get_npkeys(dd); + props->bad_pkey_cntr = ibp->pkey_violations; + props->qkey_viol_cntr = ibp->qkey_violations; + props->active_width = ppd->link_width_active; + /* See rate_show() */ + props->active_speed = ppd->link_speed_active; + props->max_vl_num = qib_num_vls(ppd->vls_supported); + props->init_type_reply = 0; + + props->max_mtu = qib_ibmtu ? 
qib_ibmtu : IB_MTU_4096; + switch (ppd->ibmtu) { + case 4096: + mtu = IB_MTU_4096; + break; + case 2048: + mtu = IB_MTU_2048; + break; + case 1024: + mtu = IB_MTU_1024; + break; + case 512: + mtu = IB_MTU_512; + break; + case 256: + mtu = IB_MTU_256; + break; + default: + mtu = IB_MTU_2048; + } + props->active_mtu = mtu; + props->subnet_timeout = ibp->subnet_timeout; + + return 0; +} + +static int qib_modify_device(struct ib_device *device, + int device_modify_mask, + struct ib_device_modify *device_modify) +{ + struct qib_devdata *dd = dd_from_ibdev(device); + unsigned i; + int ret; + + if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | + IB_DEVICE_MODIFY_NODE_DESC)) { + ret = -EOPNOTSUPP; + goto bail; + } + + if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) { + memcpy(device->node_desc, device_modify->node_desc, 64); + for (i = 0; i < dd->num_pports; i++) { + struct qib_ibport *ibp = &dd->pport[i].ibport_data; + + qib_node_desc_chg(ibp); + } + } + + if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) { + ib_qib_sys_image_guid = + cpu_to_be64(device_modify->sys_image_guid); + for (i = 0; i < dd->num_pports; i++) { + struct qib_ibport *ibp = &dd->pport[i].ibport_data; + + qib_sys_guid_chg(ibp); + } + } + + ret = 0; + +bail: + return ret; +} + +static int qib_modify_port(struct ib_device *ibdev, u8 port, + int port_modify_mask, struct ib_port_modify *props) +{ + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + + ibp->port_cap_flags |= props->set_port_cap_mask; + ibp->port_cap_flags &= ~props->clr_port_cap_mask; + if (props->set_port_cap_mask || props->clr_port_cap_mask) + qib_cap_mask_chg(ibp); + if (port_modify_mask & IB_PORT_SHUTDOWN) + qib_set_linkstate(ppd, QIB_IB_LINKDOWN); + if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) + ibp->qkey_violations = 0; + return 0; +} + +static int qib_query_gid(struct ib_device *ibdev, u8 port, + int index, union ib_gid *gid) +{ + struct qib_devdata *dd = dd_from_ibdev(ibdev); + int ret = 0; + + if (!port || port > dd->num_pports) + ret = -EINVAL; + else { + struct qib_ibport *ibp = to_iport(ibdev, port); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + + gid->global.subnet_prefix = ibp->gid_prefix; + if (index == 0) + gid->global.interface_id = ppd->guid; + else if (index < QIB_GUIDS_PER_PORT) + gid->global.interface_id = ibp->guids[index - 1]; + else + ret = -EINVAL; + } + + return ret; +} + +static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct qib_ibdev *dev = to_idev(ibdev); + struct qib_pd *pd; + struct ib_pd *ret; + + /* + * This is actually totally arbitrary. Some correctness tests + * assume there's a maximum number of PDs that can be allocated. + * We don't actually have this limit, but we fail the test if + * we allow allocations of more than we report for this value. + */ + + pd = kmalloc(sizeof *pd, GFP_KERNEL); + if (!pd) { + ret = ERR_PTR(-ENOMEM); + goto bail; + } + + spin_lock(&dev->n_pds_lock); + if (dev->n_pds_allocated == ib_qib_max_pds) { + spin_unlock(&dev->n_pds_lock); + kfree(pd); + ret = ERR_PTR(-ENOMEM); + goto bail; + } + + dev->n_pds_allocated++; + spin_unlock(&dev->n_pds_lock); + + /* ib_alloc_pd() will initialize pd->ibpd. 
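+ * All that is recorded here is whether the PD was created on behalf + * of user space (udata is non-NULL only for user-space callers).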
*/ + pd->user = udata != NULL; + + ret = &pd->ibpd; + +bail: + return ret; +} + +static int qib_dealloc_pd(struct ib_pd *ibpd) +{ + struct qib_pd *pd = to_ipd(ibpd); + struct qib_ibdev *dev = to_idev(ibpd->device); + + spin_lock(&dev->n_pds_lock); + dev->n_pds_allocated--; + spin_unlock(&dev->n_pds_lock); + + kfree(pd); + + return 0; +} + +int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr) +{ + /* A multicast address requires a GRH (see ch. 8.4.1). */ + if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE && + ah_attr->dlid != QIB_PERMISSIVE_LID && + !(ah_attr->ah_flags & IB_AH_GRH)) + goto bail; + if ((ah_attr->ah_flags & IB_AH_GRH) && + ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT) + goto bail; + if (ah_attr->dlid == 0) + goto bail; + if (ah_attr->port_num < 1 || + ah_attr->port_num > ibdev->phys_port_cnt) + goto bail; + if (ah_attr->static_rate != IB_RATE_PORT_CURRENT && + ib_rate_to_mult(ah_attr->static_rate) < 0) + goto bail; + if (ah_attr->sl > 15) + goto bail; + return 0; +bail: + return -EINVAL; +} + +/** + * qib_create_ah - create an address handle + * @pd: the protection domain + * @ah_attr: the attributes of the AH + * + * This may be called from interrupt context. + */ +static struct ib_ah *qib_create_ah(struct ib_pd *pd, + struct ib_ah_attr *ah_attr) +{ + struct qib_ah *ah; + struct ib_ah *ret; + struct qib_ibdev *dev = to_idev(pd->device); + unsigned long flags; + + if (qib_check_ah(pd->device, ah_attr)) { + ret = ERR_PTR(-EINVAL); + goto bail; + } + + ah = kmalloc(sizeof *ah, GFP_ATOMIC); + if (!ah) { + ret = ERR_PTR(-ENOMEM); + goto bail; + } + + spin_lock_irqsave(&dev->n_ahs_lock, flags); + if (dev->n_ahs_allocated == ib_qib_max_ahs) { + spin_unlock_irqrestore(&dev->n_ahs_lock, flags); + kfree(ah); + ret = ERR_PTR(-ENOMEM); + goto bail; + } + + dev->n_ahs_allocated++; + spin_unlock_irqrestore(&dev->n_ahs_lock, flags); + + /* ib_create_ah() will initialize ah->ibah. */ + ah->attr = *ah_attr; + atomic_set(&ah->refcount, 0); + + ret = &ah->ibah; + +bail: + return ret; +} + +/** + * qib_destroy_ah - destroy an address handle + * @ibah: the AH to destroy + * + * This may be called from interrupt context. + */ +static int qib_destroy_ah(struct ib_ah *ibah) +{ + struct qib_ibdev *dev = to_idev(ibah->device); + struct qib_ah *ah = to_iah(ibah); + unsigned long flags; + + if (atomic_read(&ah->refcount) != 0) + return -EBUSY; + + spin_lock_irqsave(&dev->n_ahs_lock, flags); + dev->n_ahs_allocated--; + spin_unlock_irqrestore(&dev->n_ahs_lock, flags); + + kfree(ah); + + return 0; +} + +static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) +{ + struct qib_ah *ah = to_iah(ibah); + + if (qib_check_ah(ibah->device, ah_attr)) + return -EINVAL; + + ah->attr = *ah_attr; + + return 0; +} + +static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) +{ + struct qib_ah *ah = to_iah(ibah); + + *ah_attr = ah->attr; + + return 0; +} + +/** + * qib_get_npkeys - return the size of the PKEY table for context 0 + * @dd: the qlogic_ib device + */ +unsigned qib_get_npkeys(struct qib_devdata *dd) +{ + return ARRAY_SIZE(dd->rcd[0]->pkeys); +} + +/* + * Return the indexed PKEY from the port PKEY table. + * No need to validate rcd[ctxt]; the port is setup if we are here. 
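+ * Returns 0 (an invalid PKEY) if the context array has not been + * allocated or the index is out of range.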
+ */ +unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index) +{ + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_devdata *dd = ppd->dd; + unsigned ctxt = ppd->hw_pidx; + unsigned ret; + + /* dd->rcd null if mini_init or some init failures */ + if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys)) + ret = 0; + else + ret = dd->rcd[ctxt]->pkeys[index]; + + return ret; +} + +static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + struct qib_devdata *dd = dd_from_ibdev(ibdev); + int ret; + + if (index >= qib_get_npkeys(dd)) { + ret = -EINVAL; + goto bail; + } + + *pkey = qib_get_pkey(to_iport(ibdev, port), index); + ret = 0; + +bail: + return ret; +} + +/** + * qib_alloc_ucontext - allocate a ucontext + * @ibdev: the infiniband device + * @udata: not used by the QLogic_IB driver + */ + +static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct qib_ucontext *context; + struct ib_ucontext *ret; + + context = kmalloc(sizeof *context, GFP_KERNEL); + if (!context) { + ret = ERR_PTR(-ENOMEM); + goto bail; + } + + ret = &context->ibucontext; + +bail: + return ret; +} + +static int qib_dealloc_ucontext(struct ib_ucontext *context) +{ + kfree(to_iucontext(context)); + return 0; +} + +static void init_ibport(struct qib_pportdata *ppd) +{ + struct qib_verbs_counters cntrs; + struct qib_ibport *ibp = &ppd->ibport_data; + + spin_lock_init(&ibp->lock); + /* Set the prefix to the default value (see ch. 4.1.1) */ + ibp->gid_prefix = IB_DEFAULT_GID_PREFIX; + ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); + ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | + IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP | + IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP | + IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP | + IB_PORT_OTHER_LOCAL_CHANGES_SUP; + if (ppd->dd->flags & QIB_HAS_LINK_LATENCY) + ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; + ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; + ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; + ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; + ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; + ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; + + /* Snapshot current HW counters to "clear" them. */ + qib_get_counters(ppd, &cntrs); + ibp->z_symbol_error_counter = cntrs.symbol_error_counter; + ibp->z_link_error_recovery_counter = + cntrs.link_error_recovery_counter; + ibp->z_link_downed_counter = cntrs.link_downed_counter; + ibp->z_port_rcv_errors = cntrs.port_rcv_errors; + ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; + ibp->z_port_xmit_discards = cntrs.port_xmit_discards; + ibp->z_port_xmit_data = cntrs.port_xmit_data; + ibp->z_port_rcv_data = cntrs.port_rcv_data; + ibp->z_port_xmit_packets = cntrs.port_xmit_packets; + ibp->z_port_rcv_packets = cntrs.port_rcv_packets; + ibp->z_local_link_integrity_errors = + cntrs.local_link_integrity_errors; + ibp->z_excessive_buffer_overrun_errors = + cntrs.excessive_buffer_overrun_errors; + ibp->z_vl15_dropped = cntrs.vl15_dropped; +} + +/** + * qib_register_ib_device - register our device with the infiniband core + * @dd: the device data structure + * Return 0 on success or a negative errno on failure.
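+ * + * Resources are allocated in order (QP hash table, lkey table, PIO + * headers, txreq pool) and released in reverse via the err_* labels + * if any step fails.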
+ */ +int qib_register_ib_device(struct qib_devdata *dd) +{ + struct qib_ibdev *dev = &dd->verbs_dev; + struct ib_device *ibdev = &dev->ibdev; + struct qib_pportdata *ppd = dd->pport; + unsigned i, lk_tab_size; + int ret; + + dev->qp_table_size = ib_qib_qp_table_size; + dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table, + GFP_KERNEL); + if (!dev->qp_table) { + ret = -ENOMEM; + goto err_qpt; + } + + for (i = 0; i < dd->num_pports; i++) + init_ibport(ppd + i); + + /* Only need to initialize non-zero fields. */ + spin_lock_init(&dev->qpt_lock); + spin_lock_init(&dev->n_pds_lock); + spin_lock_init(&dev->n_ahs_lock); + spin_lock_init(&dev->n_cqs_lock); + spin_lock_init(&dev->n_qps_lock); + spin_lock_init(&dev->n_srqs_lock); + spin_lock_init(&dev->n_mcast_grps_lock); + init_timer(&dev->mem_timer); + dev->mem_timer.function = mem_timer; + dev->mem_timer.data = (unsigned long) dev; + + qib_init_qpn_table(dd, &dev->qpn_table); + + /* + * The top ib_qib_lkey_table_size bits are used to index the + * table. The lower 8 bits can be owned by the user (copied from + * the LKEY). The remaining bits act as a generation number or tag. + */ + spin_lock_init(&dev->lk_table.lock); + dev->lk_table.max = 1 << ib_qib_lkey_table_size; + lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); + dev->lk_table.table = (struct qib_mregion **) + __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); + if (dev->lk_table.table == NULL) { + ret = -ENOMEM; + goto err_lk; + } + memset(dev->lk_table.table, 0, lk_tab_size); + INIT_LIST_HEAD(&dev->pending_mmaps); + spin_lock_init(&dev->pending_lock); + dev->mmap_offset = PAGE_SIZE; + spin_lock_init(&dev->mmap_offset_lock); + INIT_LIST_HEAD(&dev->piowait); + INIT_LIST_HEAD(&dev->dmawait); + INIT_LIST_HEAD(&dev->txwait); + INIT_LIST_HEAD(&dev->memwait); + INIT_LIST_HEAD(&dev->txreq_free); + + if (ppd->sdma_descq_cnt) { + dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev, + ppd->sdma_descq_cnt * + sizeof(struct qib_pio_header), + &dev->pio_hdrs_phys, + GFP_KERNEL); + if (!dev->pio_hdrs) { + ret = -ENOMEM; + goto err_hdrs; + } + } + + for (i = 0; i < ppd->sdma_descq_cnt; i++) { + struct qib_verbs_txreq *tx; + + tx = kzalloc(sizeof *tx, GFP_KERNEL); + if (!tx) { + ret = -ENOMEM; + goto err_tx; + } + tx->hdr_inx = i; + list_add(&tx->txreq.list, &dev->txreq_free); + } + + /* + * The system image GUID is supposed to be the same for all + * IB HCAs in a single system but since there can be other + * device types in the system, we can't be sure this is unique. 
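+ * As a best-effort default, use the GUID of the first port registered + * when no system image GUID has been set yet.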
+ */ + if (!ib_qib_sys_image_guid) + ib_qib_sys_image_guid = ppd->guid; + + strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX); + ibdev->owner = THIS_MODULE; + ibdev->node_guid = ppd->guid; + ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION; + ibdev->uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_CREATE_AH) | + (1ull << IB_USER_VERBS_CMD_MODIFY_AH) | + (1ull << IB_USER_VERBS_CMD_QUERY_AH) | + (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_POLL_CQ) | + (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_POST_SEND) | + (1ull << IB_USER_VERBS_CMD_POST_RECV) | + (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | + (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); + ibdev->node_type = RDMA_NODE_IB_CA; + ibdev->phys_port_cnt = dd->num_pports; + ibdev->num_comp_vectors = 1; + ibdev->dma_device = &dd->pcidev->dev; + ibdev->query_device = qib_query_device; + ibdev->modify_device = qib_modify_device; + ibdev->query_port = qib_query_port; + ibdev->modify_port = qib_modify_port; + ibdev->query_pkey = qib_query_pkey; + ibdev->query_gid = qib_query_gid; + ibdev->alloc_ucontext = qib_alloc_ucontext; + ibdev->dealloc_ucontext = qib_dealloc_ucontext; + ibdev->alloc_pd = qib_alloc_pd; + ibdev->dealloc_pd = qib_dealloc_pd; + ibdev->create_ah = qib_create_ah; + ibdev->destroy_ah = qib_destroy_ah; + ibdev->modify_ah = qib_modify_ah; + ibdev->query_ah = qib_query_ah; + ibdev->create_srq = qib_create_srq; + ibdev->modify_srq = qib_modify_srq; + ibdev->query_srq = qib_query_srq; + ibdev->destroy_srq = qib_destroy_srq; + ibdev->create_qp = qib_create_qp; + ibdev->modify_qp = qib_modify_qp; + ibdev->query_qp = qib_query_qp; + ibdev->destroy_qp = qib_destroy_qp; + ibdev->post_send = qib_post_send; + ibdev->post_recv = qib_post_receive; + ibdev->post_srq_recv = qib_post_srq_receive; + ibdev->create_cq = qib_create_cq; + ibdev->destroy_cq = qib_destroy_cq; + ibdev->resize_cq = qib_resize_cq; + ibdev->poll_cq = qib_poll_cq; + ibdev->req_notify_cq = qib_req_notify_cq; + ibdev->get_dma_mr = qib_get_dma_mr; + ibdev->reg_phys_mr = qib_reg_phys_mr; + ibdev->reg_user_mr = qib_reg_user_mr; + ibdev->dereg_mr = qib_dereg_mr; + ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr; + ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list; + ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list; + ibdev->alloc_fmr = qib_alloc_fmr; + ibdev->map_phys_fmr = qib_map_phys_fmr; + ibdev->unmap_fmr = qib_unmap_fmr; + ibdev->dealloc_fmr = qib_dealloc_fmr; + ibdev->attach_mcast = qib_multicast_attach; + ibdev->detach_mcast = qib_multicast_detach; + ibdev->process_mad = qib_process_mad; + ibdev->mmap = qib_mmap; + ibdev->dma_ops = &qib_dma_mapping_ops; + + 
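+ /* The node description defaults to QIB_IDSTR plus the hostname. */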
snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), + QIB_IDSTR " %s", init_utsname()->nodename); + + ret = ib_register_device(ibdev, qib_create_port_files); + if (ret) + goto err_reg; + + ret = qib_create_agents(dev); + if (ret) + goto err_agents; + + if (qib_verbs_register_sysfs(dd)) + goto err_class; + + goto bail; + +err_class: + qib_free_agents(dev); +err_agents: + ib_unregister_device(ibdev); +err_reg: +err_tx: + while (!list_empty(&dev->txreq_free)) { + struct list_head *l = dev->txreq_free.next; + struct qib_verbs_txreq *tx; + + list_del(l); + tx = list_entry(l, struct qib_verbs_txreq, txreq.list); + kfree(tx); + } + if (ppd->sdma_descq_cnt) + dma_free_coherent(&dd->pcidev->dev, + ppd->sdma_descq_cnt * + sizeof(struct qib_pio_header), + dev->pio_hdrs, dev->pio_hdrs_phys); +err_hdrs: + free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size)); +err_lk: + kfree(dev->qp_table); +err_qpt: + qib_dev_err(dd, "cannot register verbs: %d!\n", -ret); +bail: + return ret; +} + +void qib_unregister_ib_device(struct qib_devdata *dd) +{ + struct qib_ibdev *dev = &dd->verbs_dev; + struct ib_device *ibdev = &dev->ibdev; + u32 qps_inuse; + unsigned lk_tab_size; + + qib_verbs_unregister_sysfs(dd); + + qib_free_agents(dev); + + ib_unregister_device(ibdev); + + if (!list_empty(&dev->piowait)) + qib_dev_err(dd, "piowait list not empty!\n"); + if (!list_empty(&dev->dmawait)) + qib_dev_err(dd, "dmawait list not empty!\n"); + if (!list_empty(&dev->txwait)) + qib_dev_err(dd, "txwait list not empty!\n"); + if (!list_empty(&dev->memwait)) + qib_dev_err(dd, "memwait list not empty!\n"); + if (dev->dma_mr) + qib_dev_err(dd, "DMA MR not NULL!\n"); + + qps_inuse = qib_free_all_qps(dd); + if (qps_inuse) + qib_dev_err(dd, "QP memory leak! %u still in use\n", + qps_inuse); + + del_timer_sync(&dev->mem_timer); + qib_free_qpn_table(&dev->qpn_table); + while (!list_empty(&dev->txreq_free)) { + struct list_head *l = dev->txreq_free.next; + struct qib_verbs_txreq *tx; + + list_del(l); + tx = list_entry(l, struct qib_verbs_txreq, txreq.list); + kfree(tx); + } + if (dd->pport->sdma_descq_cnt) + dma_free_coherent(&dd->pcidev->dev, + dd->pport->sdma_descq_cnt * + sizeof(struct qib_pio_header), + dev->pio_hdrs, dev->pio_hdrs_phys); + lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); + free_pages((unsigned long) dev->lk_table.table, + get_order(lk_tab_size)); + kfree(dev->qp_table); +} diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h new file mode 100644 index 000000000000..bd57c1273225 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -0,0 +1,1100 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef QIB_VERBS_H +#define QIB_VERBS_H + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/kref.h> +#include <linux/workqueue.h> +#include <rdma/ib_pack.h> +#include <rdma/ib_user_verbs.h> + +struct qib_ctxtdata; +struct qib_pportdata; +struct qib_devdata; +struct qib_verbs_txreq; + +#define QIB_MAX_RDMA_ATOMIC 16 +#define QIB_GUIDS_PER_PORT 5 + +#define QPN_MAX (1 << 24) +#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) + +/* + * Increment this value if any changes that break userspace ABI + * compatibility are made. + */ +#define QIB_UVERBS_ABI_VERSION 2 + +/* + * Define an ib_cq_notify value that is not valid so we know when CQ + * notifications are armed. + */ +#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1) + +#define IB_SEQ_NAK (3 << 29) + +/* AETH NAK opcode values */ +#define IB_RNR_NAK 0x20 +#define IB_NAK_PSN_ERROR 0x60 +#define IB_NAK_INVALID_REQUEST 0x61 +#define IB_NAK_REMOTE_ACCESS_ERROR 0x62 +#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63 +#define IB_NAK_INVALID_RD_REQUEST 0x64 + +/* Flags for checking QP state (see ib_qib_state_ops[]) */ +#define QIB_POST_SEND_OK 0x01 +#define QIB_POST_RECV_OK 0x02 +#define QIB_PROCESS_RECV_OK 0x04 +#define QIB_PROCESS_SEND_OK 0x08 +#define QIB_PROCESS_NEXT_SEND_OK 0x10 +#define QIB_FLUSH_SEND 0x20 +#define QIB_FLUSH_RECV 0x40 +#define QIB_PROCESS_OR_FLUSH_SEND \ + (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND) + +/* IB Performance Manager status values */ +#define IB_PMA_SAMPLE_STATUS_DONE 0x00 +#define IB_PMA_SAMPLE_STATUS_STARTED 0x01 +#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02 + +/* Mandatory IB performance counter select values. 
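+ * These are kept in network byte order so they can be compared + * directly against the CounterSelect fields of incoming performance + * management MADs.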
*/ +#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001) +#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002) +#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003) +#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004) +#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005) + +#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0) + +#define IB_BTH_REQ_ACK (1 << 31) +#define IB_BTH_SOLICITED (1 << 23) +#define IB_BTH_MIG_REQ (1 << 22) + +/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */ +#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26) + +#define IB_GRH_VERSION 6 +#define IB_GRH_VERSION_MASK 0xF +#define IB_GRH_VERSION_SHIFT 28 +#define IB_GRH_TCLASS_MASK 0xFF +#define IB_GRH_TCLASS_SHIFT 20 +#define IB_GRH_FLOW_MASK 0xFFFFF +#define IB_GRH_FLOW_SHIFT 0 +#define IB_GRH_NEXT_HDR 0x1B + +#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL) + +/* Values for set/get portinfo VLCap OperationalVLs */ +#define IB_VL_VL0 1 +#define IB_VL_VL0_1 2 +#define IB_VL_VL0_3 3 +#define IB_VL_VL0_7 4 +#define IB_VL_VL0_14 5 + +static inline int qib_num_vls(int vls) +{ + switch (vls) { + default: + case IB_VL_VL0: + return 1; + case IB_VL_VL0_1: + return 2; + case IB_VL_VL0_3: + return 4; + case IB_VL_VL0_7: + return 8; + case IB_VL_VL0_14: + return 15; + } +} + +struct ib_reth { + __be64 vaddr; + __be32 rkey; + __be32 length; +} __attribute__ ((packed)); + +struct ib_atomic_eth { + __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */ + __be32 rkey; + __be64 swap_data; + __be64 compare_data; +} __attribute__ ((packed)); + +struct qib_other_headers { + __be32 bth[3]; + union { + struct { + __be32 deth[2]; + __be32 imm_data; + } ud; + struct { + struct ib_reth reth; + __be32 imm_data; + } rc; + struct { + __be32 aeth; + __be32 atomic_ack_eth[2]; + } at; + __be32 imm_data; + __be32 aeth; + struct ib_atomic_eth atomic_eth; + } u; +} __attribute__ ((packed)); + +/* + * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes + * long (72 w/ imm_data). Only the first 56 bytes of the IB header + * will be in the eager header buffer. The remaining 12 or 16 bytes + * are in the data buffer. + */ +struct qib_ib_header { + __be16 lrh[4]; + union { + struct { + struct ib_grh grh; + struct qib_other_headers oth; + } l; + struct qib_other_headers oth; + } u; +} __attribute__ ((packed)); + +struct qib_pio_header { + __le32 pbc[2]; + struct qib_ib_header hdr; +} __attribute__ ((packed)); + +/* + * There is one struct qib_mcast for each multicast GID. + * All attached QPs are then stored as a list of + * struct qib_mcast_qp. + */ +struct qib_mcast_qp { + struct list_head list; + struct qib_qp *qp; +}; + +struct qib_mcast { + struct rb_node rb_node; + union ib_gid mgid; + struct list_head qp_list; + wait_queue_head_t wait; + atomic_t refcount; + int n_attached; +}; + +/* Protection domain */ +struct qib_pd { + struct ib_pd ibpd; + int user; /* non-zero if created from user space */ +}; + +/* Address Handle */ +struct qib_ah { + struct ib_ah ibah; + struct ib_ah_attr attr; + atomic_t refcount; +}; + +/* + * This structure is used by qib_mmap() to validate an offset + * when an mmap() request is made. The vm_area_struct then uses + * this as its vm_private_data. + */ +struct qib_mmap_info { + struct list_head pending_mmaps; + struct ib_ucontext *context; + void *obj; + __u64 offset; + struct kref ref; + unsigned size; +}; + +/* + * This structure is used to contain the head pointer, tail pointer, + * and completion queue entries as a single memory allocation so + * it can be mmap'ed into user space. 
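+ * head is the index of the next entry to fill and head == tail means + * the queue is empty; user-mapped CQs use the ib_uverbs_wc layout + * while kernel CQs use struct ib_wc.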
+ */ +struct qib_cq_wc { + u32 head; /* index of next entry to fill */ + u32 tail; /* index of next ib_poll_cq() entry */ + union { + /* these are actually size ibcq.cqe + 1 */ + struct ib_uverbs_wc uqueue[0]; + struct ib_wc kqueue[0]; + }; +}; + +/* + * The completion queue structure. + */ +struct qib_cq { + struct ib_cq ibcq; + struct work_struct comptask; + spinlock_t lock; /* protect changes in this struct */ + u8 notify; + u8 triggered; + struct qib_cq_wc *queue; + struct qib_mmap_info *ip; +}; + +/* + * A segment is a linear region of low physical memory. + * XXX Maybe we should use phys addr here and kmap()/kunmap(). + * Used by the verbs layer. + */ +struct qib_seg { + void *vaddr; + size_t length; +}; + +/* The number of qib_segs that fit in a page. */ +#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg)) + +struct qib_segarray { + struct qib_seg segs[QIB_SEGSZ]; +}; + +struct qib_mregion { + struct ib_pd *pd; /* shares refcnt of ibmr.pd */ + u64 user_base; /* User's address for this region */ + u64 iova; /* IB start address of this region */ + size_t length; + u32 lkey; + u32 offset; /* offset (bytes) to start of region */ + int access_flags; + u32 max_segs; /* number of qib_segs in all the arrays */ + u32 mapsz; /* size of the map array */ + atomic_t refcount; + struct qib_segarray *map[0]; /* the segments */ +}; + +/* + * These keep track of the copy progress within a memory region. + * Used by the verbs layer. + */ +struct qib_sge { + struct qib_mregion *mr; + void *vaddr; /* kernel virtual address of segment */ + u32 sge_length; /* length of the SGE */ + u32 length; /* remaining length of the segment */ + u16 m; /* current index: mr->map[m] */ + u16 n; /* current index: mr->map[m]->segs[n] */ +}; + +/* Memory region */ +struct qib_mr { + struct ib_mr ibmr; + struct ib_umem *umem; + struct qib_mregion mr; /* must be last */ +}; + +/* + * Send work request queue entry. + * The size of the sg_list is determined when the QP is created and stored + * in qp->s_max_sge. + */ +struct qib_swqe { + struct ib_send_wr wr; /* don't use wr.sg_list */ + u32 psn; /* first packet sequence number */ + u32 lpsn; /* last packet sequence number */ + u32 ssn; /* send sequence number */ + u32 length; /* total length of data in sg_list */ + struct qib_sge sg_list[0]; +}; + +/* + * Receive work request queue entry. + * The size of the sg_list is determined when the QP (or SRQ) is created + * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). + */ +struct qib_rwqe { + u64 wr_id; + u8 num_sge; + struct ib_sge sg_list[0]; +}; + +/* + * This structure is used to contain the head pointer, tail pointer, + * and receive work queue entries as a single memory allocation so + * it can be mmap'ed into user space. + * Note that the wq array elements are variable size so you can't + * just index into the array to get the N'th element; + * use get_rwqe_ptr() instead. + */ +struct qib_rwq { + u32 head; /* new work requests posted to the head */ + u32 tail; /* receives pull requests from here. 
*/ + struct qib_rwqe wq[0]; +}; + +struct qib_rq { + struct qib_rwq *wq; + spinlock_t lock; /* protect changes in this struct */ + u32 size; /* size of RWQE array */ + u8 max_sge; +}; + +struct qib_srq { + struct ib_srq ibsrq; + struct qib_rq rq; + struct qib_mmap_info *ip; + /* send signal when number of RWQEs < limit */ + u32 limit; +}; + +struct qib_sge_state { + struct qib_sge *sg_list; /* next SGE to be used if any */ + struct qib_sge sge; /* progress state for the current SGE */ + u32 total_len; + u8 num_sge; +}; + +/* + * This structure holds the information that the send tasklet needs + * to send a RDMA read response or atomic operation. + */ +struct qib_ack_entry { + u8 opcode; + u8 sent; + u32 psn; + u32 lpsn; + union { + struct qib_sge rdma_sge; + u64 atomic_data; + }; +}; + +/* + * Variables prefixed with s_ are for the requester (sender). + * Variables prefixed with r_ are for the responder (receiver). + * Variables prefixed with ack_ are for responder replies. + * + * Common variables are protected by both r_rq.lock and s_lock in that order + * which only happens in modify_qp() or changing the QP 'state'. + */ +struct qib_qp { + struct ib_qp ibqp; + struct qib_qp *next; /* link list for QPN hash table */ + struct qib_qp *timer_next; /* link list for qib_ib_timer() */ + struct list_head iowait; /* link for wait PIO buf */ + struct list_head rspwait; /* link for waiting to respond */ + struct ib_ah_attr remote_ah_attr; + struct ib_ah_attr alt_ah_attr; + struct qib_ib_header s_hdr; /* next packet header to send */ + atomic_t refcount; + wait_queue_head_t wait; + wait_queue_head_t wait_dma; + struct timer_list s_timer; + struct work_struct s_work; + struct qib_mmap_info *ip; + struct qib_sge_state *s_cur_sge; + struct qib_verbs_txreq *s_tx; + struct qib_mregion *s_rdma_mr; + struct qib_sge_state s_sge; /* current send request data */ + struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]; + struct qib_sge_state s_ack_rdma_sge; + struct qib_sge_state s_rdma_read_sge; + struct qib_sge_state r_sge; /* current receive data */ + spinlock_t r_lock; /* used for APM */ + spinlock_t s_lock; + atomic_t s_dma_busy; + unsigned processor_id; /* Processor ID QP is bound to */ + u32 s_flags; + u32 s_cur_size; /* size of send packet in bytes */ + u32 s_len; /* total length of s_sge */ + u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ + u32 s_next_psn; /* PSN for next request */ + u32 s_last_psn; /* last response PSN processed */ + u32 s_sending_psn; /* lowest PSN that is being sent */ + u32 s_sending_hpsn; /* highest PSN that is being sent */ + u32 s_psn; /* current packet sequence number */ + u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ + u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ + u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ + u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ + u64 r_wr_id; /* ID for current receive WQE */ + unsigned long r_aflags; + u32 r_len; /* total length of r_sge */ + u32 r_rcv_len; /* receive data len processed */ + u32 r_psn; /* expected rcv packet sequence number */ + u32 r_msn; /* message sequence number */ + u16 s_hdrwords; /* size of s_hdr in 32 bit words */ + u16 s_rdma_ack_cnt; + u8 state; /* QP state */ + u8 s_state; /* opcode of last packet sent */ + u8 s_ack_state; /* opcode of packet to ACK */ + u8 s_nak_state; /* non-zero if NAK is pending */ + u8 r_state; /* opcode of last packet received */ + u8 r_nak_state; /* non-zero if NAK is pending */ + u8 r_min_rnr_timer; /* retry timeout value
for RNR NAKs */ + u8 r_flags; + u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */ + u8 r_head_ack_queue; /* index into s_ack_queue[] */ + u8 qp_access_flags; + u8 s_max_sge; /* size of s_wq->sg_list */ + u8 s_retry_cnt; /* number of times to retry */ + u8 s_rnr_retry_cnt; + u8 s_retry; /* requester retry counter */ + u8 s_rnr_retry; /* requester RNR retry counter */ + u8 s_pkey_index; /* PKEY index to use */ + u8 s_alt_pkey_index; /* Alternate path PKEY index to use */ + u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */ + u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ + u8 s_tail_ack_queue; /* index into s_ack_queue[] */ + u8 s_srate; + u8 s_draining; + u8 s_mig_state; + u8 timeout; /* Timeout for this QP */ + u8 alt_timeout; /* Alternate path timeout for this QP */ + u8 port_num; + enum ib_mtu path_mtu; + u32 remote_qpn; + u32 qkey; /* QKEY for this QP (for UD or RD) */ + u32 s_size; /* send work queue size */ + u32 s_head; /* new entries added here */ + u32 s_tail; /* next entry to process */ + u32 s_cur; /* current work queue entry */ + u32 s_acked; /* last un-ACK'ed entry */ + u32 s_last; /* last completed entry */ + u32 s_ssn; /* SSN of tail entry */ + u32 s_lsn; /* limit sequence number (credit) */ + struct qib_swqe *s_wq; /* send work queue */ + struct qib_swqe *s_wqe; + struct qib_rq r_rq; /* receive work queue */ + struct qib_sge r_sg_list[0]; /* verified SGEs */ +}; + +/* + * Atomic bit definitions for r_aflags. + */ +#define QIB_R_WRID_VALID 0 +#define QIB_R_REWIND_SGE 1 + +/* + * Bit definitions for r_flags. + */ +#define QIB_R_REUSE_SGE 0x01 +#define QIB_R_RDMAR_SEQ 0x02 +#define QIB_R_RSP_NAK 0x04 +#define QIB_R_RSP_SEND 0x08 +#define QIB_R_COMM_EST 0x10 + +/* + * Bit definitions for s_flags. 
+ * + * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled + * QIB_S_BUSY - send tasklet is processing the QP + * QIB_S_TIMER - the RC retry timer is active + * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics + * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs + * before processing the next SWQE + * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete + * before processing the next SWQE + * QIB_S_WAIT_RNR - waiting for RNR timeout + * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE + * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating + * next send completion entry not via send DMA + * QIB_S_WAIT_PIO - waiting for a send buffer to be available + * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available + * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available + * QIB_S_WAIT_KMEM - waiting for kernel memory to be available + * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue + * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests + * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK + */ +#define QIB_S_SIGNAL_REQ_WR 0x0001 +#define QIB_S_BUSY 0x0002 +#define QIB_S_TIMER 0x0004 +#define QIB_S_RESP_PENDING 0x0008 +#define QIB_S_ACK_PENDING 0x0010 +#define QIB_S_WAIT_FENCE 0x0020 +#define QIB_S_WAIT_RDMAR 0x0040 +#define QIB_S_WAIT_RNR 0x0080 +#define QIB_S_WAIT_SSN_CREDIT 0x0100 +#define QIB_S_WAIT_DMA 0x0200 +#define QIB_S_WAIT_PIO 0x0400 +#define QIB_S_WAIT_TX 0x0800 +#define QIB_S_WAIT_DMA_DESC 0x1000 +#define QIB_S_WAIT_KMEM 0x2000 +#define QIB_S_WAIT_PSN 0x4000 +#define QIB_S_WAIT_ACK 0x8000 +#define QIB_S_SEND_ONE 0x10000 +#define QIB_S_UNLIMITED_CREDIT 0x20000 + +/* + * Wait flags that would prevent any packet type from being sent. + */ +#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \ + QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM) + +/* + * Wait flags that would prevent send work requests from making progress. + */ +#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \ + QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \ + QIB_S_WAIT_PSN | QIB_S_WAIT_ACK) + +#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND) + +#define QIB_PSN_CREDIT 16 + +/* + * Since struct qib_swqe is not a fixed size, we can't simply index into + * struct qib_qp.s_wq. This function does the array index computation. + */ +static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp, + unsigned n) +{ + return (struct qib_swqe *)((char *)qp->s_wq + + (sizeof(struct qib_swqe) + + qp->s_max_sge * + sizeof(struct qib_sge)) * n); +} + +/* + * Since struct qib_rwqe is not a fixed size, we can't simply index into + * struct qib_rwq.wq. This function does the array index computation. + */ +static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n) +{ + return (struct qib_rwqe *) + ((char *) rq->wq->wq + + (sizeof(struct qib_rwqe) + + rq->max_sge * sizeof(struct ib_sge)) * n); +} + +/* + * QPN-map pages start out as NULL, they get allocated upon + * first use and are never deallocated. This way, + * large bitmaps are not allocated unless large numbers of QPs are used. 
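+ * Each page covers PAGE_SIZE * BITS_PER_BYTE QPNs (32768 with 4 KB + * pages), so the full QPN_MAX space takes QPNMAP_ENTRIES pages.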
+ */ +struct qpn_map { + void *page; +}; + +struct qib_qpn_table { + spinlock_t lock; /* protect changes in this struct */ + unsigned flags; /* flags for QP0/1 allocated for each port */ + u32 last; /* last QP number allocated */ + u32 nmaps; /* size of the map table */ + u16 limit; + u16 mask; + /* bit map of free QP numbers other than 0/1 */ + struct qpn_map map[QPNMAP_ENTRIES]; +}; + +struct qib_lkey_table { + spinlock_t lock; /* protect changes in this struct */ + u32 next; /* next unused index (speeds search) */ + u32 gen; /* generation count */ + u32 max; /* size of the table */ + struct qib_mregion **table; +}; + +struct qib_opcode_stats { + u64 n_packets; /* number of packets */ + u64 n_bytes; /* total number of bytes */ +}; + +struct qib_ibport { + struct qib_qp *qp0; + struct qib_qp *qp1; + struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ + struct qib_ah *sm_ah; + struct qib_ah *smi_ah; + struct rb_root mcast_tree; + spinlock_t lock; /* protect changes in this struct */ + + /* non-zero when timer is set */ + unsigned long mkey_lease_timeout; + unsigned long trap_timeout; + __be64 gid_prefix; /* in network order */ + __be64 mkey; + __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */ + u64 tid; /* TID for traps */ + u64 n_unicast_xmit; /* total unicast packets sent */ + u64 n_unicast_rcv; /* total unicast packets received */ + u64 n_multicast_xmit; /* total multicast packets sent */ + u64 n_multicast_rcv; /* total multicast packets received */ + u64 z_symbol_error_counter; /* starting count for PMA */ + u64 z_link_error_recovery_counter; /* starting count for PMA */ + u64 z_link_downed_counter; /* starting count for PMA */ + u64 z_port_rcv_errors; /* starting count for PMA */ + u64 z_port_rcv_remphys_errors; /* starting count for PMA */ + u64 z_port_xmit_discards; /* starting count for PMA */ + u64 z_port_xmit_data; /* starting count for PMA */ + u64 z_port_rcv_data; /* starting count for PMA */ + u64 z_port_xmit_packets; /* starting count for PMA */ + u64 z_port_rcv_packets; /* starting count for PMA */ + u32 z_local_link_integrity_errors; /* starting count for PMA */ + u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */ + u32 z_vl15_dropped; /* starting count for PMA */ + u32 n_rc_resends; + u32 n_rc_acks; + u32 n_rc_qacks; + u32 n_rc_delayed_comp; + u32 n_seq_naks; + u32 n_rdma_seq; + u32 n_rnr_naks; + u32 n_other_naks; + u32 n_loop_pkts; + u32 n_pkt_drops; + u32 n_vl15_dropped; + u32 n_rc_timeouts; + u32 n_dmawait; + u32 n_unaligned; + u32 n_rc_dupreq; + u32 n_rc_seqnak; + u32 port_cap_flags; + u32 pma_sample_start; + u32 pma_sample_interval; + __be16 pma_counter_select[5]; + u16 pma_tag; + u16 pkey_violations; + u16 qkey_violations; + u16 mkey_violations; + u16 mkey_lease_period; + u16 sm_lid; + u16 repress_traps; + u8 sm_sl; + u8 mkeyprot; + u8 subnet_timeout; + u8 vl_high_limit; + u8 sl_to_vl[16]; + + struct qib_opcode_stats opstats[128]; +}; + +struct qib_ibdev { + struct ib_device ibdev; + struct list_head pending_mmaps; + spinlock_t mmap_offset_lock; /* protect mmap_offset */ + u32 mmap_offset; + struct qib_mregion *dma_mr; + + /* QP numbers are shared by all IB ports */ + struct qib_qpn_table qpn_table; + struct qib_lkey_table lk_table; + struct list_head piowait; /* list for wait PIO buf */ + struct list_head dmawait; /* list for wait DMA */ + struct list_head txwait; /* list for wait qib_verbs_txreq */ + struct list_head memwait; /* list for wait kernel memory */ + struct list_head txreq_free; + struct timer_list mem_timer; + struct 
qib_qp **qp_table; + struct qib_pio_header *pio_hdrs; + dma_addr_t pio_hdrs_phys; + /* list of QPs waiting for RNR timer */ + spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */ + unsigned qp_table_size; /* size of the hash table */ + spinlock_t qpt_lock; + + u32 n_piowait; + u32 n_txwait; + + u32 n_pds_allocated; /* number of PDs allocated for device */ + spinlock_t n_pds_lock; + u32 n_ahs_allocated; /* number of AHs allocated for device */ + spinlock_t n_ahs_lock; + u32 n_cqs_allocated; /* number of CQs allocated for device */ + spinlock_t n_cqs_lock; + u32 n_qps_allocated; /* number of QPs allocated for device */ + spinlock_t n_qps_lock; + u32 n_srqs_allocated; /* number of SRQs allocated for device */ + spinlock_t n_srqs_lock; + u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ + spinlock_t n_mcast_grps_lock; +}; + +struct qib_verbs_counters { + u64 symbol_error_counter; + u64 link_error_recovery_counter; + u64 link_downed_counter; + u64 port_rcv_errors; + u64 port_rcv_remphys_errors; + u64 port_xmit_discards; + u64 port_xmit_data; + u64 port_rcv_data; + u64 port_xmit_packets; + u64 port_rcv_packets; + u32 local_link_integrity_errors; + u32 excessive_buffer_overrun_errors; + u32 vl15_dropped; +}; + +static inline struct qib_mr *to_imr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct qib_mr, ibmr); +} + +static inline struct qib_pd *to_ipd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct qib_pd, ibpd); +} + +static inline struct qib_ah *to_iah(struct ib_ah *ibah) +{ + return container_of(ibah, struct qib_ah, ibah); +} + +static inline struct qib_cq *to_icq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct qib_cq, ibcq); +} + +static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct qib_srq, ibsrq); +} + +static inline struct qib_qp *to_iqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct qib_qp, ibqp); +} + +static inline struct qib_ibdev *to_idev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct qib_ibdev, ibdev); +} + +/* + * Send if not busy or waiting for I/O and either + * a RC response is pending or we can process send work requests. + */ +static inline int qib_send_ok(struct qib_qp *qp) +{ + return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) && + (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) || + !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); +} + +extern struct workqueue_struct *qib_wq; +extern struct workqueue_struct *qib_cq_wq; + +/* + * This must be called with s_lock held. + */ +static inline void qib_schedule_send(struct qib_qp *qp) +{ + if (qib_send_ok(qp)) { + if (qp->processor_id == smp_processor_id()) + queue_work(qib_wq, &qp->s_work); + else + queue_work_on(qp->processor_id, + qib_wq, &qp->s_work); + } +} + +static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) +{ + u16 p1 = pkey1 & 0x7FFF; + u16 p2 = pkey2 & 0x7FFF; + + /* + * Low 15 bits must be non-zero and match, and + * one of the two must be a full member. 
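+	 * (Bit 15 of a PKEY is the full-member bit, which is why the
+	 * code below tests the sign of the keys as 16-bit integers.)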
+ */ + return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0); +} + +void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, + u32 qp1, u32 qp2, __be16 lid1, __be16 lid2); +void qib_cap_mask_chg(struct qib_ibport *ibp); +void qib_sys_guid_chg(struct qib_ibport *ibp); +void qib_node_desc_chg(struct qib_ibport *ibp); +int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + struct ib_wc *in_wc, struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad); +int qib_create_agents(struct qib_ibdev *dev); +void qib_free_agents(struct qib_ibdev *dev); + +/* + * Compare the lower 24 bits of the two values. + * Returns an integer <, ==, or > than zero. + */ +static inline int qib_cmp24(u32 a, u32 b) +{ + return (((int) a) - ((int) b)) << 8; +} + +struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid); + +int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, + u64 *rwords, u64 *spkts, u64 *rpkts, + u64 *xmit_wait); + +int qib_get_counters(struct qib_pportdata *ppd, + struct qib_verbs_counters *cntrs); + +int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); + +int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); + +int qib_mcast_tree_empty(struct qib_ibport *ibp); + +__be32 qib_compute_aeth(struct qib_qp *qp); + +struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn); + +struct ib_qp *qib_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); + +int qib_destroy_qp(struct ib_qp *ibqp); + +int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err); + +int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); + +int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_qp_init_attr *init_attr); + +unsigned qib_free_all_qps(struct qib_devdata *dd); + +void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt); + +void qib_free_qpn_table(struct qib_qpn_table *qpt); + +void qib_get_credit(struct qib_qp *qp, u32 aeth); + +unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult); + +void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail); + +void qib_put_txreq(struct qib_verbs_txreq *tx); + +int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, + u32 hdrwords, struct qib_sge_state *ss, u32 len); + +void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, + int release); + +void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release); + +void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, + int has_grh, void *data, u32 tlen, struct qib_qp *qp); + +void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, + int has_grh, void *data, u32 tlen, struct qib_qp *qp); + +int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); + +void qib_rc_rnr_retry(unsigned long arg); + +void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); + +void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err); + +int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); + +void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, + int has_grh, void *data, u32 tlen, struct qib_qp *qp); + +int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr); + +int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr); + +int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, + struct qib_sge *isge, struct ib_sge *sge, int acc); 
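The membership rule that qib_pkey_ok() earlier in this header encodes is easy to get backwards, so a short stand-alone sketch may help. This is an editorial illustration only, not driver code; the function name and the sample keys are made up and merely mirror the test shown above:

#include <stdio.h>
#include <stdint.h>

/* Mirrors qib_pkey_ok(): the low 15 bits must be non-zero and equal,
 * and at least one of the two PKEYs must have the full-member bit
 * (bit 15) set. */
static int pkey_match(uint16_t pkey1, uint16_t pkey2)
{
	uint16_t p1 = pkey1 & 0x7FFF;
	uint16_t p2 = pkey2 & 0x7FFF;

	return p1 && p1 == p2 && ((int16_t)pkey1 < 0 || (int16_t)pkey2 < 0);
}

int main(void)
{
	printf("%d\n", pkey_match(0xFFFF, 0x7FFF)); /* 1: full vs. limited member */
	printf("%d\n", pkey_match(0x7FFF, 0x7FFF)); /* 0: two limited members */
	printf("%d\n", pkey_match(0xFFFF, 0xFFFE)); /* 0: low 15 bits differ */
	return 0;
}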
+ +int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, + u32 len, u64 vaddr, u32 rkey, int acc); + +int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); + +struct ib_srq *qib_create_srq(struct ib_pd *ibpd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_udata *udata); + +int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, + struct ib_udata *udata); + +int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); + +int qib_destroy_srq(struct ib_srq *ibsrq); + +void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig); + +int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); + +struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, + int comp_vector, struct ib_ucontext *context, + struct ib_udata *udata); + +int qib_destroy_cq(struct ib_cq *ibcq); + +int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); + +int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); + +struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc); + +struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *buffer_list, + int num_phys_buf, int acc, u64 *iova_start); + +struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int mr_access_flags, + struct ib_udata *udata); + +int qib_dereg_mr(struct ib_mr *ibmr); + +struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len); + +struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list( + struct ib_device *ibdev, int page_list_len); + +void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl); + +int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr); + +struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, + struct ib_fmr_attr *fmr_attr); + +int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova); + +int qib_unmap_fmr(struct list_head *fmr_list); + +int qib_dealloc_fmr(struct ib_fmr *ibfmr); + +void qib_release_mmap_info(struct kref *ref); + +struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, + struct ib_ucontext *context, + void *obj); + +void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip, + u32 size, void *obj); + +int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); + +int qib_get_rwqe(struct qib_qp *qp, int wr_id_only); + +void qib_migrate_qp(struct qib_qp *qp); + +int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, + int has_grh, struct qib_qp *qp, u32 bth0); + +u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr, + struct ib_global_route *grh, u32 hwords, u32 nwords); + +void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, + u32 bth0, u32 bth2); + +void qib_do_send(struct work_struct *work); + +void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, + enum ib_wc_status status); + +void qib_send_rc_ack(struct qib_qp *qp); + +int qib_make_rc_req(struct qib_qp *qp); + +int qib_make_uc_req(struct qib_qp *qp); + +int qib_make_ud_req(struct qib_qp *qp); + +int qib_register_ib_device(struct qib_devdata *); + +void qib_unregister_ib_device(struct qib_devdata *); + +void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32); + +void qib_ib_piobufavail(struct qib_devdata *); + +unsigned qib_get_npkeys(struct qib_devdata *); + +unsigned qib_get_pkey(struct qib_ibport *, unsigned); + +extern const enum ib_wc_opcode ib_qib_wc_opcode[]; + 
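qib_cmp24() above relies on the left shift discarding the top byte of the 32-bit difference, so that only the low 24 bits decide the sign of the result. A stand-alone check of the wrap-around behaviour (editorial illustration only, not driver code; like the kernel, it assumes two's-complement arithmetic):

#include <stdio.h>

/* Same trick as qib_cmp24(): compare two PSNs modulo 2^24. */
static int cmp24(unsigned int a, unsigned int b)
{
	return ((int) a - (int) b) << 8;
}

int main(void)
{
	/* PSN 0x000001 is "after" 0xFFFFFF across the 24-bit wrap. */
	printf("%d\n", cmp24(0x000001, 0xFFFFFF) > 0); /* 1 */
	printf("%d\n", cmp24(0x000002, 0x000001) > 0); /* 1 */
	printf("%d\n", cmp24(0x000001, 0x000002) < 0); /* 1 */
	return 0;
}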
+/* + * Below HCA-independent IB PhysPortState values, returned + * by the f_ibphys_portstate() routine. + */ +#define IB_PHYSPORTSTATE_SLEEP 1 +#define IB_PHYSPORTSTATE_POLL 2 +#define IB_PHYSPORTSTATE_DISABLED 3 +#define IB_PHYSPORTSTATE_CFG_TRAIN 4 +#define IB_PHYSPORTSTATE_LINKUP 5 +#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6 +#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8 +#define IB_PHYSPORTSTATE_CFG_IDLE 0xB +#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC +#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE +#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF +#define IB_PHYSPORTSTATE_CFG_ENH 0x10 +#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13 + +extern const int ib_qib_state_ops[]; + +extern __be64 ib_qib_sys_image_guid; /* in network order */ + +extern unsigned int ib_qib_lkey_table_size; + +extern unsigned int ib_qib_max_cqes; + +extern unsigned int ib_qib_max_cqs; + +extern unsigned int ib_qib_max_qp_wrs; + +extern unsigned int ib_qib_max_qps; + +extern unsigned int ib_qib_max_sges; + +extern unsigned int ib_qib_max_mcast_grps; + +extern unsigned int ib_qib_max_mcast_qp_attached; + +extern unsigned int ib_qib_max_srqs; + +extern unsigned int ib_qib_max_srq_sges; + +extern unsigned int ib_qib_max_srq_wrs; + +extern const u32 ib_qib_rnr_table[]; + +extern struct ib_dma_mapping_ops qib_dma_mapping_ops; + +#endif /* QIB_VERBS_H */ diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c new file mode 100644 index 000000000000..dabb697b1c2a --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/rculist.h> + +#include "qib.h" + +/** + * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct + * @qp: the QP to link + */ +static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp) +{ + struct qib_mcast_qp *mqp; + + mqp = kmalloc(sizeof *mqp, GFP_KERNEL); + if (!mqp) + goto bail; + + mqp->qp = qp; + atomic_inc(&qp->refcount); + +bail: + return mqp; +} + +static void qib_mcast_qp_free(struct qib_mcast_qp *mqp) +{ + struct qib_qp *qp = mqp->qp; + + /* Notify qib_destroy_qp() if it is waiting. */ + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); + + kfree(mqp); +} + +/** + * qib_mcast_alloc - allocate the multicast GID structure + * @mgid: the multicast GID + * + * A list of QPs will be attached to this structure. + */ +static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid) +{ + struct qib_mcast *mcast; + + mcast = kmalloc(sizeof *mcast, GFP_KERNEL); + if (!mcast) + goto bail; + + mcast->mgid = *mgid; + INIT_LIST_HEAD(&mcast->qp_list); + init_waitqueue_head(&mcast->wait); + atomic_set(&mcast->refcount, 0); + mcast->n_attached = 0; + +bail: + return mcast; +} + +static void qib_mcast_free(struct qib_mcast *mcast) +{ + struct qib_mcast_qp *p, *tmp; + + list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) + qib_mcast_qp_free(p); + + kfree(mcast); +} + +/** + * qib_mcast_find - search the global table for the given multicast GID + * @ibp: the IB port structure + * @mgid: the multicast GID to search for + * + * Returns NULL if not found. + * + * The caller is responsible for decrementing the reference count if found. + */ +struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid) +{ + struct rb_node *n; + unsigned long flags; + struct qib_mcast *mcast; + + spin_lock_irqsave(&ibp->lock, flags); + n = ibp->mcast_tree.rb_node; + while (n) { + int ret; + + mcast = rb_entry(n, struct qib_mcast, rb_node); + + ret = memcmp(mgid->raw, mcast->mgid.raw, + sizeof(union ib_gid)); + if (ret < 0) + n = n->rb_left; + else if (ret > 0) + n = n->rb_right; + else { + atomic_inc(&mcast->refcount); + spin_unlock_irqrestore(&ibp->lock, flags); + goto bail; + } + } + spin_unlock_irqrestore(&ibp->lock, flags); + + mcast = NULL; + +bail: + return mcast; +} + +/** + * qib_mcast_add - insert mcast GID into table and attach QP struct + * @mcast: the mcast GID table + * @mqp: the QP to attach + * + * Return zero if both were added. Return EEXIST if the GID was already in + * the table but the QP was added. Return ESRCH if the QP was already + * attached and neither structure was added. + */ +static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp, + struct qib_mcast *mcast, struct qib_mcast_qp *mqp) +{ + struct rb_node **n = &ibp->mcast_tree.rb_node; + struct rb_node *pn = NULL; + int ret; + + spin_lock_irq(&ibp->lock); + + while (*n) { + struct qib_mcast *tmcast; + struct qib_mcast_qp *p; + + pn = *n; + tmcast = rb_entry(pn, struct qib_mcast, rb_node); + + ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw, + sizeof(union ib_gid)); + if (ret < 0) { + n = &pn->rb_left; + continue; + } + if (ret > 0) { + n = &pn->rb_right; + continue; + } + + /* Search the QP list to see if this is already there. 
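+		 * The walk uses the RCU list primitives because
+		 * qib_multicast_detach() unlinks entries with
+		 * list_del_rcu() while walkers may still be on the list.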
+		 */
+		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
+			if (p->qp == mqp->qp) {
+				ret = ESRCH;
+				goto bail;
+			}
+		}
+		if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
+			ret = ENOMEM;
+			goto bail;
+		}
+
+		tmcast->n_attached++;
+
+		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
+		ret = EEXIST;
+		goto bail;
+	}
+
+	spin_lock(&dev->n_mcast_grps_lock);
+	if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
+		spin_unlock(&dev->n_mcast_grps_lock);
+		ret = ENOMEM;
+		goto bail;
+	}
+
+	dev->n_mcast_grps_allocated++;
+	spin_unlock(&dev->n_mcast_grps_lock);
+
+	mcast->n_attached++;
+
+	list_add_tail_rcu(&mqp->list, &mcast->qp_list);
+
+	atomic_inc(&mcast->refcount);
+	rb_link_node(&mcast->rb_node, pn, n);
+	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
+
+	ret = 0;
+
+bail:
+	spin_unlock_irq(&ibp->lock);
+
+	return ret;
+}
+
+int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+	struct qib_qp *qp = to_iqp(ibqp);
+	struct qib_ibdev *dev = to_idev(ibqp->device);
+	struct qib_ibport *ibp;
+	struct qib_mcast *mcast;
+	struct qib_mcast_qp *mqp;
+	int ret;
+
+	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Allocate the data structures now, since it's better to do this
+	 * outside of the spin locks and they will most likely be needed.
+	 */
+	mcast = qib_mcast_alloc(gid);
+	if (mcast == NULL) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+	mqp = qib_mcast_qp_alloc(qp);
+	if (mqp == NULL) {
+		qib_mcast_free(mcast);
+		ret = -ENOMEM;
+		goto bail;
+	}
+	ibp = to_iport(ibqp->device, qp->port_num);
+	switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
+	case ESRCH:
+		/* Neither was used: OK to attach the same QP twice. */
+		qib_mcast_qp_free(mqp);
+		qib_mcast_free(mcast);
+		break;
+
+	case EEXIST: /* The mcast wasn't used */
+		qib_mcast_free(mcast);
+		break;
+
+	case ENOMEM:
+		/* Exceeded the maximum number of mcast groups. */
+		qib_mcast_qp_free(mqp);
+		qib_mcast_free(mcast);
+		ret = -ENOMEM;
+		goto bail;
+
+	default:
+		break;
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+	struct qib_qp *qp = to_iqp(ibqp);
+	struct qib_ibdev *dev = to_idev(ibqp->device);
+	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+	struct qib_mcast *mcast = NULL;
+	struct qib_mcast_qp *p, *tmp, *delp = NULL;
+	struct rb_node *n;
+	int last = 0;
+	int ret;
+
+	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	spin_lock_irq(&ibp->lock);
+
+	/* Find the GID in the mcast table. */
+	n = ibp->mcast_tree.rb_node;
+	while (1) {
+		if (n == NULL) {
+			spin_unlock_irq(&ibp->lock);
+			ret = -EINVAL;
+			goto bail;
+		}
+
+		mcast = rb_entry(n, struct qib_mcast, rb_node);
+		ret = memcmp(gid->raw, mcast->mgid.raw,
+			     sizeof(union ib_gid));
+		if (ret < 0)
+			n = n->rb_left;
+		else if (ret > 0)
+			n = n->rb_right;
+		else
+			break;
+	}
+
+	/* Search the QP list. */
+	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
+		if (p->qp != qp)
+			continue;
+		/*
+		 * We found it, so remove it, but don't poison the forward
+		 * link until we are sure there are no list walkers.
+		 */
+		list_del_rcu(&p->list);
+		mcast->n_attached--;
+		delp = p;
+
+		/* If this was the last attached QP, remove the GID too. */
+		if (list_empty(&mcast->qp_list)) {
+			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
+			last = 1;
+		}
+		break;
+	}
+
+	spin_unlock_irq(&ibp->lock);
+
+	/* The QP was never attached to this group; p would be a bogus
+	 * list-head cursor here, so track the hit in delp instead. */
+	if (!delp) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Wait for any list walkers to finish before freeing the
+	 * list element.
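+	 * A count of one means only the reference taken by
+	 * qib_mcast_add() when the GID went into the tree remains;
+	 * qib_mcast_find() callers hold the references waited out here.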
+	 */
+	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+	qib_mcast_qp_free(delp);
+
+	if (last) {
+		atomic_dec(&mcast->refcount);
+		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+		qib_mcast_free(mcast);
+		spin_lock_irq(&dev->n_mcast_grps_lock);
+		dev->n_mcast_grps_allocated--;
+		spin_unlock_irq(&dev->n_mcast_grps_lock);
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+int qib_mcast_tree_empty(struct qib_ibport *ibp)
+{
+	return ibp->mcast_tree.rb_node == NULL;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
index 74fa5cc5131d..673cf4c22ebd 100644
--- a/drivers/infiniband/hw/ipath/ipath_7220.h
+++ b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
@@ -1,7 +1,5 @@
-#ifndef _IPATH_7220_H
-#define _IPATH_7220_H
 /*
- * Copyright (c) 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * This file is conditionally built on x86_64 only. Otherwise weak symbol + * versions of the functions exported from here are used. + */ + +#include <linux/pci.h> +#include <asm/mtrr.h> +#include <asm/processor.h> + +#include "qib.h" + +/** + * qib_enable_wc - enable write combining for MMIO writes to the device + * @dd: qlogic_ib device + * + * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable + * write combining. + */ +int qib_enable_wc(struct qib_devdata *dd) +{ + int ret = 0; + u64 pioaddr, piolen; + unsigned bits; + const unsigned long addr = pci_resource_start(dd->pcidev, 0); + const size_t len = pci_resource_len(dd->pcidev, 0); + + /* + * Set the PIO buffers to be WCCOMB, so we get HT bursts to the + * chip. Linux (possibly the hardware) requires it to be on a power + * of 2 address matching the length (which has to be a power of 2). + * For rev1, that means the base address, for rev2, it will be just + * the PIO buffers themselves. + * For chips with two sets of buffers, the calculations are + * somewhat more complicated; we need to sum, and the piobufbase + * register has both offsets, 2K in low 32 bits, 4K in high 32 bits. + * The buffers are still packed, so a single range covers both. 
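+	 * The arithmetic below rounds the length up to a power of two and,
+	 * when the base is not aligned, moves it down to an aligned address
+	 * and doubles the length so the buffers stay covered.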
+	 */
+	if (dd->piobcnt2k && dd->piobcnt4k) {
+		/* 2 sizes for chip */
+		unsigned long pio2kbase, pio4kbase;
+		pio2kbase = dd->piobufbase & 0xffffffffUL;
+		pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
+		if (pio2kbase < pio4kbase) {
+			/* all current chips */
+			pioaddr = addr + pio2kbase;
+			piolen = pio4kbase - pio2kbase +
+				dd->piobcnt4k * dd->align4k;
+		} else {
+			pioaddr = addr + pio4kbase;
+			piolen = pio2kbase - pio4kbase +
+				dd->piobcnt2k * dd->palign;
+		}
+	} else {  /* single buffer size (2K, currently) */
+		pioaddr = addr + dd->piobufbase;
+		piolen = dd->piobcnt2k * dd->palign +
+			dd->piobcnt4k * dd->align4k;
+	}
+
+	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
+		/* do nothing */ ;
+
+	if (piolen != (1ULL << bits)) {
+		piolen >>= bits;
+		while (piolen >>= 1)
+			bits++;
+		piolen = 1ULL << (bits + 1);
+	}
+	if (pioaddr & (piolen - 1)) {
+		u64 atmp;
+		atmp = pioaddr & ~(piolen - 1);
+		if (atmp < addr || (atmp + piolen) > (addr + len)) {
+			qib_dev_err(dd, "No way to align address/size "
+				    "(%llx/%llx), no WC mtrr\n",
+				    (unsigned long long) atmp,
+				    (unsigned long long) piolen << 1);
+			ret = -ENODEV;
+		} else {
+			pioaddr = atmp;
+			piolen <<= 1;
+		}
+	}
+
+	if (!ret) {
+		int cookie;
+
+		cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
+		if (cookie < 0) {
+			qib_devinfo(dd->pcidev,
+				    "mtrr_add() WC for PIO bufs "
+				    "failed (%d)\n",
+				    cookie);
+			ret = -EINVAL;
+		} else {
+			dd->wc_cookie = cookie;
+			dd->wc_base = (unsigned long) pioaddr;
+			dd->wc_len = (unsigned long) piolen;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * qib_disable_wc - disable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ */
+void qib_disable_wc(struct qib_devdata *dd)
+{
+	if (dd->wc_cookie) {
+		int r;
+
+		r = mtrr_del(dd->wc_cookie, dd->wc_base,
+			     dd->wc_len);
+		if (r < 0)
+			qib_devinfo(dd->pcidev,
+				    "mtrr_del(%lx, %lx, %lx) failed: %d\n",
+				    dd->wc_cookie, dd->wc_base,
+				    dd->wc_len, r);
+		dd->wc_cookie = 0; /* even on failure */
+	}
+}
+
+/**
+ * qib_unordered_wc - indicate whether write combining is unordered
+ *
+ * Because our performance depends on our ability to do write combining mmio
+ * writes in the most efficient way, we need to know if we are on an Intel
+ * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
+ * the order completed, and so no special flushing is required to get
+ * correct ordering.  Intel processors, however, will flush write buffers
+ * out in "random" orders, and so explicit ordering is needed at times.
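+ * Hence the vendor check below: anything that is not an AMD part is
+ * treated as unordered.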
+ */ +int qib_unordered_wc(void) +{ + return boot_cpu_data.x86_vendor != X86_VENDOR_AMD; +} diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 423e0e6031ab..34157bb97ed6 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -47,15 +47,15 @@ struct joydev { struct mutex mutex; struct device dev; - struct js_corr corr[ABS_MAX + 1]; + struct js_corr corr[ABS_CNT]; struct JS_DATA_SAVE_TYPE glue; int nabs; int nkey; __u16 keymap[KEY_MAX - BTN_MISC + 1]; __u16 keypam[KEY_MAX - BTN_MISC + 1]; - __u8 absmap[ABS_MAX + 1]; - __u8 abspam[ABS_MAX + 1]; - __s16 abs[ABS_MAX + 1]; + __u8 absmap[ABS_CNT]; + __u8 abspam[ABS_CNT]; + __s16 abs[ABS_CNT]; }; struct joydev_client { @@ -826,7 +826,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, joydev->handle.handler = handler; joydev->handle.private = joydev; - for (i = 0; i < ABS_MAX + 1; i++) + for (i = 0; i < ABS_CNT; i++) if (test_bit(i, dev->absbit)) { joydev->absmap[i] = joydev->nabs; joydev->abspam[joydev->nabs] = i; diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c index 35149ec455a9..79172af164f2 100644 --- a/drivers/input/keyboard/amikbd.c +++ b/drivers/input/keyboard/amikbd.c @@ -35,6 +35,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/keyboard.h> +#include <linux/platform_device.h> #include <asm/amigaints.h> #include <asm/amigahw.h> @@ -154,10 +155,9 @@ static const char *amikbd_messages[8] = { [7] = KERN_WARNING "amikbd: keyboard interrupt\n" }; -static struct input_dev *amikbd_dev; - -static irqreturn_t amikbd_interrupt(int irq, void *dummy) +static irqreturn_t amikbd_interrupt(int irq, void *data) { + struct input_dev *dev = data; unsigned char scancode, down; scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */ @@ -170,47 +170,42 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy) if (scancode < 0x78) { /* scancodes < 0x78 are keys */ if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */ - input_report_key(amikbd_dev, scancode, 1); - input_report_key(amikbd_dev, scancode, 0); + input_report_key(dev, scancode, 1); + input_report_key(dev, scancode, 0); } else { - input_report_key(amikbd_dev, scancode, down); + input_report_key(dev, scancode, down); } - input_sync(amikbd_dev); + input_sync(dev); } else /* scancodes >= 0x78 are error codes */ printk(amikbd_messages[scancode - 0x78]); return IRQ_HANDLED; } -static int __init amikbd_init(void) +static int __init amikbd_probe(struct platform_device *pdev) { + struct input_dev *dev; int i, j, err; - if (!AMIGAHW_PRESENT(AMI_KEYBOARD)) - return -ENODEV; - - if (!request_mem_region(CIAA_PHYSADDR-1+0xb00, 0x100, "amikeyb")) - return -EBUSY; - - amikbd_dev = input_allocate_device(); - if (!amikbd_dev) { - printk(KERN_ERR "amikbd: not enough memory for input device\n"); - err = -ENOMEM; - goto fail1; + dev = input_allocate_device(); + if (!dev) { + dev_err(&pdev->dev, "Not enough memory for input device\n"); + return -ENOMEM; } - amikbd_dev->name = "Amiga Keyboard"; - amikbd_dev->phys = "amikbd/input0"; - amikbd_dev->id.bustype = BUS_AMIGA; - amikbd_dev->id.vendor = 0x0001; - amikbd_dev->id.product = 0x0001; - amikbd_dev->id.version = 0x0100; + dev->name = pdev->name; + dev->phys = "amikbd/input0"; + dev->id.bustype = BUS_AMIGA; + dev->id.vendor = 0x0001; + dev->id.product = 0x0001; + dev->id.version = 0x0100; + dev->dev.parent = &pdev->dev; - amikbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); + dev->evbit[0] = BIT_MASK(EV_KEY) | 
BIT_MASK(EV_REP); for (i = 0; i < 0x78; i++) - set_bit(i, amikbd_dev->keybit); + set_bit(i, dev->keybit); for (i = 0; i < MAX_NR_KEYMAPS; i++) { static u_short temp_map[NR_KEYS] __initdata; @@ -229,30 +224,54 @@ static int __init amikbd_init(void) memcpy(key_maps[i], temp_map, sizeof(temp_map)); } ciaa.cra &= ~0x41; /* serial data in, turn off TA */ - if (request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd", - amikbd_interrupt)) { - err = -EBUSY; + err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd", + dev); + if (err) goto fail2; - } - err = input_register_device(amikbd_dev); + err = input_register_device(dev); if (err) goto fail3; + platform_set_drvdata(pdev, dev); + return 0; - fail3: free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt); - fail2: input_free_device(amikbd_dev); - fail1: release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100); + fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev); + fail2: input_free_device(dev); return err; } -static void __exit amikbd_exit(void) +static int __exit amikbd_remove(struct platform_device *pdev) +{ + struct input_dev *dev = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + free_irq(IRQ_AMIGA_CIAA_SP, dev); + input_unregister_device(dev); + return 0; +} + +static struct platform_driver amikbd_driver = { + .remove = __exit_p(amikbd_remove), + .driver = { + .name = "amiga-keyboard", + .owner = THIS_MODULE, + }, +}; + +static int __init amikbd_init(void) { - free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt); - input_unregister_device(amikbd_dev); - release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100); + return platform_driver_probe(&amikbd_driver, amikbd_probe); } module_init(amikbd_init); + +static void __exit amikbd_exit(void) +{ + platform_driver_unregister(&amikbd_driver); +} + module_exit(amikbd_exit); + +MODULE_ALIAS("platform:amiga-keyboard"); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 48cdabec372a..c44b9eafc556 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -80,6 +80,16 @@ config INPUT_M68K_BEEP tristate "M68k Beeper support" depends on M68K +config INPUT_MAX8925_ONKEY + tristate "MAX8925 ONKEY support" + depends on MFD_MAX8925 + help + Support the ONKEY of MAX8925 PMICs as an input device + reporting power button status. + + To compile this driver as a module, choose M here: the module + will be called max8925_onkey. + config INPUT_APANEL tristate "Fujitsu Lifebook Application Panel buttons" depends on X86 && I2C && LEDS_CLASS diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index f9f577031e06..71fe57d8023f 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o +obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c index ad730e15afc0..e00a1cc79c0a 100644 --- a/drivers/input/misc/hp_sdc_rtc.c +++ b/drivers/input/misc/hp_sdc_rtc.c @@ -43,6 +43,7 @@ #include <linux/proc_fs.h> #include <linux/poll.h> #include <linux/rtc.h> +#include <linux/smp_lock.h> #include <linux/semaphore.h> MODULE_AUTHOR("Brian S. 
Julin <bri@calyx.com>");
@@ -64,8 +65,8 @@ static DECLARE_WAIT_QUEUE_HEAD(hp_sdc_rtc_wait);
 
 static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
 			       size_t count, loff_t *ppos);
 
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
-			    unsigned int cmd, unsigned long arg);
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+				      unsigned int cmd, unsigned long arg);
 
 static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait);
 
@@ -512,7 +513,7 @@ static int hp_sdc_rtc_read_proc(char *page, char **start, off_t off,
         return len;
 }
 
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
+static int hp_sdc_rtc_ioctl(struct file *file,
 			    unsigned int cmd, unsigned long arg)
 {
 #if 1
@@ -659,14 +660,27 @@ static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
 #endif
 }
 
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+				      unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = hp_sdc_rtc_ioctl(file, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
+
 static const struct file_operations hp_sdc_rtc_fops = {
-	.owner =		THIS_MODULE,
-	.llseek =		no_llseek,
-	.read =			hp_sdc_rtc_read,
-	.poll =			hp_sdc_rtc_poll,
-	.ioctl =		hp_sdc_rtc_ioctl,
-	.open =			hp_sdc_rtc_open,
-	.fasync =		hp_sdc_rtc_fasync,
+	.owner =		THIS_MODULE,
+	.llseek =		no_llseek,
+	.read =			hp_sdc_rtc_read,
+	.poll =			hp_sdc_rtc_poll,
+	.unlocked_ioctl =	hp_sdc_rtc_unlocked_ioctl,
+	.open =			hp_sdc_rtc_open,
+	.fasync =		hp_sdc_rtc_fasync,
 };
 
 static struct miscdevice hp_sdc_rtc_dev = {
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
new file mode 100644
index 000000000000..80af44608018
--- /dev/null
+++ b/drivers/input/misc/max8925_onkey.c
@@ -0,0 +1,148 @@
+/**
+ * max8925_onkey.c - MAX8925 ONKEY driver
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ *      Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max8925.h>
+#include <linux/slab.h>
+
+#define HARDRESET_EN		(1 << 7)
+#define PWREN_EN		(1 << 7)
+
+struct max8925_onkey_info {
+	struct input_dev	*idev;
+	struct i2c_client	*i2c;
+	int			irq;
+};
+
+/*
+ * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
+ * max8925_set_bits() operates on the I2C bus and may sleep, so the
+ * work is done in a threaded IRQ handler.
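+ * (Passing a NULL primary handler to request_threaded_irq(), as the
+ * probe below does, runs this handler in process context, where
+ * sleeping is allowed.)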
+ */ +static irqreturn_t max8925_onkey_handler(int irq, void *data) +{ + struct max8925_onkey_info *info = data; + + input_report_key(info->idev, KEY_POWER, 1); + input_sync(info->idev); + + /* Enable hardreset to halt if system isn't shutdown on time */ + max8925_set_bits(info->i2c, MAX8925_SYSENSEL, + HARDRESET_EN, HARDRESET_EN); + + return IRQ_HANDLED; +} + +static int __devinit max8925_onkey_probe(struct platform_device *pdev) +{ + struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); + struct max8925_onkey_info *info; + int error; + + info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->i2c = chip->i2c; + info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC; + + info->idev = input_allocate_device(); + if (!info->idev) { + dev_err(chip->dev, "Failed to allocate input dev\n"); + error = -ENOMEM; + goto out_input; + } + + info->idev->name = "max8925_on"; + info->idev->phys = "max8925_on/input0"; + info->idev->id.bustype = BUS_I2C; + info->idev->dev.parent = &pdev->dev; + info->idev->evbit[0] = BIT_MASK(EV_KEY); + info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER); + + error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler, + IRQF_ONESHOT, "onkey", info); + if (error < 0) { + dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", + info->irq, error); + goto out_irq; + } + + error = input_register_device(info->idev); + if (error) { + dev_err(chip->dev, "Can't register input device: %d\n", error); + goto out; + } + + platform_set_drvdata(pdev, info); + + return 0; + +out: + free_irq(info->irq, info); +out_irq: + input_free_device(info->idev); +out_input: + kfree(info); + return error; +} + +static int __devexit max8925_onkey_remove(struct platform_device *pdev) +{ + struct max8925_onkey_info *info = platform_get_drvdata(pdev); + + free_irq(info->irq, info); + input_unregister_device(info->idev); + kfree(info); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver max8925_onkey_driver = { + .driver = { + .name = "max8925-onkey", + .owner = THIS_MODULE, + }, + .probe = max8925_onkey_probe, + .remove = __devexit_p(max8925_onkey_remove), +}; + +static int __init max8925_onkey_init(void) +{ + return platform_driver_register(&max8925_onkey_driver); +} +module_init(max8925_onkey_init); + +static void __exit max8925_onkey_exit(void) +{ + platform_driver_unregister(&max8925_onkey_driver); +} +module_exit(max8925_onkey_exit); + +MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver"); +MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c index 0d45422f8095..1dacae4b43f0 100644 --- a/drivers/input/misc/sparcspkr.c +++ b/drivers/input/misc/sparcspkr.c @@ -259,8 +259,11 @@ static const struct of_device_id bbc_beep_match[] = { }; static struct of_platform_driver bbc_beep_driver = { - .name = "bbcbeep", - .match_table = bbc_beep_match, + .driver = { + .name = "bbcbeep", + .owner = THIS_MODULE, + .of_match_table = bbc_beep_match, + }, .probe = bbc_beep_probe, .remove = __devexit_p(bbc_remove), .shutdown = sparcspkr_shutdown, @@ -338,8 +341,11 @@ static const struct of_device_id grover_beep_match[] = { }; static struct of_platform_driver grover_beep_driver = { - .name = "groverbeep", - .match_table = grover_beep_match, + .driver = { + .name = "groverbeep", + .owner = THIS_MODULE, + .of_match_table = grover_beep_match, + }, .probe = grover_beep_probe, .remove = __devexit_p(grover_remove), 
.shutdown = sparcspkr_shutdown, diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index fee9eac8e04a..4f9b2afc24e8 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info) twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL); - twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER); twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL); + twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER); info->enabled = false; } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 1477466076ad..b71eb55f2dbc 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -300,7 +300,7 @@ static int uinput_validate_absbits(struct input_dev *dev) unsigned int cnt; int retval = 0; - for (cnt = 0; cnt < ABS_MAX + 1; cnt++) { + for (cnt = 0; cnt < ABS_CNT; cnt++) { if (!test_bit(cnt, dev->absbit)) continue; @@ -387,7 +387,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu dev->id.product = user_dev->id.product; dev->id.version = user_dev->id.version; - size = sizeof(int) * (ABS_MAX + 1); + size = sizeof(int) * ABS_CNT; memcpy(dev->absmax, user_dev->absmax, size); memcpy(dev->absmin, user_dev->absmin, size); memcpy(dev->absfuzz, user_dev->absfuzz, size); diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c index a185ac78a42c..ff5f61a0fd3a 100644 --- a/drivers/input/mouse/amimouse.c +++ b/drivers/input/mouse/amimouse.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/input.h> #include <linux/interrupt.h> +#include <linux/platform_device.h> #include <asm/irq.h> #include <asm/setup.h> @@ -34,10 +35,10 @@ MODULE_DESCRIPTION("Amiga mouse driver"); MODULE_LICENSE("GPL"); static int amimouse_lastx, amimouse_lasty; -static struct input_dev *amimouse_dev; -static irqreturn_t amimouse_interrupt(int irq, void *dummy) +static irqreturn_t amimouse_interrupt(int irq, void *data) { + struct input_dev *dev = data; unsigned short joy0dat, potgor; int nx, ny, dx, dy; @@ -59,14 +60,14 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy) potgor = amiga_custom.potgor; - input_report_rel(amimouse_dev, REL_X, dx); - input_report_rel(amimouse_dev, REL_Y, dy); + input_report_rel(dev, REL_X, dx); + input_report_rel(dev, REL_Y, dy); - input_report_key(amimouse_dev, BTN_LEFT, ciaa.pra & 0x40); - input_report_key(amimouse_dev, BTN_MIDDLE, potgor & 0x0100); - input_report_key(amimouse_dev, BTN_RIGHT, potgor & 0x0400); + input_report_key(dev, BTN_LEFT, ciaa.pra & 0x40); + input_report_key(dev, BTN_MIDDLE, potgor & 0x0100); + input_report_key(dev, BTN_RIGHT, potgor & 0x0400); - input_sync(amimouse_dev); + input_sync(dev); return IRQ_HANDLED; } @@ -74,63 +75,90 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy) static int amimouse_open(struct input_dev *dev) { unsigned short joy0dat; + int error; joy0dat = amiga_custom.joy0dat; amimouse_lastx = joy0dat & 0xff; amimouse_lasty = joy0dat >> 8; - if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) { - printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB); - return -EBUSY; - } + error = request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", + dev); + if (error) + dev_err(&dev->dev, "Can't allocate irq %d\n", IRQ_AMIGA_VERTB); - return 0; + return error; } static void amimouse_close(struct input_dev *dev) { - 
free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt); + free_irq(IRQ_AMIGA_VERTB, dev); } -static int __init amimouse_init(void) +static int __init amimouse_probe(struct platform_device *pdev) { int err; + struct input_dev *dev; - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_MOUSE)) - return -ENODEV; - - amimouse_dev = input_allocate_device(); - if (!amimouse_dev) + dev = input_allocate_device(); + if (!dev) return -ENOMEM; - amimouse_dev->name = "Amiga mouse"; - amimouse_dev->phys = "amimouse/input0"; - amimouse_dev->id.bustype = BUS_AMIGA; - amimouse_dev->id.vendor = 0x0001; - amimouse_dev->id.product = 0x0002; - amimouse_dev->id.version = 0x0100; + dev->name = pdev->name; + dev->phys = "amimouse/input0"; + dev->id.bustype = BUS_AMIGA; + dev->id.vendor = 0x0001; + dev->id.product = 0x0002; + dev->id.version = 0x0100; - amimouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); - amimouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); - amimouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | + dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); + dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); - amimouse_dev->open = amimouse_open; - amimouse_dev->close = amimouse_close; + dev->open = amimouse_open; + dev->close = amimouse_close; + dev->dev.parent = &pdev->dev; - err = input_register_device(amimouse_dev); + err = input_register_device(dev); if (err) { - input_free_device(amimouse_dev); + input_free_device(dev); return err; } + platform_set_drvdata(pdev, dev); + return 0; } -static void __exit amimouse_exit(void) +static int __exit amimouse_remove(struct platform_device *pdev) { - input_unregister_device(amimouse_dev); + struct input_dev *dev = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + input_unregister_device(dev); + return 0; +} + +static struct platform_driver amimouse_driver = { + .remove = __exit_p(amimouse_remove), + .driver = { + .name = "amiga-mouse", + .owner = THIS_MODULE, + }, +}; + +static int __init amimouse_init(void) +{ + return platform_driver_probe(&amimouse_driver, amimouse_probe); } module_init(amimouse_init); + +static void __exit amimouse_exit(void) +{ + platform_driver_unregister(&amimouse_driver); +} + module_exit(amimouse_exit); + +MODULE_ALIAS("platform:amiga-mouse"); diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h index 5071af2c0604..04e32f2d1241 100644 --- a/drivers/input/serio/i8042-sparcio.h +++ b/drivers/input/serio/i8042-sparcio.h @@ -51,7 +51,7 @@ static inline void i8042_write_command(int val) static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; dp = dp->child; while (dp) { @@ -96,8 +96,11 @@ static const struct of_device_id sparc_i8042_match[] = { MODULE_DEVICE_TABLE(of, sparc_i8042_match); static struct of_platform_driver sparc_i8042_driver = { - .name = "i8042", - .match_table = sparc_i8042_match, + .driver = { + .name = "i8042", + .owner = THIS_MODULE, + .of_match_table = sparc_i8042_match, + }, .probe = sparc_i8042_probe, .remove = __devexit_p(sparc_i8042_remove), }; diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c index f84f8e32e3f1..e2c028d2638f 100644 --- a/drivers/input/serio/xilinx_ps2.c +++ b/drivers/input/serio/xilinx_ps2.c @@ -244,17 +244,17 @@ static int __devinit xps2_of_probe(struct of_device *ofdev, int error; 
dev_info(dev, "Device Tree Probing \'%s\'\n", - ofdev->node->name); + ofdev->dev.of_node->name); /* Get iospace for the device */ - error = of_address_to_resource(ofdev->node, 0, &r_mem); + error = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem); if (error) { dev_err(dev, "invalid address\n"); return error; } /* Get IRQ for the device */ - if (of_irq_to_resource(ofdev->node, 0, &r_irq) == NO_IRQ) { + if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) { dev_err(dev, "no IRQ found\n"); return -ENODEV; } @@ -342,7 +342,7 @@ static int __devexit xps2_of_remove(struct of_device *of_dev) iounmap(drvdata->base_address); /* Get iospace of the device */ - if (of_address_to_resource(of_dev->node, 0, &r_mem)) + if (of_address_to_resource(of_dev->dev.of_node, 0, &r_mem)) dev_err(dev, "invalid address\n"); else release_mem_region(r_mem.start, resource_size(&r_mem)); @@ -362,8 +362,11 @@ static const struct of_device_id xps2_of_match[] __devinitconst = { MODULE_DEVICE_TABLE(of, xps2_of_match); static struct of_platform_driver xps2_of_driver = { - .name = DRIVER_NAME, - .match_table = xps2_of_match, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = xps2_of_match, + }, .probe = xps2_of_probe, .remove = __devexit_p(xps2_of_remove), }; diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index b9f58ca82fd1..6703c6b9800a 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -590,4 +590,17 @@ config TOUCHSCREEN_PCAP To compile this driver as a module, choose M here: the module will be called pcap_ts. + +config TOUCHSCREEN_TPS6507X + tristate "TPS6507x based touchscreens" + depends on I2C + help + Say Y here if you have a TPS6507x based touchscreen + controller. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called tps6507x_ts. 
+ endif diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 8ad36eef90a2..497964a7a214 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -46,3 +46,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o +obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 532279cda0e4..634f6f6b9b13 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1163,8 +1163,8 @@ static int __devinit ads7846_probe(struct spi_device *spi) ts->reg = regulator_get(&spi->dev, "vcc"); if (IS_ERR(ts->reg)) { - dev_err(&spi->dev, "unable to get regulator: %ld\n", - PTR_ERR(ts->reg)); + err = PTR_ERR(ts->reg); + dev_err(&spi->dev, "unable to get regulator: %ld\n", err); goto err_free_gpio; } diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c index e0b7c834111d..ac5d0f9b0cb1 100644 --- a/drivers/input/touchscreen/s3c2410_ts.c +++ b/drivers/input/touchscreen/s3c2410_ts.c @@ -413,6 +413,8 @@ static struct dev_pm_ops s3c_ts_pmops = { #endif static struct platform_device_id s3cts_driver_ids[] = { + { "s3c2410-ts", 0 }, + { "s3c2440-ts", 0 }, { "s3c64xx-ts", FEAT_PEN_IRQ }, { } }; diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c new file mode 100644 index 000000000000..5de80a1a730b --- /dev/null +++ b/drivers/input/touchscreen/tps6507x-ts.c @@ -0,0 +1,400 @@ +/* + * drivers/input/touchscreen/tps6507x_ts.c + * + * Touchscreen driver for the tps6507x chip. + * + * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) + * + * Credits: + * + * Using code from tsc2007, MtekVision Co., Ltd. + * + * For licencing details see kernel-base/COPYING + * + * TPS65070, TPS65073, TPS650731, and TPS650732 support + * 10 bit touch screen interface. 
+ */ + +#include <linux/module.h> +#include <linux/workqueue.h> +#include <linux/slab.h> +#include <linux/input.h> +#include <linux/platform_device.h> +#include <linux/mfd/tps6507x.h> +#include <linux/input/tps6507x-ts.h> +#include <linux/delay.h> + +#define TSC_DEFAULT_POLL_PERIOD 30 /* ms */ +#define TPS_DEFAULT_MIN_PRESSURE 0x30 +#define MAX_10BIT ((1 << 10) - 1) + +#define TPS6507X_ADCONFIG_CONVERT_TS (TPS6507X_ADCONFIG_AD_ENABLE | \ + TPS6507X_ADCONFIG_START_CONVERSION | \ + TPS6507X_ADCONFIG_INPUT_REAL_TSC) +#define TPS6507X_ADCONFIG_POWER_DOWN_TS (TPS6507X_ADCONFIG_INPUT_REAL_TSC) + +struct ts_event { + u16 x; + u16 y; + u16 pressure; +}; + +struct tps6507x_ts { + struct input_dev *input_dev; + struct device *dev; + char phys[32]; + struct workqueue_struct *wq; + struct delayed_work work; + unsigned polling; /* polling is active */ + struct ts_event tc; + struct tps6507x_dev *mfd; + u16 model; + unsigned pendown; + int irq; + void (*clear_penirq)(void); + unsigned long poll_period; /* ms */ + u16 min_pressure; + int vref; /* non-zero to leave vref on */ +}; + +static int tps6507x_read_u8(struct tps6507x_ts *tsc, u8 reg, u8 *data) +{ + int err; + + err = tsc->mfd->read_dev(tsc->mfd, reg, 1, data); + + if (err) + return err; + + return 0; +} + +static int tps6507x_write_u8(struct tps6507x_ts *tsc, u8 reg, u8 data) +{ + return tsc->mfd->write_dev(tsc->mfd, reg, 1, &data); +} + +static s32 tps6507x_adc_conversion(struct tps6507x_ts *tsc, + u8 tsc_mode, u16 *value) +{ + s32 ret; + u8 adc_status; + u8 result; + + /* Route input signal to A/D converter */ + + ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, tsc_mode); + if (ret) { + dev_err(tsc->dev, "TSC mode read failed\n"); + goto err; + } + + /* Start A/D conversion */ + + ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG, + TPS6507X_ADCONFIG_CONVERT_TS); + if (ret) { + dev_err(tsc->dev, "ADC config write failed\n"); + return ret; + } + + do { + ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADCONFIG, + &adc_status); + if (ret) { + dev_err(tsc->dev, "ADC config read failed\n"); + goto err; + } + } while (adc_status & TPS6507X_ADCONFIG_START_CONVERSION); + + ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_2, &result); + if (ret) { + dev_err(tsc->dev, "ADC result 2 read failed\n"); + goto err; + } + + *value = (result & TPS6507X_REG_ADRESULT_2_MASK) << 8; + + ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_1, &result); + if (ret) { + dev_err(tsc->dev, "ADC result 1 read failed\n"); + goto err; + } + + *value |= result; + + dev_dbg(tsc->dev, "TSC channel %d = 0x%X\n", tsc_mode, *value); + +err: + return ret; +} + +/* Need to call tps6507x_adc_standby() after using A/D converter for the + * touch screen interrupt to work properly. 
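+ * Standby reprograms ADCONFIG and TSCMODE and then polls the TSC
+ * interrupt bit until it clears, as the loop in the function below
+ * shows.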
+ */ + +static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc) +{ + s32 ret; + u8 val; + + ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG, + TPS6507X_ADCONFIG_INPUT_TSC); + if (ret) + return ret; + + ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, + TPS6507X_TSCMODE_STANDBY); + if (ret) + return ret; + + ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val); + if (ret) + return ret; + + while (val & TPS6507X_REG_TSC_INT) { + msleep(10); + ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val); + if (ret) + return ret; + } + + return ret; +} + +static void tps6507x_ts_handler(struct work_struct *work) +{ + struct tps6507x_ts *tsc = container_of(work, + struct tps6507x_ts, work.work); + struct input_dev *input_dev = tsc->input_dev; + int pendown; + int schd; + int poll = 0; + s32 ret; + + ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_PRESSURE, + &tsc->tc.pressure); + if (ret) + goto done; + + pendown = tsc->tc.pressure > tsc->min_pressure; + + if (unlikely(!pendown && tsc->pendown)) { + dev_dbg(tsc->dev, "UP\n"); + input_report_key(input_dev, BTN_TOUCH, 0); + input_report_abs(input_dev, ABS_PRESSURE, 0); + input_sync(input_dev); + tsc->pendown = 0; + } + + if (pendown) { + + if (!tsc->pendown) { + dev_dbg(tsc->dev, "DOWN\n"); + input_report_key(input_dev, BTN_TOUCH, 1); + } else + dev_dbg(tsc->dev, "still down\n"); + + ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_X_POSITION, + &tsc->tc.x); + if (ret) + goto done; + + ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_Y_POSITION, + &tsc->tc.y); + if (ret) + goto done; + + input_report_abs(input_dev, ABS_X, tsc->tc.x); + input_report_abs(input_dev, ABS_Y, tsc->tc.y); + input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure); + input_sync(input_dev); + tsc->pendown = 1; + poll = 1; + } + +done: + /* always poll if not using interrupts */ + poll = 1; + + if (poll) { + schd = queue_delayed_work(tsc->wq, &tsc->work, + tsc->poll_period * HZ / 1000); + if (schd) + tsc->polling = 1; + else { + tsc->polling = 0; + dev_err(tsc->dev, "re-schedule failed\n"); + } + } else + tsc->polling = 0; + + tps6507x_adc_standby(tsc); +} + +static int tps6507x_ts_probe(struct platform_device *pdev) +{ + int error; + struct tps6507x_ts *tsc; + struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); + struct touchscreen_init_data *init_data; + struct input_dev *input_dev; + struct tps6507x_board *tps_board; + int schd; + + /* + * tps_board points to pmic related constants + * coming from the board-evm file. + */ + + tps_board = (struct tps6507x_board *)tps6507x_dev->dev->platform_data; + + if (!tps_board) { + dev_err(tps6507x_dev->dev, + "Could not find tps6507x platform data\n"); + return -EIO; + } + + /* + * init_data points to the touchscreen configuration + * coming from the board-evm file.
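+ * + * A board file would typically provide something like the following + * (hypothetical, illustrative values; poll_period is in ms): + * + *	static struct touchscreen_init_data tps6507x_ts_data = { + *		.poll_period = 30, + *		.min_pressure = 0x30, + *		.vref = 0, + *	};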
+ */ + + init_data = tps_board->tps6507x_ts_init_data; + + tsc = kzalloc(sizeof(struct tps6507x_ts), GFP_KERNEL); + if (!tsc) { + dev_err(tps6507x_dev->dev, "failed to allocate driver data\n"); + error = -ENOMEM; + goto err0; + } + + tps6507x_dev->ts = tsc; + tsc->mfd = tps6507x_dev; + tsc->dev = tps6507x_dev->dev; + input_dev = input_allocate_device(); + if (!input_dev) { + dev_err(tsc->dev, "Failed to allocate input device.\n"); + error = -ENOMEM; + goto err1; + } + + input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); + input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); + + input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0); + input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0); + input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0); + + input_dev->name = "TPS6507x Touchscreen"; + input_dev->id.bustype = BUS_I2C; + input_dev->dev.parent = tsc->dev; + + snprintf(tsc->phys, sizeof(tsc->phys), + "%s/input0", dev_name(tsc->dev)); + input_dev->phys = tsc->phys; + + dev_dbg(tsc->dev, "device: %s\n", input_dev->phys); + + input_set_drvdata(input_dev, tsc); + + tsc->input_dev = input_dev; + + INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler); + tsc->wq = create_workqueue("TPS6507x Touchscreen"); + + if (init_data) { + tsc->poll_period = init_data->poll_period; + tsc->vref = init_data->vref; + tsc->min_pressure = init_data->min_pressure; + input_dev->id.vendor = init_data->vendor; + input_dev->id.product = init_data->product; + input_dev->id.version = init_data->version; + } else { + tsc->poll_period = TSC_DEFAULT_POLL_PERIOD; + tsc->min_pressure = TPS_DEFAULT_MIN_PRESSURE; + } + + error = tps6507x_adc_standby(tsc); + if (error) + goto err2; + + error = input_register_device(input_dev); + if (error) + goto err2; + + schd = queue_delayed_work(tsc->wq, &tsc->work, + tsc->poll_period * HZ / 1000); + + if (schd) + tsc->polling = 1; + else { + tsc->polling = 0; + dev_err(tsc->dev, "schedule failed\n"); + error = -ENOMEM; + goto err3; + } + + return 0; + +err3: + cancel_delayed_work(&tsc->work); + flush_workqueue(tsc->wq); + destroy_workqueue(tsc->wq); + tsc->wq = NULL; + input_unregister_device(input_dev); + goto err1; +err2: + cancel_delayed_work(&tsc->work); + flush_workqueue(tsc->wq); + destroy_workqueue(tsc->wq); + tsc->wq = NULL; + input_free_device(input_dev); +err1: + kfree(tsc); + tps6507x_dev->ts = NULL; +err0: + return error; +} + +static int __devexit tps6507x_ts_remove(struct platform_device *pdev) +{ + struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); + struct tps6507x_ts *tsc = tps6507x_dev->ts; + + if (!tsc) + return 0; + + cancel_delayed_work(&tsc->work); + flush_workqueue(tsc->wq); + destroy_workqueue(tsc->wq); + tsc->wq = NULL; + + input_unregister_device(tsc->input_dev); + + tps6507x_dev->ts = NULL; + kfree(tsc); + + return 0; +} + +static struct platform_driver tps6507x_ts_driver = { + .driver = { + .name = "tps6507x-ts", + .owner = THIS_MODULE, + }, + .probe = tps6507x_ts_probe, + .remove = __devexit_p(tps6507x_ts_remove), +}; + +static int __init tps6507x_ts_init(void) +{ + return platform_driver_register(&tps6507x_ts_driver); +} +module_init(tps6507x_ts_init); + +static void __exit tps6507x_ts_exit(void) +{ + platform_driver_unregister(&tps6507x_ts_driver); +} +module_exit(tps6507x_ts_exit); + +MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>"); +MODULE_DESCRIPTION("TPS6507x - TouchScreen driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:tps6507x-ts"); diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 29a8bbf3f086..567d57215c28 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ 
b/drivers/input/touchscreen/usbtouchscreen.c @@ -857,6 +857,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt) if ((pkt[0] & 0xe0) != 0xe0) return 0; + if (be16_to_cpu(packet->data_len) > 0xff) + packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100); + if (be16_to_cpu(packet->x_len) > 0xff) + packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80); + /* send ACK */ ret = usb_submit_urb(priv->ack, GFP_ATOMIC); @@ -1112,7 +1117,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = { #ifdef CONFIG_TOUCHSCREEN_USB_NEXIO [DEVTYPE_NEXIO] = { - .rept_size = 128, + .rept_size = 1024, .irq_always = true, .read_data = nexio_read_data, .init = nexio_init, diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index ee5837522f5a..0cabe31f26df 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -787,8 +787,7 @@ capi_poll(struct file *file, poll_table * wait) } static int -capi_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +capi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct capidev *cdev = file->private_data; capi_ioctl_struct data; @@ -981,6 +980,18 @@ register_out: } } +static long +capi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = capi_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + static int capi_open(struct inode *inode, struct file *file) { struct capidev *cdev; @@ -1026,7 +1037,7 @@ static const struct file_operations capi_fops = .read = capi_read, .write = capi_write, .poll = capi_poll, - .ioctl = capi_ioctl, + .unlocked_ioctl = capi_unlocked_ioctl, .open = capi_open, .release = capi_release, }; diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index bd00dceacaf0..bde3c88b8b27 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -1147,6 +1147,12 @@ load_unlock_out: if (ctr->state == CAPI_CTR_DETECTED) goto reset_unlock_out; + if (ctr->reset_ctr == NULL) { + printk(KERN_DEBUG "kcapi: reset: no reset function\n"); + retval = -ESRCH; + goto reset_unlock_out; + } + ctr->reset_ctr(ctr); retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED); diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index 964a55fb1486..8f78f15c8ef7 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -170,17 +170,6 @@ static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param, } /* - * convert hex to binary - */ -static inline u8 hex2bin(char c) -{ - int result = c & 0x0f; - if (c & 0x40) - result += 9; - return result; -} - -/* * convert an IE from Gigaset hex string to ETSI binary representation * including length byte * return value: result length, -1 on error @@ -191,7 +180,7 @@ static int encode_ie(char *in, u8 *out, int maxlen) while (*in) { if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen) return -1; - out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]); + out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]); in += 2; } out[0] = l; @@ -933,30 +922,6 @@ void gigaset_isdn_stop(struct cardstate *cs) */ /* - * load firmware - */ -static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data) -{ - struct cardstate *cs = ctr->driverdata; - - /* AVM specific operation, not needed for Gigaset -- ignore */ - dev_notice(cs->dev, "load_firmware ignored\n"); - - return 0; -} - -/* - * reset (deactivate) controller - */ -static void gigaset_reset_ctr(struct capi_ctr *ctr) -{ - 
struct cardstate *cs = ctr->driverdata; - - /* AVM specific operation, not needed for Gigaset -- ignore */ - dev_notice(cs->dev, "reset_ctr ignored\n"); -} - -/* * register CAPI application */ static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl, @@ -2213,8 +2178,8 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) iif->ctr.driverdata = cs; strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name)); iif->ctr.driver_name = "gigaset"; - iif->ctr.load_firmware = gigaset_load_firmware; - iif->ctr.reset_ctr = gigaset_reset_ctr; + iif->ctr.load_firmware = NULL; + iif->ctr.reset_ctr = NULL; iif->ctr.register_appl = gigaset_register_appl; iif->ctr.release_appl = gigaset_release_appl; iif->ctr.send_message = gigaset_send_message; diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 70044ee4b228..a44cdb492ea9 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1272,9 +1272,9 @@ isdn_poll(struct file *file, poll_table * wait) static int -isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) +isdn_ioctl(struct file *file, uint cmd, ulong arg) { - uint minor = iminor(inode); + uint minor = iminor(file->f_path.dentry->d_inode); isdn_ctrl c; int drvidx; int chidx; @@ -1722,6 +1722,18 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) #undef cfg } +static long +isdn_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = isdn_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + /* * Open the device code. */ @@ -1838,7 +1850,7 @@ static const struct file_operations isdn_fops = .read = isdn_read, .write = isdn_write, .poll = isdn_poll, - .ioctl = isdn_ioctl, + .unlocked_ioctl = isdn_unlocked_ioctl, .open = isdn_open, .release = isdn_close, }; diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 8785004e85e0..81048b8ed8ad 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -24,6 +24,7 @@ #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mISDNif.h> +#include <linux/smp_lock.h> #include "core.h" static u_int *debug; @@ -97,8 +98,6 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off) if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__, filep, buf, (int)count, off); - if (*off != filep->f_pos) - return -ESPIPE; if (list_empty(&dev->expired) && (dev->work == 0)) { if (filep->f_flags & O_NONBLOCK) @@ -215,9 +214,8 @@ unlock: return ret; } -static int -mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, - unsigned long arg) +static long +mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct mISDNtimerdev *dev = filep->private_data; int id, tout, ret = 0; @@ -226,6 +224,7 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__, filep, cmd, arg); + lock_kernel(); switch (cmd) { case IMADDTIMER: if (get_user(tout, (int __user *)arg)) { @@ -257,13 +256,14 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, default: ret = -EINVAL; } + unlock_kernel(); return ret; } static const struct file_operations mISDN_fops = { .read = mISDN_read, .poll = mISDN_poll, - .ioctl = mISDN_ioctl, + .unlocked_ioctl = mISDN_ioctl, .open = mISDN_open, .release = mISDN_close, }; diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 505eb64c329c..81bf25e67ce1 
100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -21,7 +21,7 @@ comment "LED drivers" config LEDS_88PM860X tristate "LED Support for Marvell 88PM860x PMIC" - depends on LEDS_CLASS && MFD_88PM860X + depends on MFD_88PM860X help This option enables support for on-chip LED drivers found on Marvell Semiconductor 88PM8606 PMIC. @@ -67,6 +67,16 @@ config LEDS_NET48XX This option enables support for the Soekris net4801 and net4826 error LED. +config LEDS_NET5501 + tristate "LED Support for Soekris net5501 series Error LED" + depends on LEDS_TRIGGERS + depends on X86 && LEDS_GPIO_PLATFORM && GPIO_CS5535 + select LEDS_TRIGGER_DEFAULT_ON + default n + help + Add support for the Soekris net5501 board (detection, error LED + and GPIO). + config LEDS_FSG tristate "LED Support for the Freecom FSG-3" depends on MACH_FSG @@ -285,6 +295,13 @@ config LEDS_DELL_NETBOOKS This adds support for the Latitude 2100 and similar notebooks that have an external LED. +config LEDS_MC13783 + tristate "LED Support for MC13783 PMIC" + depends on MFD_MC13783 + help + This option enables support for the on-chip LED drivers found + on the Freescale Semiconductor MC13783 PMIC. + config LEDS_TRIGGERS bool "LED Trigger support" help diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 0cd8b9957380..2493de499374 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o +obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o obj-$(CONFIG_LEDS_H1940) += leds-h1940.o @@ -35,6 +36,7 @@ obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o +obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 69e7d86a5143..260660076507 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -74,7 +74,7 @@ static ssize_t led_max_brightness_show(struct device *dev, static struct device_attribute led_class_attrs[] = { __ATTR(brightness, 0644, led_brightness_show, led_brightness_store), - __ATTR(max_brightness, 0644, led_max_brightness_show, NULL), + __ATTR(max_brightness, 0444, led_max_brightness_show, NULL), #ifdef CONFIG_LEDS_TRIGGERS __ATTR(trigger, 0644, led_trigger_show, led_trigger_store), #endif diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index 16a60c06c96c..b7677106cff8 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c @@ -256,8 +256,10 @@ static int pm860x_led_probe(struct platform_device *pdev) if (pdev->dev.parent->platform_data) { pm860x_pdata = pdev->dev.parent->platform_data; pdata = pm860x_pdata->led; - } else - pdata = NULL; + } else { + dev_err(&pdev->dev, "missing platform data\n"); + return -EINVAL; + } data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL); if (data == NULL) @@ -268,8 +270,11 @@ static int pm860x_led_probe(struct platform_device *pdev) data->i2c = (chip->id == CHIP_PM8606) ?
chip->client : chip->companion; data->iset = pdata->iset; data->port = __check_device(pdata, data->name); - if (data->port < 0) + if (data->port < 0) { + dev_err(&pdev->dev, "check device failed\n"); + kfree(data); return -EINVAL; + } data->current_brightness = 0; data->cdev.name = data->name; diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index c6e4b772b757..cc22eeefa10b 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -26,7 +26,8 @@ struct gpio_led_data { u8 new_level; u8 can_sleep; u8 active_low; - int (*platform_gpio_blink_set)(unsigned gpio, + u8 blinking; + int (*platform_gpio_blink_set)(unsigned gpio, int state, unsigned long *delay_on, unsigned long *delay_off); }; @@ -35,7 +36,13 @@ static void gpio_led_work(struct work_struct *work) struct gpio_led_data *led_dat = container_of(work, struct gpio_led_data, work); - gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level); + if (led_dat->blinking) { + led_dat->platform_gpio_blink_set(led_dat->gpio, + led_dat->new_level, + NULL, NULL); + led_dat->blinking = 0; + } else + gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level); } static void gpio_led_set(struct led_classdev *led_cdev, @@ -60,8 +67,14 @@ static void gpio_led_set(struct led_classdev *led_cdev, if (led_dat->can_sleep) { led_dat->new_level = level; schedule_work(&led_dat->work); - } else - gpio_set_value(led_dat->gpio, level); + } else { + if (led_dat->blinking) { + led_dat->platform_gpio_blink_set(led_dat->gpio, level, + NULL, NULL); + led_dat->blinking = 0; + } else + gpio_set_value(led_dat->gpio, level); + } } static int gpio_blink_set(struct led_classdev *led_cdev, @@ -70,12 +83,14 @@ static int gpio_blink_set(struct led_classdev *led_cdev, struct gpio_led_data *led_dat = container_of(led_cdev, struct gpio_led_data, cdev); - return led_dat->platform_gpio_blink_set(led_dat->gpio, delay_on, delay_off); + led_dat->blinking = 1; + return led_dat->platform_gpio_blink_set(led_dat->gpio, GPIO_LED_BLINK, + delay_on, delay_off); } static int __devinit create_gpio_led(const struct gpio_led *template, struct gpio_led_data *led_dat, struct device *parent, - int (*blink_set)(unsigned, unsigned long *, unsigned long *)) + int (*blink_set)(unsigned, int, unsigned long *, unsigned long *)) { int ret, state; @@ -97,6 +112,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template, led_dat->gpio = template->gpio; led_dat->can_sleep = gpio_cansleep(template->gpio); led_dat->active_low = template->active_low; + led_dat->blinking = 0; if (blink_set) { led_dat->platform_gpio_blink_set = blink_set; led_dat->cdev.blink_set = gpio_blink_set; @@ -113,7 +129,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template, ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); if (ret < 0) goto err; - + INIT_WORK(&led_dat->work, gpio_led_work); ret = led_classdev_register(parent, &led_dat->cdev); @@ -211,7 +227,7 @@ struct gpio_led_of_platform_data { static int __devinit of_gpio_leds_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = ofdev->node, *child; + struct device_node *np = ofdev->dev.of_node, *child; struct gpio_led_of_platform_data *pdata; int count = 0, ret; @@ -291,8 +307,8 @@ static struct of_platform_driver of_gpio_leds_driver = { .driver = { .name = "of_gpio_leds", .owner = THIS_MODULE, + .of_match_table = of_gpio_leds_match, }, - .match_table = of_gpio_leds_match, .probe = of_gpio_leds_probe, .remove = __devexit_p(of_gpio_leds_remove), }; diff --git 
a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c index 8d5ecceba181..932a58da76c4 100644 --- a/drivers/leds/leds-lp3944.c +++ b/drivers/leds/leds-lp3944.c @@ -379,6 +379,7 @@ static int __devinit lp3944_probe(struct i2c_client *client, { struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data; struct lp3944_data *data; + int err; if (lp3944_pdata == NULL) { dev_err(&client->dev, "no platform data\n"); @@ -401,9 +402,13 @@ static int __devinit lp3944_probe(struct i2c_client *client, mutex_init(&data->lock); - dev_info(&client->dev, "lp3944 enabled\n"); + err = lp3944_configure(client, data, lp3944_pdata); + if (err < 0) { + kfree(data); + return err; + } - lp3944_configure(client, data, lp3944_pdata); + dev_info(&client->dev, "lp3944 enabled\n"); return 0; } diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c new file mode 100644 index 000000000000..f05bb08d0f09 --- /dev/null +++ b/drivers/leds/leds-mc13783.c @@ -0,0 +1,403 @@ +/* + * LEDs driver for Freescale MC13783 + * + * Copyright (C) 2010 Philippe Rétornaz + * + * Based on leds-da903x: + * Copyright (C) 2008 Compulab, Ltd. + * Mike Rapoport <mike@compulab.co.il> + * + * Copyright (C) 2006-2008 Marvell International Ltd. + * Eric Miao <eric.miao@marvell.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/leds.h> +#include <linux/workqueue.h> +#include <linux/mfd/mc13783.h> +#include <linux/slab.h> + +struct mc13783_led { + struct led_classdev cdev; + struct work_struct work; + struct mc13783 *master; + enum led_brightness new_brightness; + int id; +}; + +#define MC13783_REG_LED_CONTROL_0 51 +#define MC13783_LED_C0_ENABLE_BIT (1 << 0) +#define MC13783_LED_C0_TRIODE_MD_BIT (1 << 7) +#define MC13783_LED_C0_TRIODE_AD_BIT (1 << 8) +#define MC13783_LED_C0_TRIODE_KP_BIT (1 << 9) +#define MC13783_LED_C0_BOOST_BIT (1 << 10) +#define MC13783_LED_C0_ABMODE_MASK 0x7 +#define MC13783_LED_C0_ABMODE 11 +#define MC13783_LED_C0_ABREF_MASK 0x3 +#define MC13783_LED_C0_ABREF 14 + +#define MC13783_REG_LED_CONTROL_1 52 +#define MC13783_LED_C1_TC1HALF_BIT (1 << 18) + +#define MC13783_REG_LED_CONTROL_2 53 +#define MC13783_LED_C2_BL_P_MASK 0xf +#define MC13783_LED_C2_MD_P 9 +#define MC13783_LED_C2_AD_P 13 +#define MC13783_LED_C2_KP_P 17 +#define MC13783_LED_C2_BL_C_MASK 0x7 +#define MC13783_LED_C2_MD_C 0 +#define MC13783_LED_C2_AD_C 3 +#define MC13783_LED_C2_KP_C 6 + +#define MC13783_REG_LED_CONTROL_3 54 +#define MC13783_LED_C3_TC_P 6 +#define MC13783_LED_C3_TC_P_MASK 0x1f + +#define MC13783_REG_LED_CONTROL_4 55 +#define MC13783_REG_LED_CONTROL_5 56 + +#define MC13783_LED_Cx_PERIOD 21 +#define MC13783_LED_Cx_PERIOD_MASK 0x3 +#define MC13783_LED_Cx_SLEWLIM_BIT (1 << 23) +#define MC13783_LED_Cx_TRIODE_TC_BIT (1 << 23) +#define MC13783_LED_Cx_TC_C_MASK 0x3 + +static void mc13783_led_work(struct work_struct *work) +{ + struct mc13783_led *led = container_of(work, struct mc13783_led, work); + int reg = 0; + int mask = 0; + int value = 0; + int bank, off, shift; + + switch (led->id) { + case MC13783_LED_MD: + reg = MC13783_REG_LED_CONTROL_2; + mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_MD_P; + value = (led->new_brightness >> 4) << MC13783_LED_C2_MD_P; + break; + case MC13783_LED_AD: + reg = MC13783_REG_LED_CONTROL_2; + mask = 
MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_AD_P; + value = (led->new_brightness >> 4) << MC13783_LED_C2_AD_P; + break; + case MC13783_LED_KP: + reg = MC13783_REG_LED_CONTROL_2; + mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_KP_P; + value = (led->new_brightness >> 4) << MC13783_LED_C2_KP_P; + break; + case MC13783_LED_R1: + case MC13783_LED_G1: + case MC13783_LED_B1: + case MC13783_LED_R2: + case MC13783_LED_G2: + case MC13783_LED_B2: + case MC13783_LED_R3: + case MC13783_LED_G3: + case MC13783_LED_B3: + off = led->id - MC13783_LED_R1; + bank = off/3; + reg = MC13783_REG_LED_CONTROL_3 + off/3; + shift = (off - bank * 3) * 5 + MC13783_LED_C3_TC_P; + value = (led->new_brightness >> 3) << shift; + mask = MC13783_LED_C3_TC_P_MASK << shift; + break; + } + + mc13783_lock(led->master); + + mc13783_reg_rmw(led->master, reg, mask, value); + + mc13783_unlock(led->master); +} + +static void mc13783_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct mc13783_led *led; + + led = container_of(led_cdev, struct mc13783_led, cdev); + led->new_brightness = value; + schedule_work(&led->work); +} + +static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current) +{ + int shift = 0; + int mask = 0; + int value = 0; + int reg = 0; + int ret, bank; + + switch (led->id) { + case MC13783_LED_MD: + shift = MC13783_LED_C2_MD_C; + mask = MC13783_LED_C2_BL_C_MASK; + value = max_current & MC13783_LED_C2_BL_C_MASK; + reg = MC13783_REG_LED_CONTROL_2; + break; + case MC13783_LED_AD: + shift = MC13783_LED_C2_AD_C; + mask = MC13783_LED_C2_BL_C_MASK; + value = max_current & MC13783_LED_C2_BL_C_MASK; + reg = MC13783_REG_LED_CONTROL_2; + break; + case MC13783_LED_KP: + shift = MC13783_LED_C2_KP_C; + mask = MC13783_LED_C2_BL_C_MASK; + value = max_current & MC13783_LED_C2_BL_C_MASK; + reg = MC13783_REG_LED_CONTROL_2; + break; + case MC13783_LED_R1: + case MC13783_LED_G1: + case MC13783_LED_B1: + case MC13783_LED_R2: + case MC13783_LED_G2: + case MC13783_LED_B2: + case MC13783_LED_R3: + case MC13783_LED_G3: + case MC13783_LED_B3: + bank = (led->id - MC13783_LED_R1)/3; + reg = MC13783_REG_LED_CONTROL_3 + bank; + shift = ((led->id - MC13783_LED_R1) - bank * 3) * 2; + mask = MC13783_LED_Cx_TC_C_MASK; + value = max_current & MC13783_LED_Cx_TC_C_MASK; + break; + } + + mc13783_lock(led->master); + + ret = mc13783_reg_rmw(led->master, reg, mask << shift, + value << shift); + + mc13783_unlock(led->master); + return ret; +} + +static int __devinit mc13783_leds_prepare(struct platform_device *pdev) +{ + struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); + int ret = 0; + int reg = 0; + + mc13783_lock(dev); + + if (pdata->flags & MC13783_LED_TC1HALF) + reg |= MC13783_LED_C1_TC1HALF_BIT; + + if (pdata->flags & MC13783_LED_SLEWLIMTC) + reg |= MC13783_LED_Cx_SLEWLIM_BIT; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, reg); + if (ret) + goto out; + + reg = (pdata->bl_period & MC13783_LED_Cx_PERIOD_MASK) << + MC13783_LED_Cx_PERIOD; + + if (pdata->flags & MC13783_LED_SLEWLIMBL) + reg |= MC13783_LED_Cx_SLEWLIM_BIT; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, reg); + if (ret) + goto out; + + reg = (pdata->tc1_period & MC13783_LED_Cx_PERIOD_MASK) << + MC13783_LED_Cx_PERIOD; + + if (pdata->flags & MC13783_LED_TRIODE_TC1) + reg |= MC13783_LED_Cx_TRIODE_TC_BIT; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, reg); + if (ret) + goto out; + + reg = (pdata->tc2_period & 
MC13783_LED_Cx_PERIOD_MASK) << + MC13783_LED_Cx_PERIOD; + + if (pdata->flags & MC13783_LED_TRIODE_TC2) + reg |= MC13783_LED_Cx_TRIODE_TC_BIT; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, reg); + if (ret) + goto out; + + reg = (pdata->tc3_period & MC13783_LED_Cx_PERIOD_MASK) << + MC13783_LED_Cx_PERIOD; + + if (pdata->flags & MC13783_LED_TRIODE_TC3) + reg |= MC13783_LED_Cx_TRIODE_TC_BIT; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, reg); + if (ret) + goto out; + + reg = MC13783_LED_C0_ENABLE_BIT; + if (pdata->flags & MC13783_LED_TRIODE_MD) + reg |= MC13783_LED_C0_TRIODE_MD_BIT; + if (pdata->flags & MC13783_LED_TRIODE_AD) + reg |= MC13783_LED_C0_TRIODE_AD_BIT; + if (pdata->flags & MC13783_LED_TRIODE_KP) + reg |= MC13783_LED_C0_TRIODE_KP_BIT; + if (pdata->flags & MC13783_LED_BOOST_EN) + reg |= MC13783_LED_C0_BOOST_BIT; + + reg |= (pdata->abmode & MC13783_LED_C0_ABMODE_MASK) << + MC13783_LED_C0_ABMODE; + reg |= (pdata->abref & MC13783_LED_C0_ABREF_MASK) << + MC13783_LED_C0_ABREF; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, reg); + +out: + mc13783_unlock(dev); + return ret; +} + +static int __devinit mc13783_led_probe(struct platform_device *pdev) +{ + struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct mc13783_led_platform_data *led_cur; + struct mc13783_led *led, *led_dat; + int ret, i; + int init_led = 0; + + if (pdata == NULL) { + dev_err(&pdev->dev, "missing platform data\n"); + return -ENODEV; + } + + if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) { + dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds); + return -EINVAL; + } + + led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL); + if (led == NULL) { + dev_err(&pdev->dev, "failed to alloc memory\n"); + return -ENOMEM; + } + + ret = mc13783_leds_prepare(pdev); + if (ret) { + dev_err(&pdev->dev, "unable to init led driver\n"); + goto err_free; + } + + for (i = 0; i < pdata->num_leds; i++) { + led_dat = &led[i]; + led_cur = &pdata->led[i]; + + if (led_cur->id > MC13783_LED_MAX || led_cur->id < 0) { + dev_err(&pdev->dev, "invalid id %d\n", led_cur->id); + ret = -EINVAL; + goto err_register; + } + + if (init_led & (1 << led_cur->id)) { + dev_err(&pdev->dev, "led %d already initialized\n", + led_cur->id); + ret = -EINVAL; + goto err_register; + } + + init_led |= 1 << led_cur->id; + led_dat->cdev.name = led_cur->name; + led_dat->cdev.default_trigger = led_cur->default_trigger; + led_dat->cdev.brightness_set = mc13783_led_set; + led_dat->cdev.brightness = LED_OFF; + led_dat->id = led_cur->id; + led_dat->master = dev_get_drvdata(pdev->dev.parent); + + INIT_WORK(&led_dat->work, mc13783_led_work); + + ret = led_classdev_register(pdev->dev.parent, &led_dat->cdev); + if (ret) { + dev_err(&pdev->dev, "failed to register led %d\n", + led_dat->id); + goto err_register; + } + + ret = mc13783_led_setup(led_dat, led_cur->max_current); + if (ret) { + dev_err(&pdev->dev, "unable to init led %d\n", + led_dat->id); + i++; + goto err_register; + } + } + + platform_set_drvdata(pdev, led); + return 0; + +err_register: + for (i = i - 1; i >= 0; i--) { + led_classdev_unregister(&led[i].cdev); + cancel_work_sync(&led[i].work); + } + +err_free: + kfree(led); + return ret; +} + +static int __devexit mc13783_led_remove(struct platform_device *pdev) +{ + struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct mc13783_led *led = platform_get_drvdata(pdev); + struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); + int i; + + for (i = 0;
i < pdata->num_leds; i++) { + led_classdev_unregister(&led[i].cdev); + cancel_work_sync(&led[i].work); + } + + mc13783_lock(dev); + + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, 0); + + mc13783_unlock(dev); + + kfree(led); + return 0; +} + +static struct platform_driver mc13783_led_driver = { + .driver = { + .name = "mc13783-led", + .owner = THIS_MODULE, + }, + .probe = mc13783_led_probe, + .remove = __devexit_p(mc13783_led_remove), +}; + +static int __init mc13783_led_init(void) +{ + return platform_driver_register(&mc13783_led_driver); +} +module_init(mc13783_led_init); + +static void __exit mc13783_led_exit(void) +{ + platform_driver_unregister(&mc13783_led_driver); +} +module_exit(mc13783_led_exit); + +MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC"); +MODULE_AUTHOR("Philippe Retornaz <philippe.retornaz@epfl.ch>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mc13783-led"); diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c new file mode 100644 index 000000000000..3063f591f0dc --- /dev/null +++ b/drivers/leds/leds-net5501.c @@ -0,0 +1,94 @@ +/* + * Soekris board support code + * + * Copyright (C) 2008-2009 Tower Technologies + * Written by Alessandro Zummo <a.zummo@towertech.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/string.h> +#include <linux/leds.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> + +#include <asm/geode.h> + +static struct gpio_led net5501_leds[] = { + { + .name = "error", + .gpio = 6, + .default_trigger = "default-on", + }, +}; + +static struct gpio_led_platform_data net5501_leds_data = { + .num_leds = ARRAY_SIZE(net5501_leds), + .leds = net5501_leds, +}; + +static struct platform_device net5501_leds_dev = { + .name = "leds-gpio", + .id = -1, + .dev.platform_data = &net5501_leds_data, +}; + +static void __init init_net5501(void) +{ + platform_device_register(&net5501_leds_dev); +} + +struct soekris_board { + u16 offset; + char *sig; + u8 len; + void (*init)(void); +}; + +static struct soekris_board __initdata boards[] = { + { 0xb7b, "net5501", 7, init_net5501 }, /* net5501 v1.33/1.33c */ + { 0xb1f, "net5501", 7, init_net5501 }, /* net5501 v1.32i */ +}; + +static int __init soekris_init(void) +{ + int i; + unsigned char *rombase, *bios; + + if (!is_geode()) + return 0; + + rombase = ioremap(0xffff0000, 0xffff); + if (!rombase) { + printk(KERN_INFO "Soekris net5501 LED driver failed to get rombase\n"); + return 0; + } + + bios = rombase + 0x20; /* null terminated */ + + if (strncmp(bios, "comBIOS", 7)) + goto unmap; + + for (i = 0; i < ARRAY_SIZE(boards); i++) { + unsigned char *model = rombase + boards[i].offset; + + if (strncmp(model, boards[i].sig, boards[i].len) == 0) { + printk(KERN_INFO "Soekris %s: %s\n", model, bios); + + if (boards[i].init) + boards[i].init(); + break; + } + } + +unmap: + iounmap(rombase); + return 0; +} + +arch_initcall(soekris_init); diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index 51477ec71391..a688293abd0b 100644 ---
a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -534,7 +534,7 @@ static int __init nas_gpio_init(void) set_power_light_amber_noblink(); return 0; out_err: - for (; i >= 0; i--) + for (i--; i >= 0; i--) unregister_nasgpio_led(i); pci_unregister_driver(&nas_gpio_pci_driver); return ret; diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 26a303a1d1ab..97147804a49c 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -39,14 +39,12 @@ static struct macio_chip *macio_on_hold; static int macio_bus_match(struct device *dev, struct device_driver *drv) { - struct macio_dev * macio_dev = to_macio_device(dev); - struct macio_driver * macio_drv = to_macio_driver(drv); - const struct of_device_id * matches = macio_drv->match_table; + const struct of_device_id * matches = drv->of_match_table; if (!matches) return 0; - return of_match_device(matches, &macio_dev->ofdev) != NULL; + return of_match_device(matches, dev) != NULL; } struct macio_dev *macio_dev_get(struct macio_dev *dev) @@ -84,7 +82,7 @@ static int macio_device_probe(struct device *dev) macio_dev_get(macio_dev); - match = of_match_device(drv->match_table, &macio_dev->ofdev); + match = of_match_device(drv->driver.of_match_table, dev); if (match) error = drv->probe(macio_dev, match); if (error) @@ -248,7 +246,7 @@ static void macio_create_fixup_irq(struct macio_dev *dev, int index, static void macio_add_missing_resources(struct macio_dev *dev) { - struct device_node *np = dev->ofdev.node; + struct device_node *np = dev->ofdev.dev.of_node; unsigned int irq_base; /* Gatwick has some missing interrupts on child nodes */ @@ -289,7 +287,7 @@ static void macio_add_missing_resources(struct macio_dev *dev) static void macio_setup_interrupts(struct macio_dev *dev) { - struct device_node *np = dev->ofdev.node; + struct device_node *np = dev->ofdev.dev.of_node; unsigned int irq; int i = 0, j = 0; @@ -317,7 +315,7 @@ static void macio_setup_interrupts(struct macio_dev *dev) static void macio_setup_resources(struct macio_dev *dev, struct resource *parent_res) { - struct device_node *np = dev->ofdev.node; + struct device_node *np = dev->ofdev.dev.of_node; struct resource r; int index; @@ -373,9 +371,9 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, dev->bus = &chip->lbus; dev->media_bay = in_bay; - dev->ofdev.node = np; - dev->ofdev.dma_mask = 0xffffffffUL; - dev->ofdev.dev.dma_mask = &dev->ofdev.dma_mask; + dev->ofdev.dev.of_node = np; + dev->ofdev.archdata.dma_mask = 0xffffffffUL; + dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask; dev->ofdev.dev.parent = parent; dev->ofdev.dev.bus = &macio_bus_type; dev->ofdev.dev.release = macio_release_dev; @@ -494,9 +492,9 @@ static void macio_pci_add_devices(struct macio_chip *chip) } /* Add media bay devices if any */ + pnode = mbdev ? mbdev->ofdev.dev.of_node : NULL; if (mbdev) - for (np = NULL; (np = of_get_next_child(mbdev->ofdev.node, np)) - != NULL;) { + for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { if (macio_skip_device(np)) continue; of_node_get(np); @@ -506,9 +504,9 @@ static void macio_pci_add_devices(struct macio_chip *chip) } /* Add serial ports if any */ + pnode = sdev ? sdev->ofdev.dev.of_node : NULL; if (sdev) { - for (np = NULL; (np = of_get_next_child(sdev->ofdev.node, np)) - != NULL;) { + for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { if (macio_skip_device(np)) continue; of_node_get(np); diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c index
9e9453b58425..6999ce59fd10 100644 --- a/drivers/macintosh/macio_sysfs.c +++ b/drivers/macintosh/macio_sysfs.c @@ -9,7 +9,7 @@ field##_show (struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct macio_dev *mdev = to_macio_device (dev); \ - return sprintf (buf, format_string, mdev->ofdev.node->field); \ + return sprintf (buf, format_string, mdev->ofdev.dev.of_node->field); \ } static ssize_t @@ -21,7 +21,7 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf) int length = 0; of = &to_macio_device (dev)->ofdev; - compat = of_get_property(of->node, "compatible", &cplen); + compat = of_get_property(of->dev.of_node, "compatible", &cplen); if (!compat) { *buf = '\0'; return 0; @@ -58,7 +58,7 @@ static ssize_t devspec_show(struct device *dev, struct of_device *ofdev; ofdev = to_of_device(dev); - return sprintf(buf, "%s\n", ofdev->node->full_name); + return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); } macio_config_of_attr (name, "%s\n"); diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c index 08002b88f342..288acce76b74 100644 --- a/drivers/macintosh/mediabay.c +++ b/drivers/macintosh/mediabay.c @@ -564,7 +564,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de unsigned long base; int i; - ofnode = mdev->ofdev.node; + ofnode = mdev->ofdev.dev.of_node; if (macio_resource_count(mdev) < 1) return -ENODEV; diff --git a/drivers/macintosh/nvram.c b/drivers/macintosh/nvram.c index c876349c32de..a271c8218d82 100644 --- a/drivers/macintosh/nvram.c +++ b/drivers/macintosh/nvram.c @@ -100,7 +100,7 @@ const struct file_operations nvram_fops = { .llseek = nvram_llseek, .read = read_nvram, .write = write_nvram, - .ioctl = nvram_ioctl, + .unlocked_ioctl = nvram_ioctl, }; static struct miscdevice nvram_dev = { diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 7c54d80c4fb2..12946c5f583f 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -375,7 +375,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev, pr_debug("rackmeter_probe()\n"); /* Get i2s-a node */ - while ((i2s = of_get_next_child(mdev->ofdev.node, i2s)) != NULL) + while ((i2s = of_get_next_child(mdev->ofdev.dev.of_node, i2s)) != NULL) if (strcmp(i2s->name, "i2s-a") == 0) break; if (i2s == NULL) { @@ -431,7 +431,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev, of_address_to_resource(i2s, 1, &rdma)) { printk(KERN_ERR "rackmeter: found match but lacks resources: %s", - mdev->ofdev.node->full_name); + mdev->ofdev.dev.of_node->full_name); rc = -ENXIO; goto bail_free; } diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index c9da5c4c167d..2506c957712e 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -671,8 +671,11 @@ static const struct of_device_id smu_platform_match[] = static struct of_platform_driver smu_of_platform_driver = { - .name = "smu", - .match_table = smu_platform_match, + .driver = { + .name = "smu", + .owner = THIS_MODULE, + .of_match_table = smu_platform_match, + }, .probe = smu_platform_probe, }; diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index b18fa948f3d1..e60605bd0ea9 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c @@ -2215,7 +2215,7 @@ static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match) state = state_detached; /* Lookup the fans in the device tree */ - fcu_lookup_fans(dev->node); + 
fcu_lookup_fans(dev->dev.of_node); /* Add the driver */ return i2c_add_driver(&therm_pm72_driver); @@ -2238,8 +2238,11 @@ static const struct of_device_id fcu_match[] = static struct of_platform_driver fcu_of_platform_driver = { - .name = "temperature", - .match_table = fcu_match, + .driver = { + .name = "temperature", + .owner = THIS_MODULE, + .of_match_table = fcu_match, + }, .probe = fcu_of_probe, .remove = fcu_of_remove }; diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index 0839770e4ec5..5c9367acf0cf 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -463,8 +463,11 @@ static const struct of_device_id therm_of_match[] = {{ }; static struct of_platform_driver therm_of_driver = { - .name = "temperature", - .match_table = therm_of_match, + .driver = { + .name = "temperature", + .owner = THIS_MODULE, + .of_match_table = therm_of_match, + }, .probe = therm_of_probe, .remove = therm_of_remove, }; diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 42764849eb78..3d4fc0f7b00b 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -2273,8 +2273,7 @@ static int register_pmu_pm_ops(void) device_initcall(register_pmu_pm_ops); #endif -static int -pmu_ioctl(struct inode * inode, struct file *filp, +static int pmu_ioctl(struct file *filp, u_int cmd, u_long arg) { __u32 __user *argp = (__u32 __user *)arg; @@ -2337,11 +2336,23 @@ pmu_ioctl(struct inode * inode, struct file *filp, return error; } +static long pmu_unlocked_ioctl(struct file *filp, + u_int cmd, u_long arg) +{ + int ret; + + lock_kernel(); + ret = pmu_ioctl(filp, cmd, arg); + unlock_kernel(); + + return ret; +} + static const struct file_operations pmu_device_fops = { .read = pmu_read, .write = pmu_write, .poll = pmu_fpoll, - .ioctl = pmu_ioctl, + .unlocked_ioctl = pmu_unlocked_ioctl, .open = pmu_open, .release = pmu_release, }; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9ea17d6c799b..d2c0f94fa37d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4645,7 +4645,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, kfree(percpu->scribble); pr_err("%s: failed memory allocation for cpu%ld\n", __func__, cpu); - return NOTIFY_BAD; + return notifier_from_errno(-ENOMEM); } break; case CPU_DEAD: diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index 9ddc57909d49..425862ffb285 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/module.h> +#include <linux/smp_lock.h> #include <linux/poll.h> #include <linux/ioctl.h> #include <linux/wait.h> @@ -963,7 +964,7 @@ dvb_demux_read(struct file *file, char __user *buf, size_t count, return ret; } -static int dvb_demux_do_ioctl(struct inode *inode, struct file *file, +static int dvb_demux_do_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dmxdev_filter *dmxdevfilter = file->private_data; @@ -1084,10 +1085,16 @@ static int dvb_demux_do_ioctl(struct inode *inode, struct file *file, return ret; } -static int dvb_demux_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long dvb_demux_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { - return dvb_usercopy(inode, file, cmd, arg, dvb_demux_do_ioctl); + int ret; + + lock_kernel(); + ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl); + 
unlock_kernel(); + + return ret; } static unsigned int dvb_demux_poll(struct file *file, poll_table *wait) @@ -1139,7 +1146,7 @@ static int dvb_demux_release(struct inode *inode, struct file *file) static const struct file_operations dvb_demux_fops = { .owner = THIS_MODULE, .read = dvb_demux_read, - .ioctl = dvb_demux_ioctl, + .unlocked_ioctl = dvb_demux_ioctl, .open = dvb_demux_open, .release = dvb_demux_release, .poll = dvb_demux_poll, @@ -1152,7 +1159,7 @@ static struct dvb_device dvbdev_demux = { .fops = &dvb_demux_fops }; -static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file, +static int dvb_dvr_do_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; @@ -1176,10 +1183,16 @@ static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file, return ret; } -static int dvb_dvr_ioctl(struct inode *inode, struct file *file, +static long dvb_dvr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - return dvb_usercopy(inode, file, cmd, arg, dvb_dvr_do_ioctl); + int ret; + + lock_kernel(); + ret = dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl); + unlock_kernel(); + + return ret; } static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait) @@ -1208,7 +1221,7 @@ static const struct file_operations dvb_dvr_fops = { .owner = THIS_MODULE, .read = dvb_dvr_read, .write = dvb_dvr_write, - .ioctl = dvb_dvr_ioctl, + .unlocked_ioctl = dvb_dvr_ioctl, .open = dvb_dvr_open, .release = dvb_dvr_release, .poll = dvb_dvr_poll, diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c index cb22da53bfb0..ef259a0718ac 100644 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c @@ -36,6 +36,7 @@ #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/sched.h> +#include <linux/smp_lock.h> #include <linux/kthread.h> #include "dvb_ca_en50221.h" @@ -1181,7 +1182,7 @@ static int dvb_ca_en50221_thread(void *data) * * @return 0 on success, <0 on error. */ -static int dvb_ca_en50221_io_do_ioctl(struct inode *inode, struct file *file, +static int dvb_ca_en50221_io_do_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; @@ -1255,10 +1256,16 @@ static int dvb_ca_en50221_io_do_ioctl(struct inode *inode, struct file *file, * * @return 0 on success, <0 on error. 
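 * * The conversion to .unlocked_ioctl keeps the old single-threaded * behaviour by taking the BKL explicitly around dvb_usercopy(): * * lock_kernel(); * ret = dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl); * unlock_kernel();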
*/ -static int dvb_ca_en50221_io_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long dvb_ca_en50221_io_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) { - return dvb_usercopy(inode, file, cmd, arg, dvb_ca_en50221_io_do_ioctl); + int ret; + + lock_kernel(); + ret = dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl); + unlock_kernel(); + + return ret; } @@ -1611,7 +1618,7 @@ static const struct file_operations dvb_ca_fops = { .owner = THIS_MODULE, .read = dvb_ca_en50221_io_read, .write = dvb_ca_en50221_io_write, - .ioctl = dvb_ca_en50221_io_ioctl, + .unlocked_ioctl = dvb_ca_en50221_io_ioctl, .open = dvb_ca_en50221_io_open, .release = dvb_ca_en50221_io_release, .poll = dvb_ca_en50221_io_poll, diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index 6932def4d266..44ae89ecef94 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -36,6 +36,7 @@ #include <linux/list.h> #include <linux/freezer.h> #include <linux/jiffies.h> +#include <linux/smp_lock.h> #include <linux/kthread.h> #include <asm/processor.h> @@ -1195,14 +1196,14 @@ static void dtv_property_cache_submit(struct dvb_frontend *fe) } } -static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file, +static int dvb_frontend_ioctl_legacy(struct file *file, unsigned int cmd, void *parg); -static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file, +static int dvb_frontend_ioctl_properties(struct file *file, unsigned int cmd, void *parg); static int dtv_property_process_get(struct dvb_frontend *fe, struct dtv_property *tvp, - struct inode *inode, struct file *file) + struct file *file) { int r = 0; @@ -1335,7 +1336,6 @@ static int dtv_property_process_get(struct dvb_frontend *fe, static int dtv_property_process_set(struct dvb_frontend *fe, struct dtv_property *tvp, - struct inode *inode, struct file *file) { int r = 0; @@ -1366,7 +1366,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe, dprintk("%s() Finalised property cache\n", __func__); dtv_property_cache_submit(fe); - r |= dvb_frontend_ioctl_legacy(inode, file, FE_SET_FRONTEND, + r |= dvb_frontend_ioctl_legacy(file, FE_SET_FRONTEND, &fepriv->parameters); break; case DTV_FREQUENCY: @@ -1398,12 +1398,12 @@ static int dtv_property_process_set(struct dvb_frontend *fe, break; case DTV_VOLTAGE: fe->dtv_property_cache.voltage = tvp->u.data; - r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_VOLTAGE, + r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE, (void *)fe->dtv_property_cache.voltage); break; case DTV_TONE: fe->dtv_property_cache.sectone = tvp->u.data; - r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_TONE, + r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE, (void *)fe->dtv_property_cache.sectone); break; case DTV_CODE_RATE_HP: @@ -1487,7 +1487,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe, return r; } -static int dvb_frontend_ioctl(struct inode *inode, struct file *file, +static int dvb_frontend_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; @@ -1509,17 +1509,17 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file, return -ERESTARTSYS; if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY)) - err = dvb_frontend_ioctl_properties(inode, file, cmd, parg); + err = dvb_frontend_ioctl_properties(file, cmd, parg); else { fe->dtv_property_cache.state = DTV_UNDEFINED; - err = 
dvb_frontend_ioctl_legacy(inode, file, cmd, parg); + err = dvb_frontend_ioctl_legacy(file, cmd, parg); } up(&fepriv->sem); return err; } -static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file, +static int dvb_frontend_ioctl_properties(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; @@ -1555,7 +1555,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file, } for (i = 0; i < tvps->num; i++) { - (tvp + i)->result = dtv_property_process_set(fe, tvp + i, inode, file); + (tvp + i)->result = dtv_property_process_set(fe, tvp + i, file); err |= (tvp + i)->result; } @@ -1587,7 +1587,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file, } for (i = 0; i < tvps->num; i++) { - (tvp + i)->result = dtv_property_process_get(fe, tvp + i, inode, file); + (tvp + i)->result = dtv_property_process_get(fe, tvp + i, file); err |= (tvp + i)->result; } @@ -1604,7 +1604,7 @@ out: return err; } -static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file, +static int dvb_frontend_ioctl_legacy(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; @@ -2031,7 +2031,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file) static const struct file_operations dvb_frontend_fops = { .owner = THIS_MODULE, - .ioctl = dvb_generic_ioctl, + .unlocked_ioctl = dvb_generic_ioctl, .poll = dvb_frontend_poll, .open = dvb_frontend_open, .release = dvb_frontend_release diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index cccea412088b..f6dac2bb0ac6 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c @@ -59,6 +59,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/dvb/net.h> +#include <linux/smp_lock.h> #include <linux/uio.h> #include <asm/uaccess.h> #include <linux/crc32.h> @@ -1329,7 +1330,7 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num) return 0; } -static int dvb_net_do_ioctl(struct inode *inode, struct file *file, +static int dvb_net_do_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; @@ -1431,10 +1432,16 @@ static int dvb_net_do_ioctl(struct inode *inode, struct file *file, return 0; } -static int dvb_net_ioctl(struct inode *inode, struct file *file, +static long dvb_net_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - return dvb_usercopy(inode, file, cmd, arg, dvb_net_do_ioctl); + int ret; + + lock_kernel(); + ret = dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl); + unlock_kernel(); + + return ret; } static int dvb_net_close(struct inode *inode, struct file *file) @@ -1455,7 +1462,7 @@ static int dvb_net_close(struct inode *inode, struct file *file) static const struct file_operations dvb_net_fops = { .owner = THIS_MODULE, - .ioctl = dvb_net_ioctl, + .unlocked_ioctl = dvb_net_ioctl, .open = dvb_generic_open, .release = dvb_net_close, }; diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c index 94159b90f733..b915c39d782f 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.c +++ b/drivers/media/dvb/dvb-core/dvbdev.c @@ -154,10 +154,11 @@ int dvb_generic_release(struct inode *inode, struct file *file) EXPORT_SYMBOL(dvb_generic_release); -int dvb_generic_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +long dvb_generic_ioctl(struct file *file, + 
unsigned int cmd, unsigned long arg) { struct dvb_device *dvbdev = file->private_data; + int ret; if (!dvbdev) return -ENODEV; @@ -165,7 +166,11 @@ int dvb_generic_ioctl(struct inode *inode, struct file *file, if (!dvbdev->kernel_ioctl) return -EINVAL; - return dvb_usercopy (inode, file, cmd, arg, dvbdev->kernel_ioctl); + lock_kernel(); + ret = dvb_usercopy(file, cmd, arg, dvbdev->kernel_ioctl); + unlock_kernel(); + + return ret; } EXPORT_SYMBOL(dvb_generic_ioctl); @@ -377,9 +382,9 @@ EXPORT_SYMBOL(dvb_unregister_adapter); define this as video_usercopy(). this will introduce a dependecy to the v4l "videodev.o" module, which is unnecessary for some cards (ie. the budget dvb-cards don't need the v4l module...) */ -int dvb_usercopy(struct inode *inode, struct file *file, +int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg, - int (*func)(struct inode *inode, struct file *file, + int (*func)(struct file *file, unsigned int cmd, void *arg)) { char sbuf[128]; @@ -416,7 +421,7 @@ int dvb_usercopy(struct inode *inode, struct file *file, } /* call driver */ - if ((err = func(inode, file, cmd, parg)) == -ENOIOCTLCMD) + if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD) err = -EINVAL; if (err < 0) diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h index f7b499d4a3c0..fcc6ae98745e 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.h +++ b/drivers/media/dvb/dvb-core/dvbdev.h @@ -116,8 +116,7 @@ struct dvb_device { wait_queue_head_t wait_queue; /* don't really need those !? -- FIXME: use video_usercopy */ - int (*kernel_ioctl)(struct inode *inode, struct file *file, - unsigned int cmd, void *arg); + int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg); void *priv; }; @@ -138,17 +137,15 @@ extern void dvb_unregister_device (struct dvb_device *dvbdev); extern int dvb_generic_open (struct inode *inode, struct file *file); extern int dvb_generic_release (struct inode *inode, struct file *file); -extern int dvb_generic_ioctl (struct inode *inode, struct file *file, +extern long dvb_generic_ioctl (struct file *file, unsigned int cmd, unsigned long arg); /* we don't mess with video_usercopy() any more, we simply define out own dvb_usercopy(), which will hopefully become generic_usercopy() someday... */ -extern int dvb_usercopy(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg, - int (*func)(struct inode *inode, struct file *file, - unsigned int cmd, void *arg)); +extern int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + int (*func)(struct file *file, unsigned int cmd, void *arg)); /** generic DVB attach function. 
*/ #ifdef CONFIG_MEDIA_ATTACH diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c index 853e04b7cb36..d3c2cf60de76 100644 --- a/drivers/media/dvb/firewire/firedtv-ci.c +++ b/drivers/media/dvb/firewire/firedtv-ci.c @@ -175,8 +175,7 @@ static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg) return err; } -static int fdtv_ca_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, void *arg) +static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg) { struct dvb_device *dvbdev = file->private_data; struct firedtv *fdtv = dvbdev->priv; @@ -217,7 +216,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait) static const struct file_operations fdtv_ca_fops = { .owner = THIS_MODULE, - .ioctl = dvb_generic_ioctl, + .unlocked_ioctl = dvb_generic_ioctl, .open = dvb_generic_open, .release = dvb_generic_release, .poll = fdtv_ca_io_poll, diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c index 38915591c6e5..a6be529eec5c 100644 --- a/drivers/media/dvb/ttpci/av7110.c +++ b/drivers/media/dvb/ttpci/av7110.c @@ -708,7 +708,7 @@ static void gpioirq(unsigned long cookie) #ifdef CONFIG_DVB_AV7110_OSD -static int dvb_osd_ioctl(struct inode *inode, struct file *file, +static int dvb_osd_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; @@ -727,7 +727,7 @@ static int dvb_osd_ioctl(struct inode *inode, struct file *file, static const struct file_operations dvb_osd_fops = { .owner = THIS_MODULE, - .ioctl = dvb_generic_ioctl, + .unlocked_ioctl = dvb_generic_ioctl, .open = dvb_generic_open, .release = dvb_generic_release, }; diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c index 53884814161c..13efba942dac 100644 --- a/drivers/media/dvb/ttpci/av7110_av.c +++ b/drivers/media/dvb/ttpci/av7110_av.c @@ -1089,7 +1089,7 @@ static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len } -static int dvb_video_ioctl(struct inode *inode, struct file *file, +static int dvb_video_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; @@ -1297,7 +1297,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file, return ret; } -static int dvb_audio_ioctl(struct inode *inode, struct file *file, +static int dvb_audio_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; @@ -1517,7 +1517,7 @@ static int dvb_audio_release(struct inode *inode, struct file *file) static const struct file_operations dvb_video_fops = { .owner = THIS_MODULE, .write = dvb_video_write, - .ioctl = dvb_generic_ioctl, + .unlocked_ioctl = dvb_generic_ioctl, .open = dvb_video_open, .release = dvb_video_release, .poll = dvb_video_poll, @@ -1535,7 +1535,7 @@ static struct dvb_device dvbdev_video = { static const struct file_operations dvb_audio_fops = { .owner = THIS_MODULE, .write = dvb_audio_write, - .ioctl = dvb_generic_ioctl, + .unlocked_ioctl = dvb_generic_ioctl, .open = dvb_audio_open, .release = dvb_audio_release, .poll = dvb_audio_poll, diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c index ac7779c45c5b..4eba35a018e3 100644 --- a/drivers/media/dvb/ttpci/av7110_ca.c +++ b/drivers/media/dvb/ttpci/av7110_ca.c @@ -248,8 +248,7 @@ static unsigned int dvb_ca_poll (struct file *file, poll_table *wait) return mask; } -static int dvb_ca_ioctl(struct inode *inode, struct file *file, - 
unsigned int cmd, void *parg) +static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct av7110 *av7110 = dvbdev->priv; @@ -350,7 +349,7 @@ static const struct file_operations dvb_ca_fops = { .owner = THIS_MODULE, .read = dvb_ca_read, .write = dvb_ca_write, - .ioctl = dvb_generic_ioctl, + .unlocked_ioctl = dvb_generic_ioctl, .open = dvb_ca_open, .release = dvb_generic_release, .poll = dvb_ca_poll, diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 7bd4c0fc23cc..5c53624e0e87 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -2570,9 +2570,7 @@ mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) } /** - * mptscsih_set_scsi_lookup - * - * writes a scmd entry into the ScsiLookup[] array list + * mptscsih_set_scsi_lookup - write a scmd entry into the ScsiLookup[] array list * * @ioc: Pointer to MPT_ADAPTER structure * @i: index into the array @@ -2735,7 +2733,7 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, /** - * mptscsih_get_completion_code - + * mptscsih_get_completion_code - get completion code from MPT request * @ioc: Pointer to MPT_ADAPTER structure * @req: Pointer to original MPT request frame * @reply: Pointer to MPT reply frame (NULL if TurboReply) diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index d33693c13368..c4b117f5fb70 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c @@ -186,14 +186,9 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type) if (!dev) return -ENXIO; - ops = kmalloc(kcmd.oplen, GFP_KERNEL); - if (!ops) - return -ENOMEM; - - if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) { - kfree(ops); - return -EFAULT; - } + ops = memdup_user(kcmd.opbuf, kcmd.oplen); + if (IS_ERR(ops)) + return PTR_ERR(ops); /* * It's possible to have a _very_ large table diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 6a14d2b1ccf0..2c65a2c57294 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c @@ -173,33 +173,35 @@ static struct resource regulator_resources[] = { PM8607_REG_RESOURCE(LDO9, LDO9), PM8607_REG_RESOURCE(LDO10, LDO10), PM8607_REG_RESOURCE(LDO12, LDO12), + PM8607_REG_RESOURCE(VIBRATOR_SET, VIBRATOR_SET), PM8607_REG_RESOURCE(LDO14, LDO14), }; -#define PM8607_REG_DEVS(_name, _id) \ +#define PM8607_REG_DEVS(_id) \ { \ - .name = "88pm8607-" #_name, \ + .name = "88pm860x-regulator", \ .num_resources = 1, \ .resources = ®ulator_resources[PM8607_ID_##_id], \ .id = PM8607_ID_##_id, \ } static struct mfd_cell regulator_devs[] = { - PM8607_REG_DEVS(buck1, BUCK1), - PM8607_REG_DEVS(buck2, BUCK2), - PM8607_REG_DEVS(buck3, BUCK3), - PM8607_REG_DEVS(ldo1, LDO1), - PM8607_REG_DEVS(ldo2, LDO2), - PM8607_REG_DEVS(ldo3, LDO3), - PM8607_REG_DEVS(ldo4, LDO4), - PM8607_REG_DEVS(ldo5, LDO5), - PM8607_REG_DEVS(ldo6, LDO6), - PM8607_REG_DEVS(ldo7, LDO7), - PM8607_REG_DEVS(ldo8, LDO8), - PM8607_REG_DEVS(ldo9, LDO9), - PM8607_REG_DEVS(ldo10, LDO10), - PM8607_REG_DEVS(ldo12, LDO12), - PM8607_REG_DEVS(ldo14, LDO14), + PM8607_REG_DEVS(BUCK1), + PM8607_REG_DEVS(BUCK2), + PM8607_REG_DEVS(BUCK3), + PM8607_REG_DEVS(LDO1), + PM8607_REG_DEVS(LDO2), + PM8607_REG_DEVS(LDO3), + PM8607_REG_DEVS(LDO4), + PM8607_REG_DEVS(LDO5), + PM8607_REG_DEVS(LDO6), + PM8607_REG_DEVS(LDO7), + PM8607_REG_DEVS(LDO8), + PM8607_REG_DEVS(LDO9), + PM8607_REG_DEVS(LDO10), + PM8607_REG_DEVS(LDO12), + PM8607_REG_DEVS(LDO13), + 
PM8607_REG_DEVS(LDO14), }; struct pm860x_irq_data { @@ -564,7 +566,7 @@ out: return ret; } -static void __devexit device_irq_exit(struct pm860x_chip *chip) +static void device_irq_exit(struct pm860x_chip *chip) { if (chip->core_irq) free_irq(chip->core_irq, chip); @@ -701,7 +703,7 @@ out: return; } -int pm860x_device_init(struct pm860x_chip *chip, +int __devinit pm860x_device_init(struct pm860x_chip *chip, struct pm860x_platform_data *pdata) { chip->core_irq = 0; @@ -729,7 +731,7 @@ int pm860x_device_init(struct pm860x_chip *chip, return 0; } -void pm860x_device_exit(struct pm860x_chip *chip) +void __devexit pm860x_device_exit(struct pm860x_chip *chip) { device_irq_exit(chip); mfd_remove_devices(chip->dev); diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c index 4a6e7186334e..c933b64d1283 100644 --- a/drivers/mfd/88pm860x-i2c.c +++ b/drivers/mfd/88pm860x-i2c.c @@ -200,8 +200,8 @@ static int __devexit pm860x_remove(struct i2c_client *client) pm860x_device_exit(chip); i2c_unregister_device(chip->companion); - i2c_set_clientdata(chip->companion, NULL); i2c_set_clientdata(chip->client, NULL); + i2c_set_clientdata(client, NULL); kfree(chip); return 0; } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index de3e74cde51c..9da0e504bbe9 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -2,8 +2,14 @@ # Multifunction miscellaneous devices # -menu "Multifunction device drivers" +menuconfig MFD_SUPPORT + bool "Multifunction device drivers" depends on HAS_IOMEM + default y + help + Configure MFD device drivers. + +if MFD_SUPPORT config MFD_CORE tristate @@ -49,6 +55,7 @@ config MFD_SH_MOBILE_SDHI bool "Support for SuperH Mobile SDHI" depends on SUPERH || ARCH_SHMOBILE select MFD_CORE + select TMIO_MMC_DMA ---help--- This driver supports the SDHI hardware block found in many SuperH Mobile SoCs. @@ -115,6 +122,18 @@ config TPS65010 This driver can also be built as a module. If so, the module will be called tps65010. +config TPS6507X + tristate "TPS6507x Power Management / Touch Screen chips" + select MFD_CORE + depends on I2C + help + If you say yes here you get support for the TPS6507x series of + Power Management / Touch Screen chips. These include voltage + regulators, lithium ion/polymer battery charging, touch screen + and other features that are often used in portable devices. + This driver can also be built as a module. If so, the module + will be called tps6507x. + config MENELAUS bool "Texas Instruments TWL92330/Menelaus PM chip" depends on I2C=y && ARCH_OMAP2 @@ -158,10 +177,26 @@ config TWL4030_CODEC select MFD_CORE default n +config MFD_TC35892 + bool "Support Toshiba TC35892" + depends on I2C=y && GENERIC_HARDIRQS + select MFD_CORE + help + Support for the Toshiba TC35892 I/O Expander. + + This driver provides common support for accessing the device, + additional drivers must be enabled in order to use the + functionality of the device. + config MFD_TMIO bool default n +config TMIO_MMC_DMA + bool + select DMA_ENGINE + select DMADEVICES + config MFD_T7L66XB bool "Support Toshiba T7L66XB" depends on ARM && HAVE_CLK @@ -345,9 +380,19 @@ config PCF50633_GPIO Say yes here if you want to include support GPIO for pins on the PCF50633 chip. +config ABX500_CORE + bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions" + default y if ARCH_U300 + help + Say yes here if you have the ABX500 Mixed Signal IC family + chips. This core driver expose register access functions. 
+ Functionality specific drivers using these functions can + remain unchanged when IC changes. Binding of the functions to + actual register access is done by the IC core driver. + config AB3100_CORE bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions" - depends on I2C=y + depends on I2C=y && ABX500_CORE default y if ARCH_U300 help Select this to enable the AB3100 Mixed Signal IC core @@ -375,15 +420,30 @@ config EZX_PCAP This enables the PCAP ASIC present on EZX Phones. This is needed for MMC, TouchScreen, Sound, USB, etc.. -config AB4500_CORE - tristate "ST-Ericsson's AB4500 Mixed Signal Power management chip" - depends on SPI +config AB8500_CORE + bool "ST-Ericsson AB8500 Mixed Signal Power Management chip" + depends on SPI=y && GENERIC_HARDIRQS + select MFD_CORE help - Select this option to enable access to AB4500 power management + Select this option to enable access to AB8500 power management chip. This connects to U8500 on the SSP/SPI bus and exports read/write functions for the devices to get access to this chip. This chip embeds various other multimedia funtionalities as well. +config AB3550_CORE + bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions" + select MFD_CORE + depends on I2C=y && GENERIC_HARDIRQS && ABX500_CORE + help + Select this to enable the AB3550 Mixed Signal IC core + functionality. This connects to a AB3550 on the I2C bus + and expose a number of symbols needed for dependent devices + to read and write registers and subscribe to events from + this multi-functional IC. This is needed to use other features + of the AB3550 such as battery-backed RTC, charging control, + LEDs, vibrator, system power and temperature, power management + and ALSA sound. + config MFD_TIMBERDALE tristate "Support for the Timberdale FPGA" select MFD_CORE @@ -403,7 +463,26 @@ config LPC_SCH LPC bridge function of the Intel SCH provides support for System Management Bus and General Purpose I/O. -endmenu +config MFD_RDC321X + tristate "Support for RDC-R321x southbridge" + select MFD_CORE + depends on PCI + help + Say yes here if you want to have support for the RDC R-321x SoC + southbridge which provides access to GPIOs and Watchdog using the + southbridge PCI device configuration space. + +config MFD_JANZ_CMODIO + tristate "Support for Janz CMOD-IO PCI MODULbus Carrier Board" + select MFD_CORE + depends on PCI + help + This is the core driver for the Janz CMOD-IO PCI MODULbus + carrier board. This device is a PCI to MODULbus bridge which may + host many different types of MODULbus daughterboards, including + CAN and GPIO controllers. 
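
Looking back at the i2o_config hunk earlier in this section: the open-coded kmalloc() plus copy_from_user() pair collapses into memdup_user(), which allocates, copies and reports failure in one step via an ERR_PTR. A minimal sketch of the idiom; example_copy_table() is a hypothetical caller:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>	/* memdup_user() */

static int example_copy_table(const void __user *ubuf, size_t len)
{
	void *buf;

	buf = memdup_user(ubuf, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... operate on the kernel-space copy ... */

	kfree(buf);
	return 0;
}
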
+ +endif # MFD_SUPPORT menu "Multimedia Capabilities Port drivers" depends on ARCH_SA1100 diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 87935f967aa0..fb503e77dc60 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o +obj-$(CONFIG_MFD_TC35892) += tc35892.o obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o @@ -29,6 +30,7 @@ obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o obj-$(CONFIG_TPS65010) += tps65010.o +obj-$(CONFIG_TPS6507X) += tps6507x.o obj-$(CONFIG_MENELAUS) += menelaus.o obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o @@ -55,12 +57,17 @@ obj-$(CONFIG_PMIC_DA903X) += da903x.o max8925-objs := max8925-core.o max8925-i2c.o obj-$(CONFIG_MFD_MAX8925) += max8925.o -obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o +pcf50633-objs := pcf50633-core.o pcf50633-irq.o +obj-$(CONFIG_MFD_PCF50633) += pcf50633.o obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o +obj-$(CONFIG_ABX500_CORE) += abx500-core.o obj-$(CONFIG_AB3100_CORE) += ab3100-core.o obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o -obj-$(CONFIG_AB4500_CORE) += ab4500-core.o +obj-$(CONFIG_AB3550_CORE) += ab3550-core.o +obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-spi.o obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o obj-$(CONFIG_PMIC_ADP5520) += adp5520.o -obj-$(CONFIG_LPC_SCH) += lpc_sch.o
\ No newline at end of file
+obj-$(CONFIG_LPC_SCH) += lpc_sch.o
+obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
+obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index e4ca5909e424..53ebfee548fa 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -19,7 +19,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>

 /* These are the only registers inside AB3100 used in this main file */

@@ -59,24 +59,15 @@
  * The AB3100 is usually assigned address 0x48 (7-bit)
  * The chip is defined in the platform i2c_board_data section.
  */
-
-u8 ab3100_get_chip_type(struct ab3100 *ab3100)
+static int ab3100_get_chip_id(struct device *dev)
 {
-	u8 chip = ABUNKNOWN;
-
-	switch (ab3100->chip_id & 0xf0) {
-	case 0xa0:
-		chip = AB3000;
-		break;
-	case 0xc0:
-		chip = AB3100;
-		break;
-	}
-	return chip;
+	struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+	return (int)ab3100->chip_id;
 }
-EXPORT_SYMBOL(ab3100_get_chip_type);

-int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval)
+static int ab3100_set_register_interruptible(struct ab3100 *ab3100,
+	u8 reg, u8 regval)
 {
 	u8 regandval[2] = {reg, regval};
 	int err;
@@ -108,8 +99,14 @@ int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval)
 	mutex_unlock(&ab3100->access_mutex);
 	return err;
 }
-EXPORT_SYMBOL(ab3100_set_register_interruptible);

+static int set_register_interruptible(struct device *dev,
+	u8 bank, u8 reg, u8 value)
+{
+	struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+	return ab3100_set_register_interruptible(ab3100, reg, value);
+}

 /*
  * The test registers exist at an I2C bus address up one
@@ -148,8 +145,8 @@ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100,
 	return err;
 }

-
-int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval)
+static int ab3100_get_register_interruptible(struct ab3100 *ab3100,
+	u8 reg, u8 *regval)
 {
 	int err;

@@ -203,10 +200,16 @@ int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval)
 	mutex_unlock(&ab3100->access_mutex);
 	return err;
 }
-EXPORT_SYMBOL(ab3100_get_register_interruptible);

+static int get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+	u8 *value)
+{
+	struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+	return ab3100_get_register_interruptible(ab3100, reg, value);
+}

-int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
+static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
 	u8 first_reg, u8 *regvals, u8 numregs)
 {
 	int err;
@@ -260,10 +263,17 @@ int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
 	mutex_unlock(&ab3100->access_mutex);
 	return err;
 }
-EXPORT_SYMBOL(ab3100_get_register_page_interruptible);

+static int get_register_page_interruptible(struct device *dev, u8 bank,
+	u8 first_reg, u8 *regvals, u8 numregs)
+{
+	struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+	return ab3100_get_register_page_interruptible(ab3100,
+		first_reg, regvals, numregs);
+}

-int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
+static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
 	u8 reg, u8 andmask, u8 ormask)
 {
 	u8 regandval[2] = {reg, 0};
@@ -331,8 +341,15 @@ int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
 	mutex_unlock(&ab3100->access_mutex);
 	return err;
 }
-EXPORT_SYMBOL(ab3100_mask_and_set_register_interruptible); +static int mask_and_set_register_interruptible(struct device *dev, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues) +{ + struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); + + return ab3100_mask_and_set_register_interruptible(ab3100, + reg, bitmask, (bitmask & bitvalues)); +} /* * Register a simple callback for handling any AB3100 events. @@ -357,15 +374,27 @@ int ab3100_event_unregister(struct ab3100 *ab3100, EXPORT_SYMBOL(ab3100_event_unregister); -int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100, - u32 *fatevent) +static int ab3100_event_registers_startup_state_get(struct device *dev, + u8 *event) { + struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); if (!ab3100->startup_events_read) return -EAGAIN; /* Try again later */ - *fatevent = ab3100->startup_events; + memcpy(event, ab3100->startup_events, 3); return 0; } -EXPORT_SYMBOL(ab3100_event_registers_startup_state_get); + +static struct abx500_ops ab3100_ops = { + .get_chip_id = ab3100_get_chip_id, + .set_register = set_register_interruptible, + .get_register = get_register_interruptible, + .get_register_page = get_register_page_interruptible, + .set_register_page = NULL, + .mask_and_set_register = mask_and_set_register_interruptible, + .event_registers_startup_state_get = + ab3100_event_registers_startup_state_get, + .startup_irq_enabled = NULL, +}; /* * This is a threaded interrupt handler so we can make some @@ -390,7 +419,9 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data) event_regs[2]; if (!ab3100->startup_events_read) { - ab3100->startup_events = fatevent; + ab3100->startup_events[0] = event_regs[0]; + ab3100->startup_events[1] = event_regs[1]; + ab3100->startup_events[2] = event_regs[2]; ab3100->startup_events_read = true; } /* @@ -703,7 +734,8 @@ static int __init ab3100_setup(struct ab3100 *ab3100) dev_warn(ab3100->dev, "AB3100 P1E variant detected, " "forcing chip to 32KHz\n"); - err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08); + err = ab3100_set_test_register_interruptible(ab3100, + 0x02, 0x08); } exit_no_setup: @@ -898,6 +930,10 @@ static int __init ab3100_probe(struct i2c_client *client, if (err) goto exit_no_irq; + err = abx500_register_ops(&client->dev, &ab3100_ops); + if (err) + goto exit_no_ops; + /* Set parent and a pointer back to the container in device data */ for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) { ab3100_platform_devs[i]->dev.parent = @@ -915,11 +951,13 @@ static int __init ab3100_probe(struct i2c_client *client, return 0; + exit_no_ops: exit_no_irq: exit_no_setup: i2c_unregister_device(ab3100->testreg_client); exit_no_testreg_client: exit_no_detect: + i2c_set_clientdata(client, NULL); kfree(ab3100); return err; } @@ -941,6 +979,7 @@ static int __exit ab3100_remove(struct i2c_client *client) * their notifiers so deactivate IRQ */ free_irq(client->irq, ab3100); + i2c_set_clientdata(client, NULL); kfree(ab3100); return 0; } diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c index 2d14655fdebd..63d2b727ddbb 100644 --- a/drivers/mfd/ab3100-otp.c +++ b/drivers/mfd/ab3100-otp.c @@ -12,7 +12,7 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/platform_device.h> -#include <linux/mfd/ab3100.h> +#include <linux/mfd/abx500.h> #include <linux/debugfs.h> #include <linux/seq_file.h> @@ -30,7 +30,6 @@ /** * struct ab3100_otp * @dev containing device - * @ab3100 a pointer to the parent ab3100 device struct * @locked whether the OTP is locked, after locking, no more bits * 
can be changed but before locking it is still possible * to change bits from 1->0. @@ -49,7 +48,6 @@ */ struct ab3100_otp { struct device *dev; - struct ab3100 *ab3100; bool locked; u32 freq; bool paf; @@ -63,19 +61,19 @@ struct ab3100_otp { static int __init ab3100_otp_read(struct ab3100_otp *otp) { - struct ab3100 *ab = otp->ab3100; u8 otpval[8]; u8 otpp; int err; - err = ab3100_get_register_interruptible(ab, AB3100_OTPP, &otpp); + err = abx500_get_register_interruptible(otp->dev, 0, + AB3100_OTPP, &otpp); if (err) { dev_err(otp->dev, "unable to read OTPP register\n"); return err; } - err = ab3100_get_register_page_interruptible(ab, AB3100_OTP0, - otpval, 8); + err = abx500_get_register_page_interruptible(otp->dev, 0, + AB3100_OTP0, otpval, 8); if (err) { dev_err(otp->dev, "unable to read OTP register page\n"); return err; @@ -197,7 +195,6 @@ static int __init ab3100_otp_probe(struct platform_device *pdev) otp->dev = &pdev->dev; /* Replace platform data coming in with a local struct */ - otp->ab3100 = platform_get_drvdata(pdev); platform_set_drvdata(pdev, otp); err = ab3100_otp_read(otp); diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c new file mode 100644 index 000000000000..1060f8e1c40a --- /dev/null +++ b/drivers/mfd/ab3550-core.c @@ -0,0 +1,1401 @@ +/* + * Copyright (C) 2007-2010 ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + * Low-level core for exclusive access to the AB3550 IC on the I2C bus + * and some basic chip-configuration. + * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> + * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> + * Author: Mattias Wallin <mattias.wallin@stericsson.com> + * Author: Rickard Andersson <rickard.andersson@stericsson.com> + */ + +#include <linux/i2c.h> +#include <linux/mutex.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/random.h> +#include <linux/workqueue.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/mfd/abx500.h> +#include <linux/list.h> +#include <linux/bitops.h> +#include <linux/spinlock.h> +#include <linux/mfd/core.h> + +#define AB3550_NAME_STRING "ab3550" +#define AB3550_ID_FORMAT_STRING "AB3550 %s" +#define AB3550_NUM_BANKS 2 +#define AB3550_NUM_EVENT_REG 5 + +/* These are the only registers inside AB3550 used in this main file */ + +/* Chip ID register */ +#define AB3550_CID_REG 0x20 + +/* Interrupt event registers */ +#define AB3550_EVENT_BANK 0 +#define AB3550_EVENT_REG 0x22 + +/* Read/write operation values. */ +#define AB3550_PERM_RD (0x01) +#define AB3550_PERM_WR (0x02) + +/* Read/write permissions. 
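
The ab3100-otp conversion above shows the point of the new abx500 interface: a subdriver no longer carries a pointer to the parent chip struct, it calls the generic accessors on its own struct device, and the call is routed through whichever struct abx500_ops the core driver registered with abx500_register_ops(). A minimal sketch of a hypothetical client read; the register address 0x20 is illustrative only:

#include <linux/device.h>
#include <linux/mfd/abx500.h>

static int example_read_id(struct device *dev)
{
	u8 id;
	int err;

	/* bank 0, register 0x20: dispatched via the parent's abx500_ops */
	err = abx500_get_register_interruptible(dev, 0, 0x20, &id);
	if (err)
		return err;

	return id;
}

This indirection is what lets functionality drivers stay unchanged when the underlying IC changes, as the ABX500_CORE Kconfig help above promises.
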
*/ +#define AB3550_PERM_RO (AB3550_PERM_RD) +#define AB3550_PERM_RW (AB3550_PERM_RD | AB3550_PERM_WR) + +/** + * struct ab3550 + * @access_mutex: lock out concurrent accesses to the AB registers + * @i2c_client: I2C client for this chip + * @chip_name: name of this chip variant + * @chip_id: 8 bit chip ID for this chip variant + * @mask_work: a worker for writing to mask registers + * @event_lock: a lock to protect the event_mask + * @event_mask: a local copy of the mask event registers + * @startup_events: a copy of the first reading of the event registers + * @startup_events_read: whether the first events have been read + */ +struct ab3550 { + struct mutex access_mutex; + struct i2c_client *i2c_client[AB3550_NUM_BANKS]; + char chip_name[32]; + u8 chip_id; + struct work_struct mask_work; + spinlock_t event_lock; + u8 event_mask[AB3550_NUM_EVENT_REG]; + u8 startup_events[AB3550_NUM_EVENT_REG]; + bool startup_events_read; +#ifdef CONFIG_DEBUG_FS + unsigned int debug_bank; + unsigned int debug_address; +#endif +}; + +/** + * struct ab3550_reg_range + * @first: the first address of the range + * @last: the last address of the range + * @perm: access permissions for the range + */ +struct ab3550_reg_range { + u8 first; + u8 last; + u8 perm; +}; + +/** + * struct ab3550_reg_ranges + * @count: the number of ranges in the list + * @range: the list of register ranges + */ +struct ab3550_reg_ranges { + u8 count; + const struct ab3550_reg_range *range; +}; + +/* + * Permissible register ranges for reading and writing per device and bank. + * + * The ranges must be listed in increasing address order, and no overlaps are + * allowed. It is assumed that write permission implies read permission + * (i.e. only RO and RW permissions should be used). Ranges with write + * permission must not be split up. 
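+ * (Rationale: page_write_allowed() below accepts a span only when it lies
+ * wholly inside a single range with write permission, while
+ * page_read_allowed() may stitch a read across several contiguous readable
+ * ranges, so a writable range split in two would wrongly reject page
+ * writes that cross the seam.)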
+ */ + +#define NO_RANGE {.count = 0, .range = NULL,} + +static struct +ab3550_reg_ranges ab3550_reg_ranges[AB3550_NUM_DEVICES][AB3550_NUM_BANKS] = { + [AB3550_DEVID_DAC] = { + NO_RANGE, + { + .count = 2, + .range = (struct ab3550_reg_range[]) { + { + .first = 0xb0, + .last = 0xba, + .perm = AB3550_PERM_RW, + }, + { + .first = 0xbc, + .last = 0xc3, + .perm = AB3550_PERM_RW, + }, + }, + }, + }, + [AB3550_DEVID_LEDS] = { + NO_RANGE, + { + .count = 2, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x5a, + .last = 0x88, + .perm = AB3550_PERM_RW, + }, + { + .first = 0x8a, + .last = 0xad, + .perm = AB3550_PERM_RW, + }, + } + }, + }, + [AB3550_DEVID_POWER] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x21, + .last = 0x21, + .perm = AB3550_PERM_RO, + }, + } + }, + NO_RANGE, + }, + [AB3550_DEVID_REGULATORS] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x69, + .last = 0xa3, + .perm = AB3550_PERM_RW, + }, + } + }, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x14, + .last = 0x16, + .perm = AB3550_PERM_RW, + }, + } + }, + }, + [AB3550_DEVID_SIM] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x21, + .last = 0x21, + .perm = AB3550_PERM_RO, + }, + } + }, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x14, + .last = 0x17, + .perm = AB3550_PERM_RW, + }, + } + + }, + }, + [AB3550_DEVID_UART] = { + NO_RANGE, + NO_RANGE, + }, + [AB3550_DEVID_RTC] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x00, + .last = 0x0c, + .perm = AB3550_PERM_RW, + }, + } + }, + NO_RANGE, + }, + [AB3550_DEVID_CHARGER] = { + { + .count = 2, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x10, + .last = 0x1a, + .perm = AB3550_PERM_RW, + }, + { + .first = 0x21, + .last = 0x21, + .perm = AB3550_PERM_RO, + }, + } + }, + NO_RANGE, + }, + [AB3550_DEVID_ADC] = { + NO_RANGE, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x20, + .last = 0x56, + .perm = AB3550_PERM_RW, + }, + + } + }, + }, + [AB3550_DEVID_FUELGAUGE] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x21, + .last = 0x21, + .perm = AB3550_PERM_RO, + }, + } + }, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x00, + .last = 0x0e, + .perm = AB3550_PERM_RW, + }, + } + }, + }, + [AB3550_DEVID_VIBRATOR] = { + NO_RANGE, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x10, + .last = 0x13, + .perm = AB3550_PERM_RW, + }, + + } + }, + }, + [AB3550_DEVID_CODEC] = { + { + .count = 2, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x31, + .last = 0x63, + .perm = AB3550_PERM_RW, + }, + { + .first = 0x65, + .last = 0x68, + .perm = AB3550_PERM_RW, + }, + } + }, + NO_RANGE, + }, +}; + +static struct mfd_cell ab3550_devs[AB3550_NUM_DEVICES] = { + [AB3550_DEVID_DAC] = { + .name = "ab3550-dac", + .id = AB3550_DEVID_DAC, + .num_resources = 0, + }, + [AB3550_DEVID_LEDS] = { + .name = "ab3550-leds", + .id = AB3550_DEVID_LEDS, + }, + [AB3550_DEVID_POWER] = { + .name = "ab3550-power", + .id = AB3550_DEVID_POWER, + }, + [AB3550_DEVID_REGULATORS] = { + .name = "ab3550-regulators", + .id = AB3550_DEVID_REGULATORS, + }, + [AB3550_DEVID_SIM] = { + .name = "ab3550-sim", + .id = AB3550_DEVID_SIM, + }, + [AB3550_DEVID_UART] = { + .name = "ab3550-uart", + .id = AB3550_DEVID_UART, + }, + [AB3550_DEVID_RTC] = { + .name = "ab3550-rtc", + .id = AB3550_DEVID_RTC, + }, + [AB3550_DEVID_CHARGER] = { + 
.name = "ab3550-charger", + .id = AB3550_DEVID_CHARGER, + }, + [AB3550_DEVID_ADC] = { + .name = "ab3550-adc", + .id = AB3550_DEVID_ADC, + .num_resources = 10, + .resources = (struct resource[]) { + { + .name = "TRIGGER-0", + .flags = IORESOURCE_IRQ, + .start = 16, + .end = 16, + }, + { + .name = "TRIGGER-1", + .flags = IORESOURCE_IRQ, + .start = 17, + .end = 17, + }, + { + .name = "TRIGGER-2", + .flags = IORESOURCE_IRQ, + .start = 18, + .end = 18, + }, + { + .name = "TRIGGER-3", + .flags = IORESOURCE_IRQ, + .start = 19, + .end = 19, + }, + { + .name = "TRIGGER-4", + .flags = IORESOURCE_IRQ, + .start = 20, + .end = 20, + }, + { + .name = "TRIGGER-5", + .flags = IORESOURCE_IRQ, + .start = 21, + .end = 21, + }, + { + .name = "TRIGGER-6", + .flags = IORESOURCE_IRQ, + .start = 22, + .end = 22, + }, + { + .name = "TRIGGER-7", + .flags = IORESOURCE_IRQ, + .start = 23, + .end = 23, + }, + { + .name = "TRIGGER-VBAT-TXON", + .flags = IORESOURCE_IRQ, + .start = 13, + .end = 13, + }, + { + .name = "TRIGGER-VBAT", + .flags = IORESOURCE_IRQ, + .start = 12, + .end = 12, + }, + }, + }, + [AB3550_DEVID_FUELGAUGE] = { + .name = "ab3550-fuelgauge", + .id = AB3550_DEVID_FUELGAUGE, + }, + [AB3550_DEVID_VIBRATOR] = { + .name = "ab3550-vibrator", + .id = AB3550_DEVID_VIBRATOR, + }, + [AB3550_DEVID_CODEC] = { + .name = "ab3550-codec", + .id = AB3550_DEVID_CODEC, + }, +}; + +/* + * I2C transactions with error messages. + */ +static int ab3550_i2c_master_send(struct ab3550 *ab, u8 bank, u8 *data, + u8 count) +{ + int err; + + err = i2c_master_send(ab->i2c_client[bank], data, count); + if (err < 0) { + dev_err(&ab->i2c_client[0]->dev, "send error: %d\n", err); + return err; + } + return 0; +} + +static int ab3550_i2c_master_recv(struct ab3550 *ab, u8 bank, u8 *data, + u8 count) +{ + int err; + + err = i2c_master_recv(ab->i2c_client[bank], data, count); + if (err < 0) { + dev_err(&ab->i2c_client[0]->dev, "receive error: %d\n", err); + return err; + } + return 0; +} + +/* + * Functionality for getting/setting register values. + */ +static int get_register_interruptible(struct ab3550 *ab, u8 bank, u8 reg, + u8 *value) +{ + int err; + + err = mutex_lock_interruptible(&ab->access_mutex); + if (err) + return err; + + err = ab3550_i2c_master_send(ab, bank, ®, 1); + if (!err) + err = ab3550_i2c_master_recv(ab, bank, value, 1); + + mutex_unlock(&ab->access_mutex); + return err; +} + +static int get_register_page_interruptible(struct ab3550 *ab, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs) +{ + int err; + + err = mutex_lock_interruptible(&ab->access_mutex); + if (err) + return err; + + err = ab3550_i2c_master_send(ab, bank, &first_reg, 1); + if (!err) + err = ab3550_i2c_master_recv(ab, bank, regvals, numregs); + + mutex_unlock(&ab->access_mutex); + return err; +} + +static int mask_and_set_register_interruptible(struct ab3550 *ab, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues) +{ + int err = 0; + + if (likely(bitmask)) { + u8 reg_bits[2] = {reg, 0}; + + err = mutex_lock_interruptible(&ab->access_mutex); + if (err) + return err; + + if (bitmask == 0xFF) /* No need to read in this case. */ + reg_bits[1] = bitvalues; + else { /* Read and modify the register value. */ + u8 bits; + + err = ab3550_i2c_master_send(ab, bank, ®, 1); + if (err) + goto unlock_and_return; + err = ab3550_i2c_master_recv(ab, bank, &bits, 1); + if (err) + goto unlock_and_return; + reg_bits[1] = ((~bitmask & bits) | + (bitmask & bitvalues)); + } + /* Write the new value. 
*/ + err = ab3550_i2c_master_send(ab, bank, reg_bits, 2); +unlock_and_return: + mutex_unlock(&ab->access_mutex); + } + return err; +} + +/* + * Read/write permission checking functions. + */ +static bool page_write_allowed(const struct ab3550_reg_ranges *ranges, + u8 first_reg, u8 last_reg) +{ + u8 i; + + if (last_reg < first_reg) + return false; + + for (i = 0; i < ranges->count; i++) { + if (first_reg < ranges->range[i].first) + break; + if ((last_reg <= ranges->range[i].last) && + (ranges->range[i].perm & AB3550_PERM_WR)) + return true; + } + return false; +} + +static bool reg_write_allowed(const struct ab3550_reg_ranges *ranges, u8 reg) +{ + return page_write_allowed(ranges, reg, reg); +} + +static bool page_read_allowed(const struct ab3550_reg_ranges *ranges, + u8 first_reg, u8 last_reg) +{ + u8 i; + + if (last_reg < first_reg) + return false; + /* Find the range (if it exists in the list) that includes first_reg. */ + for (i = 0; i < ranges->count; i++) { + if (first_reg < ranges->range[i].first) + return false; + if (first_reg <= ranges->range[i].last) + break; + } + /* Make sure that the entire range up to and including last_reg is + * readable. This may span several of the ranges in the list. + */ + while ((i < ranges->count) && + (ranges->range[i].perm & AB3550_PERM_RD)) { + if (last_reg <= ranges->range[i].last) + return true; + if ((++i >= ranges->count) || + (ranges->range[i].first != + (ranges->range[i - 1].last + 1))) { + break; + } + } + return false; +} + +static bool reg_read_allowed(const struct ab3550_reg_ranges *ranges, u8 reg) +{ + return page_read_allowed(ranges, reg, reg); +} + +/* + * The exported register access functionality. + */ +int ab3550_get_chip_id(struct device *dev) +{ + struct ab3550 *ab = dev_get_drvdata(dev->parent); + return (int)ab->chip_id; +} + +int ab3550_mask_and_set_register_interruptible(struct device *dev, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues) +{ + struct ab3550 *ab; + struct platform_device *pdev = to_platform_device(dev); + + if ((AB3550_NUM_BANKS <= bank) || + !reg_write_allowed(&ab3550_reg_ranges[pdev->id][bank], reg)) + return -EINVAL; + + ab = dev_get_drvdata(dev->parent); + return mask_and_set_register_interruptible(ab, bank, reg, + bitmask, bitvalues); +} + +int ab3550_set_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 value) +{ + return ab3550_mask_and_set_register_interruptible(dev, bank, reg, 0xFF, + value); +} + +int ab3550_get_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 *value) +{ + struct ab3550 *ab; + struct platform_device *pdev = to_platform_device(dev); + + if ((AB3550_NUM_BANKS <= bank) || + !reg_read_allowed(&ab3550_reg_ranges[pdev->id][bank], reg)) + return -EINVAL; + + ab = dev_get_drvdata(dev->parent); + return get_register_interruptible(ab, bank, reg, value); +} + +int ab3550_get_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs) +{ + struct ab3550 *ab; + struct platform_device *pdev = to_platform_device(dev); + + if ((AB3550_NUM_BANKS <= bank) || + !page_read_allowed(&ab3550_reg_ranges[pdev->id][bank], + first_reg, (first_reg + numregs - 1))) + return -EINVAL; + + ab = dev_get_drvdata(dev->parent); + return get_register_page_interruptible(ab, bank, first_reg, regvals, + numregs); +} + +int ab3550_event_registers_startup_state_get(struct device *dev, u8 *event) +{ + struct ab3550 *ab; + + ab = dev_get_drvdata(dev->parent); + if (!ab->startup_events_read) + return -EAGAIN; /* Try again later */ + + memcpy(event, 
ab->startup_events, AB3550_NUM_EVENT_REG); + return 0; +} + +int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq) +{ + struct ab3550 *ab; + struct ab3550_platform_data *plf_data; + bool val; + + ab = get_irq_chip_data(irq); + plf_data = ab->i2c_client[0]->dev.platform_data; + irq -= plf_data->irq.base; + val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0); + + return val; +} + +static struct abx500_ops ab3550_ops = { + .get_chip_id = ab3550_get_chip_id, + .get_register = ab3550_get_register_interruptible, + .set_register = ab3550_set_register_interruptible, + .get_register_page = ab3550_get_register_page_interruptible, + .set_register_page = NULL, + .mask_and_set_register = ab3550_mask_and_set_register_interruptible, + .event_registers_startup_state_get = + ab3550_event_registers_startup_state_get, + .startup_irq_enabled = ab3550_startup_irq_enabled, +}; + +static irqreturn_t ab3550_irq_handler(int irq, void *data) +{ + struct ab3550 *ab = data; + int err; + unsigned int i; + u8 e[AB3550_NUM_EVENT_REG]; + u8 *events; + unsigned long flags; + + events = (ab->startup_events_read ? e : ab->startup_events); + + err = get_register_page_interruptible(ab, AB3550_EVENT_BANK, + AB3550_EVENT_REG, events, AB3550_NUM_EVENT_REG); + if (err) + goto err_event_rd; + + if (!ab->startup_events_read) { + dev_info(&ab->i2c_client[0]->dev, + "startup events 0x%x,0x%x,0x%x,0x%x,0x%x\n", + ab->startup_events[0], ab->startup_events[1], + ab->startup_events[2], ab->startup_events[3], + ab->startup_events[4]); + ab->startup_events_read = true; + goto out; + } + + /* The two highest bits in event[4] are not used. */ + events[4] &= 0x3f; + + spin_lock_irqsave(&ab->event_lock, flags); + for (i = 0; i < AB3550_NUM_EVENT_REG; i++) + events[i] &= ~ab->event_mask[i]; + spin_unlock_irqrestore(&ab->event_lock, flags); + + for (i = 0; i < AB3550_NUM_EVENT_REG; i++) { + u8 bit; + u8 event_reg; + + dev_dbg(&ab->i2c_client[0]->dev, "IRQ Event[%d]: 0x%2x\n", + i, events[i]); + + event_reg = events[i]; + for (bit = 0; event_reg; bit++, event_reg /= 2) { + if (event_reg % 2) { + unsigned int irq; + struct ab3550_platform_data *plf_data; + + plf_data = ab->i2c_client[0]->dev.platform_data; + irq = plf_data->irq.base + (i * 8) + bit; + handle_nested_irq(irq); + } + } + } +out: + return IRQ_HANDLED; + +err_event_rd: + dev_dbg(&ab->i2c_client[0]->dev, "error reading event registers\n"); + return IRQ_HANDLED; +} + +#ifdef CONFIG_DEBUG_FS +static struct ab3550_reg_ranges debug_ranges[AB3550_NUM_BANKS] = { + { + .count = 6, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x00, + .last = 0x0e, + }, + { + .first = 0x10, + .last = 0x1a, + }, + { + .first = 0x1e, + .last = 0x4f, + }, + { + .first = 0x51, + .last = 0x63, + }, + { + .first = 0x65, + .last = 0xa3, + }, + { + .first = 0xa5, + .last = 0xa8, + }, + } + }, + { + .count = 8, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x00, + .last = 0x0e, + }, + { + .first = 0x10, + .last = 0x17, + }, + { + .first = 0x1a, + .last = 0x1c, + }, + { + .first = 0x20, + .last = 0x56, + }, + { + .first = 0x5a, + .last = 0x88, + }, + { + .first = 0x8a, + .last = 0xad, + }, + { + .first = 0xb0, + .last = 0xba, + }, + { + .first = 0xbc, + .last = 0xc3, + }, + } + }, +}; + +static int ab3550_registers_print(struct seq_file *s, void *p) +{ + struct ab3550 *ab = s->private; + int bank; + + seq_printf(s, AB3550_NAME_STRING " register values:\n"); + + for (bank = 0; bank < AB3550_NUM_BANKS; bank++) { + unsigned int i; + + seq_printf(s, " bank %d:\n", bank); + for (i = 0; 
i < debug_ranges[bank].count; i++) { + u8 reg; + + for (reg = debug_ranges[bank].range[i].first; + reg <= debug_ranges[bank].range[i].last; + reg++) { + u8 value; + + get_register_interruptible(ab, bank, reg, + &value); + seq_printf(s, " [%d/0x%02X]: 0x%02X\n", bank, + reg, value); + } + } + } + return 0; +} + +static int ab3550_registers_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab3550_registers_print, inode->i_private); +} + +static const struct file_operations ab3550_registers_fops = { + .open = ab3550_registers_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab3550_bank_print(struct seq_file *s, void *p) +{ + struct ab3550 *ab = s->private; + + seq_printf(s, "%d\n", ab->debug_bank); + return 0; +} + +static int ab3550_bank_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab3550_bank_print, inode->i_private); +} + +static ssize_t ab3550_bank_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_bank; + int err; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_bank); + if (err) + return -EINVAL; + + if (user_bank >= AB3550_NUM_BANKS) { + dev_err(&ab->i2c_client[0]->dev, + "debugfs error input > number of banks\n"); + return -EINVAL; + } + + ab->debug_bank = user_bank; + + return buf_size; +} + +static int ab3550_address_print(struct seq_file *s, void *p) +{ + struct ab3550 *ab = s->private; + + seq_printf(s, "0x%02X\n", ab->debug_address); + return 0; +} + +static int ab3550_address_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab3550_address_print, inode->i_private); +} + +static ssize_t ab3550_address_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_address; + int err; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_address); + if (err) + return -EINVAL; + if (user_address > 0xff) { + dev_err(&ab->i2c_client[0]->dev, + "debugfs error input > 0xff\n"); + return -EINVAL; + } + ab->debug_address = user_address; + return buf_size; +} + +static int ab3550_val_print(struct seq_file *s, void *p) +{ + struct ab3550 *ab = s->private; + int err; + u8 regvalue; + + err = get_register_interruptible(ab, (u8)ab->debug_bank, + (u8)ab->debug_address, ®value); + if (err) + return -EINVAL; + seq_printf(s, "0x%02X\n", regvalue); + + return 0; +} + +static int ab3550_val_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab3550_val_print, inode->i_private); +} + +static ssize_t ab3550_val_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_val; + int err; + u8 regvalue; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, 
buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_val); + if (err) + return -EINVAL; + if (user_val > 0xff) { + dev_err(&ab->i2c_client[0]->dev, + "debugfs error input > 0xff\n"); + return -EINVAL; + } + err = mask_and_set_register_interruptible( + ab, (u8)ab->debug_bank, + (u8)ab->debug_address, 0xFF, (u8)user_val); + if (err) + return -EINVAL; + + get_register_interruptible(ab, (u8)ab->debug_bank, + (u8)ab->debug_address, ®value); + if (err) + return -EINVAL; + + return buf_size; +} + +static const struct file_operations ab3550_bank_fops = { + .open = ab3550_bank_open, + .write = ab3550_bank_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct file_operations ab3550_address_fops = { + .open = ab3550_address_open, + .write = ab3550_address_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct file_operations ab3550_val_fops = { + .open = ab3550_val_open, + .write = ab3550_val_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static struct dentry *ab3550_dir; +static struct dentry *ab3550_reg_file; +static struct dentry *ab3550_bank_file; +static struct dentry *ab3550_address_file; +static struct dentry *ab3550_val_file; + +static inline void ab3550_setup_debugfs(struct ab3550 *ab) +{ + ab->debug_bank = 0; + ab->debug_address = 0x00; + + ab3550_dir = debugfs_create_dir(AB3550_NAME_STRING, NULL); + if (!ab3550_dir) + goto exit_no_debugfs; + + ab3550_reg_file = debugfs_create_file("all-registers", + S_IRUGO, ab3550_dir, ab, &ab3550_registers_fops); + if (!ab3550_reg_file) + goto exit_destroy_dir; + + ab3550_bank_file = debugfs_create_file("register-bank", + (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops); + if (!ab3550_bank_file) + goto exit_destroy_reg; + + ab3550_address_file = debugfs_create_file("register-address", + (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops); + if (!ab3550_address_file) + goto exit_destroy_bank; + + ab3550_val_file = debugfs_create_file("register-value", + (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops); + if (!ab3550_val_file) + goto exit_destroy_address; + + return; + +exit_destroy_address: + debugfs_remove(ab3550_address_file); +exit_destroy_bank: + debugfs_remove(ab3550_bank_file); +exit_destroy_reg: + debugfs_remove(ab3550_reg_file); +exit_destroy_dir: + debugfs_remove(ab3550_dir); +exit_no_debugfs: + dev_err(&ab->i2c_client[0]->dev, "failed to create debugfs entries.\n"); + return; +} + +static inline void ab3550_remove_debugfs(void) +{ + debugfs_remove(ab3550_val_file); + debugfs_remove(ab3550_address_file); + debugfs_remove(ab3550_bank_file); + debugfs_remove(ab3550_reg_file); + debugfs_remove(ab3550_dir); +} + +#else /* !CONFIG_DEBUG_FS */ +static inline void ab3550_setup_debugfs(struct ab3550 *ab) +{ +} +static inline void ab3550_remove_debugfs(void) +{ +} +#endif + +/* + * Basic set-up, datastructure creation/destruction and I2C interface. + * This sets up a default config in the AB3550 chip so that it + * will work as expected. 
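+ * (The defaults come from platform data: ab3550_setup() below walks the
+ * abx500_init_settings array writing each bank/register/value triple, and
+ * mirrors writes to the interrupt mask registers AB3550_IMR1..AB3550_IMR5
+ * into the driver's shadow event_mask.)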
+ */ +static int __init ab3550_setup(struct ab3550 *ab) +{ + int err = 0; + int i; + struct ab3550_platform_data *plf_data; + struct abx500_init_settings *settings; + + plf_data = ab->i2c_client[0]->dev.platform_data; + settings = plf_data->init_settings; + + for (i = 0; i < plf_data->init_settings_sz; i++) { + err = mask_and_set_register_interruptible(ab, + settings[i].bank, + settings[i].reg, + 0xFF, settings[i].setting); + if (err) + goto exit_no_setup; + + /* If event mask register update the event mask in ab3550 */ + if ((settings[i].bank == 0) && + (AB3550_IMR1 <= settings[i].reg) && + (settings[i].reg <= AB3550_IMR5)) { + ab->event_mask[settings[i].reg - AB3550_IMR1] = + settings[i].setting; + } + } +exit_no_setup: + return err; +} + +static void ab3550_mask_work(struct work_struct *work) +{ + struct ab3550 *ab = container_of(work, struct ab3550, mask_work); + int i; + unsigned long flags; + u8 mask[AB3550_NUM_EVENT_REG]; + + spin_lock_irqsave(&ab->event_lock, flags); + for (i = 0; i < AB3550_NUM_EVENT_REG; i++) + mask[i] = ab->event_mask[i]; + spin_unlock_irqrestore(&ab->event_lock, flags); + + for (i = 0; i < AB3550_NUM_EVENT_REG; i++) { + int err; + + err = mask_and_set_register_interruptible(ab, 0, + (AB3550_IMR1 + i), ~0, mask[i]); + if (err) + dev_err(&ab->i2c_client[0]->dev, + "ab3550_mask_work failed 0x%x,0x%x\n", + (AB3550_IMR1 + i), mask[i]); + } +} + +static void ab3550_mask(unsigned int irq) +{ + unsigned long flags; + struct ab3550 *ab; + struct ab3550_platform_data *plf_data; + + ab = get_irq_chip_data(irq); + plf_data = ab->i2c_client[0]->dev.platform_data; + irq -= plf_data->irq.base; + + spin_lock_irqsave(&ab->event_lock, flags); + ab->event_mask[irq / 8] |= BIT(irq % 8); + spin_unlock_irqrestore(&ab->event_lock, flags); + + schedule_work(&ab->mask_work); +} + +static void ab3550_unmask(unsigned int irq) +{ + unsigned long flags; + struct ab3550 *ab; + struct ab3550_platform_data *plf_data; + + ab = get_irq_chip_data(irq); + plf_data = ab->i2c_client[0]->dev.platform_data; + irq -= plf_data->irq.base; + + spin_lock_irqsave(&ab->event_lock, flags); + ab->event_mask[irq / 8] &= ~BIT(irq % 8); + spin_unlock_irqrestore(&ab->event_lock, flags); + + schedule_work(&ab->mask_work); +} + +static void noop(unsigned int irq) +{ +} + +static struct irq_chip ab3550_irq_chip = { + .name = "ab3550-core", /* Keep the same name as the request */ + .startup = NULL, /* defaults to enable */ + .shutdown = NULL, /* defaults to disable */ + .enable = NULL, /* defaults to unmask */ + .disable = ab3550_mask, /* No default to mask in chip.c */ + .ack = noop, + .mask = ab3550_mask, + .unmask = ab3550_unmask, + .end = NULL, +}; + +struct ab_family_id { + u8 id; + char *name; +}; + +static const struct ab_family_id ids[] __initdata = { + /* AB3550 */ + { + .id = AB3550_P1A, + .name = "P1A" + }, + /* Terminator */ + { + .id = 0x00, + } +}; + +static int __init ab3550_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ab3550 *ab; + struct ab3550_platform_data *ab3550_plf_data = + client->dev.platform_data; + int err; + int i; + int num_i2c_clients = 0; + + ab = kzalloc(sizeof(struct ab3550), GFP_KERNEL); + if (!ab) { + dev_err(&client->dev, + "could not allocate " AB3550_NAME_STRING " device\n"); + return -ENOMEM; + } + + /* Initialize data structure */ + mutex_init(&ab->access_mutex); + spin_lock_init(&ab->event_lock); + ab->i2c_client[0] = client; + + i2c_set_clientdata(client, ab); + + /* Read chip ID register */ + err = get_register_interruptible(ab, 0, 
AB3550_CID_REG, &ab->chip_id); + if (err) { + dev_err(&client->dev, "could not communicate with the analog " + "baseband chip\n"); + goto exit_no_detect; + } + + for (i = 0; ids[i].id != 0x0; i++) { + if (ids[i].id == ab->chip_id) { + snprintf(&ab->chip_name[0], sizeof(ab->chip_name) - 1, + AB3550_ID_FORMAT_STRING, ids[i].name); + break; + } + } + + if (ids[i].id == 0x0) { + dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n", + ab->chip_id); + dev_err(&client->dev, "driver not started!\n"); + goto exit_no_detect; + } + + dev_info(&client->dev, "detected AB chip: %s\n", &ab->chip_name[0]); + + /* Attach other dummy I2C clients. */ + while (++num_i2c_clients < AB3550_NUM_BANKS) { + ab->i2c_client[num_i2c_clients] = + i2c_new_dummy(client->adapter, + (client->addr + num_i2c_clients)); + if (!ab->i2c_client[num_i2c_clients]) { + err = -ENOMEM; + goto exit_no_dummy_client; + } + strlcpy(ab->i2c_client[num_i2c_clients]->name, id->name, + sizeof(ab->i2c_client[num_i2c_clients]->name)); + } + + err = ab3550_setup(ab); + if (err) + goto exit_no_setup; + + INIT_WORK(&ab->mask_work, ab3550_mask_work); + + for (i = 0; i < ab3550_plf_data->irq.count; i++) { + unsigned int irq; + + irq = ab3550_plf_data->irq.base + i; + set_irq_chip_data(irq, ab); + set_irq_chip_and_handler(irq, &ab3550_irq_chip, + handle_simple_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler, + IRQF_ONESHOT, "ab3550-core", ab); + /* This real unpredictable IRQ is of course sampled for entropy */ + rand_initialize_irq(client->irq); + + if (err) + goto exit_no_irq; + + err = abx500_register_ops(&client->dev, &ab3550_ops); + if (err) + goto exit_no_ops; + + /* Set up and register the platform devices. */ + for (i = 0; i < AB3550_NUM_DEVICES; i++) { + ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i]; + ab3550_devs[i].data_size = ab3550_plf_data->dev_data_sz[i]; + } + + err = mfd_add_devices(&client->dev, 0, ab3550_devs, + ARRAY_SIZE(ab3550_devs), NULL, + ab3550_plf_data->irq.base); + + ab3550_setup_debugfs(ab); + + return 0; + +exit_no_ops: +exit_no_irq: +exit_no_setup: +exit_no_dummy_client: + /* Unregister the dummy i2c clients. 
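+	 * (The predecrement walks back over only the dummy clients created
+	 * so far; the primary client is owned by the I2C core, and failures
+	 * before any dummy exists jump to exit_no_detect, past this loop.)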
*/ + while (--num_i2c_clients) + i2c_unregister_device(ab->i2c_client[num_i2c_clients]); +exit_no_detect: + kfree(ab); + return err; +} + +static int __exit ab3550_remove(struct i2c_client *client) +{ + struct ab3550 *ab = i2c_get_clientdata(client); + int num_i2c_clients = AB3550_NUM_BANKS; + + mfd_remove_devices(&client->dev); + ab3550_remove_debugfs(); + + while (--num_i2c_clients) + i2c_unregister_device(ab->i2c_client[num_i2c_clients]); + + /* + * At this point, all subscribers should have unregistered + * their notifiers so deactivate IRQ + */ + free_irq(client->irq, ab); + i2c_set_clientdata(client, NULL); + kfree(ab); + return 0; +} + +static const struct i2c_device_id ab3550_id[] = { + {AB3550_NAME_STRING, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, ab3550_id); + +static struct i2c_driver ab3550_driver = { + .driver = { + .name = AB3550_NAME_STRING, + .owner = THIS_MODULE, + }, + .id_table = ab3550_id, + .probe = ab3550_probe, + .remove = __exit_p(ab3550_remove), +}; + +static int __init ab3550_i2c_init(void) +{ + return i2c_add_driver(&ab3550_driver); +} + +static void __exit ab3550_i2c_exit(void) +{ + i2c_del_driver(&ab3550_driver); +} + +subsys_initcall(ab3550_i2c_init); +module_exit(ab3550_i2c_exit); + +MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>"); +MODULE_DESCRIPTION("AB3550 core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/ab4500-core.c b/drivers/mfd/ab4500-core.c deleted file mode 100644 index c275daa3ab1a..000000000000 --- a/drivers/mfd/ab4500-core.c +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (C) 2009 ST-Ericsson - * - * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com> - * - * This program is free software; you can redistribute it - * and/or modify it under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation. - * - * AB4500 is a companion power management chip used with U8500. - * On this platform, this is interfaced with SSP0 controller - * which is a ARM primecell pl022. - * - * At the moment the module just exports read/write features. - * Interrupt management to be added - TODO. 
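
The ab3550 probe path above also shows the now-standard shape for interrupt controllers that live behind a slow bus: request_threaded_irq() with a NULL primary handler so the handler runs in a thread and may sleep on I2C, IRQF_ONESHOT so the line stays masked until the thread returns, and handle_nested_irq() to fan each latched event bit out to the virtual IRQ a subdriver requested. A condensed sketch, assuming a single 8-bit event register and a hypothetical EXAMPLE_IRQ_BASE:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#define EXAMPLE_IRQ_BASE	200	/* hypothetical virtual-IRQ base */

static irqreturn_t example_irq_thread(int irq, void *data)
{
	u8 events = 0;	/* a real handler reads the latch over I2C here */
	int bit;

	for (bit = 0; bit < 8; bit++)
		if (events & BIT(bit))
			handle_nested_irq(EXAMPLE_IRQ_BASE + bit);

	return IRQ_HANDLED;
}

static int example_request(int hwirq, void *chip)
{
	return request_threaded_irq(hwirq, NULL, example_irq_thread,
				    IRQF_ONESHOT, "example", chip);
}
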
- */ -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/spi/spi.h> -#include <linux/mfd/ab4500.h> - -/* just required if probe fails, we need to - * unregister the device - */ -static struct spi_driver ab4500_driver; - -/* - * This funtion writes to any AB4500 registers using - * SPI protocol & before it writes it packs the data - * in the below 24 bit frame format - * - * *|------------------------------------| - * *| 23|22...18|17.......10|9|8|7......0| - * *| r/w bank adr data | - * * ------------------------------------ - * - * This function shouldn't be called from interrupt - * context - */ -int ab4500_write(struct ab4500 *ab4500, unsigned char block, - unsigned long addr, unsigned char data) -{ - struct spi_transfer xfer; - struct spi_message msg; - int err; - unsigned long spi_data = - block << 18 | addr << 10 | data; - - mutex_lock(&ab4500->lock); - ab4500->tx_buf[0] = spi_data; - ab4500->rx_buf[0] = 0; - - xfer.tx_buf = ab4500->tx_buf; - xfer.rx_buf = NULL; - xfer.len = sizeof(unsigned long); - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); - - err = spi_sync(ab4500->spi, &msg); - mutex_unlock(&ab4500->lock); - - return err; -} -EXPORT_SYMBOL(ab4500_write); - -int ab4500_read(struct ab4500 *ab4500, unsigned char block, - unsigned long addr) -{ - struct spi_transfer xfer; - struct spi_message msg; - unsigned long spi_data = - 1 << 23 | block << 18 | addr << 10; - - mutex_lock(&ab4500->lock); - ab4500->tx_buf[0] = spi_data; - ab4500->rx_buf[0] = 0; - - xfer.tx_buf = ab4500->tx_buf; - xfer.rx_buf = ab4500->rx_buf; - xfer.len = sizeof(unsigned long); - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); - - spi_sync(ab4500->spi, &msg); - mutex_unlock(&ab4500->lock); - - return ab4500->rx_buf[0]; -} -EXPORT_SYMBOL(ab4500_read); - -/* ref: ab3100 core */ -#define AB4500_DEVICE(devname, devid) \ -static struct platform_device ab4500_##devname##_device = { \ - .name = devid, \ - .id = -1, \ -} - -/* list of childern devices of ab4500 - all are - * not populated here - TODO - */ -AB4500_DEVICE(charger, "ab4500-charger"); -AB4500_DEVICE(audio, "ab4500-audio"); -AB4500_DEVICE(usb, "ab4500-usb"); -AB4500_DEVICE(tvout, "ab4500-tvout"); -AB4500_DEVICE(sim, "ab4500-sim"); -AB4500_DEVICE(gpadc, "ab4500-gpadc"); -AB4500_DEVICE(clkmgt, "ab4500-clkmgt"); -AB4500_DEVICE(misc, "ab4500-misc"); - -static struct platform_device *ab4500_platform_devs[] = { - &ab4500_charger_device, - &ab4500_audio_device, - &ab4500_usb_device, - &ab4500_tvout_device, - &ab4500_sim_device, - &ab4500_gpadc_device, - &ab4500_clkmgt_device, - &ab4500_misc_device, -}; - -static int __init ab4500_probe(struct spi_device *spi) -{ - struct ab4500 *ab4500; - unsigned char revision; - int err = 0; - int i; - - ab4500 = kzalloc(sizeof *ab4500, GFP_KERNEL); - if (!ab4500) { - dev_err(&spi->dev, "could not allocate AB4500\n"); - err = -ENOMEM; - goto not_detect; - } - - ab4500->spi = spi; - spi_set_drvdata(spi, ab4500); - - mutex_init(&ab4500->lock); - - /* read the revision register */ - revision = ab4500_read(ab4500, AB4500_MISC, AB4500_REV_REG); - - /* revision id 0x0 is for early drop, 0x10 is for cut1.0 */ - if (revision == 0x0 || revision == 0x10) - dev_info(&spi->dev, "Detected chip: %s, revision = %x\n", - ab4500_driver.driver.name, revision); - else { - dev_err(&spi->dev, "unknown chip: 0x%x\n", revision); - goto not_detect; - } - - for (i = 0; i < ARRAY_SIZE(ab4500_platform_devs); i++) { - 
ab4500_platform_devs[i]->dev.parent = - &spi->dev; - platform_set_drvdata(ab4500_platform_devs[i], ab4500); - } - - /* register the ab4500 platform devices */ - platform_add_devices(ab4500_platform_devs, - ARRAY_SIZE(ab4500_platform_devs)); - - return err; - - not_detect: - spi_unregister_driver(&ab4500_driver); - kfree(ab4500); - return err; -} - -static int __devexit ab4500_remove(struct spi_device *spi) -{ - struct ab4500 *ab4500 = - spi_get_drvdata(spi); - - kfree(ab4500); - - return 0; -} - -static struct spi_driver ab4500_driver = { - .driver = { - .name = "ab4500", - .owner = THIS_MODULE, - }, - .probe = ab4500_probe, - .remove = __devexit_p(ab4500_remove) -}; - -static int __devinit ab4500_init(void) -{ - return spi_register_driver(&ab4500_driver); -} - -static void __exit ab4500_exit(void) -{ - spi_unregister_driver(&ab4500_driver); -} - -subsys_initcall(ab4500_init); -module_exit(ab4500_exit); - -MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com"); -MODULE_DESCRIPTION("AB4500 core driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c new file mode 100644 index 000000000000..f3d26fa9c34d --- /dev/null +++ b/drivers/mfd/ab8500-core.c @@ -0,0 +1,444 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> + * Author: Rabin Vincent <rabin.vincent@stericsson.com> + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/mfd/core.h> +#include <linux/mfd/ab8500.h> + +/* + * Interrupt register offsets + * Bank : 0x0E + */ +#define AB8500_IT_SOURCE1_REG 0x0E00 +#define AB8500_IT_SOURCE2_REG 0x0E01 +#define AB8500_IT_SOURCE3_REG 0x0E02 +#define AB8500_IT_SOURCE4_REG 0x0E03 +#define AB8500_IT_SOURCE5_REG 0x0E04 +#define AB8500_IT_SOURCE6_REG 0x0E05 +#define AB8500_IT_SOURCE7_REG 0x0E06 +#define AB8500_IT_SOURCE8_REG 0x0E07 +#define AB8500_IT_SOURCE19_REG 0x0E12 +#define AB8500_IT_SOURCE20_REG 0x0E13 +#define AB8500_IT_SOURCE21_REG 0x0E14 +#define AB8500_IT_SOURCE22_REG 0x0E15 +#define AB8500_IT_SOURCE23_REG 0x0E16 +#define AB8500_IT_SOURCE24_REG 0x0E17 + +/* + * latch registers + */ +#define AB8500_IT_LATCH1_REG 0x0E20 +#define AB8500_IT_LATCH2_REG 0x0E21 +#define AB8500_IT_LATCH3_REG 0x0E22 +#define AB8500_IT_LATCH4_REG 0x0E23 +#define AB8500_IT_LATCH5_REG 0x0E24 +#define AB8500_IT_LATCH6_REG 0x0E25 +#define AB8500_IT_LATCH7_REG 0x0E26 +#define AB8500_IT_LATCH8_REG 0x0E27 +#define AB8500_IT_LATCH9_REG 0x0E28 +#define AB8500_IT_LATCH10_REG 0x0E29 +#define AB8500_IT_LATCH19_REG 0x0E32 +#define AB8500_IT_LATCH20_REG 0x0E33 +#define AB8500_IT_LATCH21_REG 0x0E34 +#define AB8500_IT_LATCH22_REG 0x0E35 +#define AB8500_IT_LATCH23_REG 0x0E36 +#define AB8500_IT_LATCH24_REG 0x0E37 + +/* + * mask registers + */ + +#define AB8500_IT_MASK1_REG 0x0E40 +#define AB8500_IT_MASK2_REG 0x0E41 +#define AB8500_IT_MASK3_REG 0x0E42 +#define AB8500_IT_MASK4_REG 0x0E43 +#define AB8500_IT_MASK5_REG 0x0E44 +#define AB8500_IT_MASK6_REG 0x0E45 +#define AB8500_IT_MASK7_REG 0x0E46 +#define AB8500_IT_MASK8_REG 0x0E47 +#define AB8500_IT_MASK9_REG 0x0E48 +#define AB8500_IT_MASK10_REG 0x0E49 +#define AB8500_IT_MASK11_REG 0x0E4A +#define AB8500_IT_MASK12_REG 0x0E4B +#define AB8500_IT_MASK13_REG 0x0E4C +#define AB8500_IT_MASK14_REG 0x0E4D +#define AB8500_IT_MASK15_REG 0x0E4E +#define 
AB8500_IT_MASK16_REG 0x0E4F +#define AB8500_IT_MASK17_REG 0x0E50 +#define AB8500_IT_MASK18_REG 0x0E51 +#define AB8500_IT_MASK19_REG 0x0E52 +#define AB8500_IT_MASK20_REG 0x0E53 +#define AB8500_IT_MASK21_REG 0x0E54 +#define AB8500_IT_MASK22_REG 0x0E55 +#define AB8500_IT_MASK23_REG 0x0E56 +#define AB8500_IT_MASK24_REG 0x0E57 + +#define AB8500_REV_REG 0x1080 + +/* + * Map interrupt numbers to the LATCH and MASK register offsets, Interrupt + * numbers are indexed into this array with (num / 8). + * + * This is one off from the register names, i.e. AB8500_IT_MASK1_REG is at + * offset 0. + */ +static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = { + 0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21, +}; + +static int __ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data) +{ + int ret; + + dev_vdbg(ab8500->dev, "wr: addr %#x <= %#x\n", addr, data); + + ret = ab8500->write(ab8500, addr, data); + if (ret < 0) + dev_err(ab8500->dev, "failed to write reg %#x: %d\n", + addr, ret); + + return ret; +} + +/** + * ab8500_write() - write an AB8500 register + * @ab8500: device to write to + * @addr: address of the register + * @data: value to write + */ +int ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data) +{ + int ret; + + mutex_lock(&ab8500->lock); + ret = __ab8500_write(ab8500, addr, data); + mutex_unlock(&ab8500->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(ab8500_write); + +static int __ab8500_read(struct ab8500 *ab8500, u16 addr) +{ + int ret; + + ret = ab8500->read(ab8500, addr); + if (ret < 0) + dev_err(ab8500->dev, "failed to read reg %#x: %d\n", + addr, ret); + + dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); + + return ret; +} + +/** + * ab8500_read() - read an AB8500 register + * @ab8500: device to read from + * @addr: address of the register + */ +int ab8500_read(struct ab8500 *ab8500, u16 addr) +{ + int ret; + + mutex_lock(&ab8500->lock); + ret = __ab8500_read(ab8500, addr); + mutex_unlock(&ab8500->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(ab8500_read); + +/** + * ab8500_set_bits() - set a bitfield in an AB8500 register + * @ab8500: device to read from + * @addr: address of the register + * @mask: mask of the bitfield to modify + * @data: value to set to the bitfield + */ +int ab8500_set_bits(struct ab8500 *ab8500, u16 addr, u8 mask, u8 data) +{ + int ret; + + mutex_lock(&ab8500->lock); + + ret = __ab8500_read(ab8500, addr); + if (ret < 0) + goto out; + + ret &= ~mask; + ret |= data; + + ret = __ab8500_write(ab8500, addr, ret); + +out: + mutex_unlock(&ab8500->lock); + return ret; +} +EXPORT_SYMBOL_GPL(ab8500_set_bits); + +static void ab8500_irq_lock(unsigned int irq) +{ + struct ab8500 *ab8500 = get_irq_chip_data(irq); + + mutex_lock(&ab8500->irq_lock); +} + +static void ab8500_irq_sync_unlock(unsigned int irq) +{ + struct ab8500 *ab8500 = get_irq_chip_data(irq); + int i; + + for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { + u8 old = ab8500->oldmask[i]; + u8 new = ab8500->mask[i]; + int reg; + + if (new == old) + continue; + + ab8500->oldmask[i] = new; + + reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i]; + ab8500_write(ab8500, reg, new); + } + + mutex_unlock(&ab8500->irq_lock); +} + +static void ab8500_irq_mask(unsigned int irq) +{ + struct ab8500 *ab8500 = get_irq_chip_data(irq); + int offset = irq - ab8500->irq_base; + int index = offset / 8; + int mask = 1 << (offset % 8); + + ab8500->mask[index] |= mask; +} + +static void ab8500_irq_unmask(unsigned int irq) +{ + struct ab8500 *ab8500 = get_irq_chip_data(irq); + int offset = irq - ab8500->irq_base; + int index = 
offset / 8; + int mask = 1 << (offset % 8); + + ab8500->mask[index] &= ~mask; +} + +static struct irq_chip ab8500_irq_chip = { + .name = "ab8500", + .bus_lock = ab8500_irq_lock, + .bus_sync_unlock = ab8500_irq_sync_unlock, + .mask = ab8500_irq_mask, + .unmask = ab8500_irq_unmask, +}; + +static irqreturn_t ab8500_irq(int irq, void *dev) +{ + struct ab8500 *ab8500 = dev; + int i; + + dev_vdbg(ab8500->dev, "interrupt\n"); + + for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { + int regoffset = ab8500_irq_regoffset[i]; + int status; + + status = ab8500_read(ab8500, AB8500_IT_LATCH1_REG + regoffset); + if (status <= 0) + continue; + + do { + int bit = __ffs(status); + int line = i * 8 + bit; + + handle_nested_irq(ab8500->irq_base + line); + status &= ~(1 << bit); + } while (status); + } + + return IRQ_HANDLED; +} + +static int ab8500_irq_init(struct ab8500 *ab8500) +{ + int base = ab8500->irq_base; + int irq; + + for (irq = base; irq < base + AB8500_NR_IRQS; irq++) { + set_irq_chip_data(irq, ab8500); + set_irq_chip_and_handler(irq, &ab8500_irq_chip, + handle_simple_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + return 0; +} + +static void ab8500_irq_remove(struct ab8500 *ab8500) +{ + int base = ab8500->irq_base; + int irq; + + for (irq = base; irq < base + AB8500_NR_IRQS; irq++) { +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#endif + set_irq_chip_and_handler(irq, NULL, NULL); + set_irq_chip_data(irq, NULL); + } +} + +static struct resource ab8500_gpadc_resources[] = { + { + .name = "HW_CONV_END", + .start = AB8500_INT_GP_HW_ADC_CONV_END, + .end = AB8500_INT_GP_HW_ADC_CONV_END, + .flags = IORESOURCE_IRQ, + }, + { + .name = "SW_CONV_END", + .start = AB8500_INT_GP_SW_ADC_CONV_END, + .end = AB8500_INT_GP_SW_ADC_CONV_END, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource ab8500_rtc_resources[] = { + { + .name = "60S", + .start = AB8500_INT_RTC_60S, + .end = AB8500_INT_RTC_60S, + .flags = IORESOURCE_IRQ, + }, + { + .name = "ALARM", + .start = AB8500_INT_RTC_ALARM, + .end = AB8500_INT_RTC_ALARM, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell ab8500_devs[] = { + { + .name = "ab8500-gpadc", + .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), + .resources = ab8500_gpadc_resources, + }, + { + .name = "ab8500-rtc", + .num_resources = ARRAY_SIZE(ab8500_rtc_resources), + .resources = ab8500_rtc_resources, + }, + { .name = "ab8500-charger", }, + { .name = "ab8500-audio", }, + { .name = "ab8500-usb", }, + { .name = "ab8500-pwm", }, +}; + +int __devinit ab8500_init(struct ab8500 *ab8500) +{ + struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev); + int ret; + int i; + + if (plat) + ab8500->irq_base = plat->irq_base; + + mutex_init(&ab8500->lock); + mutex_init(&ab8500->irq_lock); + + ret = ab8500_read(ab8500, AB8500_REV_REG); + if (ret < 0) + return ret; + + /* + * 0x0 - Early Drop + * 0x10 - Cut 1.0 + * 0x11 - Cut 1.1 + */ + if (ret == 0x0 || ret == 0x10 || ret == 0x11) { + ab8500->revision = ret; + dev_info(ab8500->dev, "detected chip, revision: %#x\n", ret); + } else { + dev_err(ab8500->dev, "unknown chip, revision: %#x\n", ret); + return -EINVAL; + } + + if (plat && plat->init) + plat->init(ab8500); + + /* Clear and mask all interrupts */ + for (i = 0; i < 10; i++) { + ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i); + ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff); + } + + for (i = 18; i < 24; i++) { + ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i); + ab8500_write(ab8500, 
AB8500_IT_MASK1_REG + i, 0xff); + } + + for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) + ab8500->mask[i] = ab8500->oldmask[i] = 0xff; + + if (ab8500->irq_base) { + ret = ab8500_irq_init(ab8500); + if (ret) + return ret; + + ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq, + IRQF_ONESHOT, "ab8500", ab8500); + if (ret) + goto out_removeirq; + } + + ret = mfd_add_devices(ab8500->dev, -1, ab8500_devs, + ARRAY_SIZE(ab8500_devs), NULL, + ab8500->irq_base); + if (ret) + goto out_freeirq; + + return ret; + +out_freeirq: + if (ab8500->irq_base) { + free_irq(ab8500->irq, ab8500); +out_removeirq: + ab8500_irq_remove(ab8500); + } + return ret; +} + +int __devexit ab8500_exit(struct ab8500 *ab8500) +{ + mfd_remove_devices(ab8500->dev); + if (ab8500->irq_base) { + free_irq(ab8500->irq, ab8500); + ab8500_irq_remove(ab8500); + } + + return 0; +} + +MODULE_AUTHOR("Srinidhi Kasagar, Rabin Vincent"); +MODULE_DESCRIPTION("AB8500 MFD core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c new file mode 100644 index 000000000000..b81d4f768ef6 --- /dev/null +++ b/drivers/mfd/ab8500-spi.c @@ -0,0 +1,133 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/mfd/ab8500.h> + +/* + * This function writes to any AB8500 register over the + * SPI protocol; before writing, it packs the data + * into the 24 bit frame format below + * + * *|------------------------------------| + * *| 23|22...18|17.......10|9|8|7......0| + * *| r/w bank adr data | + * * ------------------------------------ + * + * This function shouldn't be called from interrupt + * context + */ +static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data) +{ + struct spi_device *spi = container_of(ab8500->dev, struct spi_device, + dev); + unsigned long spi_data = addr << 10 | data; + struct spi_transfer xfer; + struct spi_message msg; + + ab8500->tx_buf[0] = spi_data; + ab8500->rx_buf[0] = 0; + + xfer.tx_buf = ab8500->tx_buf; + xfer.rx_buf = NULL; + xfer.len = sizeof(unsigned long); + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + return spi_sync(spi, &msg); +} + +static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr) +{ + struct spi_device *spi = container_of(ab8500->dev, struct spi_device, + dev); + unsigned long spi_data = 1 << 23 | addr << 10; + struct spi_transfer xfer; + struct spi_message msg; + int ret; + + ab8500->tx_buf[0] = spi_data; + ab8500->rx_buf[0] = 0; + + xfer.tx_buf = ab8500->tx_buf; + xfer.rx_buf = ab8500->rx_buf; + xfer.len = sizeof(unsigned long); + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + ret = spi_sync(spi, &msg); + if (!ret) + ret = ab8500->rx_buf[0]; + + return ret; +} + +static int __devinit ab8500_spi_probe(struct spi_device *spi) +{ + struct ab8500 *ab8500; + int ret; + + ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL); + if (!ab8500) + return -ENOMEM; + + ab8500->dev = &spi->dev; + ab8500->irq = spi->irq; + + ab8500->read = ab8500_spi_read; + ab8500->write = ab8500_spi_write; + + spi_set_drvdata(spi, ab8500); + + ret = ab8500_init(ab8500); + if (ret) + kfree(ab8500); + + return ret; +} + +static int __devexit ab8500_spi_remove(struct spi_device *spi) +{ + struct ab8500 *ab8500 = spi_get_drvdata(spi); + + ab8500_exit(ab8500); + 
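The 24-bit frame described in the ab8500-spi.c comment above folds the five-bit bank into the 16-bit register address: with addr = bank << 8 | reg, the expression addr << 10 equals bank << 18 | reg << 10, which is why AB8500_REV_REG is 0x1080 (bank 0x10, register 0x80). A small sketch of the packing, with an illustrative helper name not taken from the driver:

static inline u32 abx_spi_frame(bool read, u8 bank, u8 reg, u8 data)
{
	u32 word = (u32)bank << 18 | (u32)reg << 10 | data;

	if (read)
		word |= 1 << 23;	/* bit 23 set selects a read */
	return word;
}

/* abx_spi_frame(true, 0x10, 0x80, 0) == 0xc20000: a read of the revision register */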
kfree(ab8500); + + return 0; +} + +static struct spi_driver ab8500_spi_driver = { + .driver = { + .name = "ab8500", + .owner = THIS_MODULE, + }, + .probe = ab8500_spi_probe, + .remove = __devexit_p(ab8500_spi_remove) +}; + +static int __init ab8500_spi_init(void) +{ + return spi_register_driver(&ab8500_spi_driver); +} +subsys_initcall(ab8500_spi_init); + +static void __exit ab8500_spi_exit(void) +{ + spi_unregister_driver(&ab8500_spi_driver); +} +module_exit(ab8500_spi_exit); + +MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>"); +MODULE_DESCRIPTION("AB8500 SPI"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c new file mode 100644 index 000000000000..3b3b97ec32a7 --- /dev/null +++ b/drivers/mfd/abx500-core.c @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2007-2010 ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + * Register access functions for the ABX500 Mixed Signal IC family. + * Author: Mattias Wallin <mattias.wallin@stericsson.com> + */ + +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/mfd/abx500.h> + +static LIST_HEAD(abx500_list); + +struct abx500_device_entry { + struct list_head list; + struct abx500_ops ops; + struct device *dev; +}; + +static void lookup_ops(struct device *dev, struct abx500_ops **ops) +{ + struct abx500_device_entry *dev_entry; + + *ops = NULL; + list_for_each_entry(dev_entry, &abx500_list, list) { + if (dev_entry->dev == dev) { + *ops = &dev_entry->ops; + return; + } + } +} + +int abx500_register_ops(struct device *dev, struct abx500_ops *ops) +{ + struct abx500_device_entry *dev_entry; + + dev_entry = kzalloc(sizeof(struct abx500_device_entry), GFP_KERNEL); + if (!dev_entry) { + dev_err(dev, "register_ops kzalloc failed\n"); + return -ENOMEM; + } + dev_entry->dev = dev; + memcpy(&dev_entry->ops, ops, sizeof(struct abx500_ops)); + + list_add_tail(&dev_entry->list, &abx500_list); + return 0; +} +EXPORT_SYMBOL(abx500_register_ops); + +void abx500_remove_ops(struct device *dev) +{ + struct abx500_device_entry *dev_entry, *tmp; + + list_for_each_entry_safe(dev_entry, tmp, &abx500_list, list) + { + if (dev_entry->dev == dev) { + list_del(&dev_entry->list); + kfree(dev_entry); + } + } +} +EXPORT_SYMBOL(abx500_remove_ops); + +int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 value) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->set_register != NULL)) + return ops->set_register(dev, bank, reg, value); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_set_register_interruptible); + +int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 *value) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->get_register != NULL)) + return ops->get_register(dev, bank, reg, value); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_get_register_interruptible); + +int abx500_get_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->get_register_page != NULL)) + return ops->get_register_page(dev, bank, + first_reg, regvals, numregs); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_get_register_page_interruptible); + +int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues) +{ + struct abx500_ops *ops; + + 
lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->mask_and_set_register != NULL)) + return ops->mask_and_set_register(dev, bank, + reg, bitmask, bitvalues); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_mask_and_set_register_interruptible); + +int abx500_get_chip_id(struct device *dev) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->get_chip_id != NULL)) + return ops->get_chip_id(dev); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_get_chip_id); + +int abx500_event_registers_startup_state_get(struct device *dev, u8 *event) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->event_registers_startup_state_get != NULL)) + return ops->event_registers_startup_state_get(dev, event); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_event_registers_startup_state_get); + +int abx500_startup_irq_enabled(struct device *dev, unsigned int irq) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->startup_irq_enabled != NULL)) + return ops->startup_irq_enabled(dev, irq); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_startup_irq_enabled); + +MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>"); +MODULE_DESCRIPTION("ABX500 core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c index 67181b147ab3..3ad915d0589c 100644 --- a/drivers/mfd/da903x.c +++ b/drivers/mfd/da903x.c @@ -544,6 +544,7 @@ static int __devexit da903x_remove(struct i2c_client *client) struct da903x_chip *chip = i2c_get_clientdata(client); da903x_remove_subdevs(chip); + i2c_set_clientdata(client, NULL); kfree(chip); return 0; } diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c new file mode 100644 index 000000000000..9ed630799acc --- /dev/null +++ b/drivers/mfd/janz-cmodio.c @@ -0,0 +1,304 @@ +/* + * Janz CMOD-IO MODULbus Carrier Board PCI Driver + * + * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu> + * + * Lots of inspiration and code was copied from drivers/mfd/sm501.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
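The abx500-core accessors above dispatch through dev->parent: the core chip driver registers a struct abx500_ops for its own device, and each subdevice passes its own struct device to the exported helpers, which resolve the ops via the parent pointer. A hedged usage sketch from a hypothetical subdriver (the bank and register values are illustrative):

static int subdev_read_chip_rev(struct platform_device *pdev)
{
	u8 val;
	int ret;

	/* pdev->dev.parent is the chip that called abx500_register_ops() */
	ret = abx500_get_register_interruptible(&pdev->dev, 0x10, 0x80, &val);
	if (ret < 0)
		return ret;

	return val;
}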
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/mfd/core.h> + +#include <linux/mfd/janz.h> + +#define DRV_NAME "janz-cmodio" + +/* Size of each MODULbus module in PCI BAR4 */ +#define CMODIO_MODULBUS_SIZE 0x200 + +/* Maximum number of MODULbus modules on a CMOD-IO carrier board */ +#define CMODIO_MAX_MODULES 4 + +/* Module Parameters */ +static unsigned int num_modules = CMODIO_MAX_MODULES; +static unsigned char *modules[CMODIO_MAX_MODULES] = { + "empty", "empty", "empty", "empty", +}; + +module_param_array(modules, charp, &num_modules, S_IRUGO); +MODULE_PARM_DESC(modules, "MODULbus modules attached to the carrier board"); + +/* Unique Device Id */ +static unsigned int cmodio_id; + +struct cmodio_device { + /* Parent PCI device */ + struct pci_dev *pdev; + + /* PLX control registers */ + struct janz_cmodio_onboard_regs __iomem *ctrl; + + /* hex switch position */ + u8 hex; + + /* mfd-core API */ + struct mfd_cell cells[CMODIO_MAX_MODULES]; + struct resource resources[3 * CMODIO_MAX_MODULES]; + struct janz_platform_data pdata[CMODIO_MAX_MODULES]; +}; + +/* + * Subdevices using the mfd-core API + */ + +static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv, + char *name, unsigned int devno, + unsigned int modno) +{ + struct janz_platform_data *pdata; + struct mfd_cell *cell; + struct resource *res; + struct pci_dev *pci; + + pci = priv->pdev; + cell = &priv->cells[devno]; + res = &priv->resources[devno * 3]; + pdata = &priv->pdata[devno]; + + cell->name = name; + cell->resources = res; + cell->num_resources = 3; + + /* Setup the subdevice ID -- must be unique */ + cell->id = cmodio_id++; + + /* Add platform data */ + pdata->modno = modno; + cell->platform_data = pdata; + cell->data_size = sizeof(*pdata); + + /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */ + res->flags = IORESOURCE_MEM; + res->parent = &pci->resource[3]; + res->start = pci->resource[3].start + (CMODIO_MODULBUS_SIZE * modno); + res->end = res->start + CMODIO_MODULBUS_SIZE - 1; + res++; + + /* PLX Control Registers -- PCI BAR4 is interrupt and other registers */ + res->flags = IORESOURCE_MEM; + res->parent = &pci->resource[4]; + res->start = pci->resource[4].start; + res->end = pci->resource[4].end; + res++; + + /* + * IRQ + * + * The start and end fields are used as an offset to the irq_base + * parameter passed into the mfd_add_devices() function call. All + * devices share the same IRQ. 
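Each MODULbus slot above is carved out of PCI BAR3 as a CMODIO_MODULBUS_SIZE (0x200) byte window at offset modno * 0x200, so slot 0 starts at the BAR base and slot 2 starts 0x400 bytes in. The same arithmetic on its own, as a sketch:

/* memory window for MODULbus slot modno inside BAR3 */
resource_size_t base = pci_resource_start(pci, 3);

res->start = base + CMODIO_MODULBUS_SIZE * modno;	/* modno 2: base + 0x400 */
res->end = res->start + CMODIO_MODULBUS_SIZE - 1;	/* inclusive resource end */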
+ */ + res->flags = IORESOURCE_IRQ; + res->parent = NULL; + res->start = 0; + res->end = 0; + res++; + + return 0; +} + +/* Probe each submodule using kernel parameters */ +static int __devinit cmodio_probe_submodules(struct cmodio_device *priv) +{ + struct pci_dev *pdev = priv->pdev; + unsigned int num_probed = 0; + char *name; + int i; + + for (i = 0; i < num_modules; i++) { + name = modules[i]; + if (!strcmp(name, "") || !strcmp(name, "empty")) + continue; + + dev_dbg(&priv->pdev->dev, "MODULbus %d: name %s\n", i, name); + cmodio_setup_subdevice(priv, name, num_probed, i); + num_probed++; + } + + /* print an error message if no modules were probed */ + if (num_probed == 0) { + dev_err(&priv->pdev->dev, "no MODULbus modules specified, " + "please set the ``modules'' kernel " + "parameter according to your " + "hardware configuration\n"); + return -ENODEV; + } + + return mfd_add_devices(&pdev->dev, 0, priv->cells, + num_probed, NULL, pdev->irq); +} + +/* + * SYSFS Attributes + */ + +static ssize_t mbus_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cmodio_device *priv = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%x\n", priv->hex); +} + +static DEVICE_ATTR(modulbus_number, S_IRUGO, mbus_show, NULL); + +static struct attribute *cmodio_sysfs_attrs[] = { + &dev_attr_modulbus_number.attr, + NULL, +}; + +static const struct attribute_group cmodio_sysfs_attr_group = { + .attrs = cmodio_sysfs_attrs, +}; + +/* + * PCI Driver + */ + +static int __devinit cmodio_pci_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + struct cmodio_device *priv; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&dev->dev, "unable to allocate private data\n"); + ret = -ENOMEM; + goto out_return; + } + + pci_set_drvdata(dev, priv); + priv->pdev = dev; + + /* Hardware Initialization */ + ret = pci_enable_device(dev); + if (ret) { + dev_err(&dev->dev, "unable to enable device\n"); + goto out_free_priv; + } + + pci_set_master(dev); + ret = pci_request_regions(dev, DRV_NAME); + if (ret) { + dev_err(&dev->dev, "unable to request regions\n"); + goto out_pci_disable_device; + } + + /* Onboard configuration registers */ + priv->ctrl = pci_ioremap_bar(dev, 4); + if (!priv->ctrl) { + dev_err(&dev->dev, "unable to remap onboard regs\n"); + ret = -ENOMEM; + goto out_pci_release_regions; + } + + /* Read the hex switch on the carrier board */ + priv->hex = ioread8(&priv->ctrl->int_enable); + + /* Add the MODULbus number (hex switch value) to the device's sysfs */ + ret = sysfs_create_group(&dev->dev.kobj, &cmodio_sysfs_attr_group); + if (ret) { + dev_err(&dev->dev, "unable to create sysfs attributes\n"); + goto out_unmap_ctrl; + } + + /* + * Disable all interrupt lines, each submodule will enable its + * own interrupt line if needed + */ + iowrite8(0xf, &priv->ctrl->int_disable); + + /* Register drivers for all submodules */ + ret = cmodio_probe_submodules(priv); + if (ret) { + dev_err(&dev->dev, "unable to probe submodules\n"); + goto out_sysfs_remove_group; + } + + return 0; + +out_sysfs_remove_group: + sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group); +out_unmap_ctrl: + iounmap(priv->ctrl); +out_pci_release_regions: + pci_release_regions(dev); +out_pci_disable_device: + pci_disable_device(dev); +out_free_priv: + kfree(priv); +out_return: + return ret; +} + +static void __devexit cmodio_pci_remove(struct pci_dev *dev) +{ + struct cmodio_device *priv = pci_get_drvdata(dev); + + mfd_remove_devices(&dev->dev); + 
sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group); + iounmap(priv->ctrl); + pci_release_regions(dev); + pci_disable_device(dev); + kfree(priv); +} + +#define PCI_VENDOR_ID_JANZ 0x13c3 + +/* The list of devices that this module will support */ +static DEFINE_PCI_DEVICE_TABLE(cmodio_pci_ids) = { + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, cmodio_pci_ids); + +static struct pci_driver cmodio_pci_driver = { + .name = DRV_NAME, + .id_table = cmodio_pci_ids, + .probe = cmodio_pci_probe, + .remove = __devexit_p(cmodio_pci_remove), +}; + +/* + * Module Init / Exit + */ + +static int __init cmodio_init(void) +{ + return pci_register_driver(&cmodio_pci_driver); +} + +static void __exit cmodio_exit(void) +{ + pci_unregister_driver(&cmodio_pci_driver); +} + +MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); +MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver"); +MODULE_LICENSE("GPL"); + +module_init(cmodio_init); +module_exit(cmodio_exit); diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 85d63c04749b..f621bcea3d02 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c @@ -508,7 +508,7 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ2); max8925_reg_read(chip->rtc, MAX8925_RTC_IRQ); max8925_reg_read(chip->adc, MAX8925_TSC_IRQ); - /* mask all interrupts */ + /* mask all interrupts except for TSC */ max8925_reg_write(chip->rtc, MAX8925_ALARM0_CNTL, 0); max8925_reg_write(chip->rtc, MAX8925_ALARM1_CNTL, 0); max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, 0xff); @@ -516,7 +516,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff); max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, 0xff); max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff); - max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0xff); mutex_init(&chip->irq_lock); chip->core_irq = irq; @@ -547,7 +546,11 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret); chip->core_irq = 0; } + tsc_irq: + /* mask TSC interrupt */ + max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0x0f); + if (!pdata->tsc_irq) { dev_warn(chip->dev, "No interrupt support on TSC IRQ\n"); return 0; diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c index d9fd8785da4d..e73f3f5252a8 100644 --- a/drivers/mfd/max8925-i2c.c +++ b/drivers/mfd/max8925-i2c.c @@ -173,8 +173,6 @@ static int __devexit max8925_remove(struct i2c_client *client) max8925_device_exit(chip); i2c_unregister_device(chip->adc); i2c_unregister_device(chip->rtc); - i2c_set_clientdata(chip->adc, NULL); - i2c_set_clientdata(chip->rtc, NULL); i2c_set_clientdata(chip->i2c, NULL); kfree(chip); return 0; diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c index 1f68ecadddc2..fecf38a4f025 100644 --- a/drivers/mfd/mc13783-core.c +++ b/drivers/mfd/mc13783-core.c @@ -679,6 +679,10 @@ err_revision: if (pdata->flags & MC13783_USE_TOUCHSCREEN) mc13783_add_subdevice(mc13783, "mc13783-ts"); + if (pdata->flags & MC13783_USE_LED) + mc13783_add_subdevice_pdata(mc13783, "mc13783-led", + pdata->leds, sizeof(*pdata->leds)); + return 0; } diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c index a94b131a18ef..721948be12c7 100644 --- 
a/drivers/mfd/menelaus.c +++ b/drivers/mfd/menelaus.c @@ -1228,6 +1228,7 @@ fail2: free_irq(client->irq, menelaus); flush_scheduled_work(); fail1: + i2c_set_clientdata(client, NULL); kfree(menelaus); return err; } @@ -1237,8 +1238,8 @@ static int __exit menelaus_remove(struct i2c_client *client) struct menelaus_chip *menelaus = i2c_get_clientdata(client); free_irq(client->irq, menelaus); - kfree(menelaus); i2c_set_clientdata(client, NULL); + kfree(menelaus); the_menelaus = NULL; return 0; } diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 8ffbb7a85a7e..7dd76bceaae8 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -48,7 +48,7 @@ static int mfd_add_device(struct device *parent, int id, res[r].flags = cell->resources[r].flags; /* Find out base to use */ - if (cell->resources[r].flags & IORESOURCE_MEM) { + if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) { res[r].parent = mem_base; res[r].start = mem_base->start + cell->resources[r].start; diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c index fe8f922f6654..aed0d2a9b032 100644 --- a/drivers/mfd/pcf50633-adc.c +++ b/drivers/mfd/pcf50633-adc.c @@ -30,13 +30,13 @@ struct pcf50633_adc_request { int mux; int avg; - int result; void (*callback)(struct pcf50633 *, void *, int); void *callback_param; +}; - /* Used in case of sync requests */ +struct pcf50633_adc_sync_request { + int result; struct completion completion; - }; #define PCF50633_MAX_ADC_FIFO_DEPTH 8 @@ -109,10 +109,10 @@ adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req) return 0; } -static void -pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result) +static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, + int result) { - struct pcf50633_adc_request *req = param; + struct pcf50633_adc_sync_request *req = param; req->result = result; complete(&req->completion); @@ -120,28 +120,19 @@ pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result) int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg) { - struct pcf50633_adc_request *req; - int err; + struct pcf50633_adc_sync_request req; + int ret; - /* req is freed when the result is ready, in interrupt handler */ - req = kzalloc(sizeof(*req), GFP_KERNEL); - if (!req) - return -ENOMEM; - - req->mux = mux; - req->avg = avg; - req->callback = pcf50633_adc_sync_read_callback; - req->callback_param = req; + init_completion(&req.completion); - init_completion(&req->completion); - err = adc_enqueue_request(pcf, req); - if (err) - return err; + ret = pcf50633_adc_async_read(pcf, mux, avg, + pcf50633_adc_sync_read_callback, &req); + if (ret) + return ret; - wait_for_completion(&req->completion); + wait_for_completion(&req.completion); - /* FIXME by this time req might be already freed */ - return req->result; + return req.result; } EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read); diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 63a614d696c1..704736e6e9b9 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c @@ -21,16 +21,16 @@ #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/i2c.h> -#include <linux/irq.h> #include <linux/slab.h> #include <linux/mfd/pcf50633/core.h> -/* Two MBCS registers used during cold start */ -#define PCF50633_REG_MBCS1 0x4b -#define PCF50633_REG_MBCS2 0x4c -#define PCF50633_MBCS1_USBPRES 0x01 -#define PCF50633_MBCS1_ADAPTPRES 0x01 +int pcf50633_irq_init(struct pcf50633 *pcf, int irq); 
+void pcf50633_irq_free(struct pcf50633 *pcf); +#ifdef CONFIG_PM +int pcf50633_irq_suspend(struct pcf50633 *pcf); +int pcf50633_irq_resume(struct pcf50633 *pcf); +#endif static int __pcf50633_read(struct pcf50633 *pcf, u8 reg, int num, u8 *data) { @@ -215,244 +215,6 @@ static struct attribute_group pcf_attr_group = { .attrs = pcf_sysfs_entries, }; -int pcf50633_register_irq(struct pcf50633 *pcf, int irq, - void (*handler) (int, void *), void *data) -{ - if (irq < 0 || irq > PCF50633_NUM_IRQ || !handler) - return -EINVAL; - - if (WARN_ON(pcf->irq_handler[irq].handler)) - return -EBUSY; - - mutex_lock(&pcf->lock); - pcf->irq_handler[irq].handler = handler; - pcf->irq_handler[irq].data = data; - mutex_unlock(&pcf->lock); - - return 0; -} -EXPORT_SYMBOL_GPL(pcf50633_register_irq); - -int pcf50633_free_irq(struct pcf50633 *pcf, int irq) -{ - if (irq < 0 || irq > PCF50633_NUM_IRQ) - return -EINVAL; - - mutex_lock(&pcf->lock); - pcf->irq_handler[irq].handler = NULL; - mutex_unlock(&pcf->lock); - - return 0; -} -EXPORT_SYMBOL_GPL(pcf50633_free_irq); - -static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask) -{ - u8 reg, bits, tmp; - int ret = 0, idx; - - idx = irq >> 3; - reg = PCF50633_REG_INT1M + idx; - bits = 1 << (irq & 0x07); - - mutex_lock(&pcf->lock); - - if (mask) { - ret = __pcf50633_read(pcf, reg, 1, &tmp); - if (ret < 0) - goto out; - - tmp |= bits; - - ret = __pcf50633_write(pcf, reg, 1, &tmp); - if (ret < 0) - goto out; - - pcf->mask_regs[idx] &= ~bits; - pcf->mask_regs[idx] |= bits; - } else { - ret = __pcf50633_read(pcf, reg, 1, &tmp); - if (ret < 0) - goto out; - - tmp &= ~bits; - - ret = __pcf50633_write(pcf, reg, 1, &tmp); - if (ret < 0) - goto out; - - pcf->mask_regs[idx] &= ~bits; - } -out: - mutex_unlock(&pcf->lock); - - return ret; -} - -int pcf50633_irq_mask(struct pcf50633 *pcf, int irq) -{ - dev_dbg(pcf->dev, "Masking IRQ %d\n", irq); - - return __pcf50633_irq_mask_set(pcf, irq, 1); -} -EXPORT_SYMBOL_GPL(pcf50633_irq_mask); - -int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq) -{ - dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq); - - return __pcf50633_irq_mask_set(pcf, irq, 0); -} -EXPORT_SYMBOL_GPL(pcf50633_irq_unmask); - -int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq) -{ - u8 reg, bits; - - reg = irq >> 3; - bits = 1 << (irq & 0x07); - - return pcf->mask_regs[reg] & bits; -} -EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get); - -static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq) -{ - if (pcf->irq_handler[irq].handler) - pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data); -} - -/* Maximum amount of time ONKEY is held before emergency action is taken */ -#define PCF50633_ONKEY1S_TIMEOUT 8 - -static void pcf50633_irq_worker(struct work_struct *work) -{ - struct pcf50633 *pcf; - int ret, i, j; - u8 pcf_int[5], chgstat; - - pcf = container_of(work, struct pcf50633, irq_work); - - /* Read the 5 INT regs in one transaction */ - ret = pcf50633_read_block(pcf, PCF50633_REG_INT1, - ARRAY_SIZE(pcf_int), pcf_int); - if (ret != ARRAY_SIZE(pcf_int)) { - dev_err(pcf->dev, "Error reading INT registers\n"); - - /* - * If this doesn't ACK the interrupt to the chip, we'll be - * called once again as we're level triggered. - */ - goto out; - } - - /* defeat 8s death from lowsys on A5 */ - pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04); - - /* We immediately read the usb and adapter status. 
We thus make sure - * only of USBINS/USBREM IRQ handlers are called */ - if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) { - chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); - if (chgstat & (0x3 << 4)) - pcf_int[0] &= ~(1 << PCF50633_INT1_USBREM); - else - pcf_int[0] &= ~(1 << PCF50633_INT1_USBINS); - } - - /* Make sure only one of ADPINS or ADPREM is set */ - if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) { - chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); - if (chgstat & (0x3 << 4)) - pcf_int[0] &= ~(1 << PCF50633_INT1_ADPREM); - else - pcf_int[0] &= ~(1 << PCF50633_INT1_ADPINS); - } - - dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x " - "INT4=0x%02x INT5=0x%02x\n", pcf_int[0], - pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]); - - /* Some revisions of the chip don't have a 8s standby mode on - * ONKEY1S press. We try to manually do it in such cases. */ - if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) { - dev_info(pcf->dev, "ONKEY1S held for %d secs\n", - pcf->onkey1s_held); - if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT) - if (pcf->pdata->force_shutdown) - pcf->pdata->force_shutdown(pcf); - } - - if (pcf_int[2] & PCF50633_INT3_ONKEY1S) { - dev_info(pcf->dev, "ONKEY1S held\n"); - pcf->onkey1s_held = 1 ; - - /* Unmask IRQ_SECOND */ - pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M, - PCF50633_INT1_SECOND); - - /* Unmask IRQ_ONKEYR */ - pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M, - PCF50633_INT2_ONKEYR); - } - - if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) { - pcf->onkey1s_held = 0; - - /* Mask SECOND and ONKEYR interrupts */ - if (pcf->mask_regs[0] & PCF50633_INT1_SECOND) - pcf50633_reg_set_bit_mask(pcf, - PCF50633_REG_INT1M, - PCF50633_INT1_SECOND, - PCF50633_INT1_SECOND); - - if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR) - pcf50633_reg_set_bit_mask(pcf, - PCF50633_REG_INT2M, - PCF50633_INT2_ONKEYR, - PCF50633_INT2_ONKEYR); - } - - /* Have we just resumed ? 
*/ - if (pcf->is_suspended) { - pcf->is_suspended = 0; - - /* Set the resume reason filtering out non resumers */ - for (i = 0; i < ARRAY_SIZE(pcf_int); i++) - pcf->resume_reason[i] = pcf_int[i] & - pcf->pdata->resumers[i]; - - /* Make sure we don't pass on any ONKEY events to - * userspace now */ - pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF); - } - - for (i = 0; i < ARRAY_SIZE(pcf_int); i++) { - /* Unset masked interrupts */ - pcf_int[i] &= ~pcf->mask_regs[i]; - - for (j = 0; j < 8 ; j++) - if (pcf_int[i] & (1 << j)) - pcf50633_irq_call_handler(pcf, (i * 8) + j); - } - -out: - put_device(pcf->dev); - enable_irq(pcf->irq); -} - -static irqreturn_t pcf50633_irq(int irq, void *data) -{ - struct pcf50633 *pcf = data; - - dev_dbg(pcf->dev, "pcf50633_irq\n"); - - get_device(pcf->dev); - disable_irq_nosync(pcf->irq); - queue_work(pcf->work_queue, &pcf->irq_work); - - return IRQ_HANDLED; -} - static void pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name, struct platform_device **pdev) @@ -479,70 +241,17 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name, static int pcf50633_suspend(struct i2c_client *client, pm_message_t state) { struct pcf50633 *pcf; - int ret = 0, i; - u8 res[5]; - pcf = i2c_get_clientdata(client); - /* Make sure our interrupt handlers are not called - * henceforth */ - disable_irq(pcf->irq); - - /* Make sure that any running IRQ worker has quit */ - cancel_work_sync(&pcf->irq_work); - - /* Save the masks */ - ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M, - ARRAY_SIZE(pcf->suspend_irq_masks), - pcf->suspend_irq_masks); - if (ret < 0) { - dev_err(pcf->dev, "error saving irq masks\n"); - goto out; - } - - /* Write wakeup irq masks */ - for (i = 0; i < ARRAY_SIZE(res); i++) - res[i] = ~pcf->pdata->resumers[i]; - - ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M, - ARRAY_SIZE(res), &res[0]); - if (ret < 0) { - dev_err(pcf->dev, "error writing wakeup irq masks\n"); - goto out; - } - - pcf->is_suspended = 1; - -out: - return ret; + return pcf50633_irq_suspend(pcf); } static int pcf50633_resume(struct i2c_client *client) { struct pcf50633 *pcf; - int ret; - pcf = i2c_get_clientdata(client); - /* Write the saved mask registers */ - ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M, - ARRAY_SIZE(pcf->suspend_irq_masks), - pcf->suspend_irq_masks); - if (ret < 0) - dev_err(pcf->dev, "Error restoring saved suspend masks\n"); - - /* Restore regulators' state */ - - - get_device(pcf->dev); - - /* - * Clear any pending interrupts and set resume reason if any. 
- * This will leave with enable_irq() - */ - pcf50633_irq_worker(&pcf->irq_work); - - return 0; + return pcf50633_irq_resume(pcf); } #else #define pcf50633_suspend NULL @@ -573,43 +282,19 @@ static int __devinit pcf50633_probe(struct i2c_client *client, i2c_set_clientdata(client, pcf); pcf->dev = &client->dev; pcf->i2c_client = client; - pcf->irq = client->irq; - pcf->work_queue = create_singlethread_workqueue("pcf50633"); - - if (!pcf->work_queue) { - dev_err(&client->dev, "Failed to alloc workqueue\n"); - ret = -ENOMEM; - goto err_free; - } - - INIT_WORK(&pcf->irq_work, pcf50633_irq_worker); version = pcf50633_reg_read(pcf, 0); variant = pcf50633_reg_read(pcf, 1); if (version < 0 || variant < 0) { dev_err(pcf->dev, "Unable to probe pcf50633\n"); ret = -ENODEV; - goto err_destroy_workqueue; + goto err_free; } dev_info(pcf->dev, "Probed device version %d variant %d\n", version, variant); - /* Enable all interrupts except RTC SECOND */ - pcf->mask_regs[0] = 0x80; - pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]); - pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00); - pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00); - pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00); - pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00); - - ret = request_irq(client->irq, pcf50633_irq, - IRQF_TRIGGER_LOW, "pcf50633", pcf); - - if (ret) { - dev_err(pcf->dev, "Failed to request IRQ %d\n", ret); - goto err_destroy_workqueue; - } + pcf50633_irq_init(pcf, client->irq); /* Create sub devices */ pcf50633_client_dev_register(pcf, "pcf50633-input", @@ -620,6 +305,9 @@ static int __devinit pcf50633_probe(struct i2c_client *client, &pcf->mbc_pdev); pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev); + pcf50633_client_dev_register(pcf, "pcf50633-backlight", + &pcf->bl_pdev); + for (i = 0; i < PCF50633_NUM_REGULATORS; i++) { struct platform_device *pdev; @@ -638,10 +326,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client, platform_device_add(pdev); } - if (enable_irq_wake(client->irq) < 0) - dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source" - "in this hardware revision", client->irq); - ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group); if (ret) dev_err(pcf->dev, "error creating sysfs entries\n"); @@ -651,8 +335,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client, return 0; -err_destroy_workqueue: - destroy_workqueue(pcf->work_queue); err_free: i2c_set_clientdata(client, NULL); kfree(pcf); @@ -665,8 +347,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client) struct pcf50633 *pcf = i2c_get_clientdata(client); int i; - free_irq(pcf->irq, pcf); - destroy_workqueue(pcf->work_queue); + pcf50633_irq_free(pcf); platform_device_unregister(pcf->input_pdev); platform_device_unregister(pcf->rtc_pdev); @@ -676,6 +357,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client) for (i = 0; i < PCF50633_NUM_REGULATORS; i++) platform_device_unregister(pcf->regulator_pdev[i]); + i2c_set_clientdata(client, NULL); kfree(pcf); return 0; diff --git a/drivers/mfd/pcf50633-irq.c b/drivers/mfd/pcf50633-irq.c new file mode 100644 index 000000000000..1b0192f1efff --- /dev/null +++ b/drivers/mfd/pcf50633-irq.c @@ -0,0 +1,318 @@ +/* NXP PCF50633 Power Management Unit (PMU) driver + * + * (C) 2006-2008 by Openmoko, Inc. + * Author: Harald Welte <laforge@openmoko.org> + * Balaji Rao <balajirrao@openmoko.org> + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/slab.h> + +#include <linux/mfd/pcf50633/core.h> + +/* Two MBCS registers used during cold start */ +#define PCF50633_REG_MBCS1 0x4b +#define PCF50633_REG_MBCS2 0x4c +#define PCF50633_MBCS1_USBPRES 0x01 +#define PCF50633_MBCS1_ADAPTPRES 0x01 + +int pcf50633_register_irq(struct pcf50633 *pcf, int irq, + void (*handler) (int, void *), void *data) +{ + if (irq < 0 || irq >= PCF50633_NUM_IRQ || !handler) + return -EINVAL; + + if (WARN_ON(pcf->irq_handler[irq].handler)) + return -EBUSY; + + mutex_lock(&pcf->lock); + pcf->irq_handler[irq].handler = handler; + pcf->irq_handler[irq].data = data; + mutex_unlock(&pcf->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(pcf50633_register_irq); + +int pcf50633_free_irq(struct pcf50633 *pcf, int irq) +{ + if (irq < 0 || irq >= PCF50633_NUM_IRQ) + return -EINVAL; + + mutex_lock(&pcf->lock); + pcf->irq_handler[irq].handler = NULL; + mutex_unlock(&pcf->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(pcf50633_free_irq); + +static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask) +{ + u8 reg, bit; + int ret = 0, idx; + + idx = irq >> 3; + reg = PCF50633_REG_INT1M + idx; + bit = 1 << (irq & 0x07); + + pcf50633_reg_set_bit_mask(pcf, reg, bit, mask ? bit : 0); + + mutex_lock(&pcf->lock); + + if (mask) + pcf->mask_regs[idx] |= bit; + else + pcf->mask_regs[idx] &= ~bit; + + mutex_unlock(&pcf->lock); + + return ret; +} + +int pcf50633_irq_mask(struct pcf50633 *pcf, int irq) +{ + dev_dbg(pcf->dev, "Masking IRQ %d\n", irq); + + return __pcf50633_irq_mask_set(pcf, irq, 1); +} +EXPORT_SYMBOL_GPL(pcf50633_irq_mask); + +int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq) +{ + dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq); + + return __pcf50633_irq_mask_set(pcf, irq, 0); +} +EXPORT_SYMBOL_GPL(pcf50633_irq_unmask); + +int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq) +{ + u8 reg, bits; + + reg = irq >> 3; + bits = 1 << (irq & 0x07); + + return pcf->mask_regs[reg] & bits; +} +EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get); + +static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq) +{ + if (pcf->irq_handler[irq].handler) + pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data); +} + +/* Maximum amount of time ONKEY is held before emergency action is taken */ +#define PCF50633_ONKEY1S_TIMEOUT 8 + +static irqreturn_t pcf50633_irq(int irq, void *data) +{ + struct pcf50633 *pcf = data; + int ret, i, j; + u8 pcf_int[5], chgstat; + + /* Read the 5 INT regs in one transaction */ + ret = pcf50633_read_block(pcf, PCF50633_REG_INT1, + ARRAY_SIZE(pcf_int), pcf_int); + if (ret != ARRAY_SIZE(pcf_int)) { + dev_err(pcf->dev, "Error reading INT registers\n"); + + /* + * If this doesn't ACK the interrupt to the chip, we'll be + * called once again as we're level triggered. + */ + goto out; + } + + /* defeat 8s death from lowsys on A5 */ + pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04); + + /* We immediately read the usb and adapter status. 
We thus make sure + * only one of the USBINS/USBREM IRQ handlers is called */ + if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) { + chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); + if (chgstat & (0x3 << 4)) + pcf_int[0] &= ~PCF50633_INT1_USBREM; + else + pcf_int[0] &= ~PCF50633_INT1_USBINS; + } + + /* Make sure only one of ADPINS or ADPREM is set */ + if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) { + chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); + if (chgstat & (0x3 << 4)) + pcf_int[0] &= ~PCF50633_INT1_ADPREM; + else + pcf_int[0] &= ~PCF50633_INT1_ADPINS; + } + + dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x " + "INT4=0x%02x INT5=0x%02x\n", pcf_int[0], + pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]); + + /* Some revisions of the chip don't have an 8s standby mode on + * ONKEY1S press. We try to do it manually in such cases. */ + if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) { + dev_info(pcf->dev, "ONKEY1S held for %d secs\n", + pcf->onkey1s_held); + if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT) + if (pcf->pdata->force_shutdown) + pcf->pdata->force_shutdown(pcf); + } + + if (pcf_int[2] & PCF50633_INT3_ONKEY1S) { + dev_info(pcf->dev, "ONKEY1S held\n"); + pcf->onkey1s_held = 1; + + /* Unmask IRQ_SECOND */ + pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M, + PCF50633_INT1_SECOND); + + /* Unmask IRQ_ONKEYR */ + pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M, + PCF50633_INT2_ONKEYR); + } + + if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) { + pcf->onkey1s_held = 0; + + /* Mask SECOND and ONKEYR interrupts */ + if (pcf->mask_regs[0] & PCF50633_INT1_SECOND) + pcf50633_reg_set_bit_mask(pcf, + PCF50633_REG_INT1M, + PCF50633_INT1_SECOND, + PCF50633_INT1_SECOND); + + if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR) + pcf50633_reg_set_bit_mask(pcf, + PCF50633_REG_INT2M, + PCF50633_INT2_ONKEYR, + PCF50633_INT2_ONKEYR); + } + + /* Have we just resumed? 
*/ + if (pcf->is_suspended) { + pcf->is_suspended = 0; + + /* Set the resume reason, filtering out non-resumers */ + for (i = 0; i < ARRAY_SIZE(pcf_int); i++) + pcf->resume_reason[i] = pcf_int[i] & + pcf->pdata->resumers[i]; + + /* Make sure we don't pass on any ONKEY events to + * userspace now */ + pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF); + } + + for (i = 0; i < ARRAY_SIZE(pcf_int); i++) { + /* Unset masked interrupts */ + pcf_int[i] &= ~pcf->mask_regs[i]; + + for (j = 0; j < 8; j++) + if (pcf_int[i] & (1 << j)) + pcf50633_irq_call_handler(pcf, (i * 8) + j); + } + +out: + return IRQ_HANDLED; +} + +#ifdef CONFIG_PM + +int pcf50633_irq_suspend(struct pcf50633 *pcf) +{ + int ret; + int i; + u8 res[5]; + + + /* Make sure our interrupt handlers are not called + * henceforth */ + disable_irq(pcf->irq); + + /* Save the masks */ + ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M, + ARRAY_SIZE(pcf->suspend_irq_masks), + pcf->suspend_irq_masks); + if (ret < 0) { + dev_err(pcf->dev, "error saving irq masks\n"); + goto out; + } + + /* Write wakeup irq masks */ + for (i = 0; i < ARRAY_SIZE(res); i++) + res[i] = ~pcf->pdata->resumers[i]; + + ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M, + ARRAY_SIZE(res), &res[0]); + if (ret < 0) { + dev_err(pcf->dev, "error writing wakeup irq masks\n"); + goto out; + } + + pcf->is_suspended = 1; + +out: + return ret; +} + +int pcf50633_irq_resume(struct pcf50633 *pcf) +{ + int ret; + + /* Write the saved mask registers */ + ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M, + ARRAY_SIZE(pcf->suspend_irq_masks), + pcf->suspend_irq_masks); + if (ret < 0) + dev_err(pcf->dev, "Error restoring saved suspend masks\n"); + + enable_irq(pcf->irq); + + return ret; +} + +#endif + +int pcf50633_irq_init(struct pcf50633 *pcf, int irq) +{ + int ret; + + pcf->irq = irq; + + /* Enable all interrupts except RTC SECOND */ + pcf->mask_regs[0] = 0x80; + pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]); + pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00); + pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00); + pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00); + pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00); + + ret = request_threaded_irq(irq, NULL, pcf50633_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "pcf50633", pcf); + + if (ret) + dev_err(pcf->dev, "Failed to request IRQ %d: %d\n", irq, ret); + + if (enable_irq_wake(irq) < 0) + dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source " + "in this hardware revision", irq); + + return ret; +} + +void pcf50633_irq_free(struct pcf50633 *pcf) +{ + free_irq(pcf->irq, pcf); +} diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c new file mode 100644 index 000000000000..50922975bda3 --- /dev/null +++ b/drivers/mfd/rdc321x-southbridge.c @@ -0,0 +1,123 @@ +/* + * RDC321x MFD southbridge driver + * + * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org> + * Copyright (C) 2010 Bernhard Loos <bernhardloos@googlemail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
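The dispatch loop in pcf50633_irq() above scans all five status bytes bit by bit with a nested j loop; the ab8500 and tc35892 handlers added in this same series use the equivalent __ffs() idiom instead, which visits only the bits that are actually set. A sketch of that variant under the same assumptions (five status bytes, one handler slot per bit):

for (i = 0; i < ARRAY_SIZE(pcf_int); i++) {
	u8 pending = pcf_int[i] & ~pcf->mask_regs[i];

	while (pending) {
		int bit = __ffs(pending);

		pcf50633_irq_call_handler(pcf, i * 8 + bit);
		pending &= pending - 1;	/* clear the lowest set bit */
	}
}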
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/pci.h> +#include <linux/mfd/core.h> +#include <linux/mfd/rdc321x.h> + +static struct rdc321x_wdt_pdata rdc321x_wdt_pdata; + +static struct resource rdc321x_wdt_resource[] = { + { + .name = "wdt-reg", + .start = RDC321X_WDT_CTRL, + .end = RDC321X_WDT_CTRL + 0x3, + .flags = IORESOURCE_IO, + } +}; + +static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = { + .max_gpios = RDC321X_MAX_GPIO, +}; + +static struct resource rdc321x_gpio_resources[] = { + { + .name = "gpio-reg1", + .start = RDC321X_GPIO_CTRL_REG1, + .end = RDC321X_GPIO_CTRL_REG1 + 0x7, + .flags = IORESOURCE_IO, + }, { + .name = "gpio-reg2", + .start = RDC321X_GPIO_CTRL_REG2, + .end = RDC321X_GPIO_CTRL_REG2 + 0x7, + .flags = IORESOURCE_IO, + } +}; + +static struct mfd_cell rdc321x_sb_cells[] = { + { + .name = "rdc321x-wdt", + .resources = rdc321x_wdt_resource, + .num_resources = ARRAY_SIZE(rdc321x_wdt_resource), + .driver_data = &rdc321x_wdt_pdata, + }, { + .name = "rdc321x-gpio", + .resources = rdc321x_gpio_resources, + .num_resources = ARRAY_SIZE(rdc321x_gpio_resources), + .driver_data = &rdc321x_gpio_pdata, + }, +}; + +static int __devinit rdc321x_sb_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable device\n"); + return err; + } + + rdc321x_gpio_pdata.sb_pdev = pdev; + rdc321x_wdt_pdata.sb_pdev = pdev; + + return mfd_add_devices(&pdev->dev, -1, + rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), NULL, 0); +} + +static void __devexit rdc321x_sb_remove(struct pci_dev *pdev) +{ + mfd_remove_devices(&pdev->dev); +} + +static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = { + { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) }, + {} +}; + +static struct pci_driver rdc321x_sb_driver = { + .name = "RDC321x Southbridge", + .id_table = rdc321x_sb_table, + .probe = rdc321x_sb_probe, + .remove = __devexit_p(rdc321x_sb_remove), +}; + +static int __init rdc321x_sb_init(void) +{ + return pci_register_driver(&rdc321x_sb_driver); +} + +static void __exit rdc321x_sb_exit(void) +{ + pci_unregister_driver(&rdc321x_sb_driver); +} + +module_init(rdc321x_sb_init); +module_exit(rdc321x_sb_exit); + +MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RDC R-321x MFD southbridge driver"); diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c index 497f91b6138e..cd164595f08a 100644 --- a/drivers/mfd/sh_mobile_sdhi.c +++ b/drivers/mfd/sh_mobile_sdhi.c @@ -26,11 +26,15 @@ #include <linux/mfd/core.h> #include <linux/mfd/tmio.h> #include <linux/mfd/sh_mobile_sdhi.h> +#include <linux/sh_dma.h> struct sh_mobile_sdhi { struct clk *clk; struct tmio_mmc_data mmc_data; struct mfd_cell cell_mmc; + struct sh_dmae_slave param_tx; + struct sh_dmae_slave param_rx; + struct tmio_mmc_dma dma_priv; }; static struct resource sh_mobile_sdhi_resources[] = { @@ -64,6 +68,8 @@ static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state) static int __init sh_mobile_sdhi_probe(struct platform_device *pdev) { struct sh_mobile_sdhi *priv; + struct tmio_mmc_data *mmc_data; + struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; struct resource *mem; 
char clk_name[8]; int ret, irq; @@ -85,6 +91,8 @@ static int __init sh_mobile_sdhi_probe(struct platform_device *pdev) return -ENOMEM; } + mmc_data = &priv->mmc_data; + snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); priv->clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(priv->clk)) { @@ -96,12 +104,24 @@ static int __init sh_mobile_sdhi_probe(struct platform_device *pdev) clk_enable(priv->clk); - priv->mmc_data.hclk = clk_get_rate(priv->clk); - priv->mmc_data.set_pwr = sh_mobile_sdhi_set_pwr; - priv->mmc_data.capabilities = MMC_CAP_MMC_HIGHSPEED; + mmc_data->hclk = clk_get_rate(priv->clk); + mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; + mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; + if (p) { + mmc_data->flags = p->tmio_flags; + mmc_data->ocr_mask = p->tmio_ocr_mask; + } + + if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { + priv->param_tx.slave_id = p->dma_slave_tx; + priv->param_rx.slave_id = p->dma_slave_rx; + priv->dma_priv.chan_priv_tx = &priv->param_tx; + priv->dma_priv.chan_priv_rx = &priv->param_rx; + mmc_data->dma = &priv->dma_priv; + } memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc)); - priv->cell_mmc.driver_data = &priv->mmc_data; + priv->cell_mmc.driver_data = mmc_data; priv->cell_mmc.platform_data = &priv->cell_mmc; priv->cell_mmc.data_size = sizeof(priv->cell_mmc); diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index da6383a934ac..5041d33adf0b 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c @@ -318,6 +318,9 @@ static int t7l66xb_probe(struct platform_device *dev) struct resource *iomem, *rscr; int ret; + if (pdata == NULL) + return -EINVAL; + iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!iomem) return -EINVAL; diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c new file mode 100644 index 000000000000..715f095dd7a6 --- /dev/null +++ b/drivers/mfd/tc35892.c @@ -0,0 +1,347 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson + * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/mfd/core.h> +#include <linux/mfd/tc35892.h> + +/** + * tc35892_reg_read() - read a single TC35892 register + * @tc35892: Device to read from + * @reg: Register to read + */ +int tc35892_reg_read(struct tc35892 *tc35892, u8 reg) +{ + int ret; + + ret = i2c_smbus_read_byte_data(tc35892->i2c, reg); + if (ret < 0) + dev_err(tc35892->dev, "failed to read reg %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_reg_read); + +/** + * tc35892_reg_write() - write a single TC35892 register + * @tc35892: Device to write to + * @reg: Register to write + * @data: Value to write + */ +int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data) +{ + int ret; + + ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data); + if (ret < 0) + dev_err(tc35892->dev, "failed to write reg %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_reg_write); + +/** + * tc35892_block_read() - read multiple TC35892 registers + * @tc35892: Device to read from + * @reg: First register + * @length: Number of registers + * @values: Buffer to write to + */ +int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values) +{ + int ret; + + ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length,
values); + if (ret < 0) + dev_err(tc35892->dev, "failed to read regs %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_block_read); + +/** + * tc35892_block_write() - write multiple TC35892 registers + * @tc35892: Device to write to + * @reg: First register + * @length: Number of registers + * @values: Values to write + */ +int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length, + const u8 *values) +{ + int ret; + + ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length, + values); + if (ret < 0) + dev_err(tc35892->dev, "failed to write regs %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_block_write); + +/** + * tc35892_set_bits() - set the value of a bitfield in a TC35892 register + * @tc35892: Device to write to + * @reg: Register to write + * @mask: Mask of bits to set + * @values: Value to set + */ +int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val) +{ + int ret; + + mutex_lock(&tc35892->lock); + + ret = tc35892_reg_read(tc35892, reg); + if (ret < 0) + goto out; + + ret &= ~mask; + ret |= val; + + ret = tc35892_reg_write(tc35892, reg, ret); + +out: + mutex_unlock(&tc35892->lock); + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_set_bits); + +static struct resource gpio_resources[] = { + { + .start = TC35892_INT_GPIIRQ, + .end = TC35892_INT_GPIIRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell tc35892_devs[] = { + { + .name = "tc35892-gpio", + .num_resources = ARRAY_SIZE(gpio_resources), + .resources = &gpio_resources[0], + }, +}; + +static irqreturn_t tc35892_irq(int irq, void *data) +{ + struct tc35892 *tc35892 = data; + int status; + + status = tc35892_reg_read(tc35892, TC35892_IRQST); + if (status < 0) + return IRQ_NONE; + + while (status) { + int bit = __ffs(status); + + handle_nested_irq(tc35892->irq_base + bit); + status &= ~(1 << bit); + } + + /* + * A dummy read or write (to any register) appears to be necessary to + * have the last interrupt clear (for example, GPIO IC write) take + * effect. 
+ */ + tc35892_reg_read(tc35892, TC35892_IRQST); + + return IRQ_HANDLED; +} + +static void tc35892_irq_dummy(unsigned int irq) +{ + /* No mask/unmask at this level */ +} + +static struct irq_chip tc35892_irq_chip = { + .name = "tc35892", + .mask = tc35892_irq_dummy, + .unmask = tc35892_irq_dummy, +}; + +static int tc35892_irq_init(struct tc35892 *tc35892) +{ + int base = tc35892->irq_base; + int irq; + + for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) { + set_irq_chip_data(irq, tc35892); + set_irq_chip_and_handler(irq, &tc35892_irq_chip, + handle_edge_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + return 0; +} + +static void tc35892_irq_remove(struct tc35892 *tc35892) +{ + int base = tc35892->irq_base; + int irq; + + for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) { +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#endif + set_irq_chip_and_handler(irq, NULL, NULL); + set_irq_chip_data(irq, NULL); + } +} + +static int tc35892_chip_init(struct tc35892 *tc35892) +{ + int manf, ver, ret; + + manf = tc35892_reg_read(tc35892, TC35892_MANFCODE); + if (manf < 0) + return manf; + + ver = tc35892_reg_read(tc35892, TC35892_VERSION); + if (ver < 0) + return ver; + + if (manf != TC35892_MANFCODE_MAGIC) { + dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf); + return -EINVAL; + } + + dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver); + + /* Put everything except the IRQ module into reset */ + ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL, + TC35892_RSTCTRL_TIMRST + | TC35892_RSTCTRL_ROTRST + | TC35892_RSTCTRL_KBDRST + | TC35892_RSTCTRL_GPIRST); + if (ret < 0) + return ret; + + /* Clear the reset interrupt. */ + return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1); +} + +static int __devinit tc35892_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct tc35892_platform_data *pdata = i2c->dev.platform_data; + struct tc35892 *tc35892; + int ret; + + if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA + | I2C_FUNC_SMBUS_I2C_BLOCK)) + return -EIO; + + tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL); + if (!tc35892) + return -ENOMEM; + + mutex_init(&tc35892->lock); + + tc35892->dev = &i2c->dev; + tc35892->i2c = i2c; + tc35892->pdata = pdata; + tc35892->irq_base = pdata->irq_base; + tc35892->num_gpio = id->driver_data; + + i2c_set_clientdata(i2c, tc35892); + + ret = tc35892_chip_init(tc35892); + if (ret) + goto out_free; + + ret = tc35892_irq_init(tc35892); + if (ret) + goto out_free; + + ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "tc35892", tc35892); + if (ret) { + dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret); + goto out_removeirq; + } + + ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs, + ARRAY_SIZE(tc35892_devs), NULL, + tc35892->irq_base); + if (ret) { + dev_err(tc35892->dev, "failed to add children\n"); + goto out_freeirq; + } + + return 0; + +out_freeirq: + free_irq(tc35892->i2c->irq, tc35892); +out_removeirq: + tc35892_irq_remove(tc35892); +out_free: + i2c_set_clientdata(i2c, NULL); + kfree(tc35892); + return ret; +} + +static int __devexit tc35892_remove(struct i2c_client *client) +{ + struct tc35892 *tc35892 = i2c_get_clientdata(client); + + mfd_remove_devices(tc35892->dev); + + free_irq(tc35892->i2c->irq, tc35892); + tc35892_irq_remove(tc35892); + + i2c_set_clientdata(client, NULL); + kfree(tc35892); + + return 0; +} + 
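(Editorial sketch, not part of the patch: one way a child device such as the tc35892-gpio cell registered above could consume the exported accessors. The helper name and the TC35892_GPIODIR0 register constant are illustrative assumptions, not taken from this commit.)

/* Hypothetical child-driver helper: make one GPIO line an input or an
 * output by updating a single bit through tc35892_set_bits(); the
 * direction register layout is an assumed stand-in. */
static int example_gpio_direction(struct tc35892 *tc35892,
				  unsigned offset, bool output)
{
	u8 reg = TC35892_GPIODIR0 + offset / 8;	/* assumed register base */
	u8 mask = 1 << (offset % 8);

	return tc35892_set_bits(tc35892, reg, mask, output ? mask : 0);
}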
+static const struct i2c_device_id tc35892_id[] = { + { "tc35892", 24 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tc35892_id); + +static struct i2c_driver tc35892_driver = { + .driver.name = "tc35892", + .driver.owner = THIS_MODULE, + .probe = tc35892_probe, + .remove = __devexit_p(tc35892_remove), + .id_table = tc35892_id, +}; + +static int __init tc35892_init(void) +{ + return i2c_add_driver(&tc35892_driver); +} +subsys_initcall(tc35892_init); + +static void __exit tc35892_exit(void) +{ + i2c_del_driver(&tc35892_driver); +} +module_exit(tc35892_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TC35892 MFD core driver"); +MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent"); diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c index 7f478ec4184b..ac5995026c88 100644 --- a/drivers/mfd/timberdale.c +++ b/drivers/mfd/timberdale.c @@ -31,6 +31,7 @@ #include <linux/i2c.h> #include <linux/i2c-ocores.h> +#include <linux/i2c-xiic.h> #include <linux/i2c/tsc2007.h> #include <linux/spi/spi.h> @@ -40,6 +41,8 @@ #include <media/timb_radio.h> +#include <linux/timb_dma.h> + #include "timberdale.h" #define DRIVER_NAME "timberdale" @@ -69,6 +72,12 @@ static struct i2c_board_info timberdale_i2c_board_info[] = { }, }; +static __devinitdata struct xiic_i2c_platform_data +timberdale_xiic_platform_data = { + .devices = timberdale_i2c_board_info, + .num_devices = ARRAY_SIZE(timberdale_i2c_board_info) +}; + static __devinitdata struct ocores_i2c_platform_data timberdale_ocores_platform_data = { .regstep = 4, @@ -77,7 +86,20 @@ timberdale_ocores_platform_data = { .num_devices = ARRAY_SIZE(timberdale_i2c_board_info) }; -const static __devinitconst struct resource timberdale_ocores_resources[] = { +static const __devinitconst struct resource timberdale_xiic_resources[] = { + { + .start = XIICOFFSET, + .end = XIICEND, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_TIMBERDALE_I2C, + .end = IRQ_TIMBERDALE_I2C, + .flags = IORESOURCE_IRQ, + }, +}; + +static const __devinitconst struct resource timberdale_ocores_resources[] = { { .start = OCORESOFFSET, .end = OCORESEND, @@ -126,7 +148,7 @@ static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = { */ }; -const static __devinitconst struct resource timberdale_spi_resources[] = { +static const __devinitconst struct resource timberdale_spi_resources[] = { { .start = SPIOFFSET, .end = SPIEND, @@ -139,7 +161,7 @@ const static __devinitconst struct resource timberdale_spi_resources[] = { }, }; -const static __devinitconst struct resource timberdale_eth_resources[] = { +static const __devinitconst struct resource timberdale_eth_resources[] = { { .start = ETHOFFSET, .end = ETHEND, @@ -159,7 +181,7 @@ static __devinitdata struct timbgpio_platform_data .irq_base = 200, }; -const static __devinitconst struct resource timberdale_gpio_resources[] = { +static const __devinitconst struct resource timberdale_gpio_resources[] = { { .start = GPIOOFFSET, .end = GPIOEND, @@ -172,7 +194,7 @@ const static __devinitconst struct resource timberdale_gpio_resources[] = { }, }; -const static __devinitconst struct resource timberdale_mlogicore_resources[] = { +static const __devinitconst struct resource timberdale_mlogicore_resources[] = { { .start = MLCOREOFFSET, .end = MLCOREEND, @@ -190,7 +212,7 @@ const static __devinitconst struct resource timberdale_mlogicore_resources[] = { }, }; -const static __devinitconst struct resource timberdale_uart_resources[] = { +static const __devinitconst struct resource timberdale_uart_resources[] = { { .start = UARTOFFSET, 
.end = UARTEND, @@ -203,7 +225,7 @@ const static __devinitconst struct resource timberdale_uart_resources[] = { }, }; -const static __devinitconst struct resource timberdale_uartlite_resources[] = { +static const __devinitconst struct resource timberdale_uartlite_resources[] = { { .start = UARTLITEOFFSET, .end = UARTLITEEND, @@ -216,7 +238,7 @@ const static __devinitconst struct resource timberdale_uartlite_resources[] = { }, }; -const static __devinitconst struct resource timberdale_radio_resources[] = { +static const __devinitconst struct resource timberdale_radio_resources[] = { { .start = RDSOFFSET, .end = RDSEND, @@ -250,7 +272,66 @@ static __devinitdata struct timb_radio_platform_data } }; -const static __devinitconst struct resource timberdale_dma_resources[] = { +static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = { + .nr_channels = 10, + .channels = { + { + /* UART RX */ + .rx = true, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* UART TX */ + .rx = false, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* MLB RX */ + .rx = true, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* MLB TX */ + .rx = false, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* Video RX */ + .rx = true, + .bytes_per_line = 1440, + .descriptors = 2, + .descriptor_elements = 16 + }, + { + /* Video framedrop */ + }, + { + /* SDHCI RX */ + .rx = true, + }, + { + /* SDHCI TX */ + }, + { + /* ETH RX */ + .rx = true, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* ETH TX */ + .rx = false, + .descriptors = 2, + .descriptor_elements = 1 + }, + } +}; + +static const __devinitconst struct resource timberdale_dma_resources[] = { { .start = DMAOFFSET, .end = DMAEND, @@ -265,11 +346,25 @@ const static __devinitconst struct resource timberdale_dma_resources[] = { static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = { { + .name = "timb-dma", + .num_resources = ARRAY_SIZE(timberdale_dma_resources), + .resources = timberdale_dma_resources, + .platform_data = &timb_dma_platform_data, + .data_size = sizeof(timb_dma_platform_data), + }, + { .name = "timb-uart", .num_resources = ARRAY_SIZE(timberdale_uart_resources), .resources = timberdale_uart_resources, }, { + .name = "xiic-i2c", + .num_resources = ARRAY_SIZE(timberdale_xiic_resources), + .resources = timberdale_xiic_resources, + .platform_data = &timberdale_xiic_platform_data, + .data_size = sizeof(timberdale_xiic_platform_data), + }, + { .name = "timb-gpio", .num_resources = ARRAY_SIZE(timberdale_gpio_resources), .resources = timberdale_gpio_resources, @@ -295,14 +390,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = { .num_resources = ARRAY_SIZE(timberdale_eth_resources), .resources = timberdale_eth_resources, }, +}; + +static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { { .name = "timb-dma", .num_resources = ARRAY_SIZE(timberdale_dma_resources), .resources = timberdale_dma_resources, + .platform_data = &timb_dma_platform_data, + .data_size = sizeof(timb_dma_platform_data), }, -}; - -static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { { .name = "timb-uart", .num_resources = ARRAY_SIZE(timberdale_uart_resources), @@ -314,6 +411,13 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { .resources = timberdale_uartlite_resources, }, { + .name = "xiic-i2c", + .num_resources = ARRAY_SIZE(timberdale_xiic_resources), + .resources = timberdale_xiic_resources, + .platform_data = 
&timberdale_xiic_platform_data, + .data_size = sizeof(timberdale_xiic_platform_data), + }, + { .name = "timb-gpio", .num_resources = ARRAY_SIZE(timberdale_gpio_resources), .resources = timberdale_gpio_resources, @@ -344,20 +448,29 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { .num_resources = ARRAY_SIZE(timberdale_eth_resources), .resources = timberdale_eth_resources, }, +}; + +static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = { { .name = "timb-dma", .num_resources = ARRAY_SIZE(timberdale_dma_resources), .resources = timberdale_dma_resources, + .platform_data = &timb_dma_platform_data, + .data_size = sizeof(timb_dma_platform_data), }, -}; - -static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = { { .name = "timb-uart", .num_resources = ARRAY_SIZE(timberdale_uart_resources), .resources = timberdale_uart_resources, }, { + .name = "xiic-i2c", + .num_resources = ARRAY_SIZE(timberdale_xiic_resources), + .resources = timberdale_xiic_resources, + .platform_data = &timberdale_xiic_platform_data, + .data_size = sizeof(timberdale_xiic_platform_data), + }, + { .name = "timb-gpio", .num_resources = ARRAY_SIZE(timberdale_gpio_resources), .resources = timberdale_gpio_resources, @@ -378,14 +491,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = { .platform_data = &timberdale_xspi_platform_data, .data_size = sizeof(timberdale_xspi_platform_data), }, +}; + +static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = { { .name = "timb-dma", .num_resources = ARRAY_SIZE(timberdale_dma_resources), .resources = timberdale_dma_resources, + .platform_data = &timb_dma_platform_data, + .data_size = sizeof(timb_dma_platform_data), }, -}; - -static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = { { .name = "timb-uart", .num_resources = ARRAY_SIZE(timberdale_uart_resources), @@ -424,11 +539,6 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = { .num_resources = ARRAY_SIZE(timberdale_eth_resources), .resources = timberdale_eth_resources, }, - { - .name = "timb-dma", - .num_resources = ARRAY_SIZE(timberdale_dma_resources), - .resources = timberdale_dma_resources, - }, }; static const __devinitconst struct resource timberdale_sdhc_resources[] = { diff --git a/drivers/mfd/timberdale.h b/drivers/mfd/timberdale.h index 8d27ffabc25d..c11bf6ebfe00 100644 --- a/drivers/mfd/timberdale.h +++ b/drivers/mfd/timberdale.h @@ -23,7 +23,7 @@ #ifndef MFD_TIMBERDALE_H #define MFD_TIMBERDALE_H -#define DRV_VERSION "0.1" +#define DRV_VERSION "0.2" /* This driver only support versions >= 3.8 and < 4.0 */ #define TIMB_SUPPORTED_MAJOR 3 @@ -66,7 +66,7 @@ #define CHIPCTLOFFSET 0x800 #define CHIPCTLEND 0x8ff -#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET) +#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET + 1) #define INTCOFFSET 0xc00 #define INTCEND 0xfff @@ -127,4 +127,16 @@ #define GPIO_PIN_BT_RST 15 #define GPIO_NR_PINS 16 +/* DMA Channels */ +#define DMA_UART_RX 0 +#define DMA_UART_TX 1 +#define DMA_MLB_RX 2 +#define DMA_MLB_TX 3 +#define DMA_VIDEO_RX 4 +#define DMA_VIDEO_DROP 5 +#define DMA_SDHCI_RX 6 +#define DMA_SDHCI_TX 7 +#define DMA_ETH_RX 8 +#define DMA_ETH_TX 9 + #endif diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c index e5955306c2fa..9b22a77f70f5 100644 --- a/drivers/mfd/tps65010.c +++ b/drivers/mfd/tps65010.c @@ -530,8 +530,8 @@ static int __exit tps65010_remove(struct i2c_client *client) cancel_delayed_work(&tps->work); flush_scheduled_work(); debugfs_remove(tps->file); - kfree(tps); 
i2c_set_clientdata(client, NULL); + kfree(tps); the_tps = NULL; return 0; } diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c new file mode 100644 index 000000000000..d859dffed39f --- /dev/null +++ b/drivers/mfd/tps6507x.c @@ -0,0 +1,159 @@ +/* + * tps6507x.c -- TPS6507x chip family multi-function driver + * + * Copyright (c) 2010 RidgeRun (todd.fischer@ridgerun.com) + * + * Author: Todd Fischer + * todd.fischer@ridgerun.com + * + * Credits: + * + * Using code from wm831x-*.c, wm8400-core, Wolfson Microelectronics PLC. + * + * For licensing details see kernel-base/COPYING + * + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/mfd/core.h> +#include <linux/mfd/tps6507x.h> + +static struct mfd_cell tps6507x_devs[] = { + { + .name = "tps6507x-pmic", + }, + { + .name = "tps6507x-ts", + }, +}; + + +static int tps6507x_i2c_read_device(struct tps6507x_dev *tps6507x, char reg, + int bytes, void *dest) +{ + struct i2c_client *i2c = tps6507x->i2c_client; + struct i2c_msg xfer[2]; + int ret; + + /* Write register */ + xfer[0].addr = i2c->addr; + xfer[0].flags = 0; + xfer[0].len = 1; + xfer[0].buf = &reg; + + /* Read data */ + xfer[1].addr = i2c->addr; + xfer[1].flags = I2C_M_RD; + xfer[1].len = bytes; + xfer[1].buf = dest; + + ret = i2c_transfer(i2c->adapter, xfer, 2); + if (ret == 2) + ret = 0; + else if (ret >= 0) + ret = -EIO; + + return ret; +} + +static int tps6507x_i2c_write_device(struct tps6507x_dev *tps6507x, char reg, + int bytes, void *src) +{ + struct i2c_client *i2c = tps6507x->i2c_client; + /* we add 1 byte for device register */ + u8 msg[TPS6507X_MAX_REGISTER + 1]; + int ret; + + if (bytes > (TPS6507X_MAX_REGISTER + 1)) + return -EINVAL; + + msg[0] = reg; + memcpy(&msg[1], src, bytes); + + ret = i2c_master_send(i2c, msg, bytes + 1); + if (ret < 0) + return ret; + if (ret != bytes + 1) + return -EIO; + return 0; +} + +static int tps6507x_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct tps6507x_dev *tps6507x; + int ret = 0; + + tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL); + if (tps6507x == NULL) + return -ENOMEM; + + i2c_set_clientdata(i2c, tps6507x); + tps6507x->dev = &i2c->dev; + tps6507x->i2c_client = i2c; + tps6507x->read_dev = tps6507x_i2c_read_device; + tps6507x->write_dev = tps6507x_i2c_write_device; + + ret = mfd_add_devices(tps6507x->dev, -1, + tps6507x_devs, ARRAY_SIZE(tps6507x_devs), + NULL, 0); + + if (ret < 0) + goto err; + + return ret; + +err: + mfd_remove_devices(tps6507x->dev); + kfree(tps6507x); + return ret; +} + +static int tps6507x_i2c_remove(struct i2c_client *i2c) +{ + struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c); + + mfd_remove_devices(tps6507x->dev); + kfree(tps6507x); + + return 0; +} + +static const struct i2c_device_id tps6507x_i2c_id[] = { + { "tps6507x", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id); + + +static struct i2c_driver tps6507x_i2c_driver = { + .driver = { + .name = "tps6507x", + .owner = THIS_MODULE, + }, + .probe = tps6507x_i2c_probe, + .remove = tps6507x_i2c_remove, + .id_table = tps6507x_i2c_id, +}; + +static int __init tps6507x_i2c_init(void) +{ + return i2c_add_driver(&tps6507x_i2c_driver); +} +/* init early so consumer devices can complete system boot */ +subsys_initcall(tps6507x_i2c_init); + +static void __exit tps6507x_i2c_exit(void) +{ + i2c_del_driver(&tps6507x_i2c_driver); +} +module_exit(tps6507x_i2c_exit); +
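(Editorial sketch, not part of the patch: the read_dev/write_dev callbacks installed in tps6507x_i2c_probe() give the tps6507x-pmic and tps6507x-ts subdrivers a bus-agnostic register interface. The wrapper below, with a hypothetical name, shows the intended call pattern.)

/* Hypothetical subdriver helper: fetch one register through the callback,
 * without the caller knowing the underlying transport is I2C. */
static int example_tps6507x_reg_read(struct tps6507x_dev *tps, char reg,
				     u8 *val)
{
	return tps->read_dev(tps, reg, 1, val);
}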
+MODULE_DESCRIPTION("TPS6507x chip family multi-function driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 202bdd59632d..097f24d8bceb 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -232,10 +232,11 @@ static const struct sih sih_modules_twl5031[8] = { }, [6] = { /* - * ACI doesn't use the same SIH organization. - * For example, it supports only one interrupt line + * ECI/DBI doesn't use the same SIH organization. + * For example, it supports only one interrupt output line. + * That is, the interrupts are seen on both INT1 and INT2 lines. */ - .name = "aci", + .name = "eci_dbi", .module = TWL5031_MODULE_ACCESSORY, .bits = 9, .bytes_ixr = 2, @@ -247,8 +248,8 @@ static const struct sih sih_modules_twl5031[8] = { }, [7] = { - /* Accessory */ - .name = "acc", + /* Audio accessory */ + .name = "audio", .module = TWL5031_MODULE_ACCESSORY, .control_offset = TWL5031_ACCSIHCTRL, .bits = 2, diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index f2ab025ad97a..1a968f34d679 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -322,7 +322,11 @@ EXPORT_SYMBOL_GPL(wm831x_set_bits); */ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) { - int ret, src; + int ret, src, irq_masked, timeout; + + /* Are we using the interrupt? */ + irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK); + irq_masked &= WM831X_AUXADC_DATA_EINT; mutex_lock(&wm831x->auxadc_lock); @@ -342,6 +346,9 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) goto out; } + /* Clear any notification from a very late arriving interrupt */ + try_wait_for_completion(&wm831x->auxadc_done); + ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA); if (ret < 0) { @@ -349,22 +356,46 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) goto disable; } - /* If an interrupt arrived late clean up after it */ - try_wait_for_completion(&wm831x->auxadc_done); - - /* Ignore the result to allow us to soldier on without IRQ hookup */ - wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5)); - - ret = wm831x_reg_read(wm831x, WM831X_AUXADC_CONTROL); - if (ret < 0) { - dev_err(wm831x->dev, "AUXADC status read failed: %d\n", ret); - goto disable; - } - - if (ret & WM831X_AUX_CVT_ENA) { - dev_err(wm831x->dev, "Timed out reading AUXADC\n"); - ret = -EBUSY; - goto disable; + if (irq_masked) { + /* If we're not using interrupts then poll the + * interrupt status register */ + timeout = 5; + while (timeout) { + msleep(1); + + ret = wm831x_reg_read(wm831x, + WM831X_INTERRUPT_STATUS_1); + if (ret < 0) { + dev_err(wm831x->dev, + "ISR 1 read failed: %d\n", ret); + goto disable; + } + + /* Did it complete? */ + if (ret & WM831X_AUXADC_DATA_EINT) { + wm831x_reg_write(wm831x, + WM831X_INTERRUPT_STATUS_1, + WM831X_AUXADC_DATA_EINT); + break; + } else { + dev_err(wm831x->dev, + "AUXADC conversion timeout\n"); + ret = -EBUSY; + goto disable; + } + } + } else { + /* If we are using interrupts then wait for the + * interrupt to complete. Use an extremely long + * timeout to handle situations with heavy load where + * the notification of the interrupt may be delayed by + * threaded IRQ handling. 
*/ + if (!wait_for_completion_timeout(&wm831x->auxadc_done, + msecs_to_jiffies(500))) { + dev_err(wm831x->dev, "Timed out waiting for AUXADC\n"); + ret = -EBUSY; + goto disable; + } } ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); @@ -1463,6 +1494,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8310: parent = WM8310; wm831x->num_gpio = 16; + wm831x->charger_irq_wake = 1; if (rev > 0) { wm831x->has_gpio_ena = 1; wm831x->has_cs_sts = 1; @@ -1474,6 +1506,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8311: parent = WM8311; wm831x->num_gpio = 16; + wm831x->charger_irq_wake = 1; if (rev > 0) { wm831x->has_gpio_ena = 1; wm831x->has_cs_sts = 1; @@ -1485,6 +1518,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8312: parent = WM8312; wm831x->num_gpio = 16; + wm831x->charger_irq_wake = 1; if (rev > 0) { wm831x->has_gpio_ena = 1; wm831x->has_cs_sts = 1; @@ -1623,6 +1657,42 @@ static void wm831x_device_exit(struct wm831x *wm831x) kfree(wm831x); } +static int wm831x_device_suspend(struct wm831x *wm831x) +{ + int reg, mask; + + /* If the charger IRQs are a wake source then make sure we ack + * them even if they're not actively being used (eg, no power + * driver or no IRQ line wired up) then acknowledge the + * interrupts otherwise suspend won't last very long. + */ + if (wm831x->charger_irq_wake) { + reg = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_2_MASK); + + mask = WM831X_CHG_BATT_HOT_EINT | + WM831X_CHG_BATT_COLD_EINT | + WM831X_CHG_BATT_FAIL_EINT | + WM831X_CHG_OV_EINT | WM831X_CHG_END_EINT | + WM831X_CHG_TO_EINT | WM831X_CHG_MODE_EINT | + WM831X_CHG_START_EINT; + + /* If any of the interrupts are masked read the statuses */ + if (reg & mask) + reg = wm831x_reg_read(wm831x, + WM831X_INTERRUPT_STATUS_2); + + if (reg & mask) { + dev_info(wm831x->dev, + "Acknowledging masked charger IRQs: %x\n", + reg & mask); + wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_2, + reg & mask); + } + } + + return 0; +} + static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg, int bytes, void *dest) { @@ -1697,6 +1767,13 @@ static int wm831x_i2c_remove(struct i2c_client *i2c) return 0; } +static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg) +{ + struct wm831x *wm831x = i2c_get_clientdata(i2c); + + return wm831x_device_suspend(wm831x); +} + static const struct i2c_device_id wm831x_i2c_id[] = { { "wm8310", WM8310 }, { "wm8311", WM8311 }, @@ -1714,6 +1791,7 @@ static struct i2c_driver wm831x_i2c_driver = { }, .probe = wm831x_i2c_probe, .remove = wm831x_i2c_remove, + .suspend = wm831x_i2c_suspend, .id_table = wm831x_i2c_id, }; diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index 4c1122ceb443..7dabe4dbd373 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c @@ -39,8 +39,6 @@ struct wm831x_irq_data { int primary; int reg; int mask; - irq_handler_t handler; - void *handler_data; }; static struct wm831x_irq_data wm831x_irqs[] = { @@ -492,6 +490,14 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) mutex_init(&wm831x->irq_lock); + /* Mask the individual interrupt sources */ + for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { + wm831x->irq_masks_cur[i] = 0xffff; + wm831x->irq_masks_cache[i] = 0xffff; + wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i, + 0xffff); + } + if (!irq) { dev_warn(wm831x->dev, "No interrupt specified - functionality limited\n"); @@ -507,14 +513,6 @@ int 
wm831x_irq_init(struct wm831x *wm831x, int irq) wm831x->irq = irq; wm831x->irq_base = pdata->irq_base; - /* Mask the individual interrupt sources */ - for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { - wm831x->irq_masks_cur[i] = 0xffff; - wm831x->irq_masks_cache[i] = 0xffff; - wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i, - 0xffff); - } - /* Register them with genirq */ for (cur_irq = wm831x->irq_base; cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base; diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c index 65830f57c093..7795af4b1fe1 100644 --- a/drivers/mfd/wm8350-i2c.c +++ b/drivers/mfd/wm8350-i2c.c @@ -64,10 +64,8 @@ static int wm8350_i2c_probe(struct i2c_client *i2c, int ret = 0; wm8350 = kzalloc(sizeof(struct wm8350), GFP_KERNEL); - if (wm8350 == NULL) { - kfree(i2c); + if (wm8350 == NULL) return -ENOMEM; - } i2c_set_clientdata(i2c, wm8350); wm8350->dev = &i2c->dev; @@ -82,6 +80,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c, return ret; err: + i2c_set_clientdata(i2c, NULL); kfree(wm8350); return ret; } @@ -91,6 +90,7 @@ static int wm8350_i2c_remove(struct i2c_client *i2c) struct wm8350 *wm8350 = i2c_get_clientdata(i2c); wm8350_device_exit(wm8350); + i2c_set_clientdata(i2c, NULL); kfree(wm8350); return 0; diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c index 865ce013a821..e08aafa663dc 100644 --- a/drivers/mfd/wm8400-core.c +++ b/drivers/mfd/wm8400-core.c @@ -118,7 +118,7 @@ static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest) { int i, ret = 0; - BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache)); + BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache)); /* If there are any volatile reads then read back the entire block */ for (i = reg; i < reg + num_regs; i++) @@ -144,7 +144,7 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs, { int ret, i; - BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache)); + BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache)); for (i = 0; i < num_regs; i++) { BUG_ON(!reg_data[reg + i].writable); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 0d0d625fece2..26386a92f5aa 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -14,11 +14,17 @@ menuconfig MISC_DEVICES if MISC_DEVICES config AD525X_DPOT - tristate "Analog Devices AD525x Digital Potentiometers" - depends on I2C && SYSFS + tristate "Analog Devices Digital Potentiometers" + depends on (I2C || SPI) && SYSFS help If you say yes here, you get support for the Analog Devices - AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255 + AD5258, AD5259, AD5251, AD5252, AD5253, AD5254, AD5255 + AD5160, AD5161, AD5162, AD5165, AD5200, AD5201, AD5203, + AD5204, AD5206, AD5207, AD5231, AD5232, AD5233, AD5235, + AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293, + AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242, + AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282, + ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173 digital potentiometer chips. See Documentation/misc-devices/ad525x_dpot.txt for the @@ -27,6 +33,26 @@ config AD525X_DPOT This driver can also be built as a module. If so, the module will be called ad525x_dpot. +config AD525X_DPOT_I2C + tristate "support I2C bus connection" + depends on AD525X_DPOT && I2C + help + Say Y here if you have digital potentiometers hooked to an I2C bus. + + To compile this driver as a module, choose M here: the + module will be called ad525x_dpot-i2c.
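(Editorial sketch, not part of the patch: a part covered by the option above is typically declared in board setup code along these lines. The bus number and the 0x18 slave address are placeholders; the real values come from the board schematic and the part's datasheet.)

/* Hypothetical board support fragment: register an AD5258 on I2C bus 0. */
static struct i2c_board_info dpot_i2c_devs[] __initdata = {
	{ I2C_BOARD_INFO("ad5258", 0x18) },	/* example address only */
};

/* ...called from the board's init code: */
i2c_register_board_info(0, dpot_i2c_devs, ARRAY_SIZE(dpot_i2c_devs));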
+ +config AD525X_DPOT_SPI + tristate "support SPI bus connection" + depends on AD525X_DPOT && SPI_MASTER + help + Say Y here if you have digital potentiometers hooked to an SPI bus. + + If unsure, say N (but it's safe to say "Y"). + + To compile this driver as a module, choose M here: the + module will be called ad525x_dpot-spi. + config ATMEL_PWM tristate "Atmel AT32/AT91 PWM support" depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f12dc3e54402..6ed06a19474a 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -4,6 +4,8 @@ obj-$(CONFIG_IBM_ASM) += ibmasm/ obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o +obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o +obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c new file mode 100644 index 000000000000..374352af7979 --- /dev/null +++ b/drivers/misc/ad525x_dpot-i2c.c @@ -0,0 +1,134 @@ +/* + * Driver for the Analog Devices digital potentiometers (I2C bus) + * + * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#include <linux/i2c.h> +#include <linux/module.h> + +#include "ad525x_dpot.h" + +/* ------------------------------------------------------------------------- */ +/* I2C bus functions */ +static int write_d8(void *client, u8 val) +{ + return i2c_smbus_write_byte(client, val); +} + +static int write_r8d8(void *client, u8 reg, u8 val) +{ + return i2c_smbus_write_byte_data(client, reg, val); +} + +static int write_r8d16(void *client, u8 reg, u16 val) +{ + return i2c_smbus_write_word_data(client, reg, val); +} + +static int read_d8(void *client) +{ + return i2c_smbus_read_byte(client); +} + +static int read_r8d8(void *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + +static int read_r8d16(void *client, u8 reg) +{ + return i2c_smbus_read_word_data(client, reg); +} + +static const struct ad_dpot_bus_ops bops = { + .read_d8 = read_d8, + .read_r8d8 = read_r8d8, + .read_r8d16 = read_r8d16, + .write_d8 = write_d8, + .write_r8d8 = write_r8d8, + .write_r8d16 = write_r8d16, +}; + +static int __devinit ad_dpot_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ad_dpot_bus_data bdata = { + .client = client, + .bops = &bops, + }; + + struct ad_dpot_id dpot_id = { + .name = (char *) &id->name, + .devid = id->driver_data, + }; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WORD_DATA)) { + dev_err(&client->dev, "SMBUS Word Data not Supported\n"); + return -EIO; + } + + return ad_dpot_probe(&client->dev, &bdata, &dpot_id); +} + +static int __devexit ad_dpot_i2c_remove(struct i2c_client *client) +{ + return ad_dpot_remove(&client->dev); } + +static const struct i2c_device_id ad_dpot_id[] = { + {"ad5258", AD5258_ID}, + {"ad5259", AD5259_ID}, + {"ad5251", AD5251_ID}, + {"ad5252", AD5252_ID}, + {"ad5253", AD5253_ID}, + {"ad5254", AD5254_ID}, + {"ad5255", AD5255_ID}, + {"ad5241", AD5241_ID}, + {"ad5242", AD5242_ID}, + {"ad5243", AD5243_ID}, + {"ad5245", AD5245_ID}, + {"ad5246", AD5246_ID}, + {"ad5247", AD5247_ID}, + {"ad5248", AD5248_ID}, + {"ad5280", AD5280_ID}, + {"ad5282", AD5282_ID}, + {"adn2860", ADN2860_ID}, + {"ad5273", AD5273_ID}, + {"ad5171", AD5171_ID}, + {"ad5170", AD5170_ID}, + {"ad5172", AD5172_ID}, + {"ad5173", AD5173_ID}, + {}
+}; +MODULE_DEVICE_TABLE(i2c, ad_dpot_id); + +static struct i2c_driver ad_dpot_i2c_driver = { + .driver = { + .name = "ad_dpot", + .owner = THIS_MODULE, + }, + .probe = ad_dpot_i2c_probe, + .remove = __devexit_p(ad_dpot_i2c_remove), + .id_table = ad_dpot_id, +}; + +static int __init ad_dpot_i2c_init(void) +{ + return i2c_add_driver(&ad_dpot_i2c_driver); +} +module_init(ad_dpot_i2c_init); + +static void __exit ad_dpot_i2c_exit(void) +{ + i2c_del_driver(&ad_dpot_i2c_driver); +} +module_exit(ad_dpot_i2c_exit); + +MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); +MODULE_DESCRIPTION("digital potentiometer I2C bus driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("i2c:ad_dpot"); diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c new file mode 100644 index 000000000000..b8c6df9c8437 --- /dev/null +++ b/drivers/misc/ad525x_dpot-spi.c @@ -0,0 +1,172 @@ +/* + * Driver for the Analog Devices digital potentiometers (SPI bus) + * + * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#include <linux/spi/spi.h> +#include <linux/module.h> + +#include "ad525x_dpot.h" + +static const struct ad_dpot_id ad_dpot_spi_devlist[] = { + {.name = "ad5160", .devid = AD5160_ID}, + {.name = "ad5161", .devid = AD5161_ID}, + {.name = "ad5162", .devid = AD5162_ID}, + {.name = "ad5165", .devid = AD5165_ID}, + {.name = "ad5200", .devid = AD5200_ID}, + {.name = "ad5201", .devid = AD5201_ID}, + {.name = "ad5203", .devid = AD5203_ID}, + {.name = "ad5204", .devid = AD5204_ID}, + {.name = "ad5206", .devid = AD5206_ID}, + {.name = "ad5207", .devid = AD5207_ID}, + {.name = "ad5231", .devid = AD5231_ID}, + {.name = "ad5232", .devid = AD5232_ID}, + {.name = "ad5233", .devid = AD5233_ID}, + {.name = "ad5235", .devid = AD5235_ID}, + {.name = "ad5260", .devid = AD5260_ID}, + {.name = "ad5262", .devid = AD5262_ID}, + {.name = "ad5263", .devid = AD5263_ID}, + {.name = "ad5290", .devid = AD5290_ID}, + {.name = "ad5291", .devid = AD5291_ID}, + {.name = "ad5292", .devid = AD5292_ID}, + {.name = "ad5293", .devid = AD5293_ID}, + {.name = "ad7376", .devid = AD7376_ID}, + {.name = "ad8400", .devid = AD8400_ID}, + {.name = "ad8402", .devid = AD8402_ID}, + {.name = "ad8403", .devid = AD8403_ID}, + {.name = "adn2850", .devid = ADN2850_ID}, + {} +}; + +/* ------------------------------------------------------------------------- */ + +/* SPI bus functions */ +static int write8(void *client, u8 val) +{ + u8 data = val; + return spi_write(client, &data, 1); +} + +static int write16(void *client, u8 reg, u8 val) +{ + u8 data[2] = {reg, val}; + return spi_write(client, data, 2); +} + +static int write24(void *client, u8 reg, u16 val) +{ + u8 data[3] = {reg, val >> 8, val}; + return spi_write(client, data, 3); +} + +static int read8(void *client) +{ + int ret; + u8 data; + ret = spi_read(client, &data, 1); + if (ret < 0) + return ret; + + return data; +} + +static int read16(void *client, u8 reg) +{ + int ret; + u8 buf_rx[2]; + + write16(client, reg, 0); + ret = spi_read(client, buf_rx, 2); + if (ret < 0) + return ret; + + return (buf_rx[0] << 8) | buf_rx[1]; +} + +static int read24(void *client, u8 reg) +{ + int ret; + u8 buf_rx[3]; + + write24(client, reg, 0); + ret = spi_read(client, buf_rx, 3); + if (ret < 0) + return ret; + + return (buf_rx[1] << 8) | buf_rx[2]; +} + +static const struct ad_dpot_bus_ops bops = { + .read_d8 = read8, + .read_r8d8 = read16, + .read_r8d16 = read24, + .write_d8 = write8, + .write_r8d8 = write16, + .write_r8d16 =
write24, +}; + +static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id, + char *name) +{ + while (id->name && id->name[0]) { + if (strcmp(name, id->name) == 0) + return id; + id++; + } + return NULL; +} + +static int __devinit ad_dpot_spi_probe(struct spi_device *spi) +{ + char *name = spi->dev.platform_data; + const struct ad_dpot_id *dpot_id; + + struct ad_dpot_bus_data bdata = { + .client = spi, + .bops = &bops, + }; + + dpot_id = dpot_match_id(ad_dpot_spi_devlist, name); + + if (dpot_id == NULL) { + dev_err(&spi->dev, "%s not in supported device list", name); + return -ENODEV; + } + + return ad_dpot_probe(&spi->dev, &bdata, dpot_id); +} + +static int __devexit ad_dpot_spi_remove(struct spi_device *spi) +{ + return ad_dpot_remove(&spi->dev); +} + +static struct spi_driver ad_dpot_spi_driver = { + .driver = { + .name = "ad_dpot", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = ad_dpot_spi_probe, + .remove = __devexit_p(ad_dpot_spi_remove), +}; + +static int __init ad_dpot_spi_init(void) +{ + return spi_register_driver(&ad_dpot_spi_driver); +} +module_init(ad_dpot_spi_init); + +static void __exit ad_dpot_spi_exit(void) +{ + spi_unregister_driver(&ad_dpot_spi_driver); +} +module_exit(ad_dpot_spi_exit); + +MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); +MODULE_DESCRIPTION("digital potentiometer SPI bus driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:ad_dpot"); diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c index 30a59f2bacd2..5e6fa8449e8b 100644 --- a/drivers/misc/ad525x_dpot.c +++ b/drivers/misc/ad525x_dpot.c @@ -1,6 +1,6 @@ /* - * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers - * Copyright (c) 2009 Analog Devices, Inc. + * ad525x_dpot: Driver for the Analog Devices digital potentiometers + * Copyright (c) 2009-2010 Analog Devices, Inc. * Author: Michael Hennerich <hennerich@blackfin.uclinux.org> * * DEVID #Wipers #Positions Resistor Options (kOhm) @@ -11,6 +11,47 @@ * AD5255 3 512 25, 250 * AD5253 4 64 1, 10, 50, 100 * AD5254 4 256 1, 10, 50, 100 + * AD5160 1 256 5, 10, 50, 100 + * AD5161 1 256 5, 10, 50, 100 + * AD5162 2 256 2.5, 10, 50, 100 + * AD5165 1 256 100 + * AD5200 1 256 10, 50 + * AD5201 1 33 10, 50 + * AD5203 4 64 10, 100 + * AD5204 4 256 10, 50, 100 + * AD5206 6 256 10, 50, 100 + * AD5207 2 256 10, 50, 100 + * AD5231 1 1024 10, 50, 100 + * AD5232 2 256 10, 50, 100 + * AD5233 4 64 10, 50, 100 + * AD5235 2 1024 25, 250 + * AD5260 1 256 20, 50, 200 + * AD5262 2 256 20, 50, 200 + * AD5263 4 256 20, 50, 200 + * AD5290 1 256 10, 50, 100 + * AD5291 1 256 20 + * AD5292 1 1024 20 + * AD5293 1 1024 20 + * AD7376 1 128 10, 50, 100, 1M + * AD8400 1 256 1, 10, 50, 100 + * AD8402 2 256 1, 10, 50, 100 + * AD8403 4 256 1, 10, 50, 100 + * ADN2850 3 512 25, 250 + * AD5241 1 256 10, 100, 1M + * AD5246 1 128 5, 10, 50, 100 + * AD5247 1 128 5, 10, 50, 100 + * AD5245 1 256 5, 10, 50, 100 + * AD5243 2 256 2.5, 10, 50, 100 + * AD5248 2 256 2.5, 10, 50, 100 + * AD5242 2 256 20, 50, 200 + * AD5280 1 256 20, 50, 200 + * AD5282 2 256 20, 50, 200 + * ADN2860 3 512 25, 250 + * AD5273 1 64 1, 10, 50, 100 (OTP) + * AD5171 1 64 5, 10, 50, 100 (OTP) + * AD5170 1 256 2.5, 10, 50, 100 (OTP) + * AD5172 2 256 2.5, 10, 50, 100 (OTP) + * AD5173 2 256 2.5, 10, 50, 100 (OTP) * * See Documentation/misc-devices/ad525x_dpot.txt for more info. 
* @@ -28,77 +69,283 @@ #include <linux/device.h> #include <linux/kernel.h> #include <linux/init.h> -#include <linux/slab.h> -#include <linux/i2c.h> #include <linux/delay.h> +#include <linux/slab.h> -#define DRIVER_NAME "ad525x_dpot" -#define DRIVER_VERSION "0.1" - -enum dpot_devid { - AD5258_ID, - AD5259_ID, - AD5251_ID, - AD5252_ID, - AD5253_ID, - AD5254_ID, - AD5255_ID, -}; +#define DRIVER_VERSION "0.2" -#define AD5258_MAX_POSITION 64 -#define AD5259_MAX_POSITION 256 -#define AD5251_MAX_POSITION 64 -#define AD5252_MAX_POSITION 256 -#define AD5253_MAX_POSITION 64 -#define AD5254_MAX_POSITION 256 -#define AD5255_MAX_POSITION 512 - -#define AD525X_RDAC0 0 -#define AD525X_RDAC1 1 -#define AD525X_RDAC2 2 -#define AD525X_RDAC3 3 - -#define AD525X_REG_TOL 0x18 -#define AD525X_TOL_RDAC0 (AD525X_REG_TOL | AD525X_RDAC0) -#define AD525X_TOL_RDAC1 (AD525X_REG_TOL | AD525X_RDAC1) -#define AD525X_TOL_RDAC2 (AD525X_REG_TOL | AD525X_RDAC2) -#define AD525X_TOL_RDAC3 (AD525X_REG_TOL | AD525X_RDAC3) - -/* RDAC-to-EEPROM Interface Commands */ -#define AD525X_I2C_RDAC (0x00 << 5) -#define AD525X_I2C_EEPROM (0x01 << 5) -#define AD525X_I2C_CMD (0x80) - -#define AD525X_DEC_ALL_6DB (AD525X_I2C_CMD | (0x4 << 3)) -#define AD525X_INC_ALL_6DB (AD525X_I2C_CMD | (0x9 << 3)) -#define AD525X_DEC_ALL (AD525X_I2C_CMD | (0x6 << 3)) -#define AD525X_INC_ALL (AD525X_I2C_CMD | (0xB << 3)) - -static s32 ad525x_read(struct i2c_client *client, u8 reg); -static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value); +#include "ad525x_dpot.h" /* * Client data (each client gets its own) */ struct dpot_data { + struct ad_dpot_bus_data bdata; struct mutex update_lock; unsigned rdac_mask; unsigned max_pos; - unsigned devid; + unsigned long devid; + unsigned uid; + unsigned feat; + unsigned wipers; + u16 rdac_cache[MAX_RDACS]; + DECLARE_BITMAP(otp_en_mask, MAX_RDACS); }; +static inline int dpot_read_d8(struct dpot_data *dpot) +{ + return dpot->bdata.bops->read_d8(dpot->bdata.client); +} + +static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg) +{ + return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg); +} + +static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg) +{ + return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg); +} + +static inline int dpot_write_d8(struct dpot_data *dpot, u8 val) +{ + return dpot->bdata.bops->write_d8(dpot->bdata.client, val); +} + +static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val) +{ + return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val); +} + +static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val) +{ + return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val); +} + +static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg) +{ + unsigned ctrl = 0; + + if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) { + + if (dpot->feat & F_RDACS_WONLY) + return dpot->rdac_cache[reg & DPOT_RDAC_MASK]; + + if (dpot->uid == DPOT_UID(AD5291_ID) || + dpot->uid == DPOT_UID(AD5292_ID) || + dpot->uid == DPOT_UID(AD5293_ID)) + return dpot_read_r8d8(dpot, + DPOT_AD5291_READ_RDAC << 2); + + ctrl = DPOT_SPI_READ_RDAC; + } else if (reg & DPOT_ADDR_EEPROM) { + ctrl = DPOT_SPI_READ_EEPROM; + } + + if (dpot->feat & F_SPI_16BIT) + return dpot_read_r8d8(dpot, ctrl); + else if (dpot->feat & F_SPI_24BIT) + return dpot_read_r8d16(dpot, ctrl); + + return -EFAULT; +} + +static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg) +{ + unsigned ctrl = 0; + switch (dpot->uid) { + case DPOT_UID(AD5246_ID): + case DPOT_UID(AD5247_ID): + return 
dpot_read_d8(dpot); + case DPOT_UID(AD5245_ID): + case DPOT_UID(AD5241_ID): + case DPOT_UID(AD5242_ID): + case DPOT_UID(AD5243_ID): + case DPOT_UID(AD5248_ID): + case DPOT_UID(AD5280_ID): + case DPOT_UID(AD5282_ID): + ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? + 0 : DPOT_AD5291_RDAC_AB; + return dpot_read_r8d8(dpot, ctrl); + case DPOT_UID(AD5170_ID): + case DPOT_UID(AD5171_ID): + case DPOT_UID(AD5273_ID): + return dpot_read_d8(dpot); + case DPOT_UID(AD5172_ID): + case DPOT_UID(AD5173_ID): + ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? + 0 : DPOT_AD5272_3_A0; + return dpot_read_r8d8(dpot, ctrl); + default: + if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256)) + return dpot_read_r8d16(dpot, (reg & 0xF8) | + ((reg & 0x7) << 1)); + else + return dpot_read_r8d8(dpot, reg); + } +} + +static s32 dpot_read(struct dpot_data *dpot, u8 reg) +{ + if (dpot->feat & F_SPI) + return dpot_read_spi(dpot, reg); + else + return dpot_read_i2c(dpot, reg); +} + +static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value) +{ + unsigned val = 0; + + if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) { + if (dpot->feat & F_RDACS_WONLY) + dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value; + + if (dpot->feat & F_AD_APPDATA) { + if (dpot->feat & F_SPI_8BIT) { + val = ((reg & DPOT_RDAC_MASK) << + DPOT_MAX_POS(dpot->devid)) | + value; + return dpot_write_d8(dpot, val); + } else if (dpot->feat & F_SPI_16BIT) { + val = ((reg & DPOT_RDAC_MASK) << + DPOT_MAX_POS(dpot->devid)) | + value; + return dpot_write_r8d8(dpot, val >> 8, + val & 0xFF); + } else + BUG(); + } else { + if (dpot->uid == DPOT_UID(AD5291_ID) || + dpot->uid == DPOT_UID(AD5292_ID) || + dpot->uid == DPOT_UID(AD5293_ID)) + return dpot_write_r8d8(dpot, + (DPOT_AD5291_RDAC << 2) | + (value >> 8), value & 0xFF); + + val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK); + } + } else if (reg & DPOT_ADDR_EEPROM) { + val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK); + } else if (reg & DPOT_ADDR_CMD) { + switch (reg) { + case DPOT_DEC_ALL_6DB: + val = DPOT_SPI_DEC_ALL_6DB; + break; + case DPOT_INC_ALL_6DB: + val = DPOT_SPI_INC_ALL_6DB; + break; + case DPOT_DEC_ALL: + val = DPOT_SPI_DEC_ALL; + break; + case DPOT_INC_ALL: + val = DPOT_SPI_INC_ALL; + break; + } + } else + BUG(); + + if (dpot->feat & F_SPI_16BIT) + return dpot_write_r8d8(dpot, val, value); + else if (dpot->feat & F_SPI_24BIT) + return dpot_write_r8d16(dpot, val, value); + + return -EFAULT; +} + +static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value) +{ + /* Only write the instruction byte for certain commands */ + unsigned tmp = 0, ctrl = 0; + + switch (dpot->uid) { + case DPOT_UID(AD5246_ID): + case DPOT_UID(AD5247_ID): + return dpot_write_d8(dpot, value); + break; + + case DPOT_UID(AD5245_ID): + case DPOT_UID(AD5241_ID): + case DPOT_UID(AD5242_ID): + case DPOT_UID(AD5243_ID): + case DPOT_UID(AD5248_ID): + case DPOT_UID(AD5280_ID): + case DPOT_UID(AD5282_ID): + ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? + 0 : DPOT_AD5291_RDAC_AB; + return dpot_write_r8d8(dpot, ctrl, value); + break; + case DPOT_UID(AD5171_ID): + case DPOT_UID(AD5273_ID): + if (reg & DPOT_ADDR_OTP) { + tmp = dpot_read_d8(dpot); + if (tmp >> 6) /* Ready to Program? */ + return -EFAULT; + ctrl = DPOT_AD5273_FUSE; + } + return dpot_write_r8d8(dpot, ctrl, value); + break; + case DPOT_UID(AD5172_ID): + case DPOT_UID(AD5173_ID): + ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ? + 0 : DPOT_AD5272_3_A0; + if (reg & DPOT_ADDR_OTP) { + tmp = dpot_read_r8d16(dpot, ctrl); + if (tmp >> 14) /* Ready to Program? 
*/ + return -EFAULT; + ctrl |= DPOT_AD5270_2_3_FUSE; + } + return dpot_write_r8d8(dpot, ctrl, value); + break; + case DPOT_UID(AD5170_ID): + if (reg & DPOT_ADDR_OTP) { + tmp = dpot_read_r8d16(dpot, tmp); + if (tmp >> 14) /* Ready to Program? */ + return -EFAULT; + ctrl = DPOT_AD5270_2_3_FUSE; + } + return dpot_write_r8d8(dpot, ctrl, value); + break; + default: + if (reg & DPOT_ADDR_CMD) + return dpot_write_d8(dpot, reg); + + if (dpot->max_pos > 256) + return dpot_write_r8d16(dpot, (reg & 0xF8) | + ((reg & 0x7) << 1), value); + else + /* All other registers require instruction + data bytes */ + return dpot_write_r8d8(dpot, reg, value); + } +} + + +static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value) +{ + if (dpot->feat & F_SPI) + return dpot_write_spi(dpot, reg, value); + else + return dpot_write_i2c(dpot, reg, value); +} + /* sysfs functions */ static ssize_t sysfs_show_reg(struct device *dev, - struct device_attribute *attr, char *buf, u32 reg) + struct device_attribute *attr, + char *buf, u32 reg) { - struct i2c_client *client = to_i2c_client(dev); - struct dpot_data *data = i2c_get_clientdata(client); + struct dpot_data *data = dev_get_drvdata(dev); s32 value; + if (reg & DPOT_ADDR_OTP_EN) + return sprintf(buf, "%s\n", + test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ? + "enabled" : "disabled"); + + mutex_lock(&data->update_lock); - value = ad525x_read(client, reg); + value = dpot_read(data, reg); mutex_unlock(&data->update_lock); if (value < 0) @@ -111,7 +358,7 @@ static ssize_t sysfs_show_reg(struct device *dev, * datasheet (Rev. A) for more details. */ - if (reg & AD525X_REG_TOL) + if (reg & DPOT_REG_TOL) return sprintf(buf, "0x%04x\n", value & 0xFFFF); else return sprintf(buf, "%u\n", value & data->rdac_mask); @@ -121,11 +368,23 @@ static ssize_t sysfs_set_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count, u32 reg) { - struct i2c_client *client = to_i2c_client(dev); - struct dpot_data *data = i2c_get_clientdata(client); + struct dpot_data *data = dev_get_drvdata(dev); unsigned long value; int err; + if (reg & DPOT_ADDR_OTP_EN) { + if (!strncmp(buf, "enabled", sizeof("enabled"))) + set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); + else + clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); + + return count; + } + + if ((reg & DPOT_ADDR_OTP) && + !test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask)) + return -EPERM; + err = strict_strtoul(buf, 10, &value); if (err) return err; @@ -134,9 +393,11 @@ static ssize_t sysfs_set_reg(struct device *dev, value = data->rdac_mask; mutex_lock(&data->update_lock); - ad525x_write(client, reg, value); - if (reg & AD525X_I2C_EEPROM) + dpot_write(data, reg, value); + if (reg & DPOT_ADDR_EEPROM) msleep(26); /* Sleep while the EEPROM updates */ + else if (reg & DPOT_ADDR_OTP) + msleep(400); /* Sleep while the OTP updates */ mutex_unlock(&data->update_lock); return count; @@ -146,11 +407,10 @@ static ssize_t sysfs_do_cmd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count, u32 reg) { - struct i2c_client *client = to_i2c_client(dev); - struct dpot_data *data = i2c_get_clientdata(client); + struct dpot_data *data = dev_get_drvdata(dev); mutex_lock(&data->update_lock); - ad525x_write(client, reg, 0); + dpot_write(data, reg, 0); mutex_unlock(&data->update_lock); return count; @@ -158,244 +418,131 @@ static ssize_t sysfs_do_cmd(struct device *dev, /* ------------------------------------------------------------------------- */ -static ssize_t show_rdac0(struct device *dev, - struct 
device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0); -} - -static ssize_t set_rdac0(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_set_reg(dev, attr, buf, count, - AD525X_I2C_RDAC | AD525X_RDAC0); -} - -static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0); - -static ssize_t show_eeprom0(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0); -} - -static ssize_t set_eeprom0(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_set_reg(dev, attr, buf, count, - AD525X_I2C_EEPROM | AD525X_RDAC0); -} - -static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0); - -static ssize_t show_tolerance0(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, - AD525X_I2C_EEPROM | AD525X_TOL_RDAC0); -} - -static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL); - -/* ------------------------------------------------------------------------- */ - -static ssize_t show_rdac1(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1); -} - -static ssize_t set_rdac1(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_set_reg(dev, attr, buf, count, - AD525X_I2C_RDAC | AD525X_RDAC1); -} - -static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1); - -static ssize_t show_eeprom1(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1); -} - -static ssize_t set_eeprom1(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_set_reg(dev, attr, buf, count, - AD525X_I2C_EEPROM | AD525X_RDAC1); -} - -static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1); - -static ssize_t show_tolerance1(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, - AD525X_I2C_EEPROM | AD525X_TOL_RDAC1); -} - -static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL); - -/* ------------------------------------------------------------------------- */ - -static ssize_t show_rdac2(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2); -} - -static ssize_t set_rdac2(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_set_reg(dev, attr, buf, count, - AD525X_I2C_RDAC | AD525X_RDAC2); -} - -static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2); - -static ssize_t show_eeprom2(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2); -} - -static ssize_t set_eeprom2(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_set_reg(dev, attr, buf, count, - AD525X_I2C_EEPROM | AD525X_RDAC2); -} - -static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2); - -static ssize_t show_tolerance2(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, - AD525X_I2C_EEPROM | AD525X_TOL_RDAC2); -} - -static DEVICE_ATTR(tolerance2, S_IRUGO, 
show_tolerance2, NULL); - -/* ------------------------------------------------------------------------- */ - -static ssize_t show_rdac3(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3); -} - -static ssize_t set_rdac3(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_set_reg(dev, attr, buf, count, - AD525X_I2C_RDAC | AD525X_RDAC3); -} - -static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3); - -static ssize_t show_eeprom3(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3); -} - -static ssize_t set_eeprom3(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_set_reg(dev, attr, buf, count, - AD525X_I2C_EEPROM | AD525X_RDAC3); -} +#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \ +show_##_name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + return sysfs_show_reg(dev, attr, buf, _reg); \ +} + +#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \ +set_##_name(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + return sysfs_set_reg(dev, attr, buf, count, _reg); \ +} + +#define DPOT_DEVICE_SHOW_SET(name, reg) \ +DPOT_DEVICE_SHOW(name, reg) \ +DPOT_DEVICE_SET(name, reg) \ +static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name); + +#define DPOT_DEVICE_SHOW_ONLY(name, reg) \ +DPOT_DEVICE_SHOW(name, reg) \ +static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); + +DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0); +DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0); +DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0); +DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0); +DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0); + +DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1); +DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1); +DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1); +DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1); +DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1); + +DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2); +DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2); +DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2); +DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2); +DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2); + +DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3); +DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3); +DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3); +DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3); +DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3); + +DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4); +DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4); +DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4); +DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4); +DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4); + +DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5); +DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5); +DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5); +DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5); +DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN |
DPOT_RDAC5); + +static const struct attribute *dpot_attrib_wipers[] = { + &dev_attr_rdac0.attr, + &dev_attr_rdac1.attr, + &dev_attr_rdac2.attr, + &dev_attr_rdac3.attr, + &dev_attr_rdac4.attr, + &dev_attr_rdac5.attr, + NULL +}; -static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3); +static const struct attribute *dpot_attrib_eeprom[] = { + &dev_attr_eeprom0.attr, + &dev_attr_eeprom1.attr, + &dev_attr_eeprom2.attr, + &dev_attr_eeprom3.attr, + &dev_attr_eeprom4.attr, + &dev_attr_eeprom5.attr, + NULL +}; -static ssize_t show_tolerance3(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sysfs_show_reg(dev, attr, buf, - AD525X_I2C_EEPROM | AD525X_TOL_RDAC3); -} +static const struct attribute *dpot_attrib_otp[] = { + &dev_attr_otp0.attr, + &dev_attr_otp1.attr, + &dev_attr_otp2.attr, + &dev_attr_otp3.attr, + &dev_attr_otp4.attr, + &dev_attr_otp5.attr, + NULL +}; -static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL); - -static struct attribute *ad525x_attributes_wipers[4][4] = { - { - &dev_attr_rdac0.attr, - &dev_attr_eeprom0.attr, - &dev_attr_tolerance0.attr, - NULL - }, { - &dev_attr_rdac1.attr, - &dev_attr_eeprom1.attr, - &dev_attr_tolerance1.attr, - NULL - }, { - &dev_attr_rdac2.attr, - &dev_attr_eeprom2.attr, - &dev_attr_tolerance2.attr, - NULL - }, { - &dev_attr_rdac3.attr, - &dev_attr_eeprom3.attr, - &dev_attr_tolerance3.attr, - NULL - } +static const struct attribute *dpot_attrib_otp_en[] = { + &dev_attr_otp0en.attr, + &dev_attr_otp1en.attr, + &dev_attr_otp2en.attr, + &dev_attr_otp3en.attr, + &dev_attr_otp4en.attr, + &dev_attr_otp5en.attr, + NULL }; -static const struct attribute_group ad525x_group_wipers[] = { - {.attrs = ad525x_attributes_wipers[AD525X_RDAC0]}, - {.attrs = ad525x_attributes_wipers[AD525X_RDAC1]}, - {.attrs = ad525x_attributes_wipers[AD525X_RDAC2]}, - {.attrs = ad525x_attributes_wipers[AD525X_RDAC3]}, +static const struct attribute *dpot_attrib_tolerance[] = { + &dev_attr_tolerance0.attr, + &dev_attr_tolerance1.attr, + &dev_attr_tolerance2.attr, + &dev_attr_tolerance3.attr, + &dev_attr_tolerance4.attr, + &dev_attr_tolerance5.attr, + NULL }; /* ------------------------------------------------------------------------- */ -static ssize_t set_inc_all(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL); -} +#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \ +set_##_name(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ \ + return sysfs_do_cmd(dev, attr, buf, count, _cmd); \ +} \ +static DEVICE_ATTR(_name, S_IWUSR, NULL, set_##_name); -static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all); - -static ssize_t set_dec_all(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL); -} - -static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all); - -static ssize_t set_inc_all_6db(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB); -} - -static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db); - -static ssize_t set_dec_all_6db(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB); -} - -static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db);
+DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL); +DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL); +DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB); +DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB); static struct attribute *ad525x_attributes_commands[] = { &dev_attr_inc_all.attr, @@ -409,74 +556,56 @@ static const struct attribute_group ad525x_group_commands = { .attrs = ad525x_attributes_commands, }; -/* ------------------------------------------------------------------------- */ - -/* i2c device functions */ +__devinit int ad_dpot_add_files(struct device *dev, + unsigned features, unsigned rdac) +{ + int err = sysfs_create_file(&dev->kobj, + dpot_attrib_wipers[rdac]); + if (features & F_CMD_EEP) + err |= sysfs_create_file(&dev->kobj, + dpot_attrib_eeprom[rdac]); + if (features & F_CMD_TOL) + err |= sysfs_create_file(&dev->kobj, + dpot_attrib_tolerance[rdac]); + if (features & F_CMD_OTP) { + err |= sysfs_create_file(&dev->kobj, + dpot_attrib_otp_en[rdac]); + err |= sysfs_create_file(&dev->kobj, + dpot_attrib_otp[rdac]); + } -/** - * ad525x_read - return the value contained in the specified register - * on the AD5258 device. - * @client: value returned from i2c_new_device() - * @reg: the register to read - * - * If the tolerance register is specified, 2 bytes are returned. - * Otherwise, 1 byte is returned. A negative value indicates an error - * occurred while reading the register. - */ -static s32 ad525x_read(struct i2c_client *client, u8 reg) -{ - struct dpot_data *data = i2c_get_clientdata(client); + if (err) + dev_err(dev, "failed to register sysfs hooks for RDAC%d\n", + rdac); - if ((reg & AD525X_REG_TOL) || (data->max_pos > 256)) - return i2c_smbus_read_word_data(client, (reg & 0xF8) | - ((reg & 0x7) << 1)); - else - return i2c_smbus_read_byte_data(client, reg); + return err; } -/** - * ad525x_write - store the given value in the specified register on - * the AD5258 device. - * @client: value returned from i2c_new_device() - * @reg: the register to write - * @value: the byte to store in the register - * - * For certain instructions that do not require a data byte, "NULL" - * should be specified for the "value" parameter. These instructions - * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM. - * - * A negative return value indicates an error occurred while reading - * the register. 
- */ -static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value) -{ - struct dpot_data *data = i2c_get_clientdata(client); - - /* Only write the instruction byte for certain commands */ - if (reg & AD525X_I2C_CMD) - return i2c_smbus_write_byte(client, reg); - - if (data->max_pos > 256) - return i2c_smbus_write_word_data(client, (reg & 0xF8) | - ((reg & 0x7) << 1), value); - else - /* All other registers require instruction + data bytes */ - return i2c_smbus_write_byte_data(client, reg, value); +inline void ad_dpot_remove_files(struct device *dev, + unsigned features, unsigned rdac) +{ + sysfs_remove_file(&dev->kobj, + dpot_attrib_wipers[rdac]); + if (features & F_CMD_EEP) + sysfs_remove_file(&dev->kobj, + dpot_attrib_eeprom[rdac]); + if (features & F_CMD_TOL) + sysfs_remove_file(&dev->kobj, + dpot_attrib_tolerance[rdac]); + if (features & F_CMD_OTP) { + sysfs_remove_file(&dev->kobj, + dpot_attrib_otp_en[rdac]); + sysfs_remove_file(&dev->kobj, + dpot_attrib_otp[rdac]); + } } -static int ad525x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +__devinit int ad_dpot_probe(struct device *dev, + struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id) { - struct device *dev = &client->dev; - struct dpot_data *data; - int err = 0; - dev_dbg(dev, "%s\n", __func__); - - if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { - dev_err(dev, "missing I2C functionality for this driver\n"); - goto exit; - } + struct dpot_data *data; + int i, err = 0; data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL); if (!data) { @@ -484,183 +613,74 @@ static int ad525x_probe(struct i2c_client *client, goto exit; } - i2c_set_clientdata(client, data); + dev_set_drvdata(dev, data); mutex_init(&data->update_lock); - switch (id->driver_data) { - case AD5258_ID: - data->max_pos = AD5258_MAX_POSITION; - err = sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC0]); - break; - case AD5259_ID: - data->max_pos = AD5259_MAX_POSITION; - err = sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC0]); - break; - case AD5251_ID: - data->max_pos = AD5251_MAX_POSITION; - err = sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC1]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC3]); - err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); - break; - case AD5252_ID: - data->max_pos = AD5252_MAX_POSITION; - err = sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC1]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC3]); - err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); - break; - case AD5253_ID: - data->max_pos = AD5253_MAX_POSITION; - err = sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC0]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC1]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC2]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC3]); - err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); - break; - case AD5254_ID: - data->max_pos = AD5254_MAX_POSITION; - err = sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC0]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC1]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC2]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC3]); - err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); - break; - case 
AD5255_ID: - data->max_pos = AD5255_MAX_POSITION; - err = sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC0]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC1]); - err |= sysfs_create_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC2]); - err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); - break; - default: - err = -ENODEV; - goto exit_free; - } + data->bdata = *bdata; + data->devid = id->devid; + + data->max_pos = 1 << DPOT_MAX_POS(data->devid); + data->rdac_mask = data->max_pos - 1; + data->feat = DPOT_FEAT(data->devid); + data->uid = DPOT_UID(data->devid); + data->wipers = DPOT_WIPERS(data->devid); + + for (i = DPOT_RDAC0; i < MAX_RDACS; i++) + if (data->wipers & (1 << i)) { + err = ad_dpot_add_files(dev, data->feat, i); + if (err) + goto exit_remove_files; + /* power-up midscale */ + if (data->feat & F_RDACS_WONLY) + data->rdac_cache[i] = data->max_pos / 2; + } + + if (data->feat & F_CMD_INC) + err = sysfs_create_group(&dev->kobj, &ad525x_group_commands); if (err) { dev_err(dev, "failed to register sysfs hooks\n"); goto exit_free; } - data->devid = id->driver_data; - data->rdac_mask = data->max_pos - 1; - dev_info(dev, "%s %d-Position Digital Potentiometer registered\n", id->name, data->max_pos); return 0; +exit_remove_files: + for (i = DPOT_RDAC0; i < MAX_RDACS; i++) + if (data->wipers & (1 << i)) + ad_dpot_remove_files(dev, data->feat, i); + exit_free: kfree(data); - i2c_set_clientdata(client, NULL); + dev_set_drvdata(dev, NULL); exit: - dev_err(dev, "failed to create client\n"); + dev_err(dev, "failed to create client for %s ID 0x%lX\n", + id->name, id->devid); return err; } +EXPORT_SYMBOL(ad_dpot_probe); -static int __devexit ad525x_remove(struct i2c_client *client) +__devexit int ad_dpot_remove(struct device *dev) { - struct dpot_data *data = i2c_get_clientdata(client); - struct device *dev = &client->dev; - - switch (data->devid) { - case AD5258_ID: - case AD5259_ID: - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC0]); - break; - case AD5251_ID: - case AD5252_ID: - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC1]); - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC3]); - sysfs_remove_group(&dev->kobj, &ad525x_group_commands); - break; - case AD5253_ID: - case AD5254_ID: - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC0]); - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC1]); - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC2]); - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC3]); - sysfs_remove_group(&dev->kobj, &ad525x_group_commands); - break; - case AD5255_ID: - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC0]); - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC1]); - sysfs_remove_group(&dev->kobj, - &ad525x_group_wipers[AD525X_RDAC2]); - sysfs_remove_group(&dev->kobj, &ad525x_group_commands); - break; - } + struct dpot_data *data = dev_get_drvdata(dev); + int i; + + for (i = DPOT_RDAC0; i < MAX_RDACS; i++) + if (data->wipers & (1 << i)) + ad_dpot_remove_files(dev, data->feat, i); - i2c_set_clientdata(client, NULL); kfree(data); return 0; } +EXPORT_SYMBOL(ad_dpot_remove); -static const struct i2c_device_id ad525x_idtable[] = { - {"ad5258", AD5258_ID}, - {"ad5259", AD5259_ID}, - {"ad5251", AD5251_ID}, - {"ad5252", AD5252_ID}, - {"ad5253", AD5253_ID}, - {"ad5254", AD5254_ID}, - {"ad5255", AD5255_ID}, - {} -}; - -MODULE_DEVICE_TABLE(i2c, ad525x_idtable); - 
-static struct i2c_driver ad525x_driver = { - .driver = { - .owner = THIS_MODULE, - .name = DRIVER_NAME, - }, - .id_table = ad525x_idtable, - .probe = ad525x_probe, - .remove = __devexit_p(ad525x_remove), -}; - -static int __init ad525x_init(void) -{ - return i2c_add_driver(&ad525x_driver); -} - -module_init(ad525x_init); - -static void __exit ad525x_exit(void) -{ - i2c_del_driver(&ad525x_driver); -} - -module_exit(ad525x_exit); MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, " - "Michael Hennerich <hennerich@blackfin.uclinux.org>, "); -MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver"); + "Michael Hennerich <hennerich@blackfin.uclinux.org>"); +MODULE_DESCRIPTION("Digital potentiometer driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h new file mode 100644 index 000000000000..78b89fd2e2fd --- /dev/null +++ b/drivers/misc/ad525x_dpot.h @@ -0,0 +1,202 @@ +/* + * Driver for the Analog Devices digital potentiometers + * + * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef _AD_DPOT_H_ +#define _AD_DPOT_H_ + +#include <linux/types.h> + +#define DPOT_CONF(features, wipers, max_pos, uid) \ + (((features) << 18) | (((wipers) & 0xFF) << 10) | \ + ((max_pos & 0xF) << 6) | (uid & 0x3F)) + +#define DPOT_UID(conf) (conf & 0x3F) +#define DPOT_MAX_POS(conf) ((conf >> 6) & 0xF) +#define DPOT_WIPERS(conf) ((conf >> 10) & 0xFF) +#define DPOT_FEAT(conf) (conf >> 18) + +#define BRDAC0 (1 << 0) +#define BRDAC1 (1 << 1) +#define BRDAC2 (1 << 2) +#define BRDAC3 (1 << 3) +#define BRDAC4 (1 << 4) +#define BRDAC5 (1 << 5) +#define MAX_RDACS 6 + +#define F_CMD_INC (1 << 0) /* Features INC/DEC ALL, 6dB */ +#define F_CMD_EEP (1 << 1) /* Features EEPROM */ +#define F_CMD_OTP (1 << 2) /* Features OTP */ +#define F_CMD_TOL (1 << 3) /* RDACS feature Tolerance REG */ +#define F_RDACS_RW (1 << 4) /* RDACS are Read/Write */ +#define F_RDACS_WONLY (1 << 5) /* RDACS are Write only */ +#define F_AD_APPDATA (1 << 6) /* RDAC Address append to data */ +#define F_SPI_8BIT (1 << 7) /* All SPI XFERS are 8-bit */ +#define F_SPI_16BIT (1 << 8) /* All SPI XFERS are 16-bit */ +#define F_SPI_24BIT (1 << 9) /* All SPI XFERS are 24-bit */ + +#define F_RDACS_RW_TOL (F_RDACS_RW | F_CMD_EEP | F_CMD_TOL) +#define F_RDACS_RW_EEP (F_RDACS_RW | F_CMD_EEP) +#define F_SPI (F_SPI_8BIT | F_SPI_16BIT | F_SPI_24BIT) + +enum dpot_devid { + AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */ + AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1), + AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC3, 6, 2), + AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC3, 8, 3), + AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4), + AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 5), + AD5255_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2, 9, 6), + AD5160_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 7), /* SPI */ + AD5161_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 8), + AD5162_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 9), + AD5165_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 10), + AD5200_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 11), + AD5201_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + 
BRDAC0, 5, 12), + AD5203_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 13), + AD5204_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 14), + AD5206_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3 | BRDAC4 | BRDAC5, + 8, 15), + AD5207_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 16), + AD5231_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT, + BRDAC0, 10, 17), + AD5232_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 18), + AD5233_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 19), + AD5235_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT, + BRDAC0 | BRDAC1, 10, 20), + AD5260_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 21), + AD5262_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 22), + AD5263_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23), + AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 24), + AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 8, 25), + AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 26), + AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27), + AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 7, 28), + AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + BRDAC0, 8, 29), + AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1, 8, 30), + AD8403_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, + BRDAC0 | BRDAC1 | BRDAC2, 8, 31), + ADN2850_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT, + BRDAC0 | BRDAC1, 10, 32), + AD5241_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 33), + AD5242_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 34), + AD5243_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 35), + AD5245_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 36), + AD5246_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 37), + AD5247_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 38), + AD5248_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 39), + AD5280_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 40), + AD5282_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 41), + ADN2860_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC, + BRDAC0 | BRDAC1 | BRDAC2, 9, 42), + AD5273_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 43), + AD5171_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 44), + AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45), + AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46), + AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47), +}; + +#define DPOT_RDAC0 0 +#define DPOT_RDAC1 1 +#define DPOT_RDAC2 2 +#define DPOT_RDAC3 3 +#define DPOT_RDAC4 4 +#define DPOT_RDAC5 5 + +#define DPOT_RDAC_MASK 0x1F + +#define DPOT_REG_TOL 0x18 +#define DPOT_TOL_RDAC0 (DPOT_REG_TOL | DPOT_RDAC0) +#define DPOT_TOL_RDAC1 (DPOT_REG_TOL | DPOT_RDAC1) +#define DPOT_TOL_RDAC2 (DPOT_REG_TOL | DPOT_RDAC2) +#define DPOT_TOL_RDAC3 (DPOT_REG_TOL | DPOT_RDAC3) +#define DPOT_TOL_RDAC4 (DPOT_REG_TOL | DPOT_RDAC4) +#define DPOT_TOL_RDAC5 (DPOT_REG_TOL | DPOT_RDAC5) + +/* RDAC-to-EEPROM Interface Commands */ +#define DPOT_ADDR_RDAC (0x0 << 5) +#define DPOT_ADDR_EEPROM (0x1 << 5) +#define DPOT_ADDR_OTP (0x1 << 6) +#define DPOT_ADDR_CMD (0x1 << 7) +#define DPOT_ADDR_OTP_EN (0x1 << 9) 
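/*
 * Standalone sketch (illustration only, not part of the patch or header):
 * round-trips one device through the DPOT_CONF() packing defined above,
 * the way the probe path recovers the fields.  The inputs are the ones
 * AD5252_ID is built from; the userspace printf() harness is hypothetical.
 */
#include <stdio.h>

#define DPOT_CONF(features, wipers, max_pos, uid) \
	(((features) << 18) | (((wipers) & 0xFF) << 10) | \
	((max_pos & 0xF) << 6) | (uid & 0x3F))

#define DPOT_UID(conf)		(conf & 0x3F)
#define DPOT_MAX_POS(conf)	((conf >> 6) & 0xF)
#define DPOT_WIPERS(conf)	((conf >> 10) & 0xFF)
#define DPOT_FEAT(conf)		(conf >> 18)

int main(void)
{
	/* F_RDACS_RW_TOL | F_CMD_INC = 0x1b, BRDAC0 | BRDAC3 = 0x09 */
	unsigned long id = DPOT_CONF(0x1bUL, 0x09, 8, 3);	/* AD5252_ID */

	/* The probe path expands the 4-bit exponent: max_pos = 1 << field. */
	printf("uid=%lu wipers=0x%02lx positions=%lu feat=0x%lx\n",
	       DPOT_UID(id), DPOT_WIPERS(id),
	       1UL << DPOT_MAX_POS(id), DPOT_FEAT(id));
	/* prints: uid=3 wipers=0x09 positions=256 feat=0x1b */
	return 0;
}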
+ +#define DPOT_DEC_ALL_6DB (DPOT_ADDR_CMD | (0x4 << 3)) +#define DPOT_INC_ALL_6DB (DPOT_ADDR_CMD | (0x9 << 3)) +#define DPOT_DEC_ALL (DPOT_ADDR_CMD | (0x6 << 3)) +#define DPOT_INC_ALL (DPOT_ADDR_CMD | (0xB << 3)) + +#define DPOT_SPI_RDAC 0xB0 +#define DPOT_SPI_EEPROM 0x30 +#define DPOT_SPI_READ_RDAC 0xA0 +#define DPOT_SPI_READ_EEPROM 0x90 +#define DPOT_SPI_DEC_ALL_6DB 0x50 +#define DPOT_SPI_INC_ALL_6DB 0xD0 +#define DPOT_SPI_DEC_ALL 0x70 +#define DPOT_SPI_INC_ALL 0xF0 + +/* AD5291/2/3 use special commands */ +#define DPOT_AD5291_RDAC 0x01 +#define DPOT_AD5291_READ_RDAC 0x02 + +/* AD524x use special commands */ +#define DPOT_AD5291_RDAC_AB 0x80 + +#define DPOT_AD5273_FUSE 0x80 +#define DPOT_AD5270_2_3_FUSE 0x20 +#define DPOT_AD5270_2_3_OW 0x08 +#define DPOT_AD5272_3_A0 0x08 +#define DPOT_AD5270_2FUSE 0x80 + +struct dpot_data; + +struct ad_dpot_bus_ops { + int (*read_d8) (void *client); + int (*read_r8d8) (void *client, u8 reg); + int (*read_r8d16) (void *client, u8 reg); + int (*write_d8) (void *client, u8 val); + int (*write_r8d8) (void *client, u8 reg, u8 val); + int (*write_r8d16) (void *client, u8 reg, u16 val); +}; + +struct ad_dpot_bus_data { + void *client; + const struct ad_dpot_bus_ops *bops; +}; + +struct ad_dpot_id { + char *name; + unsigned long devid; +}; + +int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id); +int ad_dpot_remove(struct device *dev); + +#endif diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index 31a991161f0a..5bfb2a2041b8 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c @@ -75,6 +75,9 @@ enum ctype { UNALIGNED_LOAD_STORE_WRITE, OVERWRITE_ALLOCATION, WRITE_AFTER_FREE, + SOFTLOCKUP, + HARDLOCKUP, + HUNG_TASK, }; static char* cp_name[] = { @@ -99,6 +102,9 @@ static char* cp_type[] = { "UNALIGNED_LOAD_STORE_WRITE", "OVERWRITE_ALLOCATION", "WRITE_AFTER_FREE", + "SOFTLOCKUP", + "HARDLOCKUP", + "HUNG_TASK", }; static struct jprobe lkdtm; @@ -320,6 +326,20 @@ static void lkdtm_do_action(enum ctype which) memset(data, 0x78, len); break; } + case SOFTLOCKUP: + preempt_disable(); + for (;;) + cpu_relax(); + break; + case HARDLOCKUP: + local_irq_disable(); + for (;;) + cpu_relax(); + break; + case HUNG_TASK: + set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); + break; case NONE: default: break; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 3168ebd616b2..569e94da844c 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1252,9 +1252,8 @@ EXPORT_SYMBOL(mmc_card_can_sleep); /** * mmc_suspend_host - suspend a host * @host: mmc host - * @state: suspend mode (PM_SUSPEND_xxx) */ -int mmc_suspend_host(struct mmc_host *host, pm_message_t state) +int mmc_suspend_host(struct mmc_host *host) { int err = 0; diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 0d96080d44b0..63772e7e7608 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -79,8 +79,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, * we cannot use the retries field in mmc_command. 
 */ for (i = 0;i <= retries;i++) { - memset(&mrq, 0, sizeof(struct mmc_request)); - err = mmc_app_cmd(host, card); if (err) { /* no point in retrying; no APP commands allowed */ diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 2dd4cfe7ca17..b9dee28ee7d0 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -296,6 +296,12 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, card->type = MMC_TYPE_SDIO; /* + * Call the optional HC's init_card function to handle quirks. + */ + if (host->ops->init_card) + host->ops->init_card(host, card); + + /* * For native busses: set card RCA and quit open drain mode. */ if (!powered_resume && !mmc_host_is_spi(host)) { diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index ff27c8c71355..0f687cdeb064 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -406,6 +406,36 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret) EXPORT_SYMBOL_GPL(sdio_writeb); /** + * sdio_writeb_readb - write and read a byte from SDIO function + * @func: SDIO function to access + * @write_byte: byte to write + * @addr: address to write to + * @err_ret: optional status value from transfer + * + * Performs a RAW (Read after Write) operation as defined by the SDIO spec - + * a single byte is written to the address space of a given SDIO function + * and the response is read back from the same address, both using a single + * request. If there is a problem with the operation, 0xff is returned and + * @err_ret will contain the error code. + */ +u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte, + unsigned int addr, int *err_ret) +{ + int ret; + u8 val; + + ret = mmc_io_rw_direct(func->card, 1, func->num, addr, + write_byte, &val); + if (err_ret) + *err_ret = ret; + if (ret) + val = 0xff; + + return val; +} +EXPORT_SYMBOL_GPL(sdio_writeb_readb); + +/** * sdio_memcpy_fromio - read a chunk of memory from a SDIO function * @func: SDIO function to access * @dst: buffer to store the data diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 2e13b94769fd..e171e77f6129 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -136,6 +136,18 @@ config MMC_SDHCI_S3C If unsure, say N. +config MMC_SDHCI_SPEAR + tristate "SDHCI support on ST SPEAr platform" + depends on MMC_SDHCI && PLAT_SPEAR + help + This selects the Secure Digital Host Controller Interface (SDHCI), + often referred to as the HSMMC block in some of the ST SPEAr range + of SoCs. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + config MMC_SDHCI_S3C_DMA bool "DMA support on S3C SDHCI" depends on MMC_SDHCI_S3C && EXPERIMENTAL @@ -412,3 +424,11 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND depends on SDH_BFIN help If you say yes here SD-Cards may work on the EZkit. + +config MMC_SH_MMCIF + tristate "SuperH Internal MMCIF support" + depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) + help + This selects the MMC Host Interface controller (MMCIF). + + This driver supports MMCIF in sh7724/sh7757/sh7372.
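To make the new RAW helper concrete, here is a minimal sketch of how an SDIO function driver might use sdio_writeb_readb(); EXAMPLE_FUNC_REG and the surrounding driver are hypothetical, while sdio_claim_host()/sdio_release_host() and the helper itself are the real kernel API of this era. The single-request form saves a bus round trip and guarantees the readback reflects the write just issued.

/*
 * Sketch only: write a byte to a (hypothetical) function register and
 * verify it stuck, using one RAW transaction instead of a write + read.
 */
#include <linux/errno.h>
#include <linux/mmc/sdio_func.h>

#define EXAMPLE_FUNC_REG	0x14	/* hypothetical card register */

static int example_set_and_verify(struct sdio_func *func, u8 bits)
{
	int err;
	u8 now;

	sdio_claim_host(func);		/* I/O helpers need the host claimed */
	/* One bus transaction: write 'bits', read back the new value. */
	now = sdio_writeb_readb(func, bits, EXAMPLE_FUNC_REG, &err);
	sdio_release_host(func);

	if (err)
		return err;
	return (now == bits) ? 0 : -EIO;
}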
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index f4803977dfce..e30c2ee48894 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o +obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o obj-$(CONFIG_MMC_WBSD) += wbsd.o obj-$(CONFIG_MMC_AU1X) += au1xmmc.o obj-$(CONFIG_MMC_OMAP) += omap.o @@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o +obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o sdhci-of-y := sdhci-of-core.o diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index 336d9f553f3e..5f3a599ead07 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c @@ -1157,7 +1157,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state) enable_irq_wake(host->board->det_pin); if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index df0e8a88d85f..95ef864ad8f9 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -173,6 +173,7 @@ struct atmel_mci { * @mmc: The mmc_host representing this slot. * @host: The MMC controller this slot is using. * @sdc_reg: Value of SDCR to be written before using this slot. + * @sdio_irq: SDIO irq mask for this slot. * @mrq: mmc_request currently being processed or waiting to be * processed, or NULL when the slot is idle. * @queue_node: List node for placing this node in the @queue list of @@ -191,6 +192,7 @@ struct atmel_mci_slot { struct atmel_mci *host; u32 sdc_reg; + u32 sdio_irq; struct mmc_request *mrq; struct list_head queue_node; @@ -792,7 +794,7 @@ static void atmci_start_request(struct atmel_mci *host, mci_writel(host, SDCR, slot->sdc_reg); iflags = mci_readl(host, IMR); - if (iflags) + if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB)) dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", iflags); @@ -952,10 +954,21 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (mci_has_rwproof()) host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); - if (list_empty(&host->queue)) + if (atmci_is_mci2()) { + /* setup High Speed mode in relation with card capacity */ + if (ios->timing == MMC_TIMING_SD_HS) + host->cfg_reg |= MCI_CFG_HSMODE; + else + host->cfg_reg &= ~MCI_CFG_HSMODE; + } + + if (list_empty(&host->queue)) { mci_writel(host, MR, host->mode_reg); - else + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); + } else { host->need_clock_update = true; + } spin_unlock_bh(&host->lock); } else { @@ -1030,11 +1043,23 @@ static int atmci_get_cd(struct mmc_host *mmc) return present; } +static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct atmel_mci_slot *slot = mmc_priv(mmc); + struct atmel_mci *host = slot->host; + + if (enable) + mci_writel(host, IER, slot->sdio_irq); + else + mci_writel(host, IDR, slot->sdio_irq); +} + static const struct mmc_host_ops atmci_ops = { .request = atmci_request, .set_ios = atmci_set_ios, .get_ro = atmci_get_ro, .get_cd = atmci_get_cd, + .enable_sdio_irq = atmci_enable_sdio_irq, }; /* Called with host->lock held */ @@ -1052,8 +1077,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) * 
necessary if set_ios() is called when a different slot is * busy transfering data. */ - if (host->need_clock_update) + if (host->need_clock_update) { mci_writel(host, MR, host->mode_reg); + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); + } host->cur_slot->mrq = NULL; host->mrq = NULL; @@ -1483,6 +1511,19 @@ static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) tasklet_schedule(&host->tasklet); } +static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status) +{ + int i; + + for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { + struct atmel_mci_slot *slot = host->slot[i]; + if (slot && (status & slot->sdio_irq)) { + mmc_signal_sdio_irq(slot->mmc); + } + } +} + + static irqreturn_t atmci_interrupt(int irq, void *dev_id) { struct atmel_mci *host = dev_id; @@ -1522,6 +1563,10 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) if (pending & MCI_CMDRDY) atmci_cmd_interrupt(host, status); + + if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB)) + atmci_sdio_interrupt(host, status); + } while (pass_count++ < 5); return pass_count ? IRQ_HANDLED : IRQ_NONE; @@ -1544,7 +1589,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) static int __init atmci_init_slot(struct atmel_mci *host, struct mci_slot_pdata *slot_data, unsigned int id, - u32 sdc_reg) + u32 sdc_reg, u32 sdio_irq) { struct mmc_host *mmc; struct atmel_mci_slot *slot; @@ -1560,11 +1605,16 @@ static int __init atmci_init_slot(struct atmel_mci *host, slot->wp_pin = slot_data->wp_pin; slot->detect_is_active_high = slot_data->detect_is_active_high; slot->sdc_reg = sdc_reg; + slot->sdio_irq = sdio_irq; mmc->ops = &atmci_ops; mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); mmc->f_max = host->bus_hz / 2; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + if (sdio_irq) + mmc->caps |= MMC_CAP_SDIO_IRQ; + if (atmci_is_mci2()) + mmc->caps |= MMC_CAP_SD_HIGHSPEED; if (slot_data->bus_width >= 4) mmc->caps |= MMC_CAP_4_BIT_DATA; @@ -1753,13 +1803,13 @@ static int __init atmci_probe(struct platform_device *pdev) ret = -ENODEV; if (pdata->slot[0].bus_width) { ret = atmci_init_slot(host, &pdata->slot[0], - 0, MCI_SDCSEL_SLOT_A); + 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA); if (!ret) nr_slots++; } if (pdata->slot[1].bus_width) { ret = atmci_init_slot(host, &pdata->slot[1], - 1, MCI_SDCSEL_SLOT_B); + 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB); if (!ret) nr_slots++; } diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index f5834449400e..c8da5d30a861 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -1142,7 +1142,7 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state) struct au1xmmc_host *host = platform_get_drvdata(pdev); int ret; - ret = mmc_suspend_host(host->mmc, state); + ret = mmc_suspend_host(host->mmc); if (ret) return ret; diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index 6919e844072c..4b0e677d7295 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c @@ -576,7 +576,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); peripheral_free_list(drv_data->pin_req); diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 92a324f7417c..ca3bdc831900 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -675,7 +675,7 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t 
state) struct mmc_host *mmc = cb710_slot_to_mmc(slot); int err; - err = mmc_suspend_host(mmc, state); + err = mmc_suspend_host(mmc); if (err) return err; diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 3bd0ba294e9d..33d9f1b00862 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -137,15 +137,15 @@ /* * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, - * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only + * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only * for drivers with max_hw_segs == 1, making the segments bigger (64KB) - * than the page or two that's otherwise typical. NR_SG == 16 gives at - * least the same throughput boost, using EDMA transfer linkage instead - * of spending CPU time copying pages. + * than the page or two that's otherwise typical. nr_sg (passed from + * platform data) == 16 gives at least the same throughput boost, using + * EDMA transfer linkage instead of spending CPU time copying pages. */ #define MAX_CCNT ((1 << 16) - 1) -#define NR_SG 16 +#define MAX_NR_SG 16 static unsigned rw_threshold = 32; module_param(rw_threshold, uint, S_IRUGO); @@ -171,6 +171,7 @@ struct mmc_davinci_host { #define DAVINCI_MMC_DATADIR_READ 1 #define DAVINCI_MMC_DATADIR_WRITE 2 unsigned char data_dir; + unsigned char suspended; /* buffer is used during PIO of one scatterlist segment, and * is updated along with buffer_bytes_left. bytes_left applies @@ -192,7 +193,7 @@ struct mmc_davinci_host { struct edmacc_param tx_template; struct edmacc_param rx_template; unsigned n_link; - u32 links[NR_SG - 1]; + u32 links[MAX_NR_SG - 1]; /* For PIO we walk scatterlists one segment at a time. */ unsigned int sg_len; @@ -202,6 +203,8 @@ struct mmc_davinci_host { u8 version; /* for ns in one cycle calculation */ unsigned ns_in_one_cycle; + /* Number of sg segments */ + u8 nr_sg; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif @@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host) static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) { + u32 link_size; int r, i; /* Acquire master DMA write channel */ @@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) /* Allocate parameter RAM slots, which will later be bound to a * channel as needed to handle a scatterlist. 
*/ - for (i = 0; i < ARRAY_SIZE(host->links); i++) { + link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links)); + for (i = 0; i < link_size; i++) { r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); if (r < 0) { dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", @@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, } } -static void -davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) +static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, + int val) { u32 temp; - /* reset command and data state machines */ temp = readl(host->base + DAVINCI_MMCCTL); - writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, - host->base + DAVINCI_MMCCTL); + if (val) /* reset */ + temp |= MMCCTL_CMDRST | MMCCTL_DATRST; + else /* enable */ + temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); - temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); - udelay(10); writel(temp, host->base + DAVINCI_MMCCTL); + udelay(10); +} + +static void +davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) +{ + mmc_davinci_reset_ctrl(host, 1); + mmc_davinci_reset_ctrl(host, 0); } static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) @@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) #endif static void __init init_mmcsd_host(struct mmc_davinci_host *host) { - /* DAT line portion is diabled and in reset state */ - writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST, - host->base + DAVINCI_MMCCTL); - - /* CMD line portion is diabled and in reset state */ - writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST, - host->base + DAVINCI_MMCCTL); - udelay(10); + mmc_davinci_reset_ctrl(host, 1); writel(0, host->base + DAVINCI_MMCCLK); writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); @@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host) writel(0x1FFF, host->base + DAVINCI_MMCTOR); writel(0xFFFF, host->base + DAVINCI_MMCTOD); - writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, - host->base + DAVINCI_MMCCTL); - writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST, - host->base + DAVINCI_MMCCTL); - - udelay(10); + mmc_davinci_reset_ctrl(host, 0); } static int __init davinci_mmcsd_probe(struct platform_device *pdev) @@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev) init_mmcsd_host(host); + if (pdata->nr_sg) + host->nr_sg = pdata->nr_sg - 1; + + if (host->nr_sg > MAX_NR_SG || !host->nr_sg) + host->nr_sg = MAX_NR_SG; + host->use_dma = use_dma; host->irq = irq; @@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev) } #ifdef CONFIG_PM -static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg) +static int davinci_mmcsd_suspend(struct device *dev) { + struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); + int ret; - return mmc_suspend_host(host->mmc, msg); + mmc_host_enable(host->mmc); + ret = mmc_suspend_host(host->mmc); + if (!ret) { + writel(0, host->base + DAVINCI_MMCIM); + mmc_davinci_reset_ctrl(host, 1); + mmc_host_disable(host->mmc); + clk_disable(host->clk); + host->suspended = 1; + } else { + host->suspended = 0; + mmc_host_disable(host->mmc); + } + + return ret; } -static int davinci_mmcsd_resume(struct platform_device *pdev) +static int davinci_mmcsd_resume(struct device *dev) { + struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = 
platform_get_drvdata(pdev); + int ret; + + if (!host->suspended) + return 0; - return mmc_resume_host(host->mmc); + clk_enable(host->clk); + mmc_host_enable(host->mmc); + + mmc_davinci_reset_ctrl(host, 0); + ret = mmc_resume_host(host->mmc); + if (!ret) + host->suspended = 0; + + return ret; } + +static const struct dev_pm_ops davinci_mmcsd_pm = { + .suspend = davinci_mmcsd_suspend, + .resume = davinci_mmcsd_resume, +}; + +#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) #else -#define davinci_mmcsd_suspend NULL -#define davinci_mmcsd_resume NULL +#define davinci_mmcsd_pm_ops NULL #endif static struct platform_driver davinci_mmcsd_driver = { .driver = { .name = "davinci_mmc", .owner = THIS_MODULE, + .pm = davinci_mmcsd_pm_ops, }, .remove = __exit_p(davinci_mmcsd_remove), - .suspend = davinci_mmcsd_suspend, - .resume = davinci_mmcsd_resume, }; static int __init davinci_mmcsd_init(void) diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index bf98d7cc928a..9a68ff4353a2 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c @@ -1115,7 +1115,7 @@ static int imxmci_suspend(struct platform_device *dev, pm_message_t state) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index ff115d920888..4917af96bae1 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -824,7 +824,7 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state) if (mmc) { struct mmci_host *host = mmc_priv(mmc); - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); if (ret == 0) writel(0, host->base + MMCIMASK0); } diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 61f1d27fed3f..24e09454e522 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -1327,7 +1327,7 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state) disable_irq(host->stat_irq); if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) - rc = mmc_suspend_host(mmc, state); + rc = mmc_suspend_host(mmc); if (!rc) msmsdcc_writel(host, 0, MMCIMASK0); if (host->clks_on) diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 34e23489811a..366eefa77c5a 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -865,7 +865,7 @@ static int mvsd_suspend(struct platform_device *dev, pm_message_t state) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 2df90412abb5..d9d4a72e0ec7 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -119,6 +119,7 @@ struct mxcmci_host { int detect_irq; int dma; int do_dma; + int use_sdio; unsigned int power_mode; struct imxmmc_platform_data *pdata; @@ -138,6 +139,7 @@ struct mxcmci_host { int clock; struct work_struct datawork; + spinlock_t lock; }; static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); @@ -151,6 +153,8 @@ static void mxcmci_softreset(struct mxcmci_host *host) { int i; + dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n"); + /* reset sequence */ writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK); writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK, @@ -224,6 +228,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, unsigned int cmdat) { + u32 int_cntr; + unsigned long 
flags; + WARN_ON(host->cmd != NULL); host->cmd = cmd; @@ -247,12 +254,16 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, return -EINVAL; } + int_cntr = INT_END_CMD_RES_EN; + if (mxcmci_use_dma(host)) - writel(INT_READ_OP_EN | INT_WRITE_OP_DONE_EN | - INT_END_CMD_RES_EN, - host->base + MMC_REG_INT_CNTR); - else - writel(INT_END_CMD_RES_EN, host->base + MMC_REG_INT_CNTR); + int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN; + + spin_lock_irqsave(&host->lock, flags); + if (host->use_sdio) + int_cntr |= INT_SDIO_IRQ_EN; + writel(int_cntr, host->base + MMC_REG_INT_CNTR); + spin_unlock_irqrestore(&host->lock, flags); writew(cmd->opcode, host->base + MMC_REG_CMD); writel(cmd->arg, host->base + MMC_REG_ARG); @@ -264,7 +275,14 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, static void mxcmci_finish_request(struct mxcmci_host *host, struct mmc_request *req) { - writel(0, host->base + MMC_REG_INT_CNTR); + u32 int_cntr = 0; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->use_sdio) + int_cntr |= INT_SDIO_IRQ_EN; + writel(int_cntr, host->base + MMC_REG_INT_CNTR); + spin_unlock_irqrestore(&host->lock, flags); host->req = NULL; host->cmd = NULL; @@ -290,16 +308,25 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat) dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat); if (stat & STATUS_CRC_READ_ERR) { + dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__); data->error = -EILSEQ; } else if (stat & STATUS_CRC_WRITE_ERR) { u32 err_code = (stat >> 9) & 0x3; - if (err_code == 2) /* No CRC response */ + if (err_code == 2) { /* No CRC response */ + dev_err(mmc_dev(host->mmc), + "%s: No CRC -ETIMEDOUT\n", __func__); data->error = -ETIMEDOUT; - else + } else { + dev_err(mmc_dev(host->mmc), + "%s: -EILSEQ\n", __func__); data->error = -EILSEQ; + } } else if (stat & STATUS_TIME_OUT_READ) { + dev_err(mmc_dev(host->mmc), + "%s: read -ETIMEDOUT\n", __func__); data->error = -ETIMEDOUT; } else { + dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__); data->error = -EIO; } } else { @@ -433,8 +460,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host) struct scatterlist *sg; int stat, i; - host->datasize = 0; - host->data = data; host->datasize = 0; @@ -464,6 +489,9 @@ static void mxcmci_datawork(struct work_struct *work) struct mxcmci_host *host = container_of(work, struct mxcmci_host, datawork); int datastat = mxcmci_transfer_data(host); + + writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, + host->base + MMC_REG_STATUS); mxcmci_finish_data(host, datastat); if (host->req->stop) { @@ -523,15 +551,35 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) static irqreturn_t mxcmci_irq(int irq, void *devid) { struct mxcmci_host *host = devid; + unsigned long flags; + bool sdio_irq; u32 stat; stat = readl(host->base + MMC_REG_STATUS); - writel(stat, host->base + MMC_REG_STATUS); + writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE | + STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS); dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); + spin_lock_irqsave(&host->lock, flags); + sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio; + spin_unlock_irqrestore(&host->lock, flags); + +#ifdef HAS_DMA + if (mxcmci_use_dma(host) && + (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE))) + writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, + host->base + MMC_REG_STATUS); +#endif + + if (sdio_irq) { + writel(STATUS_SDIO_INT_ACTIVE, 
host->base + MMC_REG_STATUS); + mmc_signal_sdio_irq(host->mmc); + } + if (stat & STATUS_END_CMD_RESP) mxcmci_cmd_done(host, stat); + #ifdef HAS_DMA if (mxcmci_use_dma(host) && (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) @@ -668,11 +716,46 @@ static int mxcmci_get_ro(struct mmc_host *mmc) return -ENOSYS; } +static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct mxcmci_host *host = mmc_priv(mmc); + unsigned long flags; + u32 int_cntr; + + spin_lock_irqsave(&host->lock, flags); + host->use_sdio = enable; + int_cntr = readl(host->base + MMC_REG_INT_CNTR); + + if (enable) + int_cntr |= INT_SDIO_IRQ_EN; + else + int_cntr &= ~INT_SDIO_IRQ_EN; + + writel(int_cntr, host->base + MMC_REG_INT_CNTR); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) +{ + /* + * MX3 SoCs have a silicon bug which corrupts CRC calculation of + * multi-block transfers when connected SDIO peripheral doesn't + * drive the BUSY line as required by the specs. + * One way to prevent this is to only allow 1-bit transfers. + */ + + if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO) + host->caps &= ~MMC_CAP_4_BIT_DATA; + else + host->caps |= MMC_CAP_4_BIT_DATA; +} static const struct mmc_host_ops mxcmci_ops = { - .request = mxcmci_request, - .set_ios = mxcmci_set_ios, - .get_ro = mxcmci_get_ro, + .request = mxcmci_request, + .set_ios = mxcmci_set_ios, + .get_ro = mxcmci_get_ro, + .enable_sdio_irq = mxcmci_enable_sdio_irq, + .init_card = mxcmci_init_card, }; static int mxcmci_probe(struct platform_device *pdev) @@ -700,7 +783,7 @@ static int mxcmci_probe(struct platform_device *pdev) } mmc->ops = &mxcmci_ops; - mmc->caps = MMC_CAP_4_BIT_DATA; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; /* MMC core transfer sizes tunable parameters */ mmc->max_hw_segs = 64; @@ -719,6 +802,7 @@ static int mxcmci_probe(struct platform_device *pdev) host->mmc = mmc; host->pdata = pdev->dev.platform_data; + spin_lock_init(&host->lock); if (host->pdata && host->pdata->ocr_avail) mmc->ocr_avail = host->pdata->ocr_avail; @@ -848,7 +932,7 @@ static int mxcmci_suspend(struct platform_device *dev, pm_message_t state) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index bb6cc54b558e..1247e5de9faa 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -64,7 +64,7 @@ static int of_mmc_spi_get_ro(struct device *dev) struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) { struct device *dev = &spi->dev; - struct device_node *np = dev_archdata_get_node(&dev->archdata); + struct device_node *np = dev->of_node; struct of_mmc_spi *oms; const u32 *voltage_ranges; int num_ranges; @@ -135,7 +135,7 @@ EXPORT_SYMBOL(mmc_spi_get_pdata); void mmc_spi_put_pdata(struct spi_device *spi) { struct device *dev = &spi->dev; - struct device_node *np = dev_archdata_get_node(&dev->archdata); + struct device_node *np = dev->of_node; struct of_mmc_spi *oms = to_of_mmc_spi(dev); int i; diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 84d280406341..2b281680e320 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -39,30 +39,30 @@ #include <plat/fpga.h> #define OMAP_MMC_REG_CMD 0x00 -#define OMAP_MMC_REG_ARGL 0x04 -#define OMAP_MMC_REG_ARGH 0x08 -#define OMAP_MMC_REG_CON 0x0c -#define OMAP_MMC_REG_STAT 0x10 -#define OMAP_MMC_REG_IE 0x14 -#define 
OMAP_MMC_REG_CTO 0x18 -#define OMAP_MMC_REG_DTO 0x1c -#define OMAP_MMC_REG_DATA 0x20 -#define OMAP_MMC_REG_BLEN 0x24 -#define OMAP_MMC_REG_NBLK 0x28 -#define OMAP_MMC_REG_BUF 0x2c -#define OMAP_MMC_REG_SDIO 0x34 -#define OMAP_MMC_REG_REV 0x3c -#define OMAP_MMC_REG_RSP0 0x40 -#define OMAP_MMC_REG_RSP1 0x44 -#define OMAP_MMC_REG_RSP2 0x48 -#define OMAP_MMC_REG_RSP3 0x4c -#define OMAP_MMC_REG_RSP4 0x50 -#define OMAP_MMC_REG_RSP5 0x54 -#define OMAP_MMC_REG_RSP6 0x58 -#define OMAP_MMC_REG_RSP7 0x5c -#define OMAP_MMC_REG_IOSR 0x60 -#define OMAP_MMC_REG_SYSC 0x64 -#define OMAP_MMC_REG_SYSS 0x68 +#define OMAP_MMC_REG_ARGL 0x01 +#define OMAP_MMC_REG_ARGH 0x02 +#define OMAP_MMC_REG_CON 0x03 +#define OMAP_MMC_REG_STAT 0x04 +#define OMAP_MMC_REG_IE 0x05 +#define OMAP_MMC_REG_CTO 0x06 +#define OMAP_MMC_REG_DTO 0x07 +#define OMAP_MMC_REG_DATA 0x08 +#define OMAP_MMC_REG_BLEN 0x09 +#define OMAP_MMC_REG_NBLK 0x0a +#define OMAP_MMC_REG_BUF 0x0b +#define OMAP_MMC_REG_SDIO 0x0d +#define OMAP_MMC_REG_REV 0x0f +#define OMAP_MMC_REG_RSP0 0x10 +#define OMAP_MMC_REG_RSP1 0x11 +#define OMAP_MMC_REG_RSP2 0x12 +#define OMAP_MMC_REG_RSP3 0x13 +#define OMAP_MMC_REG_RSP4 0x14 +#define OMAP_MMC_REG_RSP5 0x15 +#define OMAP_MMC_REG_RSP6 0x16 +#define OMAP_MMC_REG_RSP7 0x17 +#define OMAP_MMC_REG_IOSR 0x18 +#define OMAP_MMC_REG_SYSC 0x19 +#define OMAP_MMC_REG_SYSS 0x1a #define OMAP_MMC_STAT_CARD_ERR (1 << 14) #define OMAP_MMC_STAT_CARD_IRQ (1 << 13) @@ -78,8 +78,9 @@ #define OMAP_MMC_STAT_CARD_BUSY (1 << 2) #define OMAP_MMC_STAT_END_OF_CMD (1 << 0) -#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg) -#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg) +#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift) +#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg)) +#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg)) /* * Command types @@ -133,6 +134,7 @@ struct mmc_omap_host { int irq; unsigned char bus_mode; unsigned char hw_bus_mode; + unsigned int reg_shift; struct work_struct cmd_abort_work; unsigned abort:1; @@ -680,9 +682,9 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) host->data->bytes_xfered += n; if (write) { - __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); + __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); } else { - __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); + __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); } } @@ -900,7 +902,7 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) int dst_port = 0; int sync_dev = 0; - data_addr = host->phys_base + OMAP_MMC_REG_DATA; + data_addr = host->phys_base + OMAP_MMC_REG(host, DATA); frame = data->blksz; count = sg_dma_len(sg); @@ -1493,6 +1495,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev) } } + host->reg_shift = (cpu_is_omap7xx() ? 
1 : 2); + return 0; err_plat_cleanup: @@ -1557,7 +1561,7 @@ static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg) struct mmc_omap_slot *slot; slot = host->slots[i]; - ret = mmc_suspend_host(slot->mmc, mesg); + ret = mmc_suspend_host(slot->mmc); if (ret < 0) { while (--i >= 0) { slot = host->slots[i]; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index e9caf694c59e..b032828c6126 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -157,12 +157,10 @@ struct omap_hsmmc_host { */ struct regulator *vcc; struct regulator *vcc_aux; - struct semaphore sem; struct work_struct mmc_carddetect_work; void __iomem *base; resource_size_t mapbase; spinlock_t irq_lock; /* Prevent races with irq handler */ - unsigned long flags; unsigned int id; unsigned int dma_len; unsigned int dma_sg_idx; @@ -183,6 +181,7 @@ struct omap_hsmmc_host { int protect_card; int reqs_blocked; int use_reg; + int req_in_progress; struct omap_mmc_platform_data *pdata; }; @@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); } +static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host) +{ + unsigned int irq_mask; + + if (host->use_dma) + irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE); + else + irq_mask = INT_EN_MASK; + + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); + OMAP_HSMMC_WRITE(host->base, IE, irq_mask); +} + +static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host) +{ + OMAP_HSMMC_WRITE(host->base, ISE, 0); + OMAP_HSMMC_WRITE(host->base, IE, 0); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); +} + #ifdef CONFIG_PM /* @@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) && time_before(jiffies, timeout)) ; - OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); - OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); - OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + omap_hsmmc_disable_irq(host); /* Do not initialize card-specific things if the power is off */ if (host->power_mode == MMC_POWER_OFF) @@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host) return; disable_irq(host->irq); + + OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); OMAP_HSMMC_WRITE(host->base, CON, OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); @@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, mmc_hostname(host->mmc), cmd->opcode, cmd->arg); host->cmd = cmd; - /* - * Clear status bits and enable interrupts - */ - OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); - OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); - - if (host->use_dma) - OMAP_HSMMC_WRITE(host->base, IE, - INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE)); - else - OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + omap_hsmmc_enable_irq(host); host->response_busy = 0; if (cmd->flags & MMC_RSP_PRESENT) { @@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, if (host->use_dma) cmdreg |= DMA_EN; - /* - * In an interrupt context (i.e. STOP command), the spinlock is unlocked - * by the interrupt handler, otherwise (i.e. for a new request) it is - * unlocked here. 
- */ - if (!in_interrupt()) - spin_unlock_irqrestore(&host->irq_lock, host->flags); + host->req_in_progress = 1; OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); @@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) return DMA_FROM_DEVICE; } +static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) +{ + int dma_ch; + + spin_lock(&host->irq_lock); + host->req_in_progress = 0; + dma_ch = host->dma_ch; + spin_unlock(&host->irq_lock); + + omap_hsmmc_disable_irq(host); + /* Do not complete the request if DMA is still in progress */ + if (mrq->data && host->use_dma && dma_ch != -1) + return; + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); +} + /* * Notify the transfer complete to MMC core */ @@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data) return; } - host->mrq = NULL; - mmc_request_done(host->mmc, mrq); + omap_hsmmc_request_done(host, mrq); return; } host->data = NULL; - if (host->use_dma && host->dma_ch != -1) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, - omap_hsmmc_get_dma_dir(host, data)); - if (!data->error) data->bytes_xfered += data->blocks * (data->blksz); else data->bytes_xfered = 0; if (!data->stop) { - host->mrq = NULL; - mmc_request_done(host->mmc, data->mrq); + omap_hsmmc_request_done(host, data->mrq); return; } omap_hsmmc_start_command(host, data->stop, NULL); @@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); } } - if ((host->data == NULL && !host->response_busy) || cmd->error) { - host->mrq = NULL; - mmc_request_done(host->mmc, cmd->mrq); - } + if ((host->data == NULL && !host->response_busy) || cmd->error) + omap_hsmmc_request_done(host, cmd->mrq); } /* @@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) */ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) { + int dma_ch; + host->data->error = errno; - if (host->use_dma && host->dma_ch != -1) { + spin_lock(&host->irq_lock); + dma_ch = host->dma_ch; + host->dma_ch = -1; + spin_unlock(&host->irq_lock); + + if (host->use_dma && dma_ch != -1) { dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, omap_hsmmc_get_dma_dir(host, host->data)); - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - up(&host->sem); + omap_free_dma(dma_ch); } host->data = NULL; } @@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, __func__); } -/* - * MMC controller IRQ handler - */ -static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) +static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) { - struct omap_hsmmc_host *host = dev_id; struct mmc_data *data; - int end_cmd = 0, end_trans = 0, status; - - spin_lock(&host->irq_lock); - - if (host->mrq == NULL) { - OMAP_HSMMC_WRITE(host->base, STAT, - OMAP_HSMMC_READ(host->base, STAT)); - /* Flush posted write */ - OMAP_HSMMC_READ(host->base, STAT); - spin_unlock(&host->irq_lock); - return IRQ_HANDLED; + int end_cmd = 0, end_trans = 0; + + if (!host->req_in_progress) { + do { + OMAP_HSMMC_WRITE(host->base, STAT, status); + /* Flush posted write */ + status = OMAP_HSMMC_READ(host->base, STAT); + } while (status & INT_EN_MASK); + return; } data = host->data; - status = OMAP_HSMMC_READ(host->base, STAT); dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); if (status & ERR) { @@ 
-1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) } OMAP_HSMMC_WRITE(host->base, STAT, status); - /* Flush posted write */ - OMAP_HSMMC_READ(host->base, STAT); if (end_cmd || ((status & CC) && host->cmd)) omap_hsmmc_cmd_done(host, host->cmd); if ((end_trans || (status & TC)) && host->mrq) omap_hsmmc_xfer_done(host, data); +} - spin_unlock(&host->irq_lock); +/* + * MMC controller IRQ handler + */ +static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) +{ + struct omap_hsmmc_host *host = dev_id; + int status; + + status = OMAP_HSMMC_READ(host->base, STAT); + do { + omap_hsmmc_do_irq(host, status); + /* Flush posted write */ + status = OMAP_HSMMC_READ(host->base, STAT); + } while (status & INT_EN_MASK); return IRQ_HANDLED; } @@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, /* * DMA call back function */ -static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) +static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) { - struct omap_hsmmc_host *host = data; + struct omap_hsmmc_host *host = cb_data; + struct mmc_data *data = host->mrq->data; + int dma_ch, req_in_progress; if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); - if (host->dma_ch < 0) + spin_lock(&host->irq_lock); + if (host->dma_ch < 0) { + spin_unlock(&host->irq_lock); return; + } host->dma_sg_idx++; if (host->dma_sg_idx < host->dma_len) { /* Fire up the next transfer. */ - omap_hsmmc_config_dma_params(host, host->data, - host->data->sg + host->dma_sg_idx); + omap_hsmmc_config_dma_params(host, data, + data->sg + host->dma_sg_idx); + spin_unlock(&host->irq_lock); return; } - omap_free_dma(host->dma_ch); + dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, + omap_hsmmc_get_dma_dir(host, data)); + + req_in_progress = host->req_in_progress; + dma_ch = host->dma_ch; host->dma_ch = -1; - /* - * DMA Callback: run in interrupt context. - * mutex_unlock will throw a kernel warning if used. - */ - up(&host->sem); + spin_unlock(&host->irq_lock); + + omap_free_dma(dma_ch); + + /* If DMA has finished after TC, complete the request */ + if (!req_in_progress) { + struct mmc_request *mrq = host->mrq; + + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); + } } /* @@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, struct mmc_request *req) { - int dma_ch = 0, ret = 0, err = 1, i; + int dma_ch = 0, ret = 0, i; struct mmc_data *data = req->data; /* Sanity check: all the SG entries must be aligned by block size. 
*/ @@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, */ return -EINVAL; - /* - * If for some reason the DMA transfer is still active, - * we wait for timeout period and free the dma - */ - if (host->dma_ch != -1) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(100); - if (down_trylock(&host->sem)) { - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - up(&host->sem); - return err; - } - } else { - if (down_trylock(&host->sem)) - return err; - } + BUG_ON(host->dma_ch != -1); ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); @@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) struct omap_hsmmc_host *host = mmc_priv(mmc); int err; - /* - * Prevent races with the interrupt handler because of unexpected - * interrupts, but not if we are already in interrupt context i.e. - * retries. - */ - if (!in_interrupt()) { - spin_lock_irqsave(&host->irq_lock, host->flags); - /* - * Protect the card from I/O if there is a possibility - * it can be removed. - */ - if (host->protect_card) { - if (host->reqs_blocked < 3) { - /* - * Ensure the controller is left in a consistent - * state by resetting the command and data state - * machines. - */ - omap_hsmmc_reset_controller_fsm(host, SRD); - omap_hsmmc_reset_controller_fsm(host, SRC); - host->reqs_blocked += 1; - } - req->cmd->error = -EBADF; - if (req->data) - req->data->error = -EBADF; - spin_unlock_irqrestore(&host->irq_lock, host->flags); - mmc_request_done(mmc, req); - return; - } else if (host->reqs_blocked) - host->reqs_blocked = 0; - } + BUG_ON(host->req_in_progress); + BUG_ON(host->dma_ch != -1); + if (host->protect_card) { + if (host->reqs_blocked < 3) { + /* + * Ensure the controller is left in a consistent + * state by resetting the command and data state + * machines. 
+ */ + omap_hsmmc_reset_controller_fsm(host, SRD); + omap_hsmmc_reset_controller_fsm(host, SRC); + host->reqs_blocked += 1; + } + req->cmd->error = -EBADF; + if (req->data) + req->data->error = -EBADF; + req->cmd->retries = 0; + mmc_request_done(mmc, req); + return; + } else if (host->reqs_blocked) + host->reqs_blocked = 0; WARN_ON(host->mrq != NULL); host->mrq = req; err = omap_hsmmc_prepare_data(host, req); @@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) if (req->data) req->data->error = err; host->mrq = NULL; - if (!in_interrupt()) - spin_unlock_irqrestore(&host->irq_lock, host->flags); mmc_request_done(mmc, req); return; } @@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) mmc->f_min = 400000; mmc->f_max = 52000000; - sema_init(&host->sem, 1); spin_lock_init(&host->irq_lock); host->iclk = clk_get(&pdev->dev, "ick"); @@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) } } - OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); - OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + omap_hsmmc_disable_irq(host); mmc_host_lazy_disable(host->mmc); @@ -2258,10 +2267,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev) } #ifdef CONFIG_PM -static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) +static int omap_hsmmc_suspend(struct device *dev) { int ret = 0; + struct platform_device *pdev = to_platform_device(dev); struct omap_hsmmc_host *host = platform_get_drvdata(pdev); + pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */ if (host && host->suspended) return 0; @@ -2281,12 +2292,9 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) } cancel_work_sync(&host->mmc_carddetect_work); mmc_host_enable(host->mmc); - ret = mmc_suspend_host(host->mmc, state); + ret = mmc_suspend_host(host->mmc); if (ret == 0) { - OMAP_HSMMC_WRITE(host->base, ISE, 0); - OMAP_HSMMC_WRITE(host->base, IE, 0); - - + omap_hsmmc_disable_irq(host); OMAP_HSMMC_WRITE(host->base, HCTL, OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); mmc_host_disable(host->mmc); @@ -2310,9 +2318,10 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) } /* Routine to resume the MMC device */ -static int omap_hsmmc_resume(struct platform_device *pdev) +static int omap_hsmmc_resume(struct device *dev) { int ret = 0; + struct platform_device *pdev = to_platform_device(dev); struct omap_hsmmc_host *host = platform_get_drvdata(pdev); if (host && !host->suspended) @@ -2363,13 +2372,17 @@ clk_en_err: #define omap_hsmmc_resume NULL #endif -static struct platform_driver omap_hsmmc_driver = { - .remove = omap_hsmmc_remove, +static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { .suspend = omap_hsmmc_suspend, .resume = omap_hsmmc_resume, +}; + +static struct platform_driver omap_hsmmc_driver = { + .remove = omap_hsmmc_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, + .pm = &omap_hsmmc_dev_pm_ops, }, }; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index e4f00e70a749..0a4e43f37140 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -813,7 +813,7 @@ static int pxamci_suspend(struct device *dev) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, PMSG_SUSPEND); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 2fdf7689ae6c..2e16e0a90a5e 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1881,9 +1881,8 @@ 
MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids); static int s3cmci_suspend(struct device *dev) { struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - struct pm_message event = { PM_EVENT_SUSPEND }; - return mmc_suspend_host(mmc, event); + return mmc_suspend_host(mmc); } static int s3cmci_resume(struct device *dev) diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c index 55e33135edb4..a2e9820cd42f 100644 --- a/drivers/mmc/host/sdhci-of-core.c +++ b/drivers/mmc/host/sdhci-of-core.c @@ -89,7 +89,7 @@ static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state) { struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); - return mmc_suspend_host(host->mmc, state); + return mmc_suspend_host(host->mmc); } static int sdhci_of_resume(struct of_device *ofdev) @@ -118,7 +118,7 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np) static int __devinit sdhci_of_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct sdhci_of_data *sdhci_of_data = match->data; struct sdhci_host *host; struct sdhci_of_host *of_host; @@ -205,8 +205,11 @@ static const struct of_device_id sdhci_of_match[] = { MODULE_DEVICE_TABLE(of, sdhci_of_match); static struct of_platform_driver sdhci_of_driver = { - .driver.name = "sdhci-of", - .match_table = sdhci_of_match, + .driver = { + .name = "sdhci-of", + .owner = THIS_MODULE, + .of_match_table = sdhci_of_match, + }, .probe = sdhci_of_probe, .remove = __devexit_p(sdhci_of_remove), .suspend = sdhci_of_suspend, diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index d5b11a17e648..c8623de13af3 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -129,12 +129,12 @@ struct sdhci_of_data sdhci_esdhc = { SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | SDHCI_QUIRK_NO_CARD_NO_RESET, .ops = { - .readl = sdhci_be32bs_readl, - .readw = esdhc_readw, - .readb = sdhci_be32bs_readb, - .writel = sdhci_be32bs_writel, - .writew = esdhc_writew, - .writeb = esdhc_writeb, + .read_l = sdhci_be32bs_readl, + .read_w = esdhc_readw, + .read_b = sdhci_be32bs_readb, + .write_l = sdhci_be32bs_writel, + .write_w = esdhc_writew, + .write_b = esdhc_writeb, .set_clock = esdhc_set_clock, .enable_dma = esdhc_enable_dma, .get_max_clock = esdhc_get_max_clock, diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c index 35117f3ed757..68ddb7546ae2 100644 --- a/drivers/mmc/host/sdhci-of-hlwd.c +++ b/drivers/mmc/host/sdhci-of-hlwd.c @@ -55,11 +55,11 @@ struct sdhci_of_data sdhci_hlwd = { .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE, .ops = { - .readl = sdhci_be32bs_readl, - .readw = sdhci_be32bs_readw, - .readb = sdhci_be32bs_readb, - .writel = sdhci_hlwd_writel, - .writew = sdhci_hlwd_writew, - .writeb = sdhci_hlwd_writeb, + .read_l = sdhci_be32bs_readl, + .read_w = sdhci_be32bs_readw, + .read_b = sdhci_be32bs_readb, + .write_l = sdhci_hlwd_writel, + .write_w = sdhci_hlwd_writew, + .write_b = sdhci_hlwd_writeb, }, }; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 6701af629c30..65483fdea45b 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -628,7 +628,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); if (IS_ERR(host)) { dev_err(&pdev->dev, "cannot allocate host\n"); - return 
ERR_PTR(PTR_ERR(host)); + return ERR_CAST(host); } slot = sdhci_priv(host); diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index 297f40ae6ad5..b6ee0d719698 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -29,6 +29,7 @@ #include <linux/mmc/host.h> #include <linux/io.h> +#include <linux/sdhci-pltfm.h> #include "sdhci.h" @@ -49,19 +50,18 @@ static struct sdhci_ops sdhci_pltfm_ops = { static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) { + struct sdhci_pltfm_data *pdata = pdev->dev.platform_data; struct sdhci_host *host; struct resource *iomem; int ret; - BUG_ON(pdev == NULL); - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) { ret = -ENOMEM; goto err; } - if (resource_size(iomem) != 0x100) + if (resource_size(iomem) < 0x100) dev_err(&pdev->dev, "Invalid iomem size. You may " "experience problems.\n"); @@ -76,7 +76,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) } host->hw_name = "platform"; - host->ops = &sdhci_pltfm_ops; + if (pdata && pdata->ops) + host->ops = pdata->ops; + else + host->ops = &sdhci_pltfm_ops; + if (pdata) + host->quirks = pdata->quirks; host->irq = platform_get_irq(pdev, 0); if (!request_mem_region(iomem->start, resource_size(iomem), @@ -93,6 +98,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) goto err_remap; } + if (pdata && pdata->init) { + ret = pdata->init(host); + if (ret) + goto err_plat_init; + } + ret = sdhci_add_host(host); if (ret) goto err_add_host; @@ -102,6 +113,9 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) return 0; err_add_host: + if (pdata && pdata->exit) + pdata->exit(host); +err_plat_init: iounmap(host->ioaddr); err_remap: release_mem_region(iomem->start, resource_size(iomem)); @@ -114,6 +128,7 @@ err: static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) { + struct sdhci_pltfm_data *pdata = pdev->dev.platform_data; struct sdhci_host *host = platform_get_drvdata(pdev); struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); int dead; @@ -125,6 +140,8 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) dead = 1; sdhci_remove_host(host, dead); + if (pdata && pdata->exit) + pdata->exit(host); iounmap(host->ioaddr); release_mem_region(iomem->start, resource_size(iomem)); sdhci_free_host(host); @@ -165,4 +182,3 @@ MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver"); MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:sdhci"); - diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 2136794c0cfa..af217924a76e 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -317,12 +317,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) host->irq = irq; /* Setup quirks for the controller */ - - /* Currently with ADMA enabled we are getting some length - * interrupts that are not being dealt with, do disable - * ADMA until this is sorted out. */ - host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE; + host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; #ifndef CONFIG_MMC_SDHCI_S3C_DMA @@ -330,9 +325,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) * support as well. 
*/ host->quirks |= SDHCI_QUIRK_BROKEN_DMA; - /* PIO currently has problems with multi-block IO */ - host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; - #endif /* CONFIG_MMC_SDHCI_S3C_DMA */ /* It seems we do not get an DATA transfer complete on non-busy diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c new file mode 100644 index 000000000000..d70c54c7b70a --- /dev/null +++ b/drivers/mmc/host/sdhci-spear.c @@ -0,0 +1,298 @@ +/* + * drivers/mmc/host/sdhci-spear.c + * + * Support of SDHCI platform devices for the SPEAr SoC family + * + * Copyright (C) 2010 ST Microelectronics + * Viresh Kumar <viresh.kumar@st.com> + * + * Inspired by sdhci-pltfm.c + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/highmem.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/mmc/host.h> +#include <linux/mmc/sdhci-spear.h> +#include <linux/io.h> +#include "sdhci.h" + +struct spear_sdhci { + struct clk *clk; + struct sdhci_plat_data *data; +}; + +/* sdhci ops */ +static struct sdhci_ops sdhci_pltfm_ops = { + /* Nothing to do for now. */ +}; + +/* gpio card detection interrupt handler */ +static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id) +{ + struct platform_device *pdev = dev_id; + struct sdhci_host *host = platform_get_drvdata(pdev); + struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); + unsigned long gpio_irq_type; + int val; + + val = gpio_get_value(sdhci->data->card_int_gpio); + + /* val == 1 -> card removed, val == 0 -> card inserted */ + /* if card removed - set irq for low level, else vice versa */ + gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; + set_irq_type(irq, gpio_irq_type); + + if (sdhci->data->card_power_gpio >= 0) { + if (!sdhci->data->power_always_enb) { + /* if card inserted, give power, otherwise remove it */ + val = sdhci->data->power_active_high ?
!val : val ; + gpio_set_value(sdhci->data->card_power_gpio, val); + } + } + + /* inform sdhci driver about card insertion/removal */ + tasklet_schedule(&host->card_tasklet); + + return IRQ_HANDLED; +} + +static int __devinit sdhci_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct resource *iomem; + struct spear_sdhci *sdhci; + int ret; + + BUG_ON(pdev == NULL); + + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iomem) { + ret = -ENOMEM; + dev_dbg(&pdev->dev, "memory resource not defined\n"); + goto err; + } + + if (!request_mem_region(iomem->start, resource_size(iomem), + "spear-sdhci")) { + ret = -EBUSY; + dev_dbg(&pdev->dev, "cannot request region\n"); + goto err; + } + + sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL); + if (!sdhci) { + ret = -ENOMEM; + dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); + goto err_kzalloc; + } + + /* clk enable */ + sdhci->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(sdhci->clk)) { + ret = PTR_ERR(sdhci->clk); + dev_dbg(&pdev->dev, "Error getting clock\n"); + goto err_clk_get; + } + + ret = clk_enable(sdhci->clk); + if (ret) { + dev_dbg(&pdev->dev, "Error enabling clock\n"); + goto err_clk_enb; + } + + /* overwrite platform_data */ + sdhci->data = dev_get_platdata(&pdev->dev); + pdev->dev.platform_data = sdhci; + + if (pdev->dev.parent) + host = sdhci_alloc_host(pdev->dev.parent, 0); + else + host = sdhci_alloc_host(&pdev->dev, 0); + + if (IS_ERR(host)) { + ret = PTR_ERR(host); + dev_dbg(&pdev->dev, "error allocating host\n"); + goto err_alloc_host; + } + + host->hw_name = "sdhci"; + host->ops = &sdhci_pltfm_ops; + host->irq = platform_get_irq(pdev, 0); + host->quirks = SDHCI_QUIRK_BROKEN_ADMA; + + host->ioaddr = ioremap(iomem->start, resource_size(iomem)); + if (!host->ioaddr) { + ret = -ENOMEM; + dev_dbg(&pdev->dev, "failed to remap registers\n"); + goto err_ioremap; + } + + ret = sdhci_add_host(host); + if (ret) { + dev_dbg(&pdev->dev, "error adding host\n"); + goto err_add_host; + } + + platform_set_drvdata(pdev, host); + + /* + * It is optional to use GPIOs for sdhci Power control & sdhci card + * interrupt detection. If sdhci->data is NULL, then use original sdhci + * lines otherwise GPIO lines. 
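+ * (that is: when platform data is supplied, the two optional GPIOs below + * take over card detection and slot power)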
+ * If GPIO is selected for power control, then power should be disabled + * after card removal and should be enabled when card insertion + * interrupt occurs + */ + if (!sdhci->data) + return 0; + + if (sdhci->data->card_power_gpio >= 0) { + int val = 0; + + ret = gpio_request(sdhci->data->card_power_gpio, "sdhci"); + if (ret < 0) { + dev_dbg(&pdev->dev, "gpio request fail: %d\n", + sdhci->data->card_power_gpio); + goto err_pgpio_request; + } + + if (sdhci->data->power_always_enb) + val = sdhci->data->power_active_high; + else + val = !sdhci->data->power_active_high; + + ret = gpio_direction_output(sdhci->data->card_power_gpio, val); + if (ret) { + dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", + sdhci->data->card_power_gpio); + goto err_pgpio_direction; + } + + gpio_set_value(sdhci->data->card_power_gpio, 1); + } + + if (sdhci->data->card_int_gpio >= 0) { + ret = gpio_request(sdhci->data->card_int_gpio, "sdhci"); + if (ret < 0) { + dev_dbg(&pdev->dev, "gpio request fail: %d\n", + sdhci->data->card_int_gpio); + goto err_igpio_request; + } + + ret = gpio_direction_input(sdhci->data->card_int_gpio); + if (ret) { + dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", + sdhci->data->card_int_gpio); + goto err_igpio_direction; + } + ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio), + sdhci_gpio_irq, IRQF_TRIGGER_LOW, + mmc_hostname(host->mmc), pdev); + if (ret) { + dev_dbg(&pdev->dev, "gpio request irq fail: %d\n", + sdhci->data->card_int_gpio); + goto err_igpio_request_irq; + } + + } + + return 0; + +err_igpio_request_irq: +err_igpio_direction: + if (sdhci->data->card_int_gpio >= 0) + gpio_free(sdhci->data->card_int_gpio); +err_igpio_request: +err_pgpio_direction: + if (sdhci->data->card_power_gpio >= 0) + gpio_free(sdhci->data->card_power_gpio); +err_pgpio_request: + platform_set_drvdata(pdev, NULL); + sdhci_remove_host(host, 1); +err_add_host: + iounmap(host->ioaddr); +err_ioremap: + sdhci_free_host(host); +err_alloc_host: + clk_disable(sdhci->clk); +err_clk_enb: + clk_put(sdhci->clk); +err_clk_get: + kfree(sdhci); +err_kzalloc: + release_mem_region(iomem->start, resource_size(iomem)); +err: + dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret); + return ret; +} + +static int __devexit sdhci_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); + int dead; + u32 scratch; + + if (sdhci->data) { + if (sdhci->data->card_int_gpio >= 0) { + free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev); + gpio_free(sdhci->data->card_int_gpio); + } + + if (sdhci->data->card_power_gpio >= 0) + gpio_free(sdhci->data->card_power_gpio); + } + + platform_set_drvdata(pdev, NULL); + dead = 0; + scratch = readl(host->ioaddr + SDHCI_INT_STATUS); + if (scratch == (u32)-1) + dead = 1; + + sdhci_remove_host(host, dead); + iounmap(host->ioaddr); + sdhci_free_host(host); + clk_disable(sdhci->clk); + clk_put(sdhci->clk); + kfree(sdhci); + if (iomem) + release_mem_region(iomem->start, resource_size(iomem)); + + return 0; +} + +static struct platform_driver sdhci_driver = { + .driver = { + .name = "sdhci", + .owner = THIS_MODULE, + }, + .probe = sdhci_probe, + .remove = __devexit_p(sdhci_remove), +}; + +static int __init sdhci_init(void) +{ + return platform_driver_register(&sdhci_driver); +} +module_init(sdhci_init); + +static void __exit sdhci_exit(void) +{ + platform_driver_unregister(&sdhci_driver); +} 
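For context, a board file would hand this driver its GPIO wiring through the platform data consumed above. A minimal sketch under assumed values — the GPIO numbers, register base, and IRQ below are invented for illustration; only the "sdhci" device name and the sdhci_plat_data fields come from this driver:

#include <linux/platform_device.h>
#include <linux/mmc/sdhci-spear.h>

static struct sdhci_plat_data board_sdhci_data = {
	.card_int_gpio		= 34,	/* assumed card-detect GPIO */
	.card_power_gpio	= 35,	/* assumed slot-power GPIO */
	.power_active_high	= 0,	/* slot power enabled by driving low */
	.power_always_enb	= 0,	/* let card detect gate slot power */
};

static struct resource board_sdhci_resources[] = {
	{
		.start	= 0xfc000000,	/* assumed register base */
		.end	= 0xfc000000 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 28,		/* assumed controller IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_sdhci_device = {
	.name		= "sdhci",	/* matches sdhci_driver.driver.name */
	.id		= -1,
	.dev		= {
		.platform_data	= &board_sdhci_data,
	},
	.num_resources	= ARRAY_SIZE(board_sdhci_resources),
	.resource	= board_sdhci_resources,
};

/* registered from board init code via platform_device_register() */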
+module_exit(sdhci_exit); + +MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); +MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 9d4fdfa685e5..c6d1bd8d4ac4 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -496,12 +496,22 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); } - /* - * Add a terminating entry. - */ + if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { + /* + * Mark the last descriptor as the terminating descriptor + */ + if (desc != host->adma_desc) { + desc -= 8; + desc[0] |= 0x2; /* end */ + } + } else { + /* + * Add a terminating entry. + */ - /* nop, end, valid */ - sdhci_set_adma_desc(desc, 0, 0, 0x3); + /* nop, end, valid */ + sdhci_set_adma_desc(desc, 0, 0, 0x3); + } /* * Resync align buffer as we might have changed it. @@ -1587,7 +1597,7 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) sdhci_disable_card_detection(host); - ret = mmc_suspend_host(host->mmc, state); + ret = mmc_suspend_host(host->mmc); if (ret) return ret; @@ -1744,7 +1754,8 @@ int sdhci_add_host(struct sdhci_host *host) host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; host->max_clk *= 1000000; - if (host->max_clk == 0) { + if (host->max_clk == 0 || host->quirks & + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { if (!host->ops->get_max_clock) { printk(KERN_ERR "%s: Hardware doesn't specify base clock " diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 842f46f94284..c8468134adc9 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -127,7 +127,7 @@ #define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ - SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR) + SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR) #define SDHCI_INT_ALL_MASK ((unsigned int)-1) #define SDHCI_ACMD12_ERR 0x3C @@ -236,6 +236,10 @@ struct sdhci_host { #define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) /* Controller uses SDCLK instead of TMCLK for data timeouts */ #define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) +/* Controller reports wrong base clock capability */ +#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25) +/* Controller cannot support End Attribute in NOP ADMA descriptor */ +#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26) int irq; /* Device IRQ */ void __iomem * ioaddr; /* Mapped address */ @@ -294,12 +298,12 @@ struct sdhci_host { struct sdhci_ops { #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS - u32 (*readl)(struct sdhci_host *host, int reg); - u16 (*readw)(struct sdhci_host *host, int reg); - u8 (*readb)(struct sdhci_host *host, int reg); - void (*writel)(struct sdhci_host *host, u32 val, int reg); - void (*writew)(struct sdhci_host *host, u16 val, int reg); - void (*writeb)(struct sdhci_host *host, u8 val, int reg); + u32 (*read_l)(struct sdhci_host *host, int reg); + u16 (*read_w)(struct sdhci_host *host, int reg); + u8 (*read_b)(struct sdhci_host *host, int reg); + void (*write_l)(struct sdhci_host *host, u32 val, int reg); + void (*write_w)(struct sdhci_host *host, u16 val, int reg); + void (*write_b)(struct sdhci_host *host, u8 val, int reg); #endif void (*set_clock)(struct sdhci_host *host, unsigned int clock); @@ -314,48 +318,48 @@ struct sdhci_ops { static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) { - if 
(unlikely(host->ops->writel)) - host->ops->writel(host, val, reg); + if (unlikely(host->ops->write_l)) + host->ops->write_l(host, val, reg); else writel(val, host->ioaddr + reg); } static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg) { - if (unlikely(host->ops->writew)) - host->ops->writew(host, val, reg); + if (unlikely(host->ops->write_w)) + host->ops->write_w(host, val, reg); else writew(val, host->ioaddr + reg); } static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) { - if (unlikely(host->ops->writeb)) - host->ops->writeb(host, val, reg); + if (unlikely(host->ops->write_b)) + host->ops->write_b(host, val, reg); else writeb(val, host->ioaddr + reg); } static inline u32 sdhci_readl(struct sdhci_host *host, int reg) { - if (unlikely(host->ops->readl)) - return host->ops->readl(host, reg); + if (unlikely(host->ops->read_l)) + return host->ops->read_l(host, reg); else return readl(host->ioaddr + reg); } static inline u16 sdhci_readw(struct sdhci_host *host, int reg) { - if (unlikely(host->ops->readw)) - return host->ops->readw(host, reg); + if (unlikely(host->ops->read_w)) + return host->ops->read_w(host, reg); else return readw(host->ioaddr + reg); } static inline u8 sdhci_readb(struct sdhci_host *host, int reg) { - if (unlikely(host->ops->readb)) - return host->ops->readb(host, reg); + if (unlikely(host->ops->read_b)) + return host->ops->read_b(host, reg); else return readb(host->ioaddr + reg); } diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index cb41e9c3ac07..e7507af3856e 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -519,7 +519,7 @@ static int sdricoh_pcmcia_suspend(struct pcmcia_device *link) { struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "suspend\n"); - mmc_suspend_host(mmc, PMSG_SUSPEND); + mmc_suspend_host(mmc); return 0; } diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c new file mode 100644 index 000000000000..eb97830c0344 --- /dev/null +++ b/drivers/mmc/host/sh_mmcif.c @@ -0,0 +1,965 @@ +/* + * MMCIF eMMC driver. + * + * Copyright (C) 2010 Renesas Solutions Corp. + * Yusuke Goda <yusuke.goda.sx@renesas.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * + * TODO + * 1. DMA + * 2. Power management + * 3. 
Handle MMC errors better + * + */ + +#include <linux/dma-mapping.h> +#include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/core.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/mmc/sh_mmcif.h> + +#define DRIVER_NAME "sh_mmcif" +#define DRIVER_VERSION "2010-04-28" + +#define MMCIF_CE_CMD_SET 0x00000000 +#define MMCIF_CE_ARG 0x00000008 +#define MMCIF_CE_ARG_CMD12 0x0000000C +#define MMCIF_CE_CMD_CTRL 0x00000010 +#define MMCIF_CE_BLOCK_SET 0x00000014 +#define MMCIF_CE_CLK_CTRL 0x00000018 +#define MMCIF_CE_BUF_ACC 0x0000001C +#define MMCIF_CE_RESP3 0x00000020 +#define MMCIF_CE_RESP2 0x00000024 +#define MMCIF_CE_RESP1 0x00000028 +#define MMCIF_CE_RESP0 0x0000002C +#define MMCIF_CE_RESP_CMD12 0x00000030 +#define MMCIF_CE_DATA 0x00000034 +#define MMCIF_CE_INT 0x00000040 +#define MMCIF_CE_INT_MASK 0x00000044 +#define MMCIF_CE_HOST_STS1 0x00000048 +#define MMCIF_CE_HOST_STS2 0x0000004C +#define MMCIF_CE_VERSION 0x0000007C + +/* CE_CMD_SET */ +#define CMD_MASK 0x3f000000 +#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22)) +#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */ +#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */ +#define CMD_SET_RBSY (1 << 21) /* R1b */ +#define CMD_SET_CCSEN (1 << 20) +#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */ +#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */ +#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */ +#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */ +#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */ +#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */ +#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */ +#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check */ +#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check */ +#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check */ +#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check */ +#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */ +#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */ +#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */ +#define CMD_SET_CCSH (1 << 5) +#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */ +#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */ +#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */ + +/* CE_CMD_CTRL */ +#define CMD_CTRL_BREAK (1 << 0) + +/* CE_BLOCK_SET */ +#define BLOCK_SIZE_MASK 0x0000ffff + +/* CE_CLK_CTRL */ +#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */ +#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16)) +#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16)) +#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */ +#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \ + (1 << 9) | (1 << 8)) /* resp busy timeout */ +#define SRWDTO_29 ((1 << 7) | (1 << 6) | \ + (1 << 5) | (1 << 4)) /* read/write timeout */ +#define SCCSTO_29 ((1 << 3) | (1 << 2) | \ + (1 << 1) | (1 << 0)) /* ccs timeout */ + +/* CE_BUF_ACC */ +#define BUF_ACC_DMAWEN (1 << 25) +#define BUF_ACC_DMAREN (1 << 24) +#define BUF_ACC_BUSW_32 (0 << 17) +#define BUF_ACC_BUSW_16 (1 << 17) +#define BUF_ACC_ATYP (1 << 16) + +/* CE_INT */ +#define INT_CCSDE (1 << 29) +#define INT_CMD12DRE (1 << 26) +#define INT_CMD12RBE (1 << 25) +#define INT_CMD12CRE (1 << 24) +#define INT_DTRANE (1 << 23) +#define INT_BUFRE (1 << 22) +#define INT_BUFWEN (1
<< 21) +#define INT_BUFREN (1 << 20) +#define INT_CCSRCV (1 << 19) +#define INT_RBSYE (1 << 17) +#define INT_CRSPE (1 << 16) +#define INT_CMDVIO (1 << 15) +#define INT_BUFVIO (1 << 14) +#define INT_WDATERR (1 << 11) +#define INT_RDATERR (1 << 10) +#define INT_RIDXERR (1 << 9) +#define INT_RSPERR (1 << 8) +#define INT_CCSTO (1 << 5) +#define INT_CRCSTO (1 << 4) +#define INT_WDATTO (1 << 3) +#define INT_RDATTO (1 << 2) +#define INT_RBSYTO (1 << 1) +#define INT_RSPTO (1 << 0) +#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \ + INT_RDATERR | INT_RIDXERR | INT_RSPERR | \ + INT_CCSTO | INT_CRCSTO | INT_WDATTO | \ + INT_RDATTO | INT_RBSYTO | INT_RSPTO) + +/* CE_INT_MASK */ +#define MASK_ALL 0x00000000 +#define MASK_MCCSDE (1 << 29) +#define MASK_MCMD12DRE (1 << 26) +#define MASK_MCMD12RBE (1 << 25) +#define MASK_MCMD12CRE (1 << 24) +#define MASK_MDTRANE (1 << 23) +#define MASK_MBUFRE (1 << 22) +#define MASK_MBUFWEN (1 << 21) +#define MASK_MBUFREN (1 << 20) +#define MASK_MCCSRCV (1 << 19) +#define MASK_MRBSYE (1 << 17) +#define MASK_MCRSPE (1 << 16) +#define MASK_MCMDVIO (1 << 15) +#define MASK_MBUFVIO (1 << 14) +#define MASK_MWDATERR (1 << 11) +#define MASK_MRDATERR (1 << 10) +#define MASK_MRIDXERR (1 << 9) +#define MASK_MRSPERR (1 << 8) +#define MASK_MCCSTO (1 << 5) +#define MASK_MCRCSTO (1 << 4) +#define MASK_MWDATTO (1 << 3) +#define MASK_MRDATTO (1 << 2) +#define MASK_MRBSYTO (1 << 1) +#define MASK_MRSPTO (1 << 0) + +/* CE_HOST_STS1 */ +#define STS1_CMDSEQ (1 << 31) + +/* CE_HOST_STS2 */ +#define STS2_CRCSTE (1 << 31) +#define STS2_CRC16E (1 << 30) +#define STS2_AC12CRCE (1 << 29) +#define STS2_RSPCRC7E (1 << 28) +#define STS2_CRCSTEBE (1 << 27) +#define STS2_RDATEBE (1 << 26) +#define STS2_AC12REBE (1 << 25) +#define STS2_RSPEBE (1 << 24) +#define STS2_AC12IDXE (1 << 23) +#define STS2_RSPIDXE (1 << 22) +#define STS2_CCSTO (1 << 15) +#define STS2_RDATTO (1 << 14) +#define STS2_DATBSYTO (1 << 13) +#define STS2_CRCSTTO (1 << 12) +#define STS2_AC12BSYTO (1 << 11) +#define STS2_RSPBSYTO (1 << 10) +#define STS2_AC12RSPTO (1 << 9) +#define STS2_RSPTO (1 << 8) +#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \ + STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE) +#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \ + STS2_DATBSYTO | STS2_CRCSTTO | \ + STS2_AC12BSYTO | STS2_RSPBSYTO | \ + STS2_AC12RSPTO | STS2_RSPTO) + +/* CE_VERSION */ +#define SOFT_RST_ON (1 << 31) +#define SOFT_RST_OFF (0 << 31) + +#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */ +#define CLKDEV_MMC_DATA 20000000 /* 20MHz */ +#define CLKDEV_INIT 400000 /* 400 KHz */ + +struct sh_mmcif_host { + struct mmc_host *mmc; + struct mmc_data *data; + struct mmc_command *cmd; + struct platform_device *pd; + struct clk *hclk; + unsigned int clk; + int bus_width; + u16 wait_int; + u16 sd_error; + long timeout; + void __iomem *addr; + wait_queue_head_t intr_wait; +}; + +static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg) +{ + return readl(host->addr + reg); +} + +static inline void sh_mmcif_writel(struct sh_mmcif_host *host, + unsigned int reg, u32 val) +{ + writel(val, host->addr + reg); +} + +static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, + unsigned int reg, u32 val) +{ + writel(val | sh_mmcif_readl(host, reg), host->addr + reg); +} + +static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, + unsigned int reg, u32 val) +{ + writel(~val & sh_mmcif_readl(host, reg), host->addr + reg); +} + + +static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) +{ + struct 
sh_mmcif_plat_data *p = host->pd->dev.platform_data; + + sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); + sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); + + if (!clk) + return; + if (p->sup_pclk && clk == host->clk) + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); + else + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & + (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16)); + + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); +} + +static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) +{ + u32 tmp; + + tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL); + + sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON); + sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF); + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | + SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); + /* byte swap on */ + sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); +} + +static int sh_mmcif_error_manage(struct sh_mmcif_host *host) +{ + u32 state1, state2; + int ret, timeout = 10000000; + + host->sd_error = 0; + host->wait_int = 0; + + state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1); + state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2); + pr_debug("%s: ERR HOST_STS1 = %08x\n", \ + DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)); + pr_debug("%s: ERR HOST_STS2 = %08x\n", \ + DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2)); + + if (state1 & STS1_CMDSEQ) { + sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); + sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); + while (1) { + timeout--; + if (timeout < 0) { + pr_err(DRIVER_NAME": Forced end of " \ + "command sequence timeout err\n"); + return -EIO; + } + if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1) + & STS1_CMDSEQ)) + break; + mdelay(1); + } + sh_mmcif_sync_reset(host); + pr_debug(DRIVER_NAME": Forced end of command sequence\n"); + return -EIO; + } + + if (state2 & STS2_CRC_ERR) { + pr_debug(DRIVER_NAME": CRC error occurred\n"); + ret = -EIO; + } else if (state2 & STS2_TIMEOUT_ERR) { + pr_debug(DRIVER_NAME": Timeout error occurred\n"); + ret = -ETIMEDOUT; + } else { + pr_debug(DRIVER_NAME": End/Index error occurred\n"); + ret = -EIO; + } + return ret; +} + +static int sh_mmcif_single_read(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + long time; + u32 blocksize, i, *p = sg_virt(data->sg); + + host->wait_int = 0; + + /* buf read enable */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + blocksize = (BLOCK_SIZE_MASK & + sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3; + for (i = 0; i < blocksize / 4; i++) + *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA); + + /* buffer read end */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + return 0; +}
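Every PIO path in this new driver repeats one handshake: unmask a single interrupt source, sleep on intr_wait until sh_mmcif_intr reports completion (wait_int) or failure (sd_error), then consume the flag. Folded into a helper purely for illustration — the helper name is ours, the driver itself open-codes this pattern:

static int sh_mmcif_wait_for_int(struct sh_mmcif_host *host, u32 mask)
{
	long time;

	/* unmask exactly one source, then sleep until the ISR flags it */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, mask);
	time = wait_event_interruptible_timeout(host->intr_wait,
		host->wait_int == 1 || host->sd_error == 1, host->timeout);
	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
		return sh_mmcif_error_manage(host);
	host->wait_int = 0;	/* consume the completion */
	return 0;
}

+ +static int sh_mmcif_multi_read(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + long time; + u32 blocksize, i, j, sec, *p; + + blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET); + for (j = 0; j < data->sg_len; j++) { + p =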
sg_virt(data->sg); + host->wait_int = 0; + for (sec = 0; sec < data->sg->length / blocksize; sec++) { + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); + /* buf read enable */ + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + + if (host->wait_int != 1 && + (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + for (i = 0; i < blocksize / 4; i++) + *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA); + } + if (j < data->sg_len - 1) + data->sg++; + } + return 0; +} + +static int sh_mmcif_single_write(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + long time; + u32 blocksize, i, *p = sg_virt(data->sg); + + host->wait_int = 0; + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); + + /* buf write enable */ + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + blocksize = (BLOCK_SIZE_MASK & + sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3; + for (i = 0; i < blocksize / 4; i++) + sh_mmcif_writel(host, MMCIF_CE_DATA, *p++); + + /* buffer write end */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); + + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + return 0; +} + +static int sh_mmcif_multi_write(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + long time; + u32 i, sec, j, blocksize, *p; + + blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET); + + for (j = 0; j < data->sg_len; j++) { + p = sg_virt(data->sg); + host->wait_int = 0; + for (sec = 0; sec < data->sg->length / blocksize; sec++) { + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); + /* buf write enable*/ + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + + if (host->wait_int != 1 && + (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + for (i = 0; i < blocksize / 4; i++) + sh_mmcif_writel(host, MMCIF_CE_DATA, *p++); + } + if (j < data->sg_len - 1) + data->sg++; + } + return 0; +} + +static void sh_mmcif_get_response(struct sh_mmcif_host *host, + struct mmc_command *cmd) +{ + if (cmd->flags & MMC_RSP_136) { + cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3); + cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2); + cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1); + cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0); + } else + cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0); +} + +static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, + struct mmc_command *cmd) +{ + cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12); +} + +static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, + struct mmc_request *mrq, struct mmc_command *cmd, u32 opc) +{ + u32 tmp = 0; + + /* Response Type check */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + tmp |= CMD_SET_RTYP_NO; + break; + case MMC_RSP_R1: + case MMC_RSP_R1B: + case MMC_RSP_R3: + tmp |= CMD_SET_RTYP_6B; + break; + case MMC_RSP_R2: + tmp |= CMD_SET_RTYP_17B; + break; + default: + pr_err(DRIVER_NAME": 
Unsupported response type.\n"); + break; + } + switch (opc) { + /* RBSY */ + case MMC_SWITCH: + case MMC_STOP_TRANSMISSION: + case MMC_SET_WRITE_PROT: + case MMC_CLR_WRITE_PROT: + case MMC_ERASE: + case MMC_GEN_CMD: + tmp |= CMD_SET_RBSY; + break; + } + /* WDAT / DATW */ + if (host->data) { + tmp |= CMD_SET_WDAT; + switch (host->bus_width) { + case MMC_BUS_WIDTH_1: + tmp |= CMD_SET_DATW_1; + break; + case MMC_BUS_WIDTH_4: + tmp |= CMD_SET_DATW_4; + break; + case MMC_BUS_WIDTH_8: + tmp |= CMD_SET_DATW_8; + break; + default: + pr_err(DRIVER_NAME": Unsupported bus width.\n"); + break; + } + } + /* DWEN */ + if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) + tmp |= CMD_SET_DWEN; + /* CMLTE/CMD12EN */ + if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) { + tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN; + sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, + mrq->data->blocks << 16); + } + /* RIDXC[1:0] check bits */ + if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID || + opc == MMC_SEND_CSD || opc == MMC_SEND_CID) + tmp |= CMD_SET_RIDXC_BITS; + /* RCRC7C[1:0] check bits */ + if (opc == MMC_SEND_OP_COND) + tmp |= CMD_SET_CRC7C_BITS; + /* RCRC7C[1:0] internal CRC7 */ + if (opc == MMC_ALL_SEND_CID || + opc == MMC_SEND_CSD || opc == MMC_SEND_CID) + tmp |= CMD_SET_CRC7C_INTERNAL; + + return opc = ((opc << 24) | tmp); +} + +static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host, + struct mmc_request *mrq, u32 opc) +{ + u32 ret; + + switch (opc) { + case MMC_READ_MULTIPLE_BLOCK: + ret = sh_mmcif_multi_read(host, mrq); + break; + case MMC_WRITE_MULTIPLE_BLOCK: + ret = sh_mmcif_multi_write(host, mrq); + break; + case MMC_WRITE_BLOCK: + ret = sh_mmcif_single_write(host, mrq); + break; + case MMC_READ_SINGLE_BLOCK: + case MMC_SEND_EXT_CSD: + ret = sh_mmcif_single_read(host, mrq); + break; + default: + pr_err(DRIVER_NAME": Unsupported CMD = d'%08d\n", opc); + ret = -EINVAL; + break; + } + return ret; +} + +static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + long time; + int ret = 0, mask = 0; + u32 opc = cmd->opcode; + + host->cmd = cmd; + + switch (opc) { + /* response busy check */ + case MMC_SWITCH: + case MMC_STOP_TRANSMISSION: + case MMC_SET_WRITE_PROT: + case MMC_CLR_WRITE_PROT: + case MMC_ERASE: + case MMC_GEN_CMD: + mask = MASK_MRBSYE; + break; + default: + mask = MASK_MCRSPE; + break; + } + mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | + MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | + MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | + MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO; + + if (host->data) { + sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0); + sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz); + } + opc = sh_mmcif_set_cmd(host, mrq, cmd, opc); + + sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0); + sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask); + /* set arg */ + sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg); + host->wait_int = 0; + /* set cmd */ + sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc); + + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && time == 0) { + cmd->error = sh_mmcif_error_manage(host); + return; + } + if (host->sd_error) { + switch (cmd->opcode) { + case MMC_ALL_SEND_CID: + case MMC_SELECT_CARD: + case MMC_APP_CMD: + cmd->error = -ETIMEDOUT; + break; + default: + pr_debug("%s: Cmd(d'%d) err\n", + DRIVER_NAME, cmd->opcode); + cmd->error = sh_mmcif_error_manage(host); + break; + }
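+ /* the error has been consumed: reset the handshake flags before returning */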
+ host->sd_error = 0; + host->wait_int = 0; + return; + } + if (!(cmd->flags & MMC_RSP_PRESENT)) { + cmd->error = ret; + host->wait_int = 0; + return; + } + if (host->wait_int == 1) { + sh_mmcif_get_response(host, cmd); + host->wait_int = 0; + } + if (host->data) { + ret = sh_mmcif_data_trans(host, mrq, cmd->opcode); + if (ret < 0) + mrq->data->bytes_xfered = 0; + else + mrq->data->bytes_xfered = + mrq->data->blocks * mrq->data->blksz; + } + cmd->error = ret; +} + +static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + long time; + + if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); + else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); + else { + pr_err(DRIVER_NAME": unsupported stop command\n"); + cmd->error = sh_mmcif_error_manage(host); + return; + } + + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) { + cmd->error = sh_mmcif_error_manage(host); + return; + } + sh_mmcif_get_cmd12response(host, cmd); + host->wait_int = 0; + cmd->error = 0; +} + +static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sh_mmcif_host *host = mmc_priv(mmc); + + switch (mrq->cmd->opcode) { + /* MMCIF does not support SD/SDIO command */ + case SD_IO_SEND_OP_COND: + case MMC_APP_CMD: + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(mmc, mrq); + return; + case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ + if (!mrq->data) { + /* send_if_cond cmd (not supported) */ + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(mmc, mrq); + return; + } + break; + default: + break; + } + host->data = mrq->data; + sh_mmcif_start_cmd(host, mrq, mrq->cmd); + host->data = NULL; + + if (mrq->cmd->error != 0) { + mmc_request_done(mmc, mrq); + return; + } + if (mrq->stop) + sh_mmcif_stop_cmd(host, mrq, mrq->stop); + mmc_request_done(mmc, mrq); +} + +static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sh_mmcif_host *host = mmc_priv(mmc); + struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; + + if (ios->power_mode == MMC_POWER_OFF) { + /* clock stop */ + sh_mmcif_clock_control(host, 0); + if (p->down_pwr) + p->down_pwr(host->pd); + return; + } else if (ios->power_mode == MMC_POWER_UP) { + if (p->set_pwr) + p->set_pwr(host->pd, ios->power_mode); + } + + if (ios->clock) + sh_mmcif_clock_control(host, ios->clock); + + host->bus_width = ios->bus_width; +} + +static struct mmc_host_ops sh_mmcif_ops = { + .request = sh_mmcif_request, + .set_ios = sh_mmcif_set_ios, +}; + +static void sh_mmcif_detect(struct mmc_host *mmc) +{ + mmc_detect_change(mmc, 0); +} + +static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) +{ + struct sh_mmcif_host *host = dev_id; + u32 state = 0; + int err = 0; + + state = sh_mmcif_readl(host, MMCIF_CE_INT); + + if (state & INT_RBSYE) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE)); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE); + } else if (state & INT_CRSPE) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE); + } else if (state & INT_BUFREN) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); + } else if (state & INT_BUFWEN) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN); +
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); + } else if (state & INT_CMD12DRE) { + sh_mmcif_writel(host, MMCIF_CE_INT, + ~(INT_CMD12DRE | INT_CMD12RBE | + INT_CMD12CRE | INT_BUFRE)); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); + } else if (state & INT_BUFRE) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); + } else if (state & INT_DTRANE) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); + } else if (state & INT_CMD12RBE) { + sh_mmcif_writel(host, MMCIF_CE_INT, + ~(INT_CMD12RBE | INT_CMD12CRE)); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); + } else if (state & INT_ERR_STS) { + /* err interrupts */ + sh_mmcif_writel(host, MMCIF_CE_INT, ~state); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); + err = 1; + } else { + pr_debug("%s: unsupported interrupt\n", DRIVER_NAME); + sh_mmcif_writel(host, MMCIF_CE_INT, ~state); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); + err = 1; + } + if (err) { + host->sd_error = 1; + pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state); + } + host->wait_int = 1; + wake_up(&host->intr_wait); + + return IRQ_HANDLED; +} + +static int __devinit sh_mmcif_probe(struct platform_device *pdev) +{ + int ret = 0, irq[2]; + struct mmc_host *mmc; + struct sh_mmcif_host *host = NULL; + struct sh_mmcif_plat_data *pd = NULL; + struct resource *res; + void __iomem *reg; + char clk_name[8]; + + irq[0] = platform_get_irq(pdev, 0); + irq[1] = platform_get_irq(pdev, 1); + if (irq[0] < 0 || irq[1] < 0) { + pr_err(DRIVER_NAME": failed to get IRQ\n"); + return -ENXIO; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "platform_get_resource error.\n"); + return -ENXIO; + } + reg = ioremap(res->start, resource_size(res)); + if (!reg) { + dev_err(&pdev->dev, "ioremap error.\n"); + return -ENOMEM; + } + pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data); + if (!pd) { + dev_err(&pdev->dev, "sh_mmcif plat data error.\n"); + ret = -ENXIO; + goto clean_up; + } + mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto clean_up; + } + host = mmc_priv(mmc); + host->mmc = mmc; + host->addr = reg; + host->timeout = 1000; + + snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id); + host->hclk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(host->hclk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + ret = PTR_ERR(host->hclk); + goto clean_up1; + } + clk_enable(host->hclk); + host->clk = clk_get_rate(host->hclk); + host->pd = pdev; + + init_waitqueue_head(&host->intr_wait); + + mmc->ops = &sh_mmcif_ops; + mmc->f_max = host->clk; + /* close to 400KHz */ + if (mmc->f_max < 51200000) + mmc->f_min = mmc->f_max / 128; + else if (mmc->f_max < 102400000) + mmc->f_min = mmc->f_max / 256; + else + mmc->f_min = mmc->f_max / 512; + if (pd->ocr) + mmc->ocr_avail = pd->ocr; + mmc->caps = MMC_CAP_MMC_HIGHSPEED; + if (pd->caps) + mmc->caps |= pd->caps; + mmc->max_phys_segs = 128; + mmc->max_hw_segs = 128; + mmc->max_blk_size = 512; + mmc->max_blk_count = 65535; + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + mmc->max_seg_size = mmc->max_req_size; + + sh_mmcif_sync_reset(host); + platform_set_drvdata(pdev, host); + mmc_add_host(mmc); + + ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); + if (ret) { + pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n"); + goto clean_up2; + } + ret =
+	if (pd->ocr)
+		mmc->ocr_avail = pd->ocr;
+	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
+	if (pd->caps)
+		mmc->caps |= pd->caps;
+	mmc->max_phys_segs = 128;
+	mmc->max_hw_segs = 128;
+	mmc->max_blk_size = 512;
+	mmc->max_blk_count = 65535;
+	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+	mmc->max_seg_size = mmc->max_req_size;
+
+	sh_mmcif_sync_reset(host);
+	platform_set_drvdata(pdev, host);
+	mmc_add_host(mmc);
+
+	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+	if (ret) {
+		pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
+		goto clean_up2;
+	}
+	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+	if (ret) {
+		free_irq(irq[0], host);
+		pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
+		goto clean_up2;
+	}
+
+	sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
+	sh_mmcif_detect(host->mmc);
+
+	pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
+	pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
+		sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff);
+	return ret;
+
+clean_up2:
+	clk_disable(host->hclk);
+clean_up1:
+	mmc_free_host(mmc);
+clean_up:
+	if (reg)
+		iounmap(reg);
+	return ret;
+}
+
+static int __devexit sh_mmcif_remove(struct platform_device *pdev)
+{
+	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+	int irq[2];
+
+	sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
+
+	irq[0] = platform_get_irq(pdev, 0);
+	irq[1] = platform_get_irq(pdev, 1);
+
+	if (host->addr)
+		iounmap(host->addr);
+
+	platform_set_drvdata(pdev, NULL);
+	mmc_remove_host(host->mmc);
+
+	free_irq(irq[0], host);
+	free_irq(irq[1], host);
+
+	clk_disable(host->hclk);
+	mmc_free_host(host->mmc);
+
+	return 0;
+}
+
+static struct platform_driver sh_mmcif_driver = {
+	.probe = sh_mmcif_probe,
+	.remove = sh_mmcif_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+	},
+};
+
+static int __init sh_mmcif_init(void)
+{
+	return platform_driver_register(&sh_mmcif_driver);
+}
+
+static void __exit sh_mmcif_exit(void)
+{
+	platform_driver_unregister(&sh_mmcif_driver);
+}
+
+module_init(sh_mmcif_init);
+module_exit(sh_mmcif_exit);
+
+
+MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
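The MMC host hunks that follow (tifm_sd, tmio_mmc, via-sdmmc and wbsd below) all make the same mechanical API conversion: mmc_suspend_host() no longer takes the pm_message_t argument. A minimal sketch of a converted suspend hook, with an illustrative foo_ driver name (not a driver from this series):

	static int foo_suspend(struct platform_device *dev, pm_message_t state)
	{
		struct mmc_host *mmc = platform_get_drvdata(dev);

		/* was: return mmc_suspend_host(mmc, state); */
		return mmc_suspend_host(mmc);
	}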
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 82554ddec6b3..cec99958b652 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c
@@ -1032,7 +1032,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
 
 static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
 {
-	return mmc_suspend_host(tifm_get_drvdata(sock), state);
+	return mmc_suspend_host(tifm_get_drvdata(sock));
 }
 
 static int tifm_sd_resume(struct tifm_dev *sock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index b2b577f6afd4..ee7d0a5a51c4 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c
@@ -29,6 +29,7 @@
 #include <linux/irq.h>
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/mmc/host.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
@@ -131,8 +132,8 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 
 	host->cmd = cmd;
 
-/* FIXME - this seems to be ok comented out but the spec suggest this bit should
- * be set when issuing app commands.
+/* FIXME - this seems to be ok commented out but the spec suggests this bit
+ * should be set when issuing app commands.
  *	if(cmd->flags & MMC_FLAG_ACMD)
  *		c |= APP_CMD;
  */
@@ -155,12 +156,12 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 	return 0;
 }
 
-/* This chip always returns (at least?) as much data as you ask for.
+/*
+ * This chip always returns (at least?) as much data as you ask for.
  * I'm unsure what happens if you ask for less than a block. This should be
  * looked into to ensure that a funny length read doesn't hose the controller.
- *
  */
-static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
 	unsigned short *buf;
@@ -180,7 +181,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 		count = data->blksz;
 
 	pr_debug("count: %08x offset: %08x flags %08x\n",
-		count, host->sg_off, data->flags);
+		 count, host->sg_off, data->flags);
 
 	/* Transfer the data */
 	if (data->flags & MMC_DATA_READ)
@@ -198,7 +199,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	return;
 }
 
-static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
 	struct mmc_command *stop;
@@ -206,7 +207,7 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	host->data = NULL;
 
 	if (!data) {
-		pr_debug("Spurious data end IRQ\n");
+		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
 		return;
 	}
 	stop = data->stop;
@@ -219,7 +220,8 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 
 	pr_debug("Completed data request\n");
 
-	/*FIXME - other drivers allow an optional stop command of any given type
+	/*
+	 * FIXME: other drivers allow an optional stop command of any given type
 	 * which we don't do, as the chip can auto generate them.
 	 * Perhaps we can be smarter about when to use auto CMD12 and
 	 * only issue the auto request when we know this is the desired
@@ -227,10 +229,17 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	 * upper layers expect. For now, we do what works.
 	 */
 
-	if (data->flags & MMC_DATA_READ)
-		disable_mmc_irqs(host, TMIO_MASK_READOP);
-	else
-		disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+	if (data->flags & MMC_DATA_READ) {
+		if (!host->chan_rx)
+			disable_mmc_irqs(host, TMIO_MASK_READOP);
+		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
+			host->mrq);
+	} else {
+		if (!host->chan_tx)
+			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
+			host->mrq);
+	}
 
 	if (stop) {
 		if (stop->opcode == 12 && !stop->arg)
@@ -242,7 +251,35 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	tmio_mmc_finish_request(host);
 }
 
-static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+{
+	struct mmc_data *data = host->data;
+
+	if (!data)
+		return;
+
+	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
+		/*
+		 * Has all data been written out yet? Testing on SuperH showed
+		 * that in most cases the first interrupt already comes with
+		 * the BUSY status bit clear, but on some operations, like
+		 * mount or in the beginning of a write / sync / umount, there
+		 * is one DATAEND interrupt with the BUSY bit set; in these
+		 * cases waiting for one more interrupt fixes the problem.
+		 */
+		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
+			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+			tasklet_schedule(&host->dma_complete);
+		}
+	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
+		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+		tasklet_schedule(&host->dma_complete);
+	} else {
+		tmio_mmc_do_data_irq(host);
+	}
+}
+
+static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	unsigned int stat)
 {
 	struct mmc_command *cmd = host->cmd;
@@ -282,10 +319,16 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	 * If there's no data or we encountered an error, finish now.
*/ if (host->data && !cmd->error) { - if (host->data->flags & MMC_DATA_READ) - enable_mmc_irqs(host, TMIO_MASK_READOP); - else - enable_mmc_irqs(host, TMIO_MASK_WRITEOP); + if (host->data->flags & MMC_DATA_READ) { + if (!host->chan_rx) + enable_mmc_irqs(host, TMIO_MASK_READOP); + } else { + struct dma_chan *chan = host->chan_tx; + if (!chan) + enable_mmc_irqs(host, TMIO_MASK_WRITEOP); + else + tasklet_schedule(&host->dma_issue); + } } else { tmio_mmc_finish_request(host); } @@ -293,7 +336,6 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, return; } - static irqreturn_t tmio_mmc_irq(int irq, void *devid) { struct tmio_mmc_host *host = devid; @@ -311,7 +353,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid) if (!ireg) { disable_mmc_irqs(host, status & ~irq_mask); - pr_debug("tmio_mmc: Spurious irq, disabling! " + pr_warning("tmio_mmc: Spurious irq, disabling! " "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); pr_debug_status(status); @@ -363,16 +405,265 @@ out: return IRQ_HANDLED; } +#ifdef CONFIG_TMIO_MMC_DMA +static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) +{ +#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) + /* Switch DMA mode on or off - SuperH specific? */ + sd_ctrl_write16(host, 0xd8, enable ? 2 : 0); +#endif +} + +static void tmio_dma_complete(void *arg) +{ + struct tmio_mmc_host *host = arg; + + dev_dbg(&host->pdev->dev, "Command completed\n"); + + if (!host->data) + dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n"); + else + enable_mmc_irqs(host, TMIO_STAT_DATAEND); +} + +static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) +{ + struct scatterlist *sg = host->sg_ptr; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *chan = host->chan_rx; + int ret; + + ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE); + if (ret > 0) { + host->dma_sglen = ret; + desc = chan->device->device_prep_slave_sg(chan, sg, ret, + DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } + + if (desc) { + host->desc = desc; + desc->callback = tmio_dma_complete; + desc->callback_param = host; + host->cookie = desc->tx_submit(desc); + if (host->cookie < 0) { + host->desc = NULL; + ret = host->cookie; + } else { + chan->device->device_issue_pending(chan); + } + } + dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", + __func__, host->sg_len, ret, host->cookie, host->mrq); + + if (!host->desc) { + /* DMA failed, fall back to PIO */ + if (ret >= 0) + ret = -EIO; + host->chan_rx = NULL; + dma_release_channel(chan); + /* Free the Tx channel too */ + chan = host->chan_tx; + if (chan) { + host->chan_tx = NULL; + dma_release_channel(chan); + } + dev_warn(&host->pdev->dev, + "DMA failed: %d, falling back to PIO\n", ret); + tmio_mmc_enable_dma(host, false); + reset(host); + /* Fail this request, let above layers recover */ + host->mrq->cmd->error = ret; + tmio_mmc_finish_request(host); + } + + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, + desc, host->cookie, host->sg_len); + + return ret > 0 ? 
0 : ret; +} + +static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) +{ + struct scatterlist *sg = host->sg_ptr; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *chan = host->chan_tx; + int ret; + + ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE); + if (ret > 0) { + host->dma_sglen = ret; + desc = chan->device->device_prep_slave_sg(chan, sg, ret, + DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } + + if (desc) { + host->desc = desc; + desc->callback = tmio_dma_complete; + desc->callback_param = host; + host->cookie = desc->tx_submit(desc); + if (host->cookie < 0) { + host->desc = NULL; + ret = host->cookie; + } + } + dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", + __func__, host->sg_len, ret, host->cookie, host->mrq); + + if (!host->desc) { + /* DMA failed, fall back to PIO */ + if (ret >= 0) + ret = -EIO; + host->chan_tx = NULL; + dma_release_channel(chan); + /* Free the Rx channel too */ + chan = host->chan_rx; + if (chan) { + host->chan_rx = NULL; + dma_release_channel(chan); + } + dev_warn(&host->pdev->dev, + "DMA failed: %d, falling back to PIO\n", ret); + tmio_mmc_enable_dma(host, false); + reset(host); + /* Fail this request, let above layers recover */ + host->mrq->cmd->error = ret; + tmio_mmc_finish_request(host); + } + + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, + desc, host->cookie); + + return ret > 0 ? 0 : ret; +} + +static int tmio_mmc_start_dma(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + if (data->flags & MMC_DATA_READ) { + if (host->chan_rx) + return tmio_mmc_start_dma_rx(host); + } else { + if (host->chan_tx) + return tmio_mmc_start_dma_tx(host); + } + + return 0; +} + +static void tmio_issue_tasklet_fn(unsigned long priv) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; + struct dma_chan *chan = host->chan_tx; + + chan->device->device_issue_pending(chan); +} + +static void tmio_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + if (host->data->flags & MMC_DATA_READ) + dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen, + DMA_FROM_DEVICE); + else + dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen, + DMA_TO_DEVICE); + + tmio_mmc_do_data_irq(host); +} + +/* It might be necessary to make filter MFD specific */ +static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) +{ + dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); + chan->private = arg; + return true; +} + +static void tmio_mmc_request_dma(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + host->cookie = -EINVAL; + host->desc = NULL; + + /* We can only either use DMA for both Tx and Rx or not use it at all */ + if (pdata->dma) { + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, + pdata->dma->chan_priv_tx); + dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, + host->chan_tx); + + if (!host->chan_tx) + return; + + host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, + pdata->dma->chan_priv_rx); + dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, + host->chan_rx); + + if (!host->chan_rx) { + dma_release_channel(host->chan_tx); + host->chan_tx = NULL; + return; + } + + tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host); + tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host); + + tmio_mmc_enable_dma(host, true); + } +} + +static 
void tmio_mmc_release_dma(struct tmio_mmc_host *host) +{ + if (host->chan_tx) { + struct dma_chan *chan = host->chan_tx; + host->chan_tx = NULL; + dma_release_channel(chan); + } + if (host->chan_rx) { + struct dma_chan *chan = host->chan_rx; + host->chan_rx = NULL; + dma_release_channel(chan); + } + + host->cookie = -EINVAL; + host->desc = NULL; +} +#else +static int tmio_mmc_start_dma(struct tmio_mmc_host *host, + struct mmc_data *data) +{ + return 0; +} + +static void tmio_mmc_request_dma(struct tmio_mmc_host *host, + struct tmio_mmc_data *pdata) +{ + host->chan_tx = NULL; + host->chan_rx = NULL; +} + +static void tmio_mmc_release_dma(struct tmio_mmc_host *host) +{ +} +#endif + static int tmio_mmc_start_data(struct tmio_mmc_host *host, struct mmc_data *data) { pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", - data->blksz, data->blocks); + data->blksz, data->blocks); /* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */ if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { - printk(KERN_ERR "%s: %d byte block unsupported in 4 bit mode\n", - mmc_hostname(host->mmc), data->blksz); + pr_err("%s: %d byte block unsupported in 4 bit mode\n", + mmc_hostname(host->mmc), data->blksz); return -EINVAL; } @@ -383,7 +674,7 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); - return 0; + return tmio_mmc_start_dma(host, data); } /* Process requests from the MMC layer */ @@ -404,7 +695,6 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) } ret = tmio_mmc_start_command(host, mrq->cmd); - if (!ret) return; @@ -458,11 +748,14 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) static int tmio_mmc_get_ro(struct mmc_host *mmc) { struct tmio_mmc_host *host = mmc_priv(mmc); + struct mfd_cell *cell = host->pdev->dev.platform_data; + struct tmio_mmc_data *pdata = cell->driver_data; - return (sd_ctrl_read16(host, CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1; + return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || + (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 
0 : 1; } -static struct mmc_host_ops tmio_mmc_ops = { +static const struct mmc_host_ops tmio_mmc_ops = { .request = tmio_mmc_request, .set_ios = tmio_mmc_set_ios, .get_ro = tmio_mmc_get_ro, @@ -475,7 +768,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) struct mmc_host *mmc = platform_get_drvdata(dev); int ret; - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); /* Tell MFD core it can disable us now.*/ if (!ret && cell->disable) @@ -515,6 +808,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev) struct tmio_mmc_host *host; struct mmc_host *mmc; int ret = -EINVAL; + u32 irq_mask = TMIO_MASK_CMD; if (dev->num_resources != 2) goto out; @@ -553,7 +847,10 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev) mmc->caps |= pdata->capabilities; mmc->f_max = pdata->hclk; mmc->f_min = mmc->f_max / 512; - mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + if (pdata->ocr_mask) + mmc->ocr_avail = pdata->ocr_mask; + else + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; /* Tell the MFD core we are ready to be enabled */ if (cell->enable) { @@ -578,13 +875,20 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev) if (ret) goto cell_disable; + /* See if we also get DMA */ + tmio_mmc_request_dma(host, pdata); + mmc_add_host(mmc); - printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), - (unsigned long)host->ctl, host->irq); + pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), + (unsigned long)host->ctl, host->irq); /* Unmask the IRQs we want to know about */ - enable_mmc_irqs(host, TMIO_MASK_IRQ); + if (!host->chan_rx) + irq_mask |= TMIO_MASK_READOP; + if (!host->chan_tx) + irq_mask |= TMIO_MASK_WRITEOP; + enable_mmc_irqs(host, irq_mask); return 0; @@ -609,6 +913,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev) if (mmc) { struct tmio_mmc_host *host = mmc_priv(mmc); mmc_remove_host(mmc); + tmio_mmc_release_dma(host); free_irq(host->irq, host); if (cell->disable) cell->disable(dev); diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index dafecfbcd91a..64f7d5dfc106 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -10,6 +10,8 @@ */ #include <linux/highmem.h> +#include <linux/interrupt.h> +#include <linux/dmaengine.h> #define CTL_SD_CMD 0x00 #define CTL_ARG_REG 0x04 @@ -106,6 +108,17 @@ struct tmio_mmc_host { unsigned int sg_off; struct platform_device *pdev; + + /* DMA support */ + struct dma_chan *chan_rx; + struct dma_chan *chan_tx; + struct tasklet_struct dma_complete; + struct tasklet_struct dma_issue; +#ifdef CONFIG_TMIO_MMC_DMA + struct dma_async_tx_descriptor *desc; + unsigned int dma_sglen; + dma_cookie_t cookie; +#endif }; #include <linux/io.h> diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 632858a94376..19f2d72dbca5 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -1280,7 +1280,7 @@ static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state) via_save_pcictrlreg(host); via_save_sdcreg(host); - ret = mmc_suspend_host(host->mmc, state); + ret = mmc_suspend_host(host->mmc); pci_save_state(pcidev); pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 69efe01eece8..0012f5d13d28 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1819,7 +1819,7 @@ static int wbsd_suspend(struct wbsd_host *host, pm_message_t state) { BUG_ON(host == NULL); - return 
mmc_suspend_host(host->mmc, state); + return mmc_suspend_host(host->mmc); } static int wbsd_resume(struct wbsd_host *host) diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 36dbcee1ac29..ba124baa646d 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -143,7 +143,7 @@ static int of_flash_remove(struct of_device *dev) static struct mtd_info * __devinit obsolete_probe(struct of_device *dev, struct map_info *map) { - struct device_node *dp = dev->node; + struct device_node *dp = dev->dev.of_node; const char *of_probe; struct mtd_info *mtd; static const char *rom_probe_types[] @@ -221,7 +221,7 @@ static int __devinit of_flash_probe(struct of_device *dev, #ifdef CONFIG_MTD_PARTITIONS const char **part_probe_types; #endif - struct device_node *dp = dev->node; + struct device_node *dp = dev->dev.of_node; struct resource res; struct of_flash *info; const char *probe_type = match->data; @@ -245,7 +245,7 @@ static int __devinit of_flash_probe(struct of_device *dev, p = of_get_property(dp, "reg", &count); if (count % reg_tuple_size != 0) { dev_err(&dev->dev, "Malformed reg property on %s\n", - dev->node->full_name); + dev->dev.of_node->full_name); err = -EINVAL; goto err_flash_remove; } @@ -418,8 +418,11 @@ static struct of_device_id of_flash_match[] = { MODULE_DEVICE_TABLE(of, of_flash_match); static struct of_platform_driver of_flash_driver = { - .name = "of-flash", - .match_table = of_flash_match, + .driver = { + .name = "of-flash", + .owner = THIS_MODULE, + .of_match_table = of_flash_match, + }, .probe = of_flash_probe, .remove = of_flash_remove, }; diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index fadc4c45b455..0391c2527bd7 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c @@ -110,7 +110,7 @@ int uflash_devinit(struct of_device *op, struct device_node *dp) static int __devinit uflash_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; /* Flashprom must have the "user" property in order to * be used by this driver. 
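The MTD device-tree hunks in this region all apply the same two conversions: device-node accesses move from ofdev->node to ofdev->dev.of_node, and of_platform_driver registration moves .name and .match_table into the embedded .driver member, gaining .owner and the renamed .of_match_table. The converted shape, sketched with illustrative foo_ names (the compatible string is made up, and foo_probe/foo_remove are assumed to be defined elsewhere):

	static const struct of_device_id foo_match[] = {
		{ .compatible = "acme,foo", },
		{},
	};
	MODULE_DEVICE_TABLE(of, foo_match);

	static struct of_platform_driver foo_driver = {
		/* .name and .match_table used to sit directly in this
		 * struct; they now live in the embedded driver core member */
		.driver = {
			.name		= "foo",
			.owner		= THIS_MODULE,
			.of_match_table	= foo_match,
		},
		.probe	= foo_probe,
		.remove	= foo_remove,
	};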
@@ -149,8 +149,11 @@ static const struct of_device_id uflash_match[] = { MODULE_DEVICE_TABLE(of, uflash_match); static struct of_platform_driver uflash_driver = { - .name = DRIVER_NAME, - .match_table = uflash_match, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = uflash_match, + }, .probe = uflash_probe, .remove = __devexit_p(uflash_remove), }; diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 8bb5e4a66328..000d65ea55a4 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -468,8 +468,7 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start, return ret; } -static int mtd_ioctl(struct inode *inode, struct file *file, - u_int cmd, u_long arg) +static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) { struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; @@ -840,6 +839,17 @@ static int mtd_ioctl(struct inode *inode, struct file *file, return ret; } /* memory_ioctl */ +static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg) +{ + int ret; + + lock_kernel(); + ret = mtd_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + #ifdef CONFIG_COMPAT struct mtd_oob_buf32 { @@ -854,7 +864,6 @@ struct mtd_oob_buf32 { static long mtd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct inode *inode = file->f_path.dentry->d_inode; struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; void __user *argp = compat_ptr(arg); @@ -892,7 +901,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd, break; } default: - ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp); + ret = mtd_ioctl(file, cmd, (unsigned long)argp); } unlock_kernel(); @@ -960,7 +969,7 @@ static const struct file_operations mtd_fops = { .llseek = mtd_lseek, .read = mtd_read, .write = mtd_write, - .ioctl = mtd_ioctl, + .unlocked_ioctl = mtd_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mtd_compat_ioctl, #endif diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 3f38fb8e6666..5084cc517944 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -1030,14 +1030,14 @@ static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev, init_waitqueue_head(&ctrl->controller.wq); init_waitqueue_head(&ctrl->irq_wait); - ctrl->regs = of_iomap(ofdev->node, 0); + ctrl->regs = of_iomap(ofdev->dev.of_node, 0); if (!ctrl->regs) { dev_err(&ofdev->dev, "failed to get memory region\n"); ret = -ENODEV; goto err; } - ctrl->irq = of_irq_to_resource(ofdev->node, 0, NULL); + ctrl->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); if (ctrl->irq == NO_IRQ) { dev_err(&ofdev->dev, "failed to get irq resource\n"); ret = -ENODEV; @@ -1058,7 +1058,7 @@ static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev, goto err; } - for_each_child_of_node(ofdev->node, child) + for_each_child_of_node(ofdev->dev.of_node, child) if (of_device_is_compatible(child, "fsl,elbc-fcm-nand")) fsl_elbc_chip_probe(ctrl, child); @@ -1078,9 +1078,10 @@ static const struct of_device_id fsl_elbc_match[] = { static struct of_platform_driver fsl_elbc_ctrl_driver = { .driver = { - .name = "fsl-elbc", + .name = "fsl-elbc", + .owner = THIS_MODULE, + .of_match_table = fsl_elbc_match, }, - .match_table = fsl_elbc_match, .probe = fsl_elbc_ctrl_probe, .remove = fsl_elbc_ctrl_remove, }; diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index 2d215ccb564d..00aea6f7d1f1 100644 --- a/drivers/mtd/nand/fsl_upm.c 
+++ b/drivers/mtd/nand/fsl_upm.c @@ -360,8 +360,11 @@ static const struct of_device_id of_fun_match[] = { MODULE_DEVICE_TABLE(of, of_fun_match); static struct of_platform_driver of_fun_driver = { - .name = "fsl,upm-nand", - .match_table = of_fun_match, + .driver = { + .name = "fsl,upm-nand", + .owner = THIS_MODULE, + .of_match_table = of_fun_match, + }, .probe = fun_probe, .remove = __devexit_p(fun_remove), }; diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index b983cae8c298..98fd2bdf8be1 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c @@ -239,14 +239,14 @@ static int __devinit ndfc_probe(struct of_device *ofdev, dev_set_drvdata(&ofdev->dev, ndfc); /* Read the reg property to get the chip select */ - reg = of_get_property(ofdev->node, "reg", &len); + reg = of_get_property(ofdev->dev.of_node, "reg", &len); if (reg == NULL || len != 12) { dev_err(&ofdev->dev, "unable read reg property (%d)\n", len); return -ENOENT; } ndfc->chip_select = reg[0]; - ndfc->ndfcbase = of_iomap(ofdev->node, 0); + ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0); if (!ndfc->ndfcbase) { dev_err(&ofdev->dev, "failed to get memory\n"); return -EIO; @@ -255,20 +255,20 @@ static int __devinit ndfc_probe(struct of_device *ofdev, ccr = NDFC_CCR_BS(ndfc->chip_select); /* It is ok if ccr does not exist - just default to 0 */ - reg = of_get_property(ofdev->node, "ccr", NULL); + reg = of_get_property(ofdev->dev.of_node, "ccr", NULL); if (reg) ccr |= *reg; out_be32(ndfc->ndfcbase + NDFC_CCR, ccr); /* Set the bank settings if given */ - reg = of_get_property(ofdev->node, "bank-settings", NULL); + reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL); if (reg) { int offset = NDFC_BCFG0 + (ndfc->chip_select << 2); out_be32(ndfc->ndfcbase + offset, *reg); } - err = ndfc_chip_init(ndfc, ofdev->node); + err = ndfc_chip_init(ndfc, ofdev->dev.of_node); if (err) { iounmap(ndfc->ndfcbase); return err; @@ -294,9 +294,10 @@ MODULE_DEVICE_TABLE(of, ndfc_match); static struct of_platform_driver ndfc_driver = { .driver = { - .name = "ndfc", + .name = "ndfc", + .owner = THIS_MODULE, + .of_match_table = ndfc_match, }, - .match_table = ndfc_match, .probe = ndfc_probe, .remove = __devexit_p(ndfc_remove), }; diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index 090a05c12cbe..f02af24d033a 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c @@ -93,7 +93,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev, const struct of_device_id *match) { struct pci_dev *pdev; - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct resource res; struct nand_chip *chip; int err = 0; @@ -221,8 +221,11 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_match); static struct of_platform_driver pasemi_nand_driver = { - .name = (char*)driver_name, - .match_table = pasemi_nand_match, + .driver = { + .name = (char*)driver_name, + .owner = THIS_MODULE, + .of_match_table = pasemi_nand_match, + }, .probe = pasemi_nand_probe, .remove = pasemi_nand_remove, }; diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index b37cbde6e7db..884852dc7eb4 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c @@ -301,8 +301,11 @@ static const struct of_device_id socrates_nand_match[] = MODULE_DEVICE_TABLE(of, socrates_nand_match); static struct of_platform_driver socrates_nand_driver = { - .name = "socrates_nand", - .match_table = socrates_nand_match, + .driver = { + .name = 
"socrates_nand", + .owner = THIS_MODULE, + .of_match_table = socrates_nand_match, + }, .probe = socrates_nand_probe, .remove = __devexit_p(socrates_nand_remove), }; diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 72ebb3f06b86..4dfa6b90c21c 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -189,8 +189,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) return new_offset; } -static int vol_cdev_fsync(struct file *file, struct dentry *dentry, - int datasync) +static int vol_cdev_fsync(struct file *file, int datasync) { struct ubi_volume_desc *desc = file->private_data; struct ubi_device *ubi = desc->vol->ubi; diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index 82eaf65d2d85..ea9b7a098c9b 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c @@ -551,8 +551,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id) void __iomem *shmem; if (dev == NULL) { - pr_err("%s: net_interrupt(): irq %d for unknown device.\n", - dev->name, irq); + pr_err("net_interrupt(): irq %d for unknown device.\n", irq); return IRQ_NONE; } diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 373c1a563474..b46be490cd2a 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -283,6 +283,8 @@ struct be_adapter { u8 port_type; u8 transceiver; u8 generation; /* BladeEngine ASIC generation */ + u32 flash_status; + struct completion flash_compl; bool sriov_enabled; u32 vf_if_handle[BE_MAX_VF]; diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index e79bf8b9af3b..9d11dbf5e4da 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -59,6 +59,13 @@ static int be_mcc_compl_process(struct be_adapter *adapter, compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & CQE_STATUS_COMPL_MASK; + + if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) && + (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { + adapter->flash_status = compl_status; + complete(&adapter->flash_compl); + } + if (compl_status == MCC_STATUS_SUCCESS) { if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { struct be_cmd_resp_get_stats *resp = @@ -287,7 +294,7 @@ int be_cmd_POST(struct be_adapter *adapter) } else { return 0; } - } while (timeout < 20); + } while (timeout < 40); dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); return -1; @@ -1417,6 +1424,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, int status; spin_lock_bh(&adapter->mcc_lock); + adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1428,6 +1436,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, be_wrb_hdr_prepare(wrb, cmd->size, false, 1, OPCODE_COMMON_WRITE_FLASHROM); + wrb->tag1 = CMD_SUBSYSTEM_COMMON; be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_WRITE_FLASHROM, cmd->size); @@ -1439,10 +1448,16 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, req->params.op_code = cpu_to_le32(flash_opcode); req->params.data_buf_size = cpu_to_le32(buf_size); - status = be_mcc_notify_wait(adapter); + be_mcc_notify(adapter); + spin_unlock_bh(&adapter->mcc_lock); + + if (!wait_for_completion_timeout(&adapter->flash_compl, + msecs_to_jiffies(12000))) + status = -1; + else + status = adapter->flash_status; err: - spin_unlock_bh(&adapter->mcc_lock); return status; } diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 058d7f95f5ae..54b14272f333 100644 --- a/drivers/net/benet/be_main.c +++ 
b/drivers/net/benet/be_main.c @@ -1861,7 +1861,7 @@ static int be_setup(struct be_adapter *adapter) goto if_destroy; } vf++; - } while (vf < num_vfs); + } } else if (!be_physfn(adapter)) { status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); @@ -2319,6 +2319,7 @@ static int be_ctrl_init(struct be_adapter *adapter) spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); + init_completion(&adapter->flash_compl); pci_save_state(adapter->pdev); return 0; @@ -2487,10 +2488,6 @@ static int __devinit be_probe(struct pci_dev *pdev, status = be_cmd_POST(adapter); if (status) goto ctrl_clean; - - status = be_cmd_reset_function(adapter); - if (status) - goto ctrl_clean; } /* tell fw we're ready to fire cmds */ @@ -2498,6 +2495,12 @@ static int __devinit be_probe(struct pci_dev *pdev, if (status) goto ctrl_clean; + if (be_physfn(adapter)) { + status = be_cmd_reset_function(adapter); + if (status) + goto ctrl_clean; + } + status = be_stats_init(adapter); if (status) goto ctrl_clean; diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 39a54bad397f..368f33313fb6 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -1626,6 +1626,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev) return 0; out_err_mdiobus_register: + kfree(miibus->irq); mdiobus_free(miibus); out_err_alloc: peripheral_free_list(pin_req); @@ -1638,6 +1639,7 @@ static int __devexit bfin_mii_bus_remove(struct platform_device *pdev) struct mii_bus *miibus = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); mdiobus_unregister(miibus); + kfree(miibus->irq); mdiobus_free(miibus); peripheral_free_list(pin_req); return 0; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 05b751719bd5..2c5227c02fa0 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -63,6 +63,16 @@ config CAN_BFIN To compile this driver as a module, choose M here: the module will be called bfin_can. +config CAN_JANZ_ICAN3 + tristate "Janz VMOD-ICAN3 Intelligent CAN controller" + depends on CAN_DEV && MFD_JANZ_CMODIO + ---help--- + Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which + connects to a MODULbus carrier board. + + This driver can also be built as a module. If so, the module will be + called janz-ican3.ko. + source "drivers/net/can/mscan/Kconfig" source "drivers/net/can/sja1000/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 7a702f28d01c..9047cd066fea 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -15,5 +15,6 @@ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o obj-$(CONFIG_CAN_BFIN) += bfin_can.o +obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c new file mode 100644 index 000000000000..6e533dcc36c0 --- /dev/null +++ b/drivers/net/can/janz-ican3.c @@ -0,0 +1,1830 @@ +/* + * Janz MODULbus VMOD-ICAN3 CAN Interface Driver + * + * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/platform_device.h> + +#include <linux/netdevice.h> +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> + +#include <linux/mfd/janz.h> + +/* the DPM has 64k of memory, organized into 256x 256 byte pages */ +#define DPM_NUM_PAGES 256 +#define DPM_PAGE_SIZE 256 +#define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE) + +/* JANZ ICAN3 "old-style" host interface queue page numbers */ +#define QUEUE_OLD_CONTROL 0 +#define QUEUE_OLD_RB0 1 +#define QUEUE_OLD_RB1 2 +#define QUEUE_OLD_WB0 3 +#define QUEUE_OLD_WB1 4 + +/* Janz ICAN3 "old-style" host interface control registers */ +#define MSYNC_PEER 0x00 /* ICAN only */ +#define MSYNC_LOCL 0x01 /* host only */ +#define TARGET_RUNNING 0x02 + +#define MSYNC_RB0 0x01 +#define MSYNC_RB1 0x02 +#define MSYNC_RBLW 0x04 +#define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1) + +#define MSYNC_WB0 0x10 +#define MSYNC_WB1 0x20 +#define MSYNC_WBLW 0x40 +#define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1) + +/* Janz ICAN3 "new-style" host interface queue page numbers */ +#define QUEUE_TOHOST 5 +#define QUEUE_FROMHOST_MID 6 +#define QUEUE_FROMHOST_HIGH 7 +#define QUEUE_FROMHOST_LOW 8 + +/* The first free page in the DPM is #9 */ +#define DPM_FREE_START 9 + +/* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */ +#define DESC_VALID 0x80 +#define DESC_WRAP 0x40 +#define DESC_INTERRUPT 0x20 +#define DESC_IVALID 0x10 +#define DESC_LEN(len) (len) + +/* Janz ICAN3 Firmware Messages */ +#define MSG_CONNECTI 0x02 +#define MSG_DISCONNECT 0x03 +#define MSG_IDVERS 0x04 +#define MSG_MSGLOST 0x05 +#define MSG_NEWHOSTIF 0x08 +#define MSG_INQUIRY 0x0a +#define MSG_SETAFILMASK 0x10 +#define MSG_INITFDPMQUEUE 0x11 +#define MSG_HWCONF 0x12 +#define MSG_FMSGLOST 0x15 +#define MSG_CEVTIND 0x37 +#define MSG_CBTRREQ 0x41 +#define MSG_COFFREQ 0x42 +#define MSG_CONREQ 0x43 +#define MSG_CCONFREQ 0x47 + +/* + * Janz ICAN3 CAN Inquiry Message Types + * + * NOTE: there appears to be a firmware bug here. You must send + * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED + * NOTE: response. 
The controller never responds to a message with + * NOTE: the INQUIRY_EXTENDED subspec :( + */ +#define INQUIRY_STATUS 0x00 +#define INQUIRY_TERMINATION 0x01 +#define INQUIRY_EXTENDED 0x04 + +/* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */ +#define SETAFILMASK_REJECT 0x00 +#define SETAFILMASK_FASTIF 0x02 + +/* Janz ICAN3 CAN Hardware Configuration Message Types */ +#define HWCONF_TERMINATE_ON 0x01 +#define HWCONF_TERMINATE_OFF 0x00 + +/* Janz ICAN3 CAN Event Indication Message Types */ +#define CEVTIND_EI 0x01 +#define CEVTIND_DOI 0x02 +#define CEVTIND_LOST 0x04 +#define CEVTIND_FULL 0x08 +#define CEVTIND_BEI 0x10 + +#define CEVTIND_CHIP_SJA1000 0x02 + +#define ICAN3_BUSERR_QUOTA_MAX 255 + +/* Janz ICAN3 CAN Frame Conversion */ +#define ICAN3_ECHO 0x10 +#define ICAN3_EFF_RTR 0x40 +#define ICAN3_SFF_RTR 0x10 +#define ICAN3_EFF 0x80 + +#define ICAN3_CAN_TYPE_MASK 0x0f +#define ICAN3_CAN_TYPE_SFF 0x00 +#define ICAN3_CAN_TYPE_EFF 0x01 + +#define ICAN3_CAN_DLC_MASK 0x0f + +/* + * SJA1000 Status and Error Register Definitions + * + * Copied from drivers/net/can/sja1000/sja1000.h + */ + +/* status register content */ +#define SR_BS 0x80 +#define SR_ES 0x40 +#define SR_TS 0x20 +#define SR_RS 0x10 +#define SR_TCS 0x08 +#define SR_TBS 0x04 +#define SR_DOS 0x02 +#define SR_RBS 0x01 + +#define SR_CRIT (SR_BS|SR_ES) + +/* ECC register */ +#define ECC_SEG 0x1F +#define ECC_DIR 0x20 +#define ECC_ERR 6 +#define ECC_BIT 0x00 +#define ECC_FORM 0x40 +#define ECC_STUFF 0x80 +#define ECC_MASK 0xc0 + +/* Number of buffers for use in the "new-style" host interface */ +#define ICAN3_NEW_BUFFERS 16 + +/* Number of buffers for use in the "fast" host interface */ +#define ICAN3_TX_BUFFERS 512 +#define ICAN3_RX_BUFFERS 1024 + +/* SJA1000 Clock Input */ +#define ICAN3_CAN_CLOCK 8000000 + +/* Driver Name */ +#define DRV_NAME "janz-ican3" + +/* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */ +struct ican3_dpm_control { + /* window address register */ + u8 window_address; + u8 unused1; + + /* + * Read access: clear interrupt from microcontroller + * Write access: send interrupt to microcontroller + */ + u8 interrupt; + u8 unused2; + + /* write-only: reset all hardware on the module */ + u8 hwreset; + u8 unused3; + + /* write-only: generate an interrupt to the TPU */ + u8 tpuinterrupt; +}; + +struct ican3_dev { + + /* must be the first member */ + struct can_priv can; + + /* CAN network device */ + struct net_device *ndev; + struct napi_struct napi; + + /* Device for printing */ + struct device *dev; + + /* module number */ + unsigned int num; + + /* base address of registers and IRQ */ + struct janz_cmodio_onboard_regs __iomem *ctrl; + struct ican3_dpm_control __iomem *dpmctrl; + void __iomem *dpm; + int irq; + + /* CAN bus termination status */ + struct completion termination_comp; + bool termination_enabled; + + /* CAN bus error status registers */ + struct completion buserror_comp; + struct can_berr_counter bec; + + /* old and new style host interface */ + unsigned int iftype; + + /* + * Any function which changes the current DPM page must hold this + * lock while it is performing data accesses. This ensures that the + * function will not be preempted and end up reading data from a + * different DPM page than it expects. 
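+	 * (For example, ican3_recv_msg() further down takes this lock and
+	 * ican3_old_recv_msg() then flips between the control page and a
+	 * mailbox page while copying a message out.)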
+	 */
+	spinlock_t lock;
+
+	/* new host interface */
+	unsigned int rx_int;
+	unsigned int rx_num;
+	unsigned int tx_num;
+
+	/* fast host interface */
+	unsigned int fastrx_start;
+	unsigned int fastrx_int;
+	unsigned int fastrx_num;
+	unsigned int fasttx_start;
+	unsigned int fasttx_num;
+
+	/* first free DPM page */
+	unsigned int free_page;
+};
+
+struct ican3_msg {
+	u8 control;
+	u8 spec;
+	__le16 len;
+	u8 data[252];
+};
+
+struct ican3_new_desc {
+	u8 control;
+	u8 pointer;
+};
+
+struct ican3_fast_desc {
+	u8 control;
+	u8 command;
+	u8 data[14];
+};
+
+/* write to the window base address register */
+static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page)
+{
+	BUG_ON(page >= DPM_NUM_PAGES);
+	iowrite8(page, &mod->dpmctrl->window_address);
+}
+
+/*
+ * ICAN3 "old-style" host interface
+ */
+
+/*
+ * Receive a message from the ICAN3 "old-style" firmware interface
+ *
+ * LOCKING: must hold mod->lock
+ *
+ * returns 0 on success, -ENOMEM when no message exists
+ */
+static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+	unsigned int mbox, mbox_page;
+	u8 locl, peer, xord;
+
+	/* get the MSYNC registers */
+	ican3_set_page(mod, QUEUE_OLD_CONTROL);
+	peer = ioread8(mod->dpm + MSYNC_PEER);
+	locl = ioread8(mod->dpm + MSYNC_LOCL);
+	xord = locl ^ peer;
+
+	if ((xord & MSYNC_RB_MASK) == 0x00) {
+		dev_dbg(mod->dev, "no mbox for reading\n");
+		return -ENOMEM;
+	}
+
+	/* find the first free mbox to read */
+	if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK)
+		mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1;
+	else
+		mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1;
+
+	/* copy the message */
+	mbox_page = (mbox == MSYNC_RB0) ? QUEUE_OLD_RB0 : QUEUE_OLD_RB1;
+	ican3_set_page(mod, mbox_page);
+	memcpy_fromio(msg, mod->dpm, sizeof(*msg));
+
+	/*
+	 * notify the firmware that the read buffer is available
+	 * for it to fill again
+	 */
+	locl ^= mbox;
+
+	ican3_set_page(mod, QUEUE_OLD_CONTROL);
+	iowrite8(locl, mod->dpm + MSYNC_LOCL);
+	return 0;
+}
+
+/*
+ * Send a message through the "old-style" firmware interface
+ *
+ * LOCKING: must hold mod->lock
+ *
+ * returns 0 on success, -ENOMEM when no free space exists
+ */
+static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+	unsigned int mbox, mbox_page;
+	u8 locl, peer, xord;
+
+	/* get the MSYNC registers */
+	ican3_set_page(mod, QUEUE_OLD_CONTROL);
+	peer = ioread8(mod->dpm + MSYNC_PEER);
+	locl = ioread8(mod->dpm + MSYNC_LOCL);
+	xord = locl ^ peer;
+
+	if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
+		dev_err(mod->dev, "no mbox for writing\n");
+		return -ENOMEM;
+	}
+
+	/* calculate a free mbox to use */
+	mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0;
+
+	/* copy the message to the DPM */
+	mbox_page = (mbox == MSYNC_WB0) ? QUEUE_OLD_WB0 : QUEUE_OLD_WB1;
+	ican3_set_page(mod, mbox_page);
+	memcpy_toio(mod->dpm, msg, sizeof(*msg));
+
+	locl ^= mbox;
+	if (mbox == MSYNC_WB1)
+		locl |= MSYNC_WBLW;
+
+	ican3_set_page(mod, QUEUE_OLD_CONTROL);
+	iowrite8(locl, mod->dpm + MSYNC_LOCL);
+	return 0;
+}
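+
+/*
+ * A worked example of the MSYNC handshake above, with made-up register
+ * values: the firmware owns MSYNC_PEER, the host owns MSYNC_LOCL, and a
+ * set bit in (locl ^ peer) marks a mailbox as holding an unread message.
+ *
+ *	locl = 0x01, peer = 0x03  ->  xord = 0x02 (MSYNC_RB1 is full)
+ *	copy the message from QUEUE_OLD_RB1, then locl ^= MSYNC_RB1
+ *	locl = 0x03, peer = 0x03  ->  xord = 0x00 (both mailboxes free)
+ */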
+
+/*
+ * ICAN3 "new-style" Host Interface Setup
+ */
+
+static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod)
+{
+	struct ican3_new_desc desc;
+	unsigned long flags;
+	void __iomem *dst;
+	int i;
+
+	spin_lock_irqsave(&mod->lock, flags);
+
+	/* setup the internal data structures for RX */
+	mod->rx_num = 0;
+	mod->rx_int = 0;
+
+	/* tohost queue descriptors are in page 5 */
+	ican3_set_page(mod, QUEUE_TOHOST);
+	dst = mod->dpm;
+
+	/* initialize the tohost (rx) queue descriptors: pages 9-24 */
+	for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
+		desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */
+		desc.pointer = mod->free_page;
+
+		/* set wrap flag on last buffer */
+		if (i == ICAN3_NEW_BUFFERS - 1)
+			desc.control |= DESC_WRAP;
+
+		memcpy_toio(dst, &desc, sizeof(desc));
+		dst += sizeof(desc);
+		mod->free_page++;
+	}
+
+	/* fromhost (tx) mid queue descriptors are in page 6 */
+	ican3_set_page(mod, QUEUE_FROMHOST_MID);
+	dst = mod->dpm;
+
+	/* setup the internal data structures for TX */
+	mod->tx_num = 0;
+
+	/* initialize the fromhost mid queue descriptors: pages 25-40 */
+	for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
+		desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */
+		desc.pointer = mod->free_page;
+
+		/* set wrap flag on last buffer */
+		if (i == ICAN3_NEW_BUFFERS - 1)
+			desc.control |= DESC_WRAP;
+
+		memcpy_toio(dst, &desc, sizeof(desc));
+		dst += sizeof(desc);
+		mod->free_page++;
+	}
+
+	/* fromhost hi queue descriptors are in page 7 */
+	ican3_set_page(mod, QUEUE_FROMHOST_HIGH);
+	dst = mod->dpm;
+
+	/* initialize only a single buffer in the fromhost hi queue (unused) */
+	desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
+	desc.pointer = mod->free_page;
+	memcpy_toio(dst, &desc, sizeof(desc));
+	mod->free_page++;
+
+	/* fromhost low queue descriptors are in page 8 */
+	ican3_set_page(mod, QUEUE_FROMHOST_LOW);
+	dst = mod->dpm;
+
+	/* initialize only a single buffer in the fromhost low queue (unused) */
+	desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
+	desc.pointer = mod->free_page;
+	memcpy_toio(dst, &desc, sizeof(desc));
+	mod->free_page++;
+
+	spin_unlock_irqrestore(&mod->lock, flags);
+}
+
+/*
+ * ICAN3 Fast Host Interface Setup
+ */
+
+static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod)
+{
+	struct ican3_fast_desc desc;
+	unsigned long flags;
+	unsigned int addr;
+	void __iomem *dst;
+	int i;
+
+	spin_lock_irqsave(&mod->lock, flags);
+
+	/* save the start recv page */
+	mod->fastrx_start = mod->free_page;
+	mod->fastrx_num = 0;
+	mod->fastrx_int = 0;
+
+	/* build a single fast tohost queue descriptor */
+	memset(&desc, 0, sizeof(desc));
+	desc.control = 0x00;
+	desc.command = 1;
+
+	/* build the tohost queue descriptor ring in memory */
+	addr = 0;
+	for (i = 0; i < ICAN3_RX_BUFFERS; i++) {
+
+		/* set the wrap bit on the last buffer */
+		if (i == ICAN3_RX_BUFFERS - 1)
+			desc.control |= DESC_WRAP;
+
+		/* switch to the correct page */
+		ican3_set_page(mod, mod->free_page);
+
+		/* copy the descriptor to the DPM */
+		dst = mod->dpm + addr;
+		memcpy_toio(dst, &desc, sizeof(desc));
+		addr += sizeof(desc);
+
+		/* move to the next page if necessary */
+		if (addr >= DPM_PAGE_SIZE) {
+			addr = 0;
+			mod->free_page++;
+		}
+	}
+
+	/* make sure
we page-align the next queue */ + if (addr != 0) + mod->free_page++; + + /* save the start xmit page */ + mod->fasttx_start = mod->free_page; + mod->fasttx_num = 0; + + /* build a single fast fromhost queue descriptor */ + memset(&desc, 0, sizeof(desc)); + desc.control = DESC_VALID; + desc.command = 1; + + /* build the fromhost queue descriptor ring in memory */ + addr = 0; + for (i = 0; i < ICAN3_TX_BUFFERS; i++) { + + /* set the wrap bit on the last buffer */ + if (i == ICAN3_TX_BUFFERS - 1) + desc.control |= DESC_WRAP; + + /* switch to the correct page */ + ican3_set_page(mod, mod->free_page); + + /* copy the descriptor to the DPM */ + dst = mod->dpm + addr; + memcpy_toio(dst, &desc, sizeof(desc)); + addr += sizeof(desc); + + /* move to the next page if necessary */ + if (addr >= DPM_PAGE_SIZE) { + addr = 0; + mod->free_page++; + } + } + + spin_unlock_irqrestore(&mod->lock, flags); +} + +/* + * ICAN3 "new-style" Host Interface Message Helpers + */ + +/* + * LOCKING: must hold mod->lock + */ +static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + struct ican3_new_desc desc; + void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc)); + + /* switch to the fromhost mid queue, and read the buffer descriptor */ + ican3_set_page(mod, QUEUE_FROMHOST_MID); + memcpy_fromio(&desc, desc_addr, sizeof(desc)); + + if (!(desc.control & DESC_VALID)) { + dev_dbg(mod->dev, "%s: no free buffers\n", __func__); + return -ENOMEM; + } + + /* switch to the data page, copy the data */ + ican3_set_page(mod, desc.pointer); + memcpy_toio(mod->dpm, msg, sizeof(*msg)); + + /* switch back to the descriptor, set the valid bit, write it back */ + ican3_set_page(mod, QUEUE_FROMHOST_MID); + desc.control ^= DESC_VALID; + memcpy_toio(desc_addr, &desc, sizeof(desc)); + + /* update the tx number */ + mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1); + return 0; +} + +/* + * LOCKING: must hold mod->lock + */ +static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + struct ican3_new_desc desc; + void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc)); + + /* switch to the tohost queue, and read the buffer descriptor */ + ican3_set_page(mod, QUEUE_TOHOST); + memcpy_fromio(&desc, desc_addr, sizeof(desc)); + + if (!(desc.control & DESC_VALID)) { + dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__); + return -ENOMEM; + } + + /* switch to the data page, copy the data */ + ican3_set_page(mod, desc.pointer); + memcpy_fromio(msg, mod->dpm, sizeof(*msg)); + + /* switch back to the descriptor, toggle the valid bit, write it back */ + ican3_set_page(mod, QUEUE_TOHOST); + desc.control ^= DESC_VALID; + memcpy_toio(desc_addr, &desc, sizeof(desc)); + + /* update the rx number */ + mod->rx_num = (desc.control & DESC_WRAP) ? 
0 : (mod->rx_num + 1); + return 0; +} + +/* + * Message Send / Recv Helpers + */ + +static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&mod->lock, flags); + + if (mod->iftype == 0) + ret = ican3_old_send_msg(mod, msg); + else + ret = ican3_new_send_msg(mod, msg); + + spin_unlock_irqrestore(&mod->lock, flags); + return ret; +} + +static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&mod->lock, flags); + + if (mod->iftype == 0) + ret = ican3_old_recv_msg(mod, msg); + else + ret = ican3_new_recv_msg(mod, msg); + + spin_unlock_irqrestore(&mod->lock, flags); + return ret; +} + +/* + * Quick Pre-constructed Messages + */ + +static int __devinit ican3_msg_connect(struct ican3_dev *mod) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_CONNECTI; + msg.len = cpu_to_le16(0); + + return ican3_send_msg(mod, &msg); +} + +static int __devexit ican3_msg_disconnect(struct ican3_dev *mod) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_DISCONNECT; + msg.len = cpu_to_le16(0); + + return ican3_send_msg(mod, &msg); +} + +static int __devinit ican3_msg_newhostif(struct ican3_dev *mod) +{ + struct ican3_msg msg; + int ret; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_NEWHOSTIF; + msg.len = cpu_to_le16(0); + + /* If we're not using the old interface, switching seems bogus */ + WARN_ON(mod->iftype != 0); + + ret = ican3_send_msg(mod, &msg); + if (ret) + return ret; + + /* mark the module as using the new host interface */ + mod->iftype = 1; + return 0; +} + +static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod) +{ + struct ican3_msg msg; + unsigned int addr; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_INITFDPMQUEUE; + msg.len = cpu_to_le16(8); + + /* write the tohost queue start address */ + addr = DPM_PAGE_ADDR(mod->fastrx_start); + msg.data[0] = addr & 0xff; + msg.data[1] = (addr >> 8) & 0xff; + msg.data[2] = (addr >> 16) & 0xff; + msg.data[3] = (addr >> 24) & 0xff; + + /* write the fromhost queue start address */ + addr = DPM_PAGE_ADDR(mod->fasttx_start); + msg.data[4] = addr & 0xff; + msg.data[5] = (addr >> 8) & 0xff; + msg.data[6] = (addr >> 16) & 0xff; + msg.data[7] = (addr >> 24) & 0xff; + + /* If we're not using the new interface yet, we cannot do this */ + WARN_ON(mod->iftype != 1); + + return ican3_send_msg(mod, &msg); +} + +/* + * Setup the CAN filter to either accept or reject all + * messages from the CAN bus. + */ +static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept) +{ + struct ican3_msg msg; + int ret; + + /* Standard Frame Format */ + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_SETAFILMASK; + msg.len = cpu_to_le16(5); + msg.data[0] = 0x00; /* IDLo LSB */ + msg.data[1] = 0x00; /* IDLo MSB */ + msg.data[2] = 0xff; /* IDHi LSB */ + msg.data[3] = 0x07; /* IDHi MSB */ + + /* accept all frames for fast host if, or reject all frames */ + msg.data[4] = accept ? 
SETAFILMASK_FASTIF : SETAFILMASK_REJECT; + + ret = ican3_send_msg(mod, &msg); + if (ret) + return ret; + + /* Extended Frame Format */ + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_SETAFILMASK; + msg.len = cpu_to_le16(13); + msg.data[0] = 0; /* MUX = 0 */ + msg.data[1] = 0x00; /* IDLo LSB */ + msg.data[2] = 0x00; + msg.data[3] = 0x00; + msg.data[4] = 0x20; /* IDLo MSB */ + msg.data[5] = 0xff; /* IDHi LSB */ + msg.data[6] = 0xff; + msg.data[7] = 0xff; + msg.data[8] = 0x3f; /* IDHi MSB */ + + /* accept all frames for fast host if, or reject all frames */ + msg.data[9] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT; + + return ican3_send_msg(mod, &msg); +} + +/* + * Bring the CAN bus online or offline + */ +static int ican3_set_bus_state(struct ican3_dev *mod, bool on) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = on ? MSG_CONREQ : MSG_COFFREQ; + msg.len = cpu_to_le16(0); + + return ican3_send_msg(mod, &msg); +} + +static int ican3_set_termination(struct ican3_dev *mod, bool on) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_HWCONF; + msg.len = cpu_to_le16(2); + msg.data[0] = 0x00; + msg.data[1] = on ? HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF; + + return ican3_send_msg(mod, &msg); +} + +static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_INQUIRY; + msg.len = cpu_to_le16(2); + msg.data[0] = subspec; + msg.data[1] = 0x00; + + return ican3_send_msg(mod, &msg); +} + +static int ican3_set_buserror(struct ican3_dev *mod, u8 quota) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_CCONFREQ; + msg.len = cpu_to_le16(2); + msg.data[0] = 0x00; + msg.data[1] = quota; + + return ican3_send_msg(mod, &msg); +} + +/* + * ICAN3 to Linux CAN Frame Conversion + */ + +static void ican3_to_can_frame(struct ican3_dev *mod, + struct ican3_fast_desc *desc, + struct can_frame *cf) +{ + if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) { + if (desc->data[1] & ICAN3_SFF_RTR) + cf->can_id |= CAN_RTR_FLAG; + + cf->can_id |= desc->data[0] << 3; + cf->can_id |= (desc->data[1] & 0xe0) >> 5; + cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK; + memcpy(cf->data, &desc->data[2], sizeof(cf->data)); + } else { + cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK; + if (desc->data[0] & ICAN3_EFF_RTR) + cf->can_id |= CAN_RTR_FLAG; + + if (desc->data[0] & ICAN3_EFF) { + cf->can_id |= CAN_EFF_FLAG; + cf->can_id |= desc->data[2] << 21; /* 28-21 */ + cf->can_id |= desc->data[3] << 13; /* 20-13 */ + cf->can_id |= desc->data[4] << 5; /* 12-5 */ + cf->can_id |= (desc->data[5] & 0xf8) >> 3; + } else { + cf->can_id |= desc->data[2] << 3; /* 10-3 */ + cf->can_id |= desc->data[3] >> 5; /* 2-0 */ + } + + memcpy(cf->data, &desc->data[6], sizeof(cf->data)); + } +} + +static void can_frame_to_ican3(struct ican3_dev *mod, + struct can_frame *cf, + struct ican3_fast_desc *desc) +{ + /* clear out any stale data in the descriptor */ + memset(desc->data, 0, sizeof(desc->data)); + + /* we always use the extended format, with the ECHO flag set */ + desc->command = ICAN3_CAN_TYPE_EFF; + desc->data[0] |= cf->can_dlc; + desc->data[1] |= ICAN3_ECHO; + + if (cf->can_id & CAN_RTR_FLAG) + desc->data[0] |= ICAN3_EFF_RTR; + + /* pack the id into the correct places */ + if (cf->can_id & CAN_EFF_FLAG) { + desc->data[0] |= ICAN3_EFF; + desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */ + desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */ + 
desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */ + desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */ + } else { + desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */ + desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */ + } + + /* copy the data bits into the descriptor */ + memcpy(&desc->data[6], cf->data, sizeof(cf->data)); +} + +/* + * Interrupt Handling + */ + +/* + * Handle an ID + Version message response from the firmware. We never generate + * this message in production code, but it is very useful when debugging to be + * able to display this message. + */ +static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg) +{ + dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data); +} + +static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg) +{ + struct net_device *dev = mod->ndev; + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + /* + * Report that communication messages with the microcontroller firmware + * are being lost. These are never CAN frames, so we do not generate an + * error frame for userspace + */ + if (msg->spec == MSG_MSGLOST) { + dev_err(mod->dev, "lost %d control messages\n", msg->data[0]); + return; + } + + /* + * Oops, this indicates that we have lost messages in the fast queue, + * which are exclusively CAN messages. Our driver isn't reading CAN + * frames fast enough. + * + * We'll pretend that the SJA1000 told us that it ran out of buffer + * space, because there is not a better message for this. + */ + skb = alloc_can_err_skb(dev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_errors++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } +} + +/* + * Handle CAN Event Indication Messages from the firmware + * + * The ICAN3 firmware provides the values of some SJA1000 registers when it + * generates this message. 
The code below is largely copied from the + * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary + */ +static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) +{ + struct net_device *dev = mod->ndev; + struct net_device_stats *stats = &dev->stats; + enum can_state state = mod->can.state; + u8 status, isrc, rxerr, txerr; + struct can_frame *cf; + struct sk_buff *skb; + + /* we can only handle the SJA1000 part */ + if (msg->data[1] != CEVTIND_CHIP_SJA1000) { + dev_err(mod->dev, "unable to handle errors on non-SJA1000\n"); + return -ENODEV; + } + + /* check the message length for sanity */ + if (le16_to_cpu(msg->len) < 6) { + dev_err(mod->dev, "error message too short\n"); + return -EINVAL; + } + + skb = alloc_can_err_skb(dev, &cf); + if (skb == NULL) + return -ENOMEM; + + isrc = msg->data[0]; + status = msg->data[3]; + rxerr = msg->data[4]; + txerr = msg->data[5]; + + /* data overrun interrupt */ + if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) { + dev_dbg(mod->dev, "data overrun interrupt\n"); + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_over_errors++; + stats->rx_errors++; + } + + /* error warning + passive interrupt */ + if (isrc == CEVTIND_EI) { + dev_dbg(mod->dev, "error warning + passive interrupt\n"); + if (status & SR_BS) { + state = CAN_STATE_BUS_OFF; + cf->can_id |= CAN_ERR_BUSOFF; + can_bus_off(dev); + } else if (status & SR_ES) { + if (rxerr >= 128 || txerr >= 128) + state = CAN_STATE_ERROR_PASSIVE; + else + state = CAN_STATE_ERROR_WARNING; + } else { + state = CAN_STATE_ERROR_ACTIVE; + } + } + + /* bus error interrupt */ + if (isrc == CEVTIND_BEI) { + u8 ecc = msg->data[2]; + + dev_dbg(mod->dev, "bus error interrupt\n"); + mod->can.can_stats.bus_error++; + stats->rx_errors++; + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + switch (ecc & ECC_MASK) { + case ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + cf->data[2] |= CAN_ERR_PROT_UNSPEC; + cf->data[3] = ecc & ECC_SEG; + break; + } + + if ((ecc & ECC_DIR) == 0) + cf->data[2] |= CAN_ERR_PROT_TX; + + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + + if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING || + state == CAN_STATE_ERROR_PASSIVE)) { + cf->can_id |= CAN_ERR_CRTL; + if (state == CAN_STATE_ERROR_WARNING) { + mod->can.can_stats.error_warning++; + cf->data[1] = (txerr > rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + } else { + mod->can.can_stats.error_passive++; + cf->data[1] = (txerr > rxerr) ? 
+ CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + } + + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + + mod->can.state = state; + stats->rx_errors++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + return 0; +} + +static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg) +{ + switch (msg->data[0]) { + case INQUIRY_STATUS: + case INQUIRY_EXTENDED: + mod->bec.rxerr = msg->data[5]; + mod->bec.txerr = msg->data[6]; + complete(&mod->buserror_comp); + break; + case INQUIRY_TERMINATION: + mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON; + complete(&mod->termination_comp); + break; + default: + dev_err(mod->dev, "received an unknown inquiry response\n"); + break; + } +} + +static void ican3_handle_unknown_message(struct ican3_dev *mod, + struct ican3_msg *msg) +{ + dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n", + msg->spec, le16_to_cpu(msg->len)); +} + +/* + * Handle a control message from the firmware + */ +static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg) +{ + dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__, + mod->num, msg->spec, le16_to_cpu(msg->len)); + + switch (msg->spec) { + case MSG_IDVERS: + ican3_handle_idvers(mod, msg); + break; + case MSG_MSGLOST: + case MSG_FMSGLOST: + ican3_handle_msglost(mod, msg); + break; + case MSG_CEVTIND: + ican3_handle_cevtind(mod, msg); + break; + case MSG_INQUIRY: + ican3_handle_inquiry(mod, msg); + break; + default: + ican3_handle_unknown_message(mod, msg); + break; + } +} + +/* + * Check that there is room in the TX ring to transmit another skb + * + * LOCKING: must hold mod->lock + */ +static bool ican3_txok(struct ican3_dev *mod) +{ + struct ican3_fast_desc __iomem *desc; + u8 control; + + /* copy the control bits of the descriptor */ + ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); + desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc)); + control = ioread8(&desc->control); + + /* if the control bits are not valid, then we have no more space */ + if (!(control & DESC_VALID)) + return false; + + return true; +} + +/* + * Receive one CAN frame from the hardware + * + * This works like the core of a NAPI poll function. All receive processing + * happens in the NAPI poller, which also drains the firmware's control + * message queue; keeping both in a single context simplifies locking.
+ * + * CONTEXT: called from the NAPI poll routine + */ +static int ican3_recv_skb(struct ican3_dev *mod) +{ + struct net_device *ndev = mod->ndev; + struct net_device_stats *stats = &ndev->stats; + struct ican3_fast_desc desc; + void __iomem *desc_addr; + struct can_frame *cf; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&mod->lock, flags); + + /* copy the whole descriptor */ + ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); + desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc)); + memcpy_fromio(&desc, desc_addr, sizeof(desc)); + + spin_unlock_irqrestore(&mod->lock, flags); + + /* check that we actually have a CAN frame */ + if (!(desc.control & DESC_VALID)) + return -ENOBUFS; + + /* allocate an skb */ + skb = alloc_can_skb(ndev, &cf); + if (unlikely(skb == NULL)) { + stats->rx_dropped++; + goto err_noalloc; + } + + /* convert the ICAN3 frame into Linux CAN format */ + ican3_to_can_frame(mod, &desc, cf); + + /* receive the skb, update statistics */ + netif_receive_skb(skb); + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + +err_noalloc: + /* toggle the valid bit and return the descriptor to the ring */ + desc.control ^= DESC_VALID; + + spin_lock_irqsave(&mod->lock, flags); + + ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); + memcpy_toio(desc_addr, &desc, 1); + + /* update the next buffer pointer */ + mod->fastrx_num = (desc.control & DESC_WRAP) ? 0 + : (mod->fastrx_num + 1); + + /* there are still more buffers to process */ + spin_unlock_irqrestore(&mod->lock, flags); + return 0; +} + +static int ican3_napi(struct napi_struct *napi, int budget) +{ + struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi); + struct ican3_msg msg; + unsigned long flags; + int received = 0; + int ret; + + /* process all communication messages */ + while (true) { + ret = ican3_recv_msg(mod, &msg); + if (ret) + break; + + ican3_handle_message(mod, &msg); + } + + /* process all CAN frames from the fast interface */ + while (received < budget) { + ret = ican3_recv_skb(mod); + if (ret) + break; + + received++; + } + + /* We processed fewer packets than our budget: the adapter is out + * of work, so stop polling */ + if (received < budget) + napi_complete(napi); + + spin_lock_irqsave(&mod->lock, flags); + + /* Wake up the transmit queue if necessary */ + if (netif_queue_stopped(mod->ndev) && ican3_txok(mod)) + netif_wake_queue(mod->ndev); + + spin_unlock_irqrestore(&mod->lock, flags); + + /* re-enable interrupt generation */ + iowrite8(1 << mod->num, &mod->ctrl->int_enable); + return received; +} + +static irqreturn_t ican3_irq(int irq, void *dev_id) +{ + struct ican3_dev *mod = dev_id; + u8 stat; + + /* + * The interrupt status register on this device reports interrupts + * as zeroes instead of using ones like most other devices + */ + stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num); + if (stat == (1 << mod->num)) + return IRQ_NONE; + + /* clear the MODULbus interrupt from the microcontroller */ + ioread8(&mod->dpmctrl->interrupt); + + /* disable interrupt generation, schedule the NAPI poller */ + iowrite8(1 << mod->num, &mod->ctrl->int_disable); + napi_schedule(&mod->napi); + return IRQ_HANDLED; +} + +/* + * Firmware reset, startup, and shutdown + */ + +/* + * Reset an ICAN module to its power-on state + * + * CONTEXT: no network device registered + * LOCKING: work function disabled + */ +static int ican3_reset_module(struct ican3_dev *mod) +{ + u8 val = 1 << mod->num; + unsigned long start; + u8 runold,
runnew; + + /* disable interrupts so no more work is scheduled */ + iowrite8(1 << mod->num, &mod->ctrl->int_disable); + + /* flush any pending work */ + flush_scheduled_work(); + + /* the first unallocated page in the DPM is #9 */ + mod->free_page = DPM_FREE_START; + + ican3_set_page(mod, QUEUE_OLD_CONTROL); + runold = ioread8(mod->dpm + TARGET_RUNNING); + + /* reset the module */ + iowrite8(val, &mod->ctrl->reset_assert); + iowrite8(val, &mod->ctrl->reset_deassert); + + /* wait until the module has finished resetting and is running */ + start = jiffies; + do { + ican3_set_page(mod, QUEUE_OLD_CONTROL); + runnew = ioread8(mod->dpm + TARGET_RUNNING); + if (runnew == (runold ^ 0xff)) + return 0; + + msleep(10); + } while (time_before(jiffies, start + HZ / 4)); + + dev_err(mod->dev, "failed to reset CAN module\n"); + return -ETIMEDOUT; +} + +static void __devexit ican3_shutdown_module(struct ican3_dev *mod) +{ + ican3_msg_disconnect(mod); + ican3_reset_module(mod); +} + +/* + * Startup an ICAN module, bringing it into fast mode + */ +static int __devinit ican3_startup_module(struct ican3_dev *mod) +{ + int ret; + + ret = ican3_reset_module(mod); + if (ret) { + dev_err(mod->dev, "unable to reset module\n"); + return ret; + } + + /* re-enable interrupts so we can send messages */ + iowrite8(1 << mod->num, &mod->ctrl->int_enable); + + ret = ican3_msg_connect(mod); + if (ret) { + dev_err(mod->dev, "unable to connect to module\n"); + return ret; + } + + ican3_init_new_host_interface(mod); + ret = ican3_msg_newhostif(mod); + if (ret) { + dev_err(mod->dev, "unable to switch to new-style interface\n"); + return ret; + } + + /* default to "termination on" */ + ret = ican3_set_termination(mod, true); + if (ret) { + dev_err(mod->dev, "unable to enable termination\n"); + return ret; + } + + /* default to "bus errors enabled" */ + ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX); + if (ret) { + dev_err(mod->dev, "unable to set bus-error\n"); + return ret; + } + + ican3_init_fast_host_interface(mod); + ret = ican3_msg_fasthostif(mod); + if (ret) { + dev_err(mod->dev, "unable to switch to fast host interface\n"); + return ret; + } + + ret = ican3_set_id_filter(mod, true); + if (ret) { + dev_err(mod->dev, "unable to set acceptance filter\n"); + return ret; + } + + return 0; +} + +/* + * CAN Network Device + */ + +static int ican3_open(struct net_device *ndev) +{ + struct ican3_dev *mod = netdev_priv(ndev); + u8 quota; + int ret; + + /* open the CAN layer */ + ret = open_candev(ndev); + if (ret) { + dev_err(mod->dev, "unable to start CAN layer\n"); + return ret; + } + + /* set the bus error generation state appropriately */ + if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + quota = ICAN3_BUSERR_QUOTA_MAX; + else + quota = 0; + + ret = ican3_set_buserror(mod, quota); + if (ret) { + dev_err(mod->dev, "unable to set bus-error\n"); + close_candev(ndev); + return ret; + } + + /* bring the bus online */ + ret = ican3_set_bus_state(mod, true); + if (ret) { + dev_err(mod->dev, "unable to set bus-on\n"); + close_candev(ndev); + return ret; + } + + /* start up the network device */ + mod->can.state = CAN_STATE_ERROR_ACTIVE; + netif_start_queue(ndev); + + return 0; +} + +static int ican3_stop(struct net_device *ndev) +{ + struct ican3_dev *mod = netdev_priv(ndev); + int ret; + + /* stop the network device xmit routine */ + netif_stop_queue(ndev); + mod->can.state = CAN_STATE_STOPPED; + + /* bring the bus offline, stop receiving packets */ + ret = ican3_set_bus_state(mod, false); + if (ret) { + 
dev_err(mod->dev, "unable to set bus-off\n"); + return ret; + } + + /* close the CAN layer */ + close_candev(ndev); + return 0; +} + +static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ican3_dev *mod = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf = (struct can_frame *)skb->data; + struct ican3_fast_desc desc; + void __iomem *desc_addr; + unsigned long flags; + + spin_lock_irqsave(&mod->lock, flags); + + /* check that we can actually transmit */ + if (!ican3_txok(mod)) { + dev_err(mod->dev, "no free descriptors, stopping queue\n"); + netif_stop_queue(ndev); + spin_unlock_irqrestore(&mod->lock, flags); + return NETDEV_TX_BUSY; + } + + /* copy the control bits of the descriptor */ + ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); + desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc)); + memset(&desc, 0, sizeof(desc)); + memcpy_fromio(&desc, desc_addr, 1); + + /* convert the Linux CAN frame into ICAN3 format */ + can_frame_to_ican3(mod, cf, &desc); + + /* + * the programming manual says that you must set the IVALID bit, then + * interrupt, then set the valid bit. Quite weird, but it seems to be + * required for this to work + */ + desc.control |= DESC_IVALID; + memcpy_toio(desc_addr, &desc, sizeof(desc)); + + /* generate a MODULbus interrupt to the microcontroller */ + iowrite8(0x01, &mod->dpmctrl->interrupt); + + desc.control ^= DESC_VALID; + memcpy_toio(desc_addr, &desc, sizeof(desc)); + + /* update the next buffer pointer */ + mod->fasttx_num = (desc.control & DESC_WRAP) ? 0 + : (mod->fasttx_num + 1); + + /* update statistics */ + stats->tx_packets++; + stats->tx_bytes += cf->can_dlc; + kfree_skb(skb); + + /* + * This hardware doesn't have TX-done notifications, so we'll try and + * emulate it the best we can using ECHO skbs. Get the next TX + * descriptor, and see if we have room to send. If not, stop the queue. + * It will be woken when the ECHO skb for the current packet is recv'd. 
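In this version the skb is freed above once counted, and the queue is + * woken from ican3_napi() when ican3_txok() reports a free descriptor.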
+ */ + + /* copy the control bits of the descriptor */ + if (!ican3_txok(mod)) + netif_stop_queue(ndev); + + spin_unlock_irqrestore(&mod->lock, flags); + return NETDEV_TX_OK; +} + +static const struct net_device_ops ican3_netdev_ops = { + .ndo_open = ican3_open, + .ndo_stop = ican3_stop, + .ndo_start_xmit = ican3_xmit, +}; + +/* + * Low-level CAN Device + */ + +/* This structure was stolen from drivers/net/can/sja1000/sja1000.c */ +static struct can_bittiming_const ican3_bittiming_const = { + .name = DRV_NAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 64, + .brp_inc = 1, +}; + +/* + * This routine was stolen from drivers/net/can/sja1000/sja1000.c + * + * The bittiming register command for the ICAN3 just sets the bit timing + * registers on the SJA1000 chip directly + */ +static int ican3_set_bittiming(struct net_device *ndev) +{ + struct ican3_dev *mod = netdev_priv(ndev); + struct can_bittiming *bt = &mod->can.bittiming; + struct ican3_msg msg; + u8 btr0, btr1; + + btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); + btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | + (((bt->phase_seg2 - 1) & 0x7) << 4); + if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + btr1 |= 0x80; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_CBTRREQ; + msg.len = cpu_to_le16(4); + msg.data[0] = 0x00; + msg.data[1] = 0x00; + msg.data[2] = btr0; + msg.data[3] = btr1; + + return ican3_send_msg(mod, &msg); +} + +static int ican3_set_mode(struct net_device *ndev, enum can_mode mode) +{ + struct ican3_dev *mod = netdev_priv(ndev); + int ret; + + if (mode != CAN_MODE_START) + return -ENOTSUPP; + + /* bring the bus online */ + ret = ican3_set_bus_state(mod, true); + if (ret) { + dev_err(mod->dev, "unable to set bus-on\n"); + return ret; + } + + /* start up the network device */ + mod->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + + return 0; +} + +static int ican3_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct ican3_dev *mod = netdev_priv(ndev); + int ret; + + ret = ican3_send_inquiry(mod, INQUIRY_STATUS); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&mod->buserror_comp, HZ); + if (ret <= 0) { + dev_info(mod->dev, "%s timed out\n", __func__); + return -ETIMEDOUT; + } + + bec->rxerr = mod->bec.rxerr; + bec->txerr = mod->bec.txerr; + return 0; +} + +/* + * Sysfs Attributes + */ + +static ssize_t ican3_sysfs_show_term(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); + int ret; + + ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&mod->termination_comp, HZ); + if (ret <= 0) { + dev_info(mod->dev, "%s timed out\n", __func__); + return -ETIMEDOUT; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled); +} + +static ssize_t ican3_sysfs_set_term(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); + unsigned long enable; + int ret; + + if (strict_strtoul(buf, 0, &enable)) + return -EINVAL; + + ret = ican3_set_termination(mod, enable); + if (ret) + return ret; + + return count; +} + +static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term, + ican3_sysfs_set_term); + +static struct attribute *ican3_sysfs_attrs[] = { + &dev_attr_termination.attr, + NULL, +}; + 
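The termination attribute defined above surfaces as an ordinary sysfs file on the network device, so no CAN-specific tooling is needed to drive it. A minimal user-space sketch, assuming the module comes up as interface can0 (the interface name and path are illustrative, not guaranteed by the driver):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical interface name; adjust to match your system */
	const char *path = "/sys/class/net/can0/termination";
	char buf[16];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* the store handler parses this with strict_strtoul() */
	if (write(fd, "1", 1) != 1)
		perror("write");

	/* the show handler queries the firmware, so this can block briefly */
	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("termination: %s", buf);
	}

	close(fd);
	return 0;
}

If the firmware inquiry times out, the read fails, matching the -ETIMEDOUT return in ican3_sysfs_show_term().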
+static struct attribute_group ican3_sysfs_attr_group = { + .attrs = ican3_sysfs_attrs, +}; + +/* + * Platform Subsystem + */ + +static int __devinit ican3_probe(struct platform_device *pdev) +{ + struct janz_platform_data *pdata; + struct net_device *ndev; + struct ican3_dev *mod; + struct resource *res; + struct device *dev; + int ret; + + pdata = pdev->dev.platform_data; + if (!pdata) + return -ENXIO; + + dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno); + + /* save the struct device for printing */ + dev = &pdev->dev; + + /* allocate the CAN device and private data */ + ndev = alloc_candev(sizeof(*mod), 0); + if (!ndev) { + dev_err(dev, "unable to allocate CANdev\n"); + ret = -ENOMEM; + goto out_return; + } + + platform_set_drvdata(pdev, ndev); + mod = netdev_priv(ndev); + mod->ndev = ndev; + mod->dev = &pdev->dev; + mod->num = pdata->modno; + netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); + spin_lock_init(&mod->lock); + init_completion(&mod->termination_comp); + init_completion(&mod->buserror_comp); + + /* setup device-specific sysfs attributes */ + ndev->sysfs_groups[0] = &ican3_sysfs_attr_group; + + /* the first unallocated page in the DPM is 9 */ + mod->free_page = DPM_FREE_START; + + ndev->netdev_ops = &ican3_netdev_ops; + ndev->flags |= IFF_ECHO; + SET_NETDEV_DEV(ndev, &pdev->dev); + + mod->can.clock.freq = ICAN3_CAN_CLOCK; + mod->can.bittiming_const = &ican3_bittiming_const; + mod->can.do_set_bittiming = ican3_set_bittiming; + mod->can.do_set_mode = ican3_set_mode; + mod->can.do_get_berr_counter = ican3_get_berr_counter; + mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES + | CAN_CTRLMODE_BERR_REPORTING; + + /* find our IRQ number */ + mod->irq = platform_get_irq(pdev, 0); + if (mod->irq < 0) { + dev_err(dev, "IRQ line not found\n"); + ret = -ENODEV; + goto out_free_ndev; + } + + ndev->irq = mod->irq; + + /* get access to the MODULbus registers for this module */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "MODULbus registers not found\n"); + ret = -ENODEV; + goto out_free_ndev; + } + + mod->dpm = ioremap(res->start, resource_size(res)); + if (!mod->dpm) { + dev_err(dev, "unable to ioremap MODULbus registers\n"); + ret = -ENOMEM; + goto out_free_ndev; + } + + mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE; + + /* get access to the control registers for this module */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(dev, "CONTROL registers not found\n"); + ret = -ENODEV; + goto out_iounmap_dpm; + } + + mod->ctrl = ioremap(res->start, resource_size(res)); + if (!mod->ctrl) { + dev_err(dev, "unable to ioremap CONTROL registers\n"); + ret = -ENOMEM; + goto out_iounmap_dpm; + } + + /* disable our IRQ, then hook up the IRQ handler */ + iowrite8(1 << mod->num, &mod->ctrl->int_disable); + ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod); + if (ret) { + dev_err(dev, "unable to request IRQ\n"); + goto out_iounmap_ctrl; + } + + /* reset and initialize the CAN controller into fast mode */ + napi_enable(&mod->napi); + ret = ican3_startup_module(mod); + if (ret) { + dev_err(dev, "%s: unable to start CANdev\n", __func__); + goto out_free_irq; + } + + /* register with the Linux CAN layer */ + ret = register_candev(ndev); + if (ret) { + dev_err(dev, "%s: unable to register CANdev\n", __func__); + goto out_free_irq; + } + + dev_info(dev, "module %d: registered CAN device\n", pdata->modno); + return 0; + +out_free_irq: + napi_disable(&mod->napi); + iowrite8(1 << mod->num, &mod->ctrl->int_disable); +
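/* unwind in reverse order of setup: IRQ handler, then the register + * mappings, then the netdevice allocation */ +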
free_irq(mod->irq, mod); +out_iounmap_ctrl: + iounmap(mod->ctrl); +out_iounmap_dpm: + iounmap(mod->dpm); +out_free_ndev: + free_candev(ndev); +out_return: + return ret; +} + +static int __devexit ican3_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ican3_dev *mod = netdev_priv(ndev); + + /* unregister the netdevice, stop interrupts */ + unregister_netdev(ndev); + napi_disable(&mod->napi); + iowrite8(1 << mod->num, &mod->ctrl->int_disable); + free_irq(mod->irq, mod); + + /* put the module into reset */ + ican3_shutdown_module(mod); + + /* unmap all registers */ + iounmap(mod->ctrl); + iounmap(mod->dpm); + + free_candev(ndev); + + return 0; +} + +static struct platform_driver ican3_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = ican3_probe, + .remove = __devexit_p(ican3_remove), +}; + +static int __init ican3_init(void) +{ + return platform_driver_register(&ican3_driver); +} + +static void __exit ican3_exit(void) +{ + platform_driver_unregister(&ican3_driver); +} + +MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); +MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:janz-ican3"); + +module_init(ican3_init); +module_exit(ican3_exit); diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 225fd147774a..8af8442c694a 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -392,15 +392,17 @@ static struct of_device_id __devinitdata mpc5xxx_can_table[] = { }; static struct of_platform_driver mpc5xxx_can_driver = { - .owner = THIS_MODULE, - .name = "mpc5xxx_can", + .driver = { + .name = "mpc5xxx_can", + .owner = THIS_MODULE, + .of_match_table = mpc5xxx_can_table, + }, .probe = mpc5xxx_can_probe, .remove = __devexit_p(mpc5xxx_can_remove), #ifdef CONFIG_PM .suspend = mpc5xxx_can_suspend, .resume = mpc5xxx_can_resume, #endif - .match_table = mpc5xxx_can_table, }; static int __init mpc5xxx_can_init(void) diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 85f7cbfe8e5f..0a8de01d52f7 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -599,6 +599,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv) priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_BERR_REPORTING; + spin_lock_init(&priv->cmdreg_lock); + if (sizeof_priv) priv->priv = (void *)priv + sizeof(struct sja1000_priv); diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 34e79efbd2fc..ac1a83d7c204 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -71,7 +71,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev) { struct net_device *dev = dev_get_drvdata(&ofdev->dev); struct sja1000_priv *priv = netdev_priv(dev); - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct resource res; dev_set_drvdata(&ofdev->dev, NULL); @@ -90,7 +90,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev) static int __devinit sja1000_ofp_probe(struct of_device *ofdev, const struct of_device_id *id) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct net_device *dev; struct sja1000_priv *priv; struct resource res; @@ -215,11 +215,13 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = { 
MODULE_DEVICE_TABLE(of, sja1000_ofp_table); static struct of_platform_driver sja1000_ofp_driver = { - .owner = THIS_MODULE, - .name = DRV_NAME, + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .of_match_table = sja1000_ofp_table, + }, .probe = sja1000_ofp_probe, .remove = __devexit_p(sja1000_ofp_remove), - .match_table = sja1000_ofp_table, }; static int __init sja1000_ofp_init(void) diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index be90d3598bca..fe925663d39a 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -3367,13 +3367,9 @@ static int cnic_cm_shutdown(struct cnic_dev *dev) static void cnic_init_context(struct cnic_dev *dev, u32 cid) { - struct cnic_local *cp = dev->cnic_priv; u32 cid_addr; int i; - if (CHIP_NUM(cp) == CHIP_NUM_5709) - return; - cid_addr = GET_CID_ADDR(cid); for (i = 0; i < CTX_SIZE; i += 4) @@ -3530,14 +3526,11 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) sb_id = cp->status_blk_num; tx_cid = 20; - cnic_init_context(dev, tx_cid); - cnic_init_context(dev, tx_cid + 1); cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { struct status_block_msix *sblk = cp->status_blk.bnx2; tx_cid = TX_TSS_CID + sb_id - 1; - cnic_init_context(dev, tx_cid); CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | (TX_TSS_CID << 7)); cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; @@ -3556,6 +3549,9 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; } else { + cnic_init_context(dev, tx_cid); + cnic_init_context(dev, tx_cid + 1); + offset0 = BNX2_L2CTX_TYPE; offset1 = BNX2_L2CTX_CMD_TYPE; offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index 110c62072e6f..0c55177db046 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h @@ -12,8 +12,8 @@ #ifndef CNIC_IF_H #define CNIC_IF_H -#define CNIC_MODULE_VERSION "2.1.1" -#define CNIC_MODULE_RELDATE "Feb 22, 2010" +#define CNIC_MODULE_VERSION "2.1.2" +#define CNIC_MODULE_RELDATE "May 26, 2010" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 02698a1c80b0..f547894ff48f 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -122,8 +122,11 @@ static struct of_device_id ehea_device_table[] = { MODULE_DEVICE_TABLE(of, ehea_device_table); static struct of_platform_driver ehea_driver = { - .name = "ehea", - .match_table = ehea_device_table, + .driver = { + .name = "ehea", + .owner = THIS_MODULE, + .of_match_table = ehea_device_table, + }, .probe = ehea_probe_adapter, .remove = ehea_remove, }; @@ -3050,7 +3053,7 @@ static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id, static void __devinit logical_port_release(struct device *dev) { struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); - of_node_put(port->ofdev.node); + of_node_put(port->ofdev.dev.of_node); } static struct device *ehea_register_port(struct ehea_port *port, @@ -3058,7 +3061,7 @@ static struct device *ehea_register_port(struct ehea_port *port, { int ret; - port->ofdev.node = of_node_get(dn); + port->ofdev.dev.of_node = of_node_get(dn); port->ofdev.dev.parent = &port->adapter->ofdev->dev; port->ofdev.dev.bus = &ibmebus_bus_type; @@ -3225,7 +3228,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter) const u32 *dn_log_port_id; int i = 0; - lhea_dn = adapter->ofdev->node; + lhea_dn = 
adapter->ofdev->dev.of_node; while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", @@ -3264,7 +3267,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter, struct device_node *eth_dn = NULL; const u32 *dn_log_port_id; - lhea_dn = adapter->ofdev->node; + lhea_dn = adapter->ofdev->dev.of_node; while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", @@ -3394,7 +3397,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev, const u64 *adapter_handle; int ret; - if (!dev || !dev->node) { + if (!dev || !dev->dev.of_node) { ehea_error("Invalid ibmebus device probed"); return -EINVAL; } @@ -3410,14 +3413,14 @@ static int __devinit ehea_probe_adapter(struct of_device *dev, adapter->ofdev = dev; - adapter_handle = of_get_property(dev->node, "ibm,hea-handle", + adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle", NULL); if (adapter_handle) adapter->handle = *adapter_handle; if (!adapter->handle) { dev_err(&dev->dev, "failed getting handle for adapter" - " '%s'\n", dev->node->full_name); + " '%s'\n", dev->dev.of_node->full_name); ret = -ENODEV; goto out_free_ad; } diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index e125113759a5..6586b5c7e4b6 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -1034,9 +1034,10 @@ static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac, { struct vic_provinfo *vp; u8 oui[3] = VIC_PROVINFO_CISCO_OUI; - unsigned short *uuid; + u8 *uuid; char uuid_str[38]; - static char *uuid_fmt = "%04X%04X-%04X-%04X-%04X-%04X%04X%04X"; + static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-" + "%02X%02X-%02X%02X%02X%02X%02X%02X"; int err; if (!name) return -EINVAL; @@ -1058,20 +1059,24 @@ static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac, ETH_ALEN, mac); if (instance_uuid) { - uuid = (unsigned short *)instance_uuid; + uuid = instance_uuid; sprintf(uuid_str, uuid_fmt, - uuid[0], uuid[1], uuid[2], uuid[3], - uuid[4], uuid[5], uuid[6], uuid[7]); + uuid[0], uuid[1], uuid[2], uuid[3], + uuid[4], uuid[5], uuid[6], uuid[7], + uuid[8], uuid[9], uuid[10], uuid[11], + uuid[12], uuid[13], uuid[14], uuid[15]); vic_provinfo_add_tlv(vp, VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, sizeof(uuid_str), uuid_str); } if (host_uuid) { - uuid = (unsigned short *)host_uuid; + uuid = host_uuid; sprintf(uuid_str, uuid_fmt, - uuid[0], uuid[1], uuid[2], uuid[3], - uuid[4], uuid[5], uuid[6], uuid[7]); + uuid[0], uuid[1], uuid[2], uuid[3], + uuid[4], uuid[5], uuid[6], uuid[7], + uuid[8], uuid[9], uuid[10], uuid[11], + uuid[12], uuid[13], uuid[14], uuid[15]); vic_provinfo_add_tlv(vp, VIC_LINUX_PROV_TLV_HOST_UUID_STR, sizeof(uuid_str), uuid_str); @@ -1127,6 +1132,14 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, switch (request) { case PORT_REQUEST_ASSOCIATE: + /* If the interface mac addr hasn't been assigned, + * assign a random mac addr before setting port- + * profile.
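+ * random_ether_addr() yields a locally administered unicast address, + * so it cannot collide with a vendor-assigned MAC.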
+ */ + + if (is_zero_ether_addr(netdev->dev_addr)) + random_ether_addr(netdev->dev_addr); + if (port[IFLA_PORT_PROFILE]) name = nla_data(port[IFLA_PORT_PROFILE]); diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index 14cbde5cf68e..6ed2df14ec84 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c @@ -174,6 +174,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); * @iobase: pointer to I/O memory region * @membase: pointer to buffer memory region * @dma_alloc: dma allocated buffer size + * @io_region_size: I/O memory region size * @num_tx: number of send buffers * @cur_tx: last send buffer written * @dty_tx: last buffer actually sent @@ -193,6 +194,7 @@ struct ethoc { void __iomem *iobase; void __iomem *membase; int dma_alloc; + resource_size_t io_region_size; unsigned int num_tx; unsigned int cur_tx; @@ -943,6 +945,7 @@ static int ethoc_probe(struct platform_device *pdev) priv = netdev_priv(netdev); priv->netdev = netdev; priv->dma_alloc = 0; + priv->io_region_size = mmio->end - mmio->start + 1; priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, resource_size(mmio)); @@ -1047,20 +1050,34 @@ static int ethoc_probe(struct platform_device *pdev) ret = register_netdev(netdev); if (ret < 0) { dev_err(&netdev->dev, "failed to register interface\n"); - goto error; + goto error2; } goto out; +error2: + netif_napi_del(&priv->napi); error: mdiobus_unregister(priv->mdio); free_mdio: kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: - if (priv->dma_alloc) - dma_free_coherent(NULL, priv->dma_alloc, priv->membase, - netdev->mem_start); + if (priv) { + if (priv->dma_alloc) + dma_free_coherent(NULL, priv->dma_alloc, priv->membase, + netdev->mem_start); + else if (priv->membase) + devm_iounmap(&pdev->dev, priv->membase); + if (priv->iobase) + devm_iounmap(&pdev->dev, priv->iobase); + } + if (mem) + devm_release_mem_region(&pdev->dev, mem->start, + mem->end - mem->start + 1); + if (mmio) + devm_release_mem_region(&pdev->dev, mmio->start, + mmio->end - mmio->start + 1); free_netdev(netdev); out: return ret; @@ -1078,6 +1095,7 @@ static int ethoc_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); if (netdev) { + netif_napi_del(&priv->napi); phy_disconnect(priv->phy); priv->phy = NULL; @@ -1089,6 +1107,14 @@ static int ethoc_remove(struct platform_device *pdev) if (priv->dma_alloc) dma_free_coherent(NULL, priv->dma_alloc, priv->membase, netdev->mem_start); + else { + devm_iounmap(&pdev->dev, priv->membase); + devm_release_mem_region(&pdev->dev, netdev->mem_start, + netdev->mem_end - netdev->mem_start + 1); + } + devm_iounmap(&pdev->dev, priv->iobase); + devm_release_mem_region(&pdev->dev, netdev->base_addr, + priv->io_region_size); unregister_netdev(netdev); free_netdev(netdev); } diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 42d9ac9ba395..ddf7a86cd466 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -41,6 +41,7 @@ #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/phy.h> +#include <linux/fec.h> #include <asm/cacheflush.h> @@ -182,6 +183,7 @@ struct fec_enet_private { struct phy_device *phy_dev; int mii_timeout; uint phy_speed; + phy_interface_t phy_interface; int index; int link; int full_duplex; @@ -679,6 +681,8 @@ static int fec_enet_mii_probe(struct net_device *dev) struct phy_device *phy_dev = NULL; int phy_addr; + fep->phy_dev = NULL; + /* find the first phy */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { if (fep->mii_bus->phy_map[phy_addr]) { @@ -709,6 +713,11 @@ static int 
fec_enet_mii_probe(struct net_device *dev) fep->link = 0; fep->full_duplex = 0; + printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, + fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), + fep->phy_dev->irq); + return 0; } @@ -754,13 +763,8 @@ static int fec_enet_mii_init(struct platform_device *pdev) if (mdiobus_register(fep->mii_bus)) goto err_out_free_mdio_irq; - if (fec_enet_mii_probe(dev) != 0) - goto err_out_unregister_bus; - return 0; -err_out_unregister_bus: - mdiobus_unregister(fep->mii_bus); err_out_free_mdio_irq: kfree(fep->mii_bus->irq); err_out_free_mdiobus: @@ -913,7 +917,12 @@ fec_enet_open(struct net_device *dev) if (ret) return ret; - /* schedule a link state check */ + /* Probe and connect to the PHY when opening the interface */ + ret = fec_enet_mii_probe(dev); + if (ret) { + fec_enet_free_buffers(dev); + return ret; + } phy_start(fep->phy_dev); netif_start_queue(dev); fep->opened = 1; @@ -927,10 +936,12 @@ fec_enet_close(struct net_device *dev) /* Don't know what to do yet. */ fep->opened = 0; - phy_stop(fep->phy_dev); netif_stop_queue(dev); fec_stop(dev); + if (fep->phy_dev) + phy_disconnect(fep->phy_dev); + fec_enet_free_buffers(dev); return 0; @@ -1191,6 +1202,21 @@ fec_restart(struct net_device *dev, int duplex) /* Set MII speed */ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); +#ifdef FEC_MIIGSK_ENR + if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { + /* disable the gasket and wait */ + writel(0, fep->hwp + FEC_MIIGSK_ENR); + while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) + udelay(1); + + /* configure the gasket: RMII, 50 MHz, no loopback, no echo */ + writel(1, fep->hwp + FEC_MIIGSK_CFGR); + + /* re-enable the gasket */ + writel(2, fep->hwp + FEC_MIIGSK_ENR); + } +#endif + /* And last, enable the transmit and receive processing */ writel(2, fep->hwp + FEC_ECNTRL); writel(0, fep->hwp + FEC_R_DES_ACTIVE); @@ -1226,6 +1252,7 @@ static int __devinit fec_probe(struct platform_device *pdev) { struct fec_enet_private *fep; + struct fec_platform_data *pdata; struct net_device *ndev; int i, irq, ret = 0; struct resource *r; @@ -1259,6 +1286,10 @@ fec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); + pdata = pdev->dev.platform_data; + if (pdata) + fep->phy_interface = pdata->phy; + /* This device has up to three irqs on some platforms */ for (i = 0; i < 3; i++) { irq = platform_get_irq(pdev, i); @@ -1294,11 +1325,6 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_register; - printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name, - fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), - fep->phy_dev->irq); - return 0; failed_register: diff --git a/drivers/net/fec.h b/drivers/net/fec.h index cc47f3f057c7..2c48b25668d5 100644 --- a/drivers/net/fec.h +++ b/drivers/net/fec.h @@ -43,6 +43,8 @@ #define FEC_R_DES_START 0x180 /* Receive descriptor ring */ #define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ +#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ +#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ #else diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index 221f440c10f4..25e6cc6840b1 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -871,7 +871,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) priv->ndev = ndev; /* Reserve FEC control zone */ - rv = of_address_to_resource(op->node, 0,
&mem); + rv = of_address_to_resource(op->dev.of_node, 0, &mem); if (rv) { printk(KERN_ERR DRIVER_NAME ": " "Error while parsing device node resource\n" ); @@ -919,7 +919,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) /* Get the IRQ we need one by one */ /* Control */ - ndev->irq = irq_of_parse_and_map(op->node, 0); + ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0); /* RX */ priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk); @@ -942,20 +942,20 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) /* Start with safe defaults for link connection */ priv->speed = 100; priv->duplex = DUPLEX_HALF; - priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->node) >> 20) / 5) << 1; + priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1; /* The current speed preconfigures the speed of the MII link */ - prop = of_get_property(op->node, "current-speed", &prop_size); + prop = of_get_property(op->dev.of_node, "current-speed", &prop_size); if (prop && (prop_size >= sizeof(u32) * 2)) { priv->speed = prop[0]; priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF; } /* If there is a phy handle, then get the PHY node */ - priv->phy_node = of_parse_phandle(op->node, "phy-handle", 0); + priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); /* the 7-wire property means don't use MII mode */ - if (of_find_property(op->node, "fsl,7-wire-mode", NULL)) { + if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) { priv->seven_wire_mode = 1; dev_info(&ndev->dev, "using 7-wire PHY mode\n"); } @@ -1063,9 +1063,11 @@ static struct of_device_id mpc52xx_fec_match[] = { MODULE_DEVICE_TABLE(of, mpc52xx_fec_match); static struct of_platform_driver mpc52xx_fec_driver = { - .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = mpc52xx_fec_match, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = mpc52xx_fec_match, + }, .probe = mpc52xx_fec_probe, .remove = mpc52xx_fec_remove, #ifdef CONFIG_PM diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index 7658a082e390..006f64d9f96a 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c @@ -66,7 +66,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match) { struct device *dev = &of->dev; - struct device_node *np = of->node; + struct device_node *np = of->dev.of_node; struct mii_bus *bus; struct mpc52xx_fec_mdio_priv *priv; struct resource res = {}; @@ -107,7 +107,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, /* set MII speed */ out_be32(&priv->regs->mii_speed, - ((mpc5xxx_get_bus_frequency(of->node) >> 20) / 5) << 1); + ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1); err = of_mdiobus_register(bus, np); if (err) @@ -159,10 +159,13 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = { MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); struct of_platform_driver mpc52xx_fec_mdio_driver = { - .name = "mpc5200b-fec-phy", + .driver = { + .name = "mpc5200b-fec-phy", + .owner = THIS_MODULE, + .of_match_table = mpc52xx_fec_mdio_match, + }, .probe = mpc52xx_fec_mdio_probe, .remove = mpc52xx_fec_mdio_remove, - .match_table = mpc52xx_fec_mdio_match, }; /* let fec driver call it, since this has to be registered before it */ diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 0fb0fefcb787..309a0eaddd81 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -1013,7 
+1013,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev, return -ENOMEM; if (!IS_FEC(match)) { - data = of_get_property(ofdev->node, "fsl,cpm-command", &len); + data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); if (!data || len != 4) goto out_free_fpi; @@ -1025,8 +1025,8 @@ static int __devinit fs_enet_probe(struct of_device *ofdev, fpi->rx_copybreak = 240; fpi->use_napi = 1; fpi->napi_weight = 17; - fpi->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0); - if ((!fpi->phy_node) && (!of_get_property(ofdev->node, "fixed-link", + fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); + if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link", NULL))) goto out_free_fpi; @@ -1059,7 +1059,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev, spin_lock_init(&fep->lock); spin_lock_init(&fep->tx_lock); - mac_addr = of_get_mac_address(ofdev->node); + mac_addr = of_get_mac_address(ofdev->dev.of_node); if (mac_addr) memcpy(ndev->dev_addr, mac_addr, 6); @@ -1156,8 +1156,11 @@ static struct of_device_id fs_enet_match[] = { MODULE_DEVICE_TABLE(of, fs_enet_match); static struct of_platform_driver fs_enet_driver = { - .name = "fs_enet", - .match_table = fs_enet_match, + .driver = { + .owner = THIS_MODULE, + .name = "fs_enet", + .of_match_table = fs_enet_match, + }, .probe = fs_enet_probe, .remove = fs_enet_remove, }; diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 714da967fa19..5d45084b287d 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -88,19 +88,19 @@ static int do_pd_setup(struct fs_enet_private *fep) struct fs_platform_info *fpi = fep->fpi; int ret = -EINVAL; - fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); + fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); if (fep->interrupt == NO_IRQ) goto out; - fep->fcc.fccp = of_iomap(ofdev->node, 0); + fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0); if (!fep->fcc.fccp) goto out; - fep->fcc.ep = of_iomap(ofdev->node, 1); + fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1); if (!fep->fcc.ep) goto out_fccp; - fep->fcc.fcccp = of_iomap(ofdev->node, 2); + fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2); if (!fep->fcc.fcccp) goto out_ep; diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index 7eff92ef01da..7ca1642276d0 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c @@ -98,11 +98,11 @@ static int do_pd_setup(struct fs_enet_private *fep) { struct of_device *ofdev = to_of_device(fep->dev); - fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); + fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); if (fep->interrupt == NO_IRQ) return -EINVAL; - fep->fec.fecp = of_iomap(ofdev->node, 0); + fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0); if (!fep->fcc.fccp) return -EINVAL; diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index 7f0591e43cd9..a3c44544846d 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -98,15 +98,15 @@ static int do_pd_setup(struct fs_enet_private *fep) { struct of_device *ofdev = to_of_device(fep->dev); - fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); + fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); if (fep->interrupt == NO_IRQ) return -EINVAL; - fep->scc.sccp = of_iomap(ofdev->node, 0); + fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0); if (!fep->scc.sccp) return -EINVAL; - fep->scc.ep = of_iomap(ofdev->node, 1); + 
fep->scc.ep = of_iomap(ofdev->dev.of_node, 1); if (!fep->scc.ep) { iounmap(fep->scc.sccp); return -EINVAL; diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index 24ff9f43a62b..0f90685d3d19 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c @@ -224,8 +224,11 @@ static struct of_device_id fs_enet_mdio_bb_match[] = { MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); static struct of_platform_driver fs_enet_bb_mdio_driver = { - .name = "fsl-bb-mdio", - .match_table = fs_enet_mdio_bb_match, + .driver = { + .name = "fsl-bb-mdio", + .owner = THIS_MODULE, + .of_match_table = fs_enet_mdio_bb_match, + }, .probe = fs_enet_mdio_probe, .remove = fs_enet_mdio_remove, }; diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index 5944b65082cb..bddffd169b93 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -124,7 +124,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, new_bus->write = &fs_enet_fec_mii_write; new_bus->reset = &fs_enet_fec_mii_reset; - ret = of_address_to_resource(ofdev->node, 0, &res); + ret = of_address_to_resource(ofdev->dev.of_node, 0, &res); if (ret) goto out_res; @@ -135,7 +135,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, goto out_fec; if (get_bus_freq) { - clock = get_bus_freq(ofdev->node); + clock = get_bus_freq(ofdev->dev.of_node); if (!clock) { /* Use maximum divider if clock is unknown */ dev_warn(&ofdev->dev, "could not determine IPS clock\n"); @@ -172,7 +172,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); - ret = of_mdiobus_register(new_bus, ofdev->node); + ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) goto out_free_irqs; @@ -222,8 +222,11 @@ static struct of_device_id fs_enet_mdio_fec_match[] = { MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); static struct of_platform_driver fs_enet_fec_mdio_driver = { - .name = "fsl-fec-mdio", - .match_table = fs_enet_mdio_fec_match, + .driver = { + .name = "fsl-fec-mdio", + .owner = THIS_MODULE, + .of_match_table = fs_enet_mdio_fec_match, + }, .probe = fs_enet_mdio_probe, .remove = fs_enet_mdio_remove, }; diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index ff028f59b930..b4c41d72c423 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c @@ -267,7 +267,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) static int fsl_pq_mdio_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct device_node *tbi; struct fsl_pq_mdio_priv *priv; struct fsl_pq_mdio __iomem *regs = NULL; @@ -471,10 +471,13 @@ static struct of_device_id fsl_pq_mdio_match[] = { MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); static struct of_platform_driver fsl_pq_mdio_driver = { - .name = "fsl-pq_mdio", + .driver = { + .name = "fsl-pq_mdio", + .owner = THIS_MODULE, + .of_match_table = fsl_pq_mdio_match, + }, .probe = fsl_pq_mdio_probe, .remove = fsl_pq_mdio_remove, - .match_table = fsl_pq_mdio_match, }; int __init fsl_pq_mdio_init(void) diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index c6791cd4ee05..1830f3199cb5 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -608,7 +608,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) int err = 0, i; struct net_device *dev = NULL; struct gfar_private *priv 
= NULL; - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct device_node *child = NULL; const u32 *stash; const u32 *stash_len; @@ -646,7 +646,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) return -ENOMEM; priv = netdev_priv(dev); - priv->node = ofdev->node; + priv->node = ofdev->dev.of_node; priv->ndev = dev; dev->num_tx_queues = num_tx_qs; @@ -939,7 +939,7 @@ static int gfar_probe(struct of_device *ofdev, priv = netdev_priv(dev); priv->ndev = dev; priv->ofdev = ofdev; - priv->node = ofdev->node; + priv->node = ofdev->dev.of_node; SET_NETDEV_DEV(dev, &ofdev->dev); spin_lock_init(&priv->bflock); @@ -3167,12 +3167,14 @@ MODULE_DEVICE_TABLE(of, gfar_match); /* Structure for a device driver */ static struct of_platform_driver gfar_driver = { - .name = "fsl-gianfar", - .match_table = gfar_match, - + .driver = { + .name = "fsl-gianfar", + .owner = THIS_MODULE, + .pm = GFAR_PM_OPS, + .of_match_table = gfar_match, + }, .probe = gfar_probe, .remove = gfar_remove, - .driver.pm = GFAR_PM_OPS, }; static int __init gfar_init(void) diff --git a/drivers/net/greth.c b/drivers/net/greth.c index fd491e409488..f37a4c143ddd 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c @@ -1499,7 +1499,8 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev if (i == 6) { const unsigned char *addr; int len; - addr = of_get_property(ofdev->node, "local-mac-address", &len); + addr = of_get_property(ofdev->dev.of_node, "local-mac-address", + &len); if (addr != NULL && len == 6) { for (i = 0; i < 6; i++) macaddr[i] = (unsigned int) addr[i]; diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 694132e04af6..4e7d1d0a2340 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1151,8 +1151,7 @@ static int __init yam_init_driver(void) dev = alloc_netdev(sizeof(struct yam_port), name, yam_setup); if (!dev) { - printk(KERN_ERR "yam: cannot allocate net device %s\n", - dev->name); + pr_err("yam: cannot allocate net device\n"); err = -ENOMEM; goto error; } diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 2484e9e6c1ed..b150c102ca5a 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -136,7 +136,8 @@ static inline void emac_report_timeout_error(struct emac_instance *dev, EMAC_FTR_440EP_PHY_CLK_FIX)) DBG(dev, "%s" NL, error); else if (net_ratelimit()) - printk(KERN_ERR "%s: %s\n", dev->ofdev->node->full_name, error); + printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name, + error); } /* EMAC PHY clock workaround: @@ -2185,7 +2186,7 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, strcpy(info->version, DRV_VERSION); info->fw_version[0] = '\0'; sprintf(info->bus_info, "PPC 4xx EMAC-%d %s", - dev->cell_index, dev->ofdev->node->full_name); + dev->cell_index, dev->ofdev->dev.of_node->full_name); info->regdump_len = emac_ethtool_get_regs_len(ndev); } @@ -2379,7 +2380,7 @@ static int __devinit emac_read_uint_prop(struct device_node *np, const char *nam static int __devinit emac_init_phy(struct emac_instance *dev) { - struct device_node *np = dev->ofdev->node; + struct device_node *np = dev->ofdev->dev.of_node; struct net_device *ndev = dev->ndev; u32 phy_map, adv; int i; @@ -2514,7 +2515,7 @@ static int __devinit emac_init_phy(struct emac_instance *dev) static int __devinit emac_init_config(struct emac_instance *dev) { - struct device_node *np = dev->ofdev->node; + struct device_node *np = 
dev->ofdev->dev.of_node; const void *p; unsigned int plen; const char *pm, *phy_modes[] = { @@ -2723,7 +2724,7 @@ static int __devinit emac_probe(struct of_device *ofdev, { struct net_device *ndev; struct emac_instance *dev; - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct device_node **blist = NULL; int err, i; @@ -2810,7 +2811,7 @@ static int __devinit emac_probe(struct of_device *ofdev, err = mal_register_commac(dev->mal, &dev->commac); if (err) { printk(KERN_ERR "%s: failed to register with mal %s!\n", - np->full_name, dev->mal_dev->node->full_name); + np->full_name, dev->mal_dev->dev.of_node->full_name); goto err_rel_deps; } dev->rx_skb_size = emac_rx_skb_size(ndev->mtu); @@ -2995,9 +2996,11 @@ static struct of_device_id emac_match[] = MODULE_DEVICE_TABLE(of, emac_match); static struct of_platform_driver emac_driver = { - .name = "emac", - .match_table = emac_match, - + .driver = { + .name = "emac", + .owner = THIS_MODULE, + .of_match_table = emac_match, + }, .probe = emac_probe, .remove = emac_remove, }; diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c index 775c850a425a..3995fafc1e08 100644 --- a/drivers/net/ibm_newemac/debug.c +++ b/drivers/net/ibm_newemac/debug.c @@ -33,7 +33,7 @@ static void emac_desc_dump(struct emac_instance *p) int i; printk("** EMAC %s TX BDs **\n" " tx_cnt = %d tx_slot = %d ack_slot = %d\n", - p->ofdev->node->full_name, + p->ofdev->dev.of_node->full_name, p->tx_cnt, p->tx_slot, p->ack_slot); for (i = 0; i < NUM_TX_BUFF / 2; ++i) printk @@ -49,7 +49,7 @@ static void emac_desc_dump(struct emac_instance *p) printk("** EMAC %s RX BDs **\n" " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n" " rx_sg_skb = 0x%p\n", - p->ofdev->node->full_name, + p->ofdev->dev.of_node->full_name, p->rx_slot, p->commac.flags, p->rx_skb_size, p->rx_sync_size, p->rx_sg_skb); for (i = 0; i < NUM_RX_BUFF / 2; ++i) @@ -77,7 +77,8 @@ static void emac_mac_dump(struct emac_instance *dev) "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n", - dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1), + dev->ofdev->dev.of_node->full_name, + in_be32(&p->mr0), in_be32(&p->mr1), in_be32(&p->tmr0), in_be32(&p->tmr1), in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), @@ -128,7 +129,7 @@ static void emac_mal_dump(struct mal_instance *mal) "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n" "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n" "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n", - mal->ofdev->node->full_name, + mal->ofdev->dev.of_node->full_name, get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR), get_mal_dcrn(mal, MAL_IER), get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR), diff --git a/drivers/net/ibm_newemac/debug.h b/drivers/net/ibm_newemac/debug.h index b631842ec8d0..e596c77ccdf7 100644 --- a/drivers/net/ibm_newemac/debug.h +++ b/drivers/net/ibm_newemac/debug.h @@ -53,8 +53,8 @@ extern void emac_dbg_dump_all(void); #endif -#define EMAC_DBG(dev, name, fmt, arg...) \ - printk(KERN_DEBUG #name "%s: " fmt, dev->ofdev->node->full_name, ## arg) +#define EMAC_DBG(d, name, fmt, arg...) \ + printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg) #if DBG_LEVEL > 0 # define DBG(d,f,x...) 
EMAC_DBG(d, emac, f, ##x) diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c index 5b3d94419fe6..fcff9e0bd382 100644 --- a/drivers/net/ibm_newemac/mal.c +++ b/drivers/net/ibm_newemac/mal.c @@ -538,11 +538,11 @@ static int __devinit mal_probe(struct of_device *ofdev, } mal->index = index; mal->ofdev = ofdev; - mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1; + mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1; MAL_DBG(mal, "probe" NL); - prop = of_get_property(ofdev->node, "num-tx-chans", NULL); + prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL); if (prop == NULL) { printk(KERN_ERR "mal%d: can't find MAL num-tx-chans property!\n", @@ -552,7 +552,7 @@ static int __devinit mal_probe(struct of_device *ofdev, } mal->num_tx_chans = prop[0]; - prop = of_get_property(ofdev->node, "num-rx-chans", NULL); + prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL); if (prop == NULL) { printk(KERN_ERR "mal%d: can't find MAL num-rx-chans property!\n", @@ -562,14 +562,14 @@ static int __devinit mal_probe(struct of_device *ofdev, } mal->num_rx_chans = prop[0]; - dcr_base = dcr_resource_start(ofdev->node, 0); + dcr_base = dcr_resource_start(ofdev->dev.of_node, 0); if (dcr_base == 0) { printk(KERN_ERR "mal%d: can't find DCR resource!\n", index); err = -ENODEV; goto fail; } - mal->dcr_host = dcr_map(ofdev->node, dcr_base, 0x100); + mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100); if (!DCR_MAP_OK(mal->dcr_host)) { printk(KERN_ERR "mal%d: failed to map DCRs !\n", index); @@ -577,28 +577,28 @@ static int __devinit mal_probe(struct of_device *ofdev, goto fail; } - if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) { + if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) { #if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \ defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR) mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | MAL_FTR_COMMON_ERR_INT); #else printk(KERN_ERR "%s: Support for 405EZ not enabled!\n", - ofdev->node->full_name); + ofdev->dev.of_node->full_name); err = -ENODEV; goto fail; #endif } - mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); - mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); - mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2); + mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); + mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1); + mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2); if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) { mal->txde_irq = mal->rxde_irq = mal->serr_irq; } else { - mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3); - mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4); + mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3); + mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4); } if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ || @@ -629,7 +629,7 @@ static int __devinit mal_probe(struct of_device *ofdev, /* Current Axon is not happy with priority being non-0, it can * deadlock, fix it up here */ - if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon")) + if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon")) cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10); /* Apply configuration */ @@ -701,7 +701,7 @@ static int __devinit mal_probe(struct of_device *ofdev, printk(KERN_INFO "MAL v%d %s, %d TX channels, %d RX channels\n", - mal->version, ofdev->node->full_name, + mal->version, ofdev->dev.of_node->full_name, mal->num_tx_chans, mal->num_rx_chans); 
/* Advertise this instance to the rest of the world */ @@ -790,9 +790,11 @@ static struct of_device_id mal_platform_match[] = }; static struct of_platform_driver mal_of_driver = { - .name = "mcmal", - .match_table = mal_platform_match, - + .driver = { + .name = "mcmal", + .owner = THIS_MODULE, + .of_match_table = mal_platform_match, + }, .probe = mal_probe, .remove = mal_remove, }; diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c index 5b90d34c8455..108919bcdf13 100644 --- a/drivers/net/ibm_newemac/rgmii.c +++ b/drivers/net/ibm_newemac/rgmii.c @@ -103,7 +103,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode) /* Check if we need to attach to a RGMII */ if (input < 0 || !rgmii_valid_mode(mode)) { printk(KERN_ERR "%s: unsupported settings !\n", - ofdev->node->full_name); + ofdev->dev.of_node->full_name); return -ENODEV; } @@ -113,7 +113,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode) out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); printk(KERN_NOTICE "%s: input %d in %s mode\n", - ofdev->node->full_name, input, rgmii_mode_name(mode)); + ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode)); ++dev->users; @@ -231,7 +231,7 @@ void *rgmii_dump_regs(struct of_device *ofdev, void *buf) static int __devinit rgmii_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct rgmii_instance *dev; struct resource regs; int rc; @@ -264,11 +264,11 @@ static int __devinit rgmii_probe(struct of_device *ofdev, } /* Check for RGMII flags */ - if (of_get_property(ofdev->node, "has-mdio", NULL)) + if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL)) dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; /* CAB lacks the right properties, fix this up */ - if (of_device_is_compatible(ofdev->node, "ibm,rgmii-axon")) + if (of_device_is_compatible(ofdev->dev.of_node, "ibm,rgmii-axon")) dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n", @@ -279,7 +279,7 @@ static int __devinit rgmii_probe(struct of_device *ofdev, printk(KERN_INFO "RGMII %s initialized with%s MDIO support\n", - ofdev->node->full_name, + ofdev->dev.of_node->full_name, (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? 
"" : "out"); wmb(); @@ -319,9 +319,11 @@ static struct of_device_id rgmii_match[] = }; static struct of_platform_driver rgmii_driver = { - .name = "emac-rgmii", - .match_table = rgmii_match, - + .driver = { + .name = "emac-rgmii", + .owner = THIS_MODULE, + .of_match_table = rgmii_match, + }, .probe = rgmii_probe, .remove = rgmii_remove, }; diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c index 30173a9fb557..044637144c43 100644 --- a/drivers/net/ibm_newemac/tah.c +++ b/drivers/net/ibm_newemac/tah.c @@ -57,7 +57,8 @@ void tah_reset(struct of_device *ofdev) --n; if (unlikely(!n)) - printk(KERN_ERR "%s: reset timeout\n", ofdev->node->full_name); + printk(KERN_ERR "%s: reset timeout\n", + ofdev->dev.of_node->full_name); /* 10KB TAH TX FIFO accomodates the max MTU of 9000 */ out_be32(&p->mr, @@ -89,7 +90,7 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf) static int __devinit tah_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct tah_instance *dev; struct resource regs; int rc; @@ -127,7 +128,7 @@ static int __devinit tah_probe(struct of_device *ofdev, tah_reset(ofdev); printk(KERN_INFO - "TAH %s initialized\n", ofdev->node->full_name); + "TAH %s initialized\n", ofdev->dev.of_node->full_name); wmb(); return 0; @@ -165,9 +166,11 @@ static struct of_device_id tah_match[] = }; static struct of_platform_driver tah_driver = { - .name = "emac-tah", - .match_table = tah_match, - + .driver = { + .name = "emac-tah", + .owner = THIS_MODULE, + .of_match_table = tah_match, + }, .probe = tah_probe, .remove = tah_remove, }; diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c index 1f038f808ab3..046dcd069c45 100644 --- a/drivers/net/ibm_newemac/zmii.c +++ b/drivers/net/ibm_newemac/zmii.c @@ -121,13 +121,14 @@ int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode) dev->mode = *mode; printk(KERN_NOTICE "%s: bridge in %s mode\n", - ofdev->node->full_name, zmii_mode_name(dev->mode)); + ofdev->dev.of_node->full_name, + zmii_mode_name(dev->mode)); } else { /* All inputs must use the same mode */ if (*mode != PHY_MODE_NA && *mode != dev->mode) { printk(KERN_ERR "%s: invalid mode %d specified for input %d\n", - ofdev->node->full_name, *mode, input); + ofdev->dev.of_node->full_name, *mode, input); mutex_unlock(&dev->lock); return -EINVAL; } @@ -233,7 +234,7 @@ void *zmii_dump_regs(struct of_device *ofdev, void *buf) static int __devinit zmii_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct zmii_instance *dev; struct resource regs; int rc; @@ -273,7 +274,7 @@ static int __devinit zmii_probe(struct of_device *ofdev, out_be32(&dev->base->fer, 0); printk(KERN_INFO - "ZMII %s initialized\n", ofdev->node->full_name); + "ZMII %s initialized\n", ofdev->dev.of_node->full_name); wmb(); dev_set_drvdata(&ofdev->dev, dev); @@ -312,9 +313,11 @@ static struct of_device_id zmii_match[] = }; static struct of_platform_driver zmii_driver = { - .name = "emac-zmii", - .match_table = zmii_match, - + .driver = { + .name = "emac-zmii", + .owner = THIS_MODULE, + .of_match_table = zmii_match, + }, .probe = zmii_probe, .remove = zmii_remove, }; diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index 911c082cee5a..f940dfa1f7f8 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -107,8 +107,12 @@ static int 
bfin_sir_set_speed(struct bfin_sir_port *port, int speed) case 57600: case 115200: - quot = (port->clk + (8 * speed)) / (16 * speed)\ - - ANOMALY_05000230; + /* + * IRDA is not affected by anomaly 05000230, so there is no + * need to tweak the divisor like the UART driver (which will + * slightly speed up the baud rate on us). + */ + quot = (port->clk + (8 * speed)) / (16 * speed); do { udelay(utime); diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index d0ea3d6dea95..ffae480587ae 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -360,6 +360,7 @@ struct ixgbe_adapter { u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) #define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) /* default to trying for four seconds */ #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) @@ -407,6 +408,8 @@ struct ixgbe_adapter { u16 eeprom_version; int node; + struct work_struct check_overtemp_task; + u32 interrupt_event; /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index f2b7ff44215b..9c02d6014cc4 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -1236,6 +1236,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = { .setup_link = &ixgbe_setup_phy_link_generic, .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, + .check_overtemp = &ixgbe_tn_check_overtemp, }; struct ixgbe_info ixgbe_82598_info = { diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index e9706eb8e4ff..a4e2901f2f08 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -2395,6 +2395,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = { .write_i2c_byte = &ixgbe_write_i2c_byte_generic, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, }; struct ixgbe_info ixgbe_82599_info = { diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 9551cbb7bf01..d571d101de08 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -108,6 +108,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), + board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, @@ -1618,6 +1620,48 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) } } +/** + * ixgbe_check_overtemp_task - worker thread to check over temperature + * @work: pointer to work_struct containing our data + **/ +static void ixgbe_check_overtemp_task(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, + struct ixgbe_adapter, + check_overtemp_task); + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: { + u32 autoneg; + bool link_up = false; + + if (hw->mac.ops.check_link) + hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + + if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || + (eicr & IXGBE_EICR_LSC)) + /* Check if this is due to overtemp */ + if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) + break; + } + return; + default: + if (!(eicr & IXGBE_EICR_GPI_SDP0)) + return; + break; + }
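The bfin_sir hunk at the top of this group drops the UART-derived divisor (which subtracted ANOMALY_05000230) in favor of plain round-to-nearest division: adding half the denominator, 8 * speed, before dividing by 16 * speed rounds clk / (16 * speed) to the nearest integer. A standalone sketch of the arithmetic, with a made-up clock frequency:

#include <stdio.h>

/* Round-to-nearest divisor for a 16x-oversampling SIR/UART port:
 * (clk + 8*speed) / (16*speed) is clk/(16*speed) rounded, not
 * truncated, which minimizes the baud-rate error. */
static unsigned int sir_quot(unsigned int clk, unsigned int speed)
{
    return (clk + 8 * speed) / (16 * speed);
}

int main(void)
{
    unsigned int clk = 133000000;   /* hypothetical system clock, Hz */
    unsigned int speeds[] = { 9600, 57600, 115200 };
    unsigned int i;

    for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++) {
        unsigned int q = sir_quot(clk, speeds[i]);
        /* baud rate the chosen divisor actually produces */
        double actual = (double)clk / (16.0 * q);
        printf("%6u baud -> quot %4u (actual %.1f)\n",
               speeds[i], q, actual);
    }
    return 0;
}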
+ DPRINTK(DRV, ERR, "Network adapter has been stopped because it " + "has overheated. Restart the computer. If the problem " + "persists, power off the system and replace the " + "adapter\n"); + /* write to clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); + } +} + static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; @@ -1689,6 +1733,10 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) if (hw->mac.type == ixgbe_mac_82599EB) { ixgbe_check_sfp_event(adapter, eicr); + adapter->interrupt_event = eicr; + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) + schedule_work(&adapter->check_overtemp_task); /* Handle Flow Director Full threshold interrupt */ if (eicr & IXGBE_EICR_FLOW_DIR) { @@ -2190,6 +2238,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) u32 mask; mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + mask |= IXGBE_EIMS_GPI_SDP0; if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) mask |= IXGBE_EIMS_GPI_SDP1; if (adapter->hw.mac.type == ixgbe_mac_82599EB) { @@ -2250,6 +2300,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_sfp_event(adapter, eicr); ixgbe_check_fan_failure(adapter, eicr); + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) + schedule_work(&adapter->check_overtemp_task); if (napi_schedule_prep(&(q_vector->napi))) { adapter->tx_ring[0]->total_packets = 0; @@ -3265,6 +3318,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); } + /* Enable thermal overheat sensor interrupt */ + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_SDP0_GPIEN; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + } + /* Enable fan failure interrupt if media type is copper */ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); @@ -3666,6 +3726,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) cancel_work_sync(&adapter->fdir_reinit_task); + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + cancel_work_sync(&adapter->check_overtemp_task); + /* disable transmits in the hardware now that interrupts are off */ for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i]->reg_idx; @@ -4645,6 +4708,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; if (dev->features & NETIF_F_NTUPLE) { /* Flow Director perfect filter enabled */ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; @@ -6561,7 +6626,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, } /* reset_hw fills in the perm_addr as well */ + hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); + hw->phy.reset_if_overtemp = false; if (err == IXGBE_ERR_SFP_NOT_PRESENT && hw->mac.type == ixgbe_mac_82598EB) { /* @@ -6730,6 +6797,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); + if
(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task); #ifdef CONFIG_IXGBE_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 22d21af14783..09e1911ff510 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -135,6 +135,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) **/ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) { + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) + return 0; + /* * Perform soft PHY reset to the PHY_XS. * This will cause a soft reset to the PHY @@ -1345,3 +1350,28 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, return status; } +/** + * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. + * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) +{ + s32 status = 0; + u16 phy_data = 0; + + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) + goto out; + + /* Check that the LASI temp alarm status was triggered */ + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, + MDIO_MMD_PMAPMD, &phy_data); + + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) + goto out; + + status = IXGBE_ERR_OVERTEMP; +out: + return status; +} diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index c9c545941407..ef4ba834c593 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -80,6 +80,8 @@ #define IXGBE_I2C_T_SU_STO 4 #define IXGBE_I2C_T_BUF 5 +#define IXGBE_TN_LASI_STATUS_REG 0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); @@ -106,6 +108,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 39b9be897439..2eb6e151016c 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -51,6 +51,7 @@ #define IXGBE_DEV_ID_82599_KX4 0x10F7 #define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 #define IXGBE_DEV_ID_82599_KR 0x1517 +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C #define IXGBE_DEV_ID_82599_CX4 0x10F9 #define IXGBE_DEV_ID_82599_SFP 0x10FB #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 @@ -2470,6 +2471,7 @@ struct ixgbe_phy_operations { s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + s32 (*check_overtemp)(struct ixgbe_hw *); }; struct ixgbe_eeprom_info { @@ -2518,6 +2520,7 @@ struct ixgbe_phy_info { enum ixgbe_smart_speed smart_speed; bool smart_speed_active; bool multispeed_fiber; + bool reset_if_overtemp; }; #include "ixgbe_mbx.h" @@ -2605,6 +2608,7 @@ struct ixgbe_info { #define IXGBE_ERR_FDIR_REINIT_FAILED -23 #define IXGBE_ERR_EEPROM_VERSION -24 #define IXGBE_ERR_NO_SPACE -25 +#define IXGBE_ERR_OVERTEMP -26 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
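The new ixgbe_tn_check_overtemp() above reduces to one MDIO read and a bit test: fetch LASI status register 0x9005 and check alarm bit 0x0008. Below is a userspace sketch of that mask-and-report pattern; the faked register read stands in for hw->phy.ops.read_reg, and the latched value is invented for the demo.

#include <stdint.h>
#include <stdio.h>

#define TN_LASI_STATUS_REG  0x9005  /* mirrors IXGBE_TN_LASI_STATUS_REG */
#define TN_LASI_TEMP_ALARM  0x0008  /* mirrors ..._TEMP_ALARM */
#define ERR_OVERTEMP        (-26)   /* mirrors IXGBE_ERR_OVERTEMP */

/* Fake MDIO read: pretend the PHY has latched a temperature alarm. */
static uint16_t mdio_read(uint32_t reg)
{
    return reg == TN_LASI_STATUS_REG ? TN_LASI_TEMP_ALARM : 0;
}

/* Same shape as the driver's check: read LASI status, test the alarm
 * bit, and report an error code only when the alarm is latched. */
static int check_overtemp(void)
{
    uint16_t status = mdio_read(TN_LASI_STATUS_REG);

    if (!(status & TN_LASI_TEMP_ALARM))
        return 0;
    return ERR_OVERTEMP;
}

int main(void)
{
    printf("check_overtemp() = %d\n", check_overtemp());
    return 0;
}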
#endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h index c03358434acb..522abe2ff25a 100644 --- a/drivers/net/ll_temac.h +++ b/drivers/net/ll_temac.h @@ -295,6 +295,10 @@ This option defaults to enabled (set) */ #define MULTICAST_CAM_TABLE_NUM 4 +/* TEMAC Synthesis features */ +#define TEMAC_FEATURE_RX_CSUM (1 << 0) +#define TEMAC_FEATURE_TX_CSUM (1 << 1) + /* TX/RX CURDESC_PTR points to first descriptor */ /* TX/RX TAILDESC_PTR points to last descriptor in linked list */ @@ -353,6 +357,7 @@ struct temac_local { struct mutex indirect_mutex; u32 options; /* Current options word */ int last_link; + unsigned int temac_features; /* Buffer descriptors */ struct cdmac_bd *tx_bd_v; diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index b59b24d667f0..52dcc8495647 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -245,7 +245,7 @@ static int temac_dma_bd_init(struct net_device *ndev) CHNL_CTRL_IRQ_COAL_EN); /* 0x10220483 */ /* 0x00100483 */ - lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 | + lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 | CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN | @@ -574,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev) if (cur_p->app4) dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); cur_p->app0 = 0; + cur_p->app1 = 0; + cur_p->app2 = 0; + cur_p->app3 = 0; + cur_p->app4 = 0; ndev->stats.tx_packets++; ndev->stats.tx_bytes += cur_p->len; @@ -589,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev) netif_wake_queue(ndev); } +static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) +{ + struct cdmac_bd *cur_p; + int tail; + + tail = lp->tx_bd_tail; + cur_p = &lp->tx_bd_v[tail]; + + do { + if (cur_p->app0) + return NETDEV_TX_BUSY; + + tail++; + if (tail >= TX_BD_NUM) + tail = 0; + + cur_p = &lp->tx_bd_v[tail]; + num_frag--; + } while (num_frag >= 0); + + return 0; +} + static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); @@ -603,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; - if (cur_p->app0 & STS_CTRL_APP0_CMPLT) { + if (temac_check_tx_bd_space(lp, num_frag)) { if (!netif_queue_stopped(ndev)) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; @@ -613,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) cur_p->app0 = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { - const struct iphdr *ip = ip_hdr(skb); - int length = 0, start = 0, insert = 0; - - switch (ip->protocol) { - case IPPROTO_TCP: - start = sizeof(struct iphdr) + ETH_HLEN; - insert = sizeof(struct iphdr) + ETH_HLEN + 16; - length = ip->tot_len - sizeof(struct iphdr); - break; - case IPPROTO_UDP: - start = sizeof(struct iphdr) + ETH_HLEN; - insert = sizeof(struct iphdr) + ETH_HLEN + 6; - length = ip->tot_len - sizeof(struct iphdr); - break; - default: - break; - } - cur_p->app1 = ((start << 16) | insert); - cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr, - length, ip->protocol, 0); - skb->data[insert] = 0; - skb->data[insert + 1] = 0; + unsigned int csum_start_off = skb_transport_offset(skb); + unsigned int csum_index_off = csum_start_off + skb->csum_offset; + + cur_p->app0 |= 1; /* TX Checksum Enabled */ + cur_p->app1 = (csum_start_off << 16) | csum_index_off; + cur_p->app2 = 0; /* initial checksum seed */ } + cur_p->app0 |= 
STS_CTRL_APP0_SOP; cur_p->len = skb_headlen(skb); cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, @@ -699,6 +711,15 @@ static void ll_temac_recv(struct net_device *ndev) skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; + /* if we're doing rx csum offload, set it up */ + if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && + (skb->protocol == __constant_htons(ETH_P_IP)) && + (skb->len > 64)) { + + skb->csum = cur_p->app3 & 0xFFFF; + skb->ip_summed = CHECKSUM_COMPLETE; + } + netif_rx(skb); ndev->stats.rx_packets++; @@ -883,6 +904,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) struct temac_local *lp; struct net_device *ndev; const void *addr; + __be32 *p; int size, rc = 0; /* Init network device structure */ @@ -920,14 +942,26 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) mutex_init(&lp->indirect_mutex); /* map device registers */ - lp->regs = of_iomap(op->node, 0); + lp->regs = of_iomap(op->dev.of_node, 0); if (!lp->regs) { dev_err(&op->dev, "could not map temac regs.\n"); goto nodev; } + /* Setup checksum offload, but default to off if not specified */ + lp->temac_features = 0; + p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL); + if (p && be32_to_cpu(*p)) { + lp->temac_features |= TEMAC_FEATURE_TX_CSUM; + /* Can checksum TCP/UDP over IPv4. */ + ndev->features |= NETIF_F_IP_CSUM; + } + p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); + if (p && be32_to_cpu(*p)) + lp->temac_features |= TEMAC_FEATURE_RX_CSUM; + /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ - np = of_parse_phandle(op->node, "llink-connected", 0); + np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); if (!np) { dev_err(&op->dev, "could not find DMA node\n"); goto nodev; @@ -950,7 +984,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) lp->rx_irq = irq_of_parse_and_map(np, 0); lp->tx_irq = irq_of_parse_and_map(np, 1); - if (!lp->rx_irq || !lp->tx_irq) { + if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { dev_err(&op->dev, "could not determine irqs\n"); rc = -ENOMEM; goto nodev; @@ -959,7 +993,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) of_node_put(np); /* Finished with the DMA node; drop the reference */ /* Retrieve the MAC address */ - addr = of_get_property(op->node, "local-mac-address", &size); + addr = of_get_property(op->dev.of_node, "local-mac-address", &size); if ((!addr) || (size != 6)) { dev_err(&op->dev, "could not find MAC address\n"); rc = -ENODEV; @@ -967,11 +1001,11 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) } temac_set_mac_address(ndev, (void *)addr); - rc = temac_mdio_setup(lp, op->node); + rc = temac_mdio_setup(lp, op->dev.of_node); if (rc) dev_warn(&op->dev, "error registering MDIO bus\n"); - lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0); + lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); if (lp->phy_node) dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np); @@ -1024,12 +1058,12 @@ static struct of_device_id temac_of_match[] __devinitdata = { MODULE_DEVICE_TABLE(of, temac_of_match); static struct of_platform_driver temac_of_driver = { - .match_table = temac_of_match, .probe = temac_of_probe, .remove = __devexit_p(temac_of_remove), .driver = { .owner = THIS_MODULE, .name = "xilinx_temac", + .of_match_table = temac_of_match, }, }; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 
4e238afab4a3..87e8d4cb4057 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -634,11 +634,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, err = register_netdevice(dev); if (err < 0) - return err; + goto destroy_port; list_add_tail(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); + return 0; + +destroy_port: + if (list_empty(&port->vlans)) + macvlan_port_destroy(lowerdev); + + return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c index 57288ca1395f..b07e4dee80aa 100644 --- a/drivers/net/mlx4/icm.c +++ b/drivers/net/mlx4/icm.c @@ -163,28 +163,30 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], cur_order, gfp_mask); - if (!ret) { - ++chunk->npages; - - if (coherent) - ++chunk->nsg; - else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { - chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, - chunk->npages, - PCI_DMA_BIDIRECTIONAL); + if (ret) { + if (--cur_order < 0) + goto fail; + else + continue; + } - if (chunk->nsg <= 0) - goto fail; + ++chunk->npages; - chunk = NULL; - } + if (coherent) + ++chunk->nsg; + else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { + chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, + chunk->npages, + PCI_DMA_BIDIRECTIONAL); - npages -= 1 << cur_order; - } else { - --cur_order; - if (cur_order < 0) + if (chunk->nsg <= 0) goto fail; } + + if (chunk->npages == MLX4_ICM_CHUNK_LEN) + chunk = NULL; + + npages -= 1 << cur_order; } if (!coherent && chunk) { diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index 3898108f98ce..1a57c3da1f49 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -928,7 +928,7 @@ static const struct net_device_ops myri_ops = { static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; static unsigned version_printed; struct net_device *dev; struct myri_eth *mp; @@ -1161,8 +1161,11 @@ static const struct of_device_id myri_sbus_match[] = { MODULE_DEVICE_TABLE(of, myri_sbus_match); static struct of_platform_driver myri_sbus_driver = { - .name = "myri", - .match_table = myri_sbus_match, + .driver = { + .name = "myri", + .owner = THIS_MODULE, + .of_match_table = myri_sbus_match, + }, .probe = myri_sbus_probe, .remove = __devexit_p(myri_sbus_remove), }; diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 30abb4e436f1..63e8e3893bd6 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -9115,7 +9115,7 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) const u32 *int_prop; int i; - int_prop = of_get_property(op->node, "interrupts", NULL); + int_prop = of_get_property(op->dev.of_node, "interrupts", NULL); if (!int_prop) return -ENODEV; @@ -9266,7 +9266,7 @@ static int __devinit niu_get_of_props(struct niu *np) int prop_len; if (np->parent->plat_type == PLAT_TYPE_NIU) - dp = np->op->node; + dp = np->op->dev.of_node; else dp = pci_device_to_OF_node(np->pdev); @@ -10083,10 +10083,10 @@ static int __devinit niu_of_probe(struct of_device *op, niu_driver_version(); - reg = of_get_property(op->node, "reg", NULL); + reg = of_get_property(op->dev.of_node, "reg", NULL); if (!reg) { dev_err(&op->dev, "%s: No 'reg' property, aborting\n", - op->node->full_name); + op->dev.of_node->full_name); return -ENODEV; } @@ -10099,7 +10099,7 @@ static int __devinit niu_of_probe(struct of_device *op, np = 
netdev_priv(dev); memset(&parent_id, 0, sizeof(parent_id)); - parent_id.of = of_get_parent(op->node); + parent_id.of = of_get_parent(op->dev.of_node); np->parent = niu_get_parent(np, &parent_id, PLAT_TYPE_NIU); @@ -10234,8 +10234,11 @@ static const struct of_device_id niu_match[] = { MODULE_DEVICE_TABLE(of, niu_match); static struct of_platform_driver niu_of_driver = { - .name = "niu", - .match_table = niu_match, + .driver = { + .name = "niu", + .owner = THIS_MODULE, + .of_match_table = niu_match, + }, .probe = niu_of_probe, .remove = __devexit_p(niu_of_remove), }; diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 35897134a5dd..fc5fef2a8175 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -199,12 +199,12 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, if (!pdata) return -ENOMEM; - ret = of_get_gpio(ofdev->node, 0); + ret = of_get_gpio(ofdev->dev.of_node, 0); if (ret < 0) goto out_free; pdata->mdc = ret; - ret = of_get_gpio(ofdev->node, 1); + ret = of_get_gpio(ofdev->dev.of_node, 1); if (ret < 0) goto out_free; pdata->mdio = ret; @@ -213,7 +213,7 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev, if (!new_bus) goto out_free; - ret = of_mdiobus_register(new_bus, ofdev->node); + ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) mdio_gpio_bus_deinit(&ofdev->dev); @@ -241,8 +241,11 @@ static struct of_device_id mdio_ofgpio_match[] = { MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); static struct of_platform_driver mdio_ofgpio_driver = { - .name = "mdio-gpio", - .match_table = mdio_ofgpio_match, + .driver = { + .name = "mdio-gpio", + .owner = THIS_MODULE, + .of_match_table = mdio_ofgpio_match, + }, .probe = mdio_ofgpio_probe, .remove = __devexit_p(mdio_ofgpio_remove), }; diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 5441688daba7..c5f8eb102bf7 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -2926,5 +2926,5 @@ EXPORT_SYMBOL(ppp_output_wakeup); EXPORT_SYMBOL(ppp_register_compressor); EXPORT_SYMBOL(ppp_unregister_compressor); MODULE_LICENSE("GPL"); -MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR); -MODULE_ALIAS("/dev/ppp"); +MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); +MODULE_ALIAS("devname:ppp"); diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index b1b93ff2351f..805b64d1e893 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -289,6 +289,7 @@ static void pppoe_flush_dev(struct net_device *dev) struct pppoe_net *pn; int i; + pn = pppoe_pernet(dev_net(dev)); write_lock_bh(&pn->hash_lock); for (i = 0; i < PPPOE_HASH_SIZE; i++) { struct pppox_sock *po = pn->hash_table[i]; diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 586ed0915a29..501a55ffce57 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -1294,6 +1294,9 @@ static int sh_mdio_release(struct net_device *ndev) /* remove mdio bus info from net_device */ dev_set_drvdata(&ndev->dev, NULL); + /* free interrupts memory */ + kfree(bus->irq); + /* free bitbang info */ free_mdio_bitbang(bus); diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index 4591fe9bf0b9..367e96f317d4 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c @@ -1131,8 +1131,8 @@ static int __devinit bigmac_ether_init(struct of_device *op, goto fail_and_cleanup; /* Get supported SBUS burst sizes. 
*/ - bsizes = of_getintprop_default(qec_op->node, "burst-sizes", 0xff); - bsizes_more = of_getintprop_default(qec_op->node, "burst-sizes", 0xff); + bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); + bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); bsizes &= 0xff; if (bsizes_more != 0xff) @@ -1184,7 +1184,7 @@ static int __devinit bigmac_ether_init(struct of_device *op, } /* Get the board revision of this BigMAC. */ - bp->board_rev = of_getintprop_default(bp->bigmac_op->node, + bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, "board-version", 1); /* Init auto-negotiation timer state. */ @@ -1290,8 +1290,11 @@ static const struct of_device_id bigmac_sbus_match[] = { MODULE_DEVICE_TABLE(of, bigmac_sbus_match); static struct of_platform_driver bigmac_sbus_driver = { - .name = "sunbmac", - .match_table = bigmac_sbus_match, + .driver = { + .name = "sunbmac", + .owner = THIS_MODULE, + .of_match_table = bigmac_sbus_match, + }, .probe = bigmac_sbus_probe, .remove = __devexit_p(bigmac_sbus_remove), }; diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 915c5909c7a8..3d9650b8d38f 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -2481,7 +2481,7 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info else { const struct linux_prom_registers *regs; struct of_device *op = hp->happy_dev; - regs = of_get_property(op->node, "regs", NULL); + regs = of_get_property(op->dev.of_node, "regs", NULL); if (regs) sprintf(info->bus_info, "SBUS:%d", regs->which_io); @@ -2641,14 +2641,14 @@ static const struct net_device_ops hme_netdev_ops = { #ifdef CONFIG_SBUS static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe) { - struct device_node *dp = op->node, *sbus_dp; + struct device_node *dp = op->dev.of_node, *sbus_dp; struct quattro *qp = NULL; struct happy_meal *hp; struct net_device *dev; int i, qfe_slot = -1; int err = -ENODEV; - sbus_dp = to_of_device(op->dev.parent)->node; + sbus_dp = to_of_device(op->dev.parent)->dev.of_node; /* We can match PCI devices too, do not accept those here. 
*/ if (strcmp(sbus_dp->name, "sbus")) @@ -3237,7 +3237,7 @@ static void happy_meal_pci_exit(void) #ifdef CONFIG_SBUS static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; const char *model = of_get_property(dp, "model", NULL); int is_qfe = (match->data != NULL); @@ -3291,8 +3291,11 @@ static const struct of_device_id hme_sbus_match[] = { MODULE_DEVICE_TABLE(of, hme_sbus_match); static struct of_platform_driver hme_sbus_driver = { - .name = "hme", - .match_table = hme_sbus_match, + .driver = { + .name = "hme", + .owner = THIS_MODULE, + .of_match_table = hme_sbus_match, + }, .probe = hme_sbus_probe, .remove = __devexit_p(hme_sbus_remove), }; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 386af7bbe678..7d9c33dd9d1a 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -1323,7 +1323,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op, struct of_device *ledma, struct of_device *lebuffer) { - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; static unsigned version_printed; struct lance_private *lp; struct net_device *dev; @@ -1410,7 +1410,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op, lp->burst_sizes = 0; if (lp->ledma) { - struct device_node *ledma_dp = ledma->node; + struct device_node *ledma_dp = ledma->dev.of_node; struct device_node *sbus_dp; unsigned int sbmask; const char *prop; @@ -1506,7 +1506,7 @@ fail: static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_device_id *match) { struct of_device *parent = to_of_device(op->dev.parent); - struct device_node *parent_dp = parent->node; + struct device_node *parent_dp = parent->dev.of_node; int err; if (!strcmp(parent_dp->name, "ledma")) { @@ -1545,8 +1545,11 @@ static const struct of_device_id sunlance_sbus_match[] = { MODULE_DEVICE_TABLE(of, sunlance_sbus_match); static struct of_platform_driver sunlance_sbus_driver = { - .name = "sunlance", - .match_table = sunlance_sbus_match, + .driver = { + .name = "sunlance", + .owner = THIS_MODULE, + .of_match_table = sunlance_sbus_match, + }, .probe = sunlance_sbus_probe, .remove = __devexit_p(sunlance_sbus_remove), }; diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index a7542d25c845..72b579c8d812 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c @@ -695,7 +695,7 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strcpy(info->version, "3.0"); op = qep->op; - regs = of_get_property(op->node, "reg", NULL); + regs = of_get_property(op->dev.of_node, "reg", NULL); if (regs) sprintf(info->bus_info, "SBUS:%d", regs->which_io); @@ -799,7 +799,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child) if (qec_global_reset(qecp->gregs)) goto fail; - qecp->qec_bursts = qec_get_burst(op->node); + qecp->qec_bursts = qec_get_burst(op->dev.of_node); qec_init_once(qecp, op); @@ -857,7 +857,7 @@ static int __devinit qec_ether_init(struct of_device *op) res = -ENODEV; - i = of_getintprop_default(op->node, "channel#", -1); + i = of_getintprop_default(op->dev.of_node, "channel#", -1); if (i == -1) goto fail; qe->channel = i; @@ -977,8 +977,11 @@ static const struct of_device_id qec_sbus_match[] = { MODULE_DEVICE_TABLE(of, qec_sbus_match); static struct of_platform_driver qec_sbus_driver = { - .name = "qec", - .match_table = qec_sbus_match, + .driver = { + .name = "qec", + .owner = THIS_MODULE, + .of_match_table = 
qec_sbus_match, + }, .probe = qec_sbus_probe, .remove = __devexit_p(qec_sbus_remove), }; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 97b25533e5fb..6ad6fe706312 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -526,6 +526,8 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun, struct sk_buff *skb; int err; + sock_update_classid(sk); + /* Under a page? Don't bother with paged skb. */ if (prepad + len < PAGE_SIZE || !linear) linear = len; @@ -1649,3 +1651,4 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(TUN_MINOR); +MODULE_ALIAS("devname:net/tun"); diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 932602db54b3..4a34833b85dd 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -3719,7 +3719,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = { static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match) { struct device *device = &ofdev->dev; - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct net_device *dev = NULL; struct ucc_geth_private *ugeth = NULL; struct ucc_geth_info *ug_info; @@ -3963,8 +3963,11 @@ static struct of_device_id ucc_geth_match[] = { MODULE_DEVICE_TABLE(of, ucc_geth_match); static struct of_platform_driver ucc_geth_driver = { - .name = DRV_NAME, - .match_table = ucc_geth_match, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = ucc_geth_match, + }, .probe = ucc_geth_probe, .remove = ucc_geth_remove, .suspend = ucc_geth_suspend, diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 31b73310ec77..1f802e90474c 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -322,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb) size = (u16) (header & 0x0000ffff); if ((skb->len) - ((size + 1) & 0xfffe) == 0) { - u8 alignment = (u32)skb->data & 0x3; + u8 alignment = (unsigned long)skb->data & 0x3; if (alignment != 0x2) { /* * not 16bit aligned so use the room provided by @@ -351,7 +351,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } ax_skb = skb_clone(skb, GFP_ATOMIC); if (ax_skb) { - u8 alignment = (u32)packet & 0x3; + u8 alignment = (unsigned long)packet & 0x3; ax_skb->len = size; if (alignment != 0x2) { diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 9964df199511..0a3c41faea9c 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -475,6 +475,9 @@ static const struct usb_device_id hso_ids[] = { {USB_DEVICE(0x0af0, 0x8302)}, {USB_DEVICE(0x0af0, 0x8304)}, {USB_DEVICE(0x0af0, 0x8400)}, + {USB_DEVICE(0x0af0, 0x8600)}, + {USB_DEVICE(0x0af0, 0x8800)}, + {USB_DEVICE(0x0af0, 0x8900)}, {USB_DEVICE(0x0af0, 0xd035)}, {USB_DEVICE(0x0af0, 0xd055)}, {USB_DEVICE(0x0af0, 0xd155)}, diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 6537593fae66..8cc9e319f435 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -1027,12 +1027,12 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx, ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; spin_lock_irqsave(&i2400m->rx_lock, flags); - roq = &i2400m->rx_roq[ro_cin]; - if (roq == NULL) { + if (i2400m->rx_roq == NULL) { kfree_skb(skb); /* rx_roq is already destroyed */ spin_unlock_irqrestore(&i2400m->rx_lock, flags); goto error; } + roq = &i2400m->rx_roq[ro_cin]; kref_get(&i2400m->rx_roq_refcount); spin_unlock_irqrestore(&i2400m->rx_lock, 
flags); diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index a441aad922c2..3b7ab20a5c54 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -5162,13 +5162,6 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file) enable_MAC(ai, 1); } -static inline u8 hexVal(char c) { - if (c>='0' && c<='9') return c -= '0'; - if (c>='a' && c<='f') return c -= 'a'-10; - if (c>='A' && c<='F') return c -= 'A'-10; - return 0; -} - static void proc_APList_on_close( struct inode *inode, struct file *file ) { struct proc_data *data = (struct proc_data *)file->private_data; struct proc_dir_entry *dp = PDE(inode); @@ -5188,11 +5181,11 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) { switch(j%3) { case 0: APList_rid.ap[i][j/3]= - hexVal(data->wbuffer[j+i*6*3])<<4; + hex_to_bin(data->wbuffer[j+i*6*3])<<4; break; case 1: APList_rid.ap[i][j/3]|= - hexVal(data->wbuffer[j+i*6*3]); + hex_to_bin(data->wbuffer[j+i*6*3]); break; } } @@ -5340,10 +5333,10 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) { for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) { switch(i%3) { case 0: - key[i/3] = hexVal(data->wbuffer[i+j])<<4; + key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4; break; case 1: - key[i/3] |= hexVal(data->wbuffer[i+j]); + key[i/3] |= hex_to_bin(data->wbuffer[i+j]); break; } } diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 5f04cf38a5bc..cc6d41dec332 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1214,6 +1214,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) struct ath5k_hw *ah = sc->ah; struct sk_buff *skb = bf->skb; struct ath5k_desc *ds; + int ret; if (!skb) { skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); @@ -1240,9 +1241,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) ds = bf->desc; ds->ds_link = bf->daddr; /* link to self */ ds->ds_data = bf->skbaddr; - ah->ah_setup_rx_desc(ah, ds, - skb_tailroom(skb), /* buffer size */ - 0); + ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); + if (ret) + return ret; if (sc->rxlink != NULL) *sc->rxlink = bf->daddr; diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index c8a4558f79ba..f43d85a302c4 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -76,22 +76,13 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp, ds = bf->bf_desc; flags = ATH9K_TXDESC_NOACK; - if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || - (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) && - (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { - ds->ds_link = bf->bf_daddr; /* self-linked */ - flags |= ATH9K_TXDESC_VEOL; - /* Let hardware handle antenna switching. */ - antenna = 0; - } else { - ds->ds_link = 0; - /* - * Switch antenna every beacon. - * Should only switch every beacon period, not for every SWBA - * XXX assumes two antennae - */ - antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1); - } + ds->ds_link = 0; + /* + * Switch antenna every beacon. + * Should only switch every beacon period, not for every SWBA + * XXX assumes two antennae + */ + antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 
2 : 1); sband = &sc->sbands[common->hw->conf.channel->band]; rate = sband->bitrates[rateidx].hw_value; @@ -215,36 +206,6 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, return bf; } -/* - * Startup beacon transmission for adhoc mode when they are sent entirely - * by the hardware using the self-linked descriptor + veol trick. -*/ -static void ath_beacon_start_adhoc(struct ath_softc *sc, - struct ieee80211_vif *vif) -{ - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ath_buf *bf; - struct ath_vif *avp; - struct sk_buff *skb; - - avp = (void *)vif->drv_priv; - - if (avp->av_bcbuf == NULL) - return; - - bf = avp->av_bcbuf; - skb = bf->bf_mpdu; - - ath_beacon_setup(sc, avp, bf, 0); - - /* NB: caller is known to have already stopped tx dma */ - ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr); - ath9k_hw_txstart(ah, sc->beacon.beaconq); - ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n", - sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc); -} - int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) { struct ath_softc *sc = aphy->sc; @@ -265,7 +226,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) list_del(&avp->av_bcbuf->list); if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || - !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { + sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC || + sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) { int slot; /* * Assign the vif to a beacon xmit slot. As @@ -274,17 +236,11 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) avp->av_bslot = 0; for (slot = 0; slot < ATH_BCBUF; slot++) if (sc->beacon.bslot[slot] == NULL) { - /* - * XXX hack, space out slots to better - * deal with misses - */ - if (slot+1 < ATH_BCBUF && - sc->beacon.bslot[slot+1] == NULL) { - avp->av_bslot = slot+1; - break; - } avp->av_bslot = slot; + /* NB: keep looking for a double slot */ + if (slot == 0 || !sc->beacon.bslot[slot-1]) + break; } BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL); sc->beacon.bslot[avp->av_bslot] = vif; @@ -721,8 +677,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc, * self-linked tx descriptor and let the hardware deal with things. 
*/ intval |= ATH9K_BEACON_ENA; - if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) - ah->imask |= ATH9K_INT_SWBA; + ah->imask |= ATH9K_INT_SWBA; ath_beaconq_config(sc); @@ -732,10 +687,6 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc, ath9k_beacon_init(sc, nexttbtt, intval); sc->beacon.bmisscnt = 0; ath9k_hw_set_interrupts(ah, ah->imask); - - /* FIXME: Handle properly when vif is NULL */ - if (vif && ah->caps.hw_caps & ATH9K_HW_CAP_VEOL) - ath_beacon_start_adhoc(sc, vif); } void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 46dc41a16faa..77b359162d6c 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -107,12 +107,14 @@ static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev, static void hif_usb_tx_cb(struct urb *urb) { struct tx_buf *tx_buf = (struct tx_buf *) urb->context; - struct hif_device_usb *hif_dev = tx_buf->hif_dev; + struct hif_device_usb *hif_dev; struct sk_buff *skb; - if (!hif_dev || !tx_buf) + if (!tx_buf || !tx_buf->hif_dev) return; + hif_dev = tx_buf->hif_dev; + switch (urb->status) { case 0: break; @@ -607,6 +609,10 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) return 0; err: + if (tx_buf) { + kfree(tx_buf->buf); + kfree(tx_buf); + } ath9k_hif_usb_dealloc_tx_urbs(hif_dev); return -ENOMEM; } diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index ad556aa8da39..c251603ab032 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -23,6 +23,7 @@ #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/leds.h> +#include <linux/slab.h> #include <net/mac80211.h> #include "common.h" diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 893b552981a0..abfa0493236f 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -752,7 +752,6 @@ static int ath_key_config(struct ath_common *common, struct ath_hw *ah = common->ah; struct ath9k_keyval hk; const u8 *mac = NULL; - u8 gmac[ETH_ALEN]; int ret = 0; int idx; @@ -776,30 +775,9 @@ static int ath_key_config(struct ath_common *common, memcpy(hk.kv_val, key->key, key->keylen); if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { - - if (key->ap_addr) { - /* - * Group keys on hardware that supports multicast frame - * key search use a mac that is the sender's address with - * the high bit set instead of the app-specified address. - */ - memcpy(gmac, key->ap_addr, ETH_ALEN); - gmac[0] |= 0x80; - mac = gmac; - - if (key->alg == ALG_TKIP) - idx = ath_reserve_key_cache_slot_tkip(common); - else - idx = ath_reserve_key_cache_slot(common); - if (idx < 0) - mac = NULL; /* no free key cache entries */ - } - - if (!mac) { - /* For now, use the default keys for broadcast keys. This may - * need to change with virtual interfaces. */ - idx = key->keyidx; - } + /* For now, use the default keys for broadcast keys. This may + * need to change with virtual interfaces. 
*/ + idx = key->keyidx; } else if (key->keyidx) { if (WARN_ON(!sta)) return -EOPNOTSUPP; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 257b10ba6f57..1ec836cf1c0d 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -28,7 +28,6 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ - { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ { 0 } }; diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index ba139132c85f..ca6065b71b46 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -19,6 +19,12 @@ #define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) +static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) +{ + return sc->ps_enabled && + (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); +} + static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, struct ieee80211_hdr *hdr) { @@ -616,8 +622,8 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb) hdr = (struct ieee80211_hdr *)skb->data; /* Process Beacon and CAB receive in PS state */ - if ((sc->ps_flags & PS_WAIT_FOR_BEACON) && - ieee80211_is_beacon(hdr->frame_control)) + if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) + && ieee80211_is_beacon(hdr->frame_control)) ath_rx_ps_beacon(sc, skb); else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && (ieee80211_is_data(hdr->frame_control) || @@ -932,9 +938,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) sc->rx.rxotherant = 0; } - if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON | - PS_WAIT_FOR_CAB | - PS_WAIT_FOR_PSPOLL_DATA))) + if (unlikely(ath9k_check_auto_sleep(sc) || + (sc->ps_flags & (PS_WAIT_FOR_BEACON | + PS_WAIT_FOR_CAB | + PS_WAIT_FOR_PSPOLL_DATA)))) ath_rx_ps(sc, skb); ath_rx_send_to_mac80211(hw, sc, skb, rxs); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c index a273e373b7b0..c92b2c0cbd91 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/sched.h> +#include <linux/gfp.h> #include <net/mac80211.h> #include "iwl-dev.h" diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 107e173112f6..5d3f51ff2f0d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -376,6 +376,11 @@ void iwl_bg_start_internal_scan(struct work_struct *work) mutex_lock(&priv->mutex); + if (priv->is_internal_short_scan == true) { + IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); + goto unlock; + } + if (!iwl_is_ready_rf(priv)) { IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); goto unlock; @@ -497,17 +502,27 @@ void iwl_bg_scan_completed(struct work_struct *work) { struct iwl_priv *priv = container_of(work, struct iwl_priv, scan_completed); + bool internal = false; IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); cancel_delayed_work(&priv->scan_check); - if (!priv->is_internal_short_scan) - ieee80211_scan_completed(priv->hw, false); - else { + mutex_lock(&priv->mutex); + if (priv->is_internal_short_scan) { priv->is_internal_short_scan = false; IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); + internal = true; } 
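The iwl-scan.c rework in progress here is about lock scope: priv->is_internal_short_scan is sampled and cleared under priv->mutex, but ieee80211_scan_completed() must run after the unlock, because mac80211 can call back into the driver and try to take the same mutex. A pthread sketch of the copy-under-lock, call-outside-lock pattern follows (function names are invented; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool internal_scan;  /* state the mutex protects */

/* Stand-in for ieee80211_scan_completed(): it may re-enter code that
 * takes 'lock' again, so it must be called with the lock dropped. */
static void scan_completed_cb(void)
{
    pthread_mutex_lock(&lock);
    /* ... re-entrant driver work happens here ... */
    pthread_mutex_unlock(&lock);
}

static void bg_scan_completed(void)
{
    bool internal;

    /* Sample and clear the flag under the lock... */
    pthread_mutex_lock(&lock);
    internal = internal_scan;
    internal_scan = false;
    pthread_mutex_unlock(&lock);

    /* ...then invoke the external callback outside it, so its
     * re-entry cannot deadlock on 'lock'. */
    if (!internal)
        scan_completed_cb();
}

int main(void)
{
    bg_scan_completed();
    printf("completed without self-deadlock\n");
    return 0;
}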
+ mutex_unlock(&priv->mutex); + + /* + * Do not hold mutex here since this will cause mac80211 to call + * into driver again into functions that will attempt to take + * mutex. + */ + if (!internal) + ieee80211_scan_completed(priv->hw, false); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 85ed235ac901..83a26361a9b5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -431,7 +431,7 @@ int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs, struct iwl_link_quality_cmd *link_cmd; unsigned long flags; - if (*sta_id_r) + if (sta_id_r) *sta_id_r = IWL_INVALID_STATION; ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id); diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 2d2890878dea..4bd61ee627c0 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2572,14 +2572,18 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) { - union iwreq_data evt; + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); - netif_carrier_off(usbdev->net); + if (priv->connected) { + priv->connected = false; + memset(priv->bssid, 0, ETH_ALEN); + + deauthenticate(usbdev); - evt.data.flags = 0; - evt.data.length = 0; - memset(evt.ap_addr.sa_data, 0, ETH_ALEN); - wireless_send_event(usbdev->net, SIOCGIWAP, &evt, NULL); + cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL); + } + + netif_carrier_off(usbdev->net); } static void rndis_wlan_worker(struct work_struct *work) diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 4ba7b038928f..ad2c98af7e9d 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -926,7 +926,7 @@ static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev) static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - u32 reg; + u32 reg, reg2; unsigned int i; char put_to_sleep; char bbp_state; @@ -947,11 +947,12 @@ static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, * device has entered the correct state. */ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); - bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); - rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®2); + bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); + rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); if (bbp_state == state && rf_state == state) return 0; + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); msleep(10); } diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 89d132d4af12..41da3d218c65 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -1084,7 +1084,7 @@ static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev) static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - u32 reg; + u32 reg, reg2; unsigned int i; char put_to_sleep; char bbp_state; @@ -1105,11 +1105,12 @@ static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, * device has entered the correct state. 
*/ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); - bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); - rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®2); + bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); + rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); if (bbp_state == state && rf_state == state) return 0; + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); msleep(10); } diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 0f8b84b7224c..699161327d65 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -413,7 +413,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, */ rt2x00_desc_read(txi, 0, &word); rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, - skb->len + TXWI_DESC_SIZE); + skb->len - TXINFO_DESC_SIZE); rt2x00_set_field32(&word, TXINFO_W0_WIV, !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index a016f7ccde29..f71eee67f977 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -206,7 +206,7 @@ void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) /* * Free irq line. */ - free_irq(to_pci_dev(rt2x00dev->dev)->irq, rt2x00dev); + free_irq(rt2x00dev->irq, rt2x00dev); /* * Free DMA diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 2e3076f67535..6a74baf4e934 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -1689,7 +1689,7 @@ static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev) static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - u32 reg; + u32 reg, reg2; unsigned int i; char put_to_sleep; @@ -1706,10 +1706,11 @@ static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) * device has entered the correct state. */ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2x00pci_register_read(rt2x00dev, MAC_CSR12, ®); - state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); + rt2x00pci_register_read(rt2x00dev, MAC_CSR12, ®2); + state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); if (state == !put_to_sleep) return 0; + rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg); msleep(10); } diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index e35bd19c3c5a..6e0d82efe924 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -1366,7 +1366,7 @@ static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev) static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - u32 reg; + u32 reg, reg2; unsigned int i; char put_to_sleep; @@ -1383,10 +1383,11 @@ static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) * device has entered the correct state. 
*/ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg); - state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); + rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg2); + state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); if (state == !put_to_sleep) return 0; + rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg); msleep(10); } diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c index 57f4bfd959c8..b98fb643fab0 100644 --- a/drivers/net/wireless/wl12xx/wl1271_rx.c +++ b/drivers/net/wireless/wl12xx/wl1271_rx.c @@ -113,6 +113,8 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length) wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, beacon ? "beacon" : ""); + skb_trim(skb, skb->len - desc->pad_len); + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); ieee80211_rx_ni(wl->hw, skb); } diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c index a7db68d37ee9..d04c5b262050 100644 --- a/drivers/net/xilinx_emaclite.c +++ b/drivers/net/xilinx_emaclite.c @@ -1088,7 +1088,7 @@ static void xemaclite_remove_ndev(struct net_device *ndev) */ static bool get_bool(struct of_device *ofdev, const char *s) { - u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL); + u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL); if (p) { return (bool)*p; @@ -1130,14 +1130,14 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev, dev_info(dev, "Device Tree Probing\n"); /* Get iospace for the device */ - rc = of_address_to_resource(ofdev->node, 0, &r_mem); + rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem); if (rc) { dev_err(dev, "invalid address\n"); return rc; } /* Get IRQ for the device */ - rc = of_irq_to_resource(ofdev->node, 0, &r_irq); + rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq); if (rc == NO_IRQ) { dev_err(dev, "no IRQ found\n"); return rc; @@ -1182,7 +1182,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev, lp->next_rx_buf_to_use = 0x0; lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); - mac_address = of_get_mac_address(ofdev->node); + mac_address = of_get_mac_address(ofdev->dev.of_node); if (mac_address) /* Set the MAC address.
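
The four rt2x00 set_state() fixes above (rt2400pci, rt2500pci, rt61pci, rt73usb) share one idea: the sleep/awake command written to PWRCSR1 or MAC_CSR12 does not always stick on the first attempt, so the polling loop now re-issues the original command (kept in reg, while reg2 takes the readback) before every retry instead of merely sleeping. The retry pattern reduced to a kernel-style sketch, where reg_read()/reg_write() are hypothetical stand-ins for the rt2x00 accessors:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    extern u32 reg_read(unsigned int offset);
    extern void reg_write(unsigned int offset, u32 value);

    static int set_state_with_retry(unsigned int offset, u32 cmd,
                                    u32 wanted, int max_tries)
    {
            int i;

            reg_write(offset, cmd);
            for (i = 0; i < max_tries; i++) {
                    if (reg_read(offset) == wanted)
                            return 0;       /* device reached the state */
                    reg_write(offset, cmd); /* re-issue, don't just wait */
                    msleep(10);
            }
            return -EBUSY;
    }
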
*/ @@ -1197,7 +1197,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev, /* Set the MAC address in the EmacLite device */ xemaclite_update_address(lp, ndev->dev_addr); - lp->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0); + lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); rc = xemaclite_mdio_setup(lp, &ofdev->dev); if (rc) dev_warn(&ofdev->dev, "error registering MDIO bus\n"); @@ -1291,8 +1291,11 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = { MODULE_DEVICE_TABLE(of, xemaclite_of_match); static struct of_platform_driver xemaclite_of_driver = { - .name = DRIVER_NAME, - .match_table = xemaclite_of_match, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = xemaclite_of_match, + }, .probe = xemaclite_of_probe, .remove = __devexit_p(xemaclite_of_remove), }; diff --git a/drivers/of/device.c b/drivers/of/device.c index 224ae6bc67b6..7d18f8e0b013 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -10,8 +10,7 @@ #include <asm/errno.h> /** - * of_match_device - Tell if an of_device structure has a matching - * of_match structure + * of_match_device - Tell if a struct device matches an of_device_id list * @ids: array of of device match structures to search in * @dev: the of device structure to match against * @@ -19,11 +18,11 @@ * system is in its list of supported devices. */ const struct of_device_id *of_match_device(const struct of_device_id *matches, - const struct of_device *dev) + const struct device *dev) { - if (!dev->node) + if (!dev->of_node) return NULL; - return of_match_node(matches, dev->node); + return of_match_node(matches, dev->of_node); } EXPORT_SYMBOL(of_match_device); @@ -54,7 +53,7 @@ static ssize_t devspec_show(struct device *dev, struct of_device *ofdev; ofdev = to_of_device(dev); - return sprintf(buf, "%s\n", ofdev->node->full_name); + return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); } static ssize_t name_show(struct device *dev, @@ -63,7 +62,7 @@ static ssize_t name_show(struct device *dev, struct of_device *ofdev; ofdev = to_of_device(dev); - return sprintf(buf, "%s\n", ofdev->node->name); + return sprintf(buf, "%s\n", ofdev->dev.of_node->name); } static ssize_t modalias_show(struct device *dev, @@ -97,14 +96,14 @@ void of_release_dev(struct device *dev) struct of_device *ofdev; ofdev = to_of_device(dev); - of_node_put(ofdev->node); + of_node_put(ofdev->dev.of_node); kfree(ofdev); } EXPORT_SYMBOL(of_release_dev); int of_device_register(struct of_device *ofdev) { - BUG_ON(ofdev->node == NULL); + BUG_ON(ofdev->dev.of_node == NULL); device_initialize(&ofdev->dev); @@ -112,7 +111,7 @@ int of_device_register(struct of_device *ofdev) * the parent. 
If there is no parent defined, set the node * explicitly */ if (!ofdev->dev.parent) - set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->node)); + set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node)); return device_add(&ofdev->dev); } @@ -132,11 +131,11 @@ ssize_t of_device_get_modalias(struct of_device *ofdev, ssize_t tsize, csize, repend; /* Name & Type */ - csize = snprintf(str, len, "of:N%sT%s", - ofdev->node->name, ofdev->node->type); + csize = snprintf(str, len, "of:N%sT%s", ofdev->dev.of_node->name, + ofdev->dev.of_node->type); /* Get compatible property if any */ - compat = of_get_property(ofdev->node, "compatible", &cplen); + compat = of_get_property(ofdev->dev.of_node, "compatible", &cplen); if (!compat) return csize; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index dee4fb56b094..b6987bba8556 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -556,6 +556,21 @@ void __init unflatten_device_tree(void) pr_debug(" -> unflatten_device_tree()\n"); + if (!initial_boot_params) { + pr_debug("No device tree pointer\n"); + return; + } + + pr_debug("Unflattening device tree:\n"); + pr_debug("magic: %08x\n", be32_to_cpu(initial_boot_params->magic)); + pr_debug("size: %08x\n", be32_to_cpu(initial_boot_params->totalsize)); + pr_debug("version: %08x\n", be32_to_cpu(initial_boot_params->version)); + + if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) { + pr_err("Invalid device tree blob header\n"); + return; + } + /* First pass, scan for size */ start = ((unsigned long)initial_boot_params) + be32_to_cpu(initial_boot_params->off_dt_struct); diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c index a3a708e590d0..ab6522c8e4fe 100644 --- a/drivers/of/of_i2c.c +++ b/drivers/of/of_i2c.c @@ -42,7 +42,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap, info.addr = be32_to_cpup(addr); - dev_archdata_set_node(&dev_ad, node); + info.of_node = node; info.archdata = &dev_ad; request_module("%s", info.type); @@ -68,7 +68,7 @@ EXPORT_SYMBOL(of_register_i2c_devices); static int of_dev_node_match(struct device *dev, void *data) { - return dev_archdata_get_node(&dev->archdata) == data; + return dev->of_node == data; } /* must call put_device() when done with returned i2c_client device */ diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index b4748337223b..42a6715f8e84 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -79,7 +79,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) /* Associate the OF node with the device structure so it * can be looked up later */ of_node_get(child); - dev_archdata_set_node(&phy->dev.archdata, child); + phy->dev.of_node = child; /* All data is now stored in the phy struct; register it */ rc = phy_device_register(phy); @@ -100,7 +100,7 @@ EXPORT_SYMBOL(of_mdiobus_register); /* Helper function for of_phy_find_device */ static int of_phy_match(struct device *dev, void *phy_np) { - return dev_archdata_get_node(&dev->archdata) == phy_np; + return dev->of_node == phy_np; } /** @@ -166,7 +166,7 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, if (!dev->dev.parent) return NULL; - net_np = dev_archdata_get_node(&dev->dev.parent->archdata); + net_np = dev->dev.parent->of_node; if (!net_np) return NULL; diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c index f65f48b98448..5fed7e3c7da3 100644 --- a/drivers/of/of_spi.c +++ b/drivers/of/of_spi.c @@ -79,7 +79,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np) /* Store a pointer to the node in the device 
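
The of_i2c/of_mdio/of_spi hunks above are all one tree-wide change: the device-tree node pointer moves out of bus-specific archdata into struct device itself, so generic code can compare dev->of_node directly. One payoff is that looking a device up by its device_node no longer needs per-bus helpers; a sketch using the driver-core bus_find_device() of this era (the wrapper name is hypothetical):

    #include <linux/device.h>
    #include <linux/of.h>

    static int node_match(struct device *dev, void *data)
    {
            return dev->of_node == data;
    }

    /* Returns a referenced device or NULL; caller must put_device(). */
    static struct device *find_device_by_node(struct bus_type *bus,
                                              struct device_node *np)
    {
            return bus_find_device(bus, NULL, np, node_match);
    }
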
structure */ of_node_get(nc); - spi->dev.archdata.of_node = nc; + spi->dev.of_node = nc; /* Register the new device */ request_module(spi->modalias); diff --git a/drivers/of/platform.c b/drivers/of/platform.c index d58ade170c4b..7dacc1ebe91e 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -21,14 +21,12 @@ extern struct device_attribute of_platform_device_attrs[]; static int of_platform_bus_match(struct device *dev, struct device_driver *drv) { - struct of_device *of_dev = to_of_device(dev); - struct of_platform_driver *of_drv = to_of_platform_driver(drv); - const struct of_device_id *matches = of_drv->match_table; + const struct of_device_id *matches = drv->of_match_table; if (!matches) return 0; - return of_match_device(matches, of_dev) != NULL; + return of_match_device(matches, dev) != NULL; } static int of_platform_device_probe(struct device *dev) @@ -46,7 +44,7 @@ static int of_platform_device_probe(struct device *dev) of_dev_get(of_dev); - match = of_match_device(drv->match_table, of_dev); + match = of_match_device(drv->driver.of_match_table, dev); if (match) error = drv->probe(of_dev, match); if (error) @@ -386,11 +384,6 @@ int of_bus_type_init(struct bus_type *bus, const char *name) int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) { - /* initialize common driver fields */ - if (!drv->driver.name) - drv->driver.name = drv->name; - if (!drv->driver.owner) - drv->driver.owner = drv->owner; drv->driver.bus = bus; /* register with core */ diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c index 1586e1caa2f5..8bef6d60f88b 100644 --- a/drivers/parport/parport_amiga.c +++ b/drivers/parport/parport_amiga.c @@ -18,6 +18,8 @@ #include <linux/parport.h> #include <linux/ioport.h> #include <linux/interrupt.h> +#include <linux/platform_device.h> + #include <asm/setup.h> #include <asm/amigahw.h> #include <asm/irq.h> @@ -31,7 +33,6 @@ #define DPRINTK(x...) 
do { } while (0) #endif -static struct parport *this_port = NULL; static void amiga_write_data(struct parport *p, unsigned char data) { @@ -227,18 +228,11 @@ static struct parport_operations pp_amiga_ops = { /* ----------- Initialisation code --------------------------------- */ -static int __init parport_amiga_init(void) +static int __init amiga_parallel_probe(struct platform_device *pdev) { struct parport *p; int err; - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_PARALLEL)) - return -ENODEV; - - err = -EBUSY; - if (!request_mem_region(CIAA_PHYSADDR-1+0x100, 0x100, "parallel")) - goto out_mem; - ciaa.ddrb = 0xff; ciab.ddra &= 0xf8; mb(); @@ -246,41 +240,63 @@ static int __init parport_amiga_init(void) p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG, PARPORT_DMA_NONE, &pp_amiga_ops); if (!p) - goto out_port; + return -EBUSY; - err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name, p); + err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name, + p); if (err) goto out_irq; - this_port = p; printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name); /* XXX: set operating mode */ parport_announce_port(p); + platform_set_drvdata(pdev, p); + return 0; out_irq: parport_put_port(p); -out_port: - release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100); -out_mem: return err; } -static void __exit parport_amiga_exit(void) +static int __exit amiga_parallel_remove(struct platform_device *pdev) +{ + struct parport *port = platform_get_drvdata(pdev); + + parport_remove_port(port); + if (port->irq != PARPORT_IRQ_NONE) + free_irq(IRQ_AMIGA_CIAA_FLG, port); + parport_put_port(port); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static struct platform_driver amiga_parallel_driver = { + .remove = __exit_p(amiga_parallel_remove), + .driver = { + .name = "amiga-parallel", + .owner = THIS_MODULE, + }, +}; + +static int __init amiga_parallel_init(void) +{ + return platform_driver_probe(&amiga_parallel_driver, + amiga_parallel_probe); +} + +module_init(amiga_parallel_init); + +static void __exit amiga_parallel_exit(void) { - parport_remove_port(this_port); - if (this_port->irq != PARPORT_IRQ_NONE) - free_irq(IRQ_AMIGA_CIAA_FLG, this_port); - parport_put_port(this_port); - release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100); + platform_driver_unregister(&amiga_parallel_driver); } +module_exit(amiga_parallel_exit); MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port"); MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port"); MODULE_LICENSE("GPL"); - -module_init(parport_amiga_init) -module_exit(parport_amiga_exit) +MODULE_ALIAS("platform:amiga-parallel"); diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c index 065f229580d5..9a5b4b894161 100644 --- a/drivers/parport/parport_sunbpp.c +++ b/drivers/parport/parport_sunbpp.c @@ -382,8 +382,11 @@ static const struct of_device_id bpp_match[] = { MODULE_DEVICE_TABLE(of, bpp_match); static struct of_platform_driver bpp_sbus_driver = { - .name = "bpp", - .match_table = bpp_match, + .driver = { + .name = "bpp", + .owner = THIS_MODULE, + .of_match_table = bpp_match, + }, .probe = bpp_probe, .remove = __devexit_p(bpp_remove), }; diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index 7aaae2d2bd67..80c11d131499 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h @@ -130,4 +130,21 @@ static inline int aer_osc_setup(struct pcie_device *pciedev) } #endif +#ifdef CONFIG_ACPI_APEI +extern 
int pcie_aer_get_firmware_first(struct pci_dev *pci_dev); +#else +static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) +{ + if (pci_dev->__aer_firmware_first_valid) + return pci_dev->__aer_firmware_first; + return 0; +} +#endif + +static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev, + int enable) +{ + pci_dev->__aer_firmware_first = !!enable; + pci_dev->__aer_firmware_first_valid = 1; +} #endif /* _AERDRV_H_ */ diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 04814087658d..f278d7b0d95d 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c @@ -16,6 +16,7 @@ #include <linux/acpi.h> #include <linux/pci-acpi.h> #include <linux/delay.h> +#include <acpi/apei.h> #include "aerdrv.h" /** @@ -53,3 +54,79 @@ int aer_osc_setup(struct pcie_device *pciedev) return 0; } + +#ifdef CONFIG_ACPI_APEI +static inline int hest_match_pci(struct acpi_hest_aer_common *p, + struct pci_dev *pci) +{ + return (0 == pci_domain_nr(pci->bus) && + p->bus == pci->bus->number && + p->device == PCI_SLOT(pci->devfn) && + p->function == PCI_FUNC(pci->devfn)); +} + +struct aer_hest_parse_info { + struct pci_dev *pci_dev; + int firmware_first; +}; + +static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) +{ + struct aer_hest_parse_info *info = data; + struct acpi_hest_aer_common *p; + u8 pcie_type = 0; + u8 bridge = 0; + int ff = 0; + + switch (hest_hdr->type) { + case ACPI_HEST_TYPE_AER_ROOT_PORT: + pcie_type = PCI_EXP_TYPE_ROOT_PORT; + break; + case ACPI_HEST_TYPE_AER_ENDPOINT: + pcie_type = PCI_EXP_TYPE_ENDPOINT; + break; + case ACPI_HEST_TYPE_AER_BRIDGE: + if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) + bridge = 1; + break; + default: + return 0; + } + + p = (struct acpi_hest_aer_common *)(hest_hdr + 1); + if (p->flags & ACPI_HEST_GLOBAL) { + if ((info->pci_dev->is_pcie && + info->pci_dev->pcie_type == pcie_type) || bridge) + ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); + } else + if (hest_match_pci(p, info->pci_dev)) + ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); + info->firmware_first = ff; + + return 0; +} + +static void aer_set_firmware_first(struct pci_dev *pci_dev) +{ + int rc; + struct aer_hest_parse_info info = { + .pci_dev = pci_dev, + .firmware_first = 0, + }; + + rc = apei_hest_parse(aer_hest_parse, &info); + + if (rc) + pci_dev->__aer_firmware_first = 0; + else + pci_dev->__aer_firmware_first = info.firmware_first; + pci_dev->__aer_firmware_first_valid = 1; +} + +int pcie_aer_get_firmware_first(struct pci_dev *dev) +{ + if (!dev->__aer_firmware_first_valid) + aer_set_firmware_first(dev); + return dev->__aer_firmware_first; +} +#endif diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index df2d686fe3dd..8af4f619bba2 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -36,7 +36,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev) u16 reg16 = 0; int pos; - if (dev->aer_firmware_first) + if (pcie_aer_get_firmware_first(dev)) return -EIO; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); @@ -63,7 +63,7 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev) u16 reg16 = 0; int pos; - if (dev->aer_firmware_first) + if (pcie_aer_get_firmware_first(dev)) return -EIO; pos = pci_pcie_cap(dev); @@ -771,7 +771,7 @@ void aer_isr(struct work_struct *work) */ int aer_init(struct pcie_device *dev) { - if (dev->port->aer_firmware_first) { + if (pcie_aer_get_firmware_first(dev->port)) { 
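
pcie_aer_get_firmware_first() caches the result of a single HEST walk per device: aer_hest_parse() honours both ACPI_HEST_GLOBAL entries (matched against the device's PCIe port type) and per-device entries (matched by bus/device/function on segment 0). Any code that wants to own the AER capability is expected to ask first; a condensed usage sketch built only from functions visible in this patch:

    static int try_native_aer(struct pci_dev *dev)
    {
            /* Firmware-first: platform firmware owns the AER registers. */
            if (pcie_aer_get_firmware_first(dev))
                    return -EIO;
            return pci_enable_pcie_error_reporting(dev);
    }
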
dev_printk(KERN_DEBUG, &dev->device, "PCIe errors handled by platform firmware.\n"); goto out; @@ -785,7 +785,7 @@ out: if (forceload) { dev_printk(KERN_DEBUG, &dev->device, "aerdrv forceload requested.\n"); - dev->port->aer_firmware_first = 0; + pcie_aer_force_firmware_first(dev->port, 0); return 0; } return -ENXIO; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index c82548afcd5c..f4adba2d1dd3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -10,7 +10,6 @@ #include <linux/module.h> #include <linux/cpumask.h> #include <linux/pci-aspm.h> -#include <acpi/acpi_hest.h> #include "pci.h" #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ @@ -904,12 +903,6 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) pdev->is_hotplug_bridge = 1; } -static void set_pci_aer_firmware_first(struct pci_dev *pdev) -{ - if (acpi_hest_firmware_first_pci(pdev)) - pdev->aer_firmware_first = 1; -} - #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) /** @@ -939,7 +932,6 @@ int pci_setup_device(struct pci_dev *dev) dev->multifunction = !!(hdr_type & 0x80); dev->error_state = pci_channel_io_normal; set_pcie_port_type(dev); - set_pci_aer_firmware_first(dev); list_for_each_entry(slot, &dev->bus->slots, list) if (PCI_SLOT(dev->devfn) == slot->number) diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c index 2e59fe947d28..f94d8281cfb0 100644 --- a/drivers/pcmcia/electra_cf.c +++ b/drivers/pcmcia/electra_cf.c @@ -185,7 +185,7 @@ static int __devinit electra_cf_probe(struct of_device *ofdev, const struct of_device_id *match) { struct device *device = &ofdev->dev; - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct electra_cf_socket *cf; struct resource mem, io; int status; @@ -357,8 +357,11 @@ static const struct of_device_id electra_cf_match[] = { MODULE_DEVICE_TABLE(of, electra_cf_match); static struct of_platform_driver electra_cf_driver = { - .name = (char *)driver_name, - .match_table = electra_cf_match, + .driver = { + .name = (char *)driver_name, + .owner = THIS_MODULE, + .of_match_table = electra_cf_match, + }, .probe = electra_cf_probe, .remove = electra_cf_remove, }; diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index 41cc954a5ffe..1a648b90b634 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c @@ -1298,8 +1298,11 @@ static const struct of_device_id m8xx_pcmcia_match[] = { MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match); static struct of_platform_driver m8xx_pcmcia_driver = { - .name = driver_name, - .match_table = m8xx_pcmcia_match, + .driver = { + .name = driver_name, + .owner = THIS_MODULE, + .match_table = m8xx_pcmcia_match, + }, .probe = m8xx_probe, .remove = m8xx_remove, }; diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c index ef0c5f133691..d007a2a03830 100644 --- a/drivers/pcmcia/pcmcia_ioctl.c +++ b/drivers/pcmcia/pcmcia_ioctl.c @@ -813,8 +813,7 @@ static u_int ds_poll(struct file *file, poll_table *wait) /*====================================================================*/ -static int ds_ioctl(struct inode *inode, struct file *file, - u_int cmd, u_long arg) +static int ds_ioctl(struct file *file, u_int cmd, u_long arg) { struct pcmcia_socket *s; void __user *uarg = (char __user *)arg; @@ -1021,13 +1020,25 @@ free_out: return err; } /* ds_ioctl */ +static long ds_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = ds_ioctl(file, cmd, arg); + unlock_kernel(); + 
+ return ret; +} + + /*====================================================================*/ static const struct file_operations ds_fops = { .owner = THIS_MODULE, .open = ds_open, .release = ds_release, - .ioctl = ds_ioctl, + .unlocked_ioctl = ds_unlocked_ioctl, .read = ds_read, .write = ds_write, .poll = ds_poll, diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index faaa9b4d0d07..8e9ba177d817 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -57,6 +57,11 @@ config WM8350_POWER Say Y here to enable support for the power management unit provided by the Wolfson Microelectronics WM8350 PMIC. +config TEST_POWER + tristate "Test power driver" + help + This driver is used for testing. It's safe to say M here. + config BATTERY_DS2760 tristate "DS2760 battery driver (HP iPAQ & others)" select W1 @@ -65,10 +70,10 @@ config BATTERY_DS2760 Say Y here to enable support for batteries with ds2760 chip. config BATTERY_DS2782 - tristate "DS2782 standalone gas-gauge" + tristate "DS2782/DS2786 standalone gas-gauge" depends on I2C help - Say Y here to enable support for the DS2782 standalone battery + Say Y here to enable support for the DS2782/DS2786 standalone battery gas-gauge. config BATTERY_PMU @@ -125,6 +130,12 @@ config BATTERY_MAX17040 in handheld and portable equipment. The MAX17040 is configured to operate with a single lithium cell +config BATTERY_Z2 + tristate "Z2 battery driver" + depends on I2C && MACH_ZIPIT2 + help + Say Y to include support for the battery on the Zipit Z2. + config CHARGER_PCF50633 tristate "NXP PCF50633 MBC" depends on MFD_PCF50633 diff --git a/drivers/power/Makefile b/drivers/power/Makefile index a2ba7c85c97a..00050809a6c7 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_MAX8925_POWER) += max8925_power.o obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o obj-$(CONFIG_WM831X_POWER) += wm831x_power.o obj-$(CONFIG_WM8350_POWER) += wm8350_power.o +obj-$(CONFIG_TEST_POWER) += test_power.o obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o @@ -31,4 +32,5 @@ obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o +obj-$(CONFIG_BATTERY_Z2) += z2_battery.o obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index 3bf8d1f622e3..4d3b27228a2e 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c @@ -304,6 +304,28 @@ static void ds2760_battery_write_rated_capacity(struct ds2760_device_info *di, w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1); } +static void ds2760_battery_write_active_full(struct ds2760_device_info *di, + int active_full) +{ + unsigned char tmp[2] = { + active_full >> 8, + active_full & 0xff + }; + + if (tmp[0] == di->raw[DS2760_ACTIVE_FULL] && + tmp[1] == di->raw[DS2760_ACTIVE_FULL + 1]) + return; + + w1_ds2760_write(di->w1_dev, tmp, DS2760_ACTIVE_FULL, sizeof(tmp)); + w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK0); + w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK0); + + /* Write to the di->raw[] buffer directly - the DS2760_ACTIVE_FULL + * values won't be read back by ds2760_battery_read_status() */ + di->raw[DS2760_ACTIVE_FULL] = tmp[0]; + di->raw[DS2760_ACTIVE_FULL + 1] = tmp[1]; +} + static void ds2760_battery_work(struct work_struct *work) { struct ds2760_device_info *di = 
container_of(work, @@ -426,6 +448,45 @@ static int ds2760_battery_get_property(struct power_supply *psy, return 0; } +static int ds2760_battery_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct ds2760_device_info *di = to_ds2760_device_info(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_FULL: + /* the interface counts in uAh, convert the value */ + ds2760_battery_write_active_full(di, val->intval / 1000L); + break; + + case POWER_SUPPLY_PROP_CHARGE_NOW: + /* ds2760_battery_set_current_accum() does the conversion */ + ds2760_battery_set_current_accum(di, val->intval); + break; + + default: + return -EPERM; + } + + return 0; +} + +static int ds2760_battery_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_FULL: + case POWER_SUPPLY_PROP_CHARGE_NOW: + return 1; + + default: + break; + } + + return 0; +} + static enum power_supply_property ds2760_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -460,6 +521,9 @@ static int ds2760_battery_probe(struct platform_device *pdev) di->bat.properties = ds2760_battery_props; di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props); di->bat.get_property = ds2760_battery_get_property; + di->bat.set_property = ds2760_battery_set_property; + di->bat.property_is_writeable = + ds2760_battery_property_is_writeable; di->bat.set_charged = ds2760_battery_set_charged; di->bat.external_power_changed = ds2760_battery_external_power_changed; diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c index 99c89976a902..d762a0cbc6af 100644 --- a/drivers/power/ds2782_battery.c +++ b/drivers/power/ds2782_battery.c @@ -5,6 +5,8 @@ * * Author: Ryan Mallon <ryan@bluewatersys.com> * + * DS2786 added by Yulia Vilensky <vilensky@compulab.co.il> + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
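
The ds2760 additions above are an early user of the new power-supply core contract (implemented in the power_supply_sysfs.c changes later in this diff): a driver supplies set_property() plus property_is_writeable(), and the core makes the matching sysfs attribute writable and routes stores back to the driver. A minimal sketch for a hypothetical driver with one writable property (the "foo_*" names are illustrative):

    #include <linux/power_supply.h>

    extern int foo_write_charge_uah(int uah);   /* hypothetical helper */

    static int foo_set_property(struct power_supply *psy,
                                enum power_supply_property psp,
                                const union power_supply_propval *val)
    {
            if (psp != POWER_SUPPLY_PROP_CHARGE_NOW)
                    return -EPERM;
            return foo_write_charge_uah(val->intval);
    }

    static int foo_property_is_writeable(struct power_supply *psy,
                                         enum power_supply_property psp)
    {
            /* > 0 makes the sysfs file writable for this property */
            return psp == POWER_SUPPLY_PROP_CHARGE_NOW;
    }
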
@@ -20,12 +22,13 @@ #include <linux/idr.h> #include <linux/power_supply.h> #include <linux/slab.h> +#include <linux/ds2782_battery.h> #define DS2782_REG_RARC 0x06 /* Remaining active relative capacity */ -#define DS2782_REG_VOLT_MSB 0x0c -#define DS2782_REG_TEMP_MSB 0x0a -#define DS2782_REG_CURRENT_MSB 0x0e +#define DS278x_REG_VOLT_MSB 0x0c +#define DS278x_REG_TEMP_MSB 0x0a +#define DS278x_REG_CURRENT_MSB 0x0e /* EEPROM Block */ #define DS2782_REG_RSNSP 0x69 /* Sense resistor value */ @@ -33,18 +36,33 @@ /* Current unit measurement in uA for a 1 milli-ohm sense resistor */ #define DS2782_CURRENT_UNITS 1563 -#define to_ds2782_info(x) container_of(x, struct ds2782_info, battery) +#define DS2786_REG_RARC 0x02 /* Remaining active relative capacity */ + +#define DS2786_CURRENT_UNITS 25 + +struct ds278x_info; + +struct ds278x_battery_ops { + int (*get_current)(struct ds278x_info *info, int *current_uA); + int (*get_voltage)(struct ds278x_info *info, int *voltage_uA); + int (*get_capacity)(struct ds278x_info *info, int *capacity_uA); + +}; + +#define to_ds278x_info(x) container_of(x, struct ds278x_info, battery) -struct ds2782_info { +struct ds278x_info { struct i2c_client *client; struct power_supply battery; + struct ds278x_battery_ops *ops; int id; + int rsns; }; static DEFINE_IDR(battery_id); static DEFINE_MUTEX(battery_lock); -static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val) +static inline int ds278x_read_reg(struct ds278x_info *info, int reg, u8 *val) { int ret; @@ -58,7 +76,7 @@ static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val) return 0; } -static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb, +static inline int ds278x_read_reg16(struct ds278x_info *info, int reg_msb, s16 *val) { int ret; @@ -73,7 +91,7 @@ static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb, return 0; } -static int ds2782_get_temp(struct ds2782_info *info, int *temp) +static int ds278x_get_temp(struct ds278x_info *info, int *temp) { s16 raw; int err; @@ -84,14 +102,14 @@ static int ds2782_get_temp(struct ds2782_info *info, int *temp) * celsius. The temperature value is stored as a 10 bit number, plus * sign in the upper bits of a 16 bit register. */ - err = ds2782_read_reg16(info, DS2782_REG_TEMP_MSB, &raw); + err = ds278x_read_reg16(info, DS278x_REG_TEMP_MSB, &raw); if (err) return err; *temp = ((raw / 32) * 125) / 100; return 0; } -static int ds2782_get_current(struct ds2782_info *info, int *current_uA) +static int ds2782_get_current(struct ds278x_info *info, int *current_uA) { int sense_res; int err; @@ -102,7 +120,7 @@ static int ds2782_get_current(struct ds2782_info *info, int *current_uA) * The units of measurement for current are dependent on the value of * the sense resistor. 
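
The DS2782_CURRENT_UNITS constant can be sanity-checked from the code alone: ds2782_get_current() computes current_uA = raw * (1563 / sense_res) with sense_res in milli-ohms, so the scale of 1563 uA at 1 milli-ohm corresponds to a current-register LSB of roughly 1.56 uV across the sense resistor. A worked example with the integer arithmetic the C expression actually performs:

    sense_res = 20 milli-ohm, raw = 1000
    current_uA = 1000 * (1563 / 20) = 1000 * 78 = 78000 uA

For the DS2786 path added below, the register value is additionally shifted down (raw / 16) and scaled by DS2786_CURRENT_UNITS = 25 divided by the board-specific rsns value, which is why that chip must be given platform data.
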
*/ - err = ds2782_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw); + err = ds278x_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw); if (err) return err; if (sense_res_raw == 0) { @@ -113,14 +131,14 @@ static int ds2782_get_current(struct ds2782_info *info, int *current_uA) dev_dbg(&info->client->dev, "sense resistor = %d milli-ohms\n", sense_res); - err = ds2782_read_reg16(info, DS2782_REG_CURRENT_MSB, &raw); + err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw); if (err) return err; *current_uA = raw * (DS2782_CURRENT_UNITS / sense_res); return 0; } -static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uA) +static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uA) { s16 raw; int err; @@ -129,36 +147,77 @@ static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uA) * Voltage is measured in units of 4.88mV. The voltage is stored as * a 10-bit number plus sign, in the upper bits of a 16-bit register */ - err = ds2782_read_reg16(info, DS2782_REG_VOLT_MSB, &raw); + err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); if (err) return err; *voltage_uA = (raw / 32) * 4800; return 0; } -static int ds2782_get_capacity(struct ds2782_info *info, int *capacity) +static int ds2782_get_capacity(struct ds278x_info *info, int *capacity) { int err; u8 raw; - err = ds2782_read_reg(info, DS2782_REG_RARC, &raw); + err = ds278x_read_reg(info, DS2782_REG_RARC, &raw); if (err) return err; *capacity = raw; return raw; } -static int ds2782_get_status(struct ds2782_info *info, int *status) +static int ds2786_get_current(struct ds278x_info *info, int *current_uA) +{ + int err; + s16 raw; + + err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw); + if (err) + return err; + *current_uA = (raw / 16) * (DS2786_CURRENT_UNITS / info->rsns); + return 0; +} + +static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uA) +{ + s16 raw; + int err; + + /* + * Voltage is measured in units of 1.22mV. 
The voltage is stored as + * a 10-bit number plus sign, in the upper bits of a 16-bit register + */ + err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); + if (err) + return err; + *voltage_uA = (raw / 8) * 1220; + return 0; +} + +static int ds2786_get_capacity(struct ds278x_info *info, int *capacity) +{ + int err; + u8 raw; + + err = ds278x_read_reg(info, DS2786_REG_RARC, &raw); + if (err) + return err; + /* Relative capacity is displayed with resolution 0.5 % */ + *capacity = raw/2 ; + return 0; +} + +static int ds278x_get_status(struct ds278x_info *info, int *status) { int err; int current_uA; int capacity; - err = ds2782_get_current(info, &current_uA); + err = info->ops->get_current(info, &current_uA); if (err) return err; - err = ds2782_get_capacity(info, &capacity); + err = info->ops->get_capacity(info, &capacity); if (err) return err; @@ -174,32 +233,32 @@ static int ds2782_get_status(struct ds2782_info *info, int *status) return 0; } -static int ds2782_battery_get_property(struct power_supply *psy, +static int ds278x_battery_get_property(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval *val) { - struct ds2782_info *info = to_ds2782_info(psy); + struct ds278x_info *info = to_ds278x_info(psy); int ret; switch (prop) { case POWER_SUPPLY_PROP_STATUS: - ret = ds2782_get_status(info, &val->intval); + ret = ds278x_get_status(info, &val->intval); break; case POWER_SUPPLY_PROP_CAPACITY: - ret = ds2782_get_capacity(info, &val->intval); + ret = info->ops->get_capacity(info, &val->intval); break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = ds2782_get_voltage(info, &val->intval); + ret = info->ops->get_voltage(info, &val->intval); break; case POWER_SUPPLY_PROP_CURRENT_NOW: - ret = ds2782_get_current(info, &val->intval); + ret = info->ops->get_current(info, &val->intval); break; case POWER_SUPPLY_PROP_TEMP: - ret = ds2782_get_temp(info, &val->intval); + ret = ds278x_get_temp(info, &val->intval); break; default: @@ -209,7 +268,7 @@ static int ds2782_battery_get_property(struct power_supply *psy, return ret; } -static enum power_supply_property ds2782_battery_props[] = { +static enum power_supply_property ds278x_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_VOLTAGE_NOW, @@ -217,18 +276,18 @@ static enum power_supply_property ds2782_battery_props[] = { POWER_SUPPLY_PROP_TEMP, }; -static void ds2782_power_supply_init(struct power_supply *battery) +static void ds278x_power_supply_init(struct power_supply *battery) { battery->type = POWER_SUPPLY_TYPE_BATTERY; - battery->properties = ds2782_battery_props; - battery->num_properties = ARRAY_SIZE(ds2782_battery_props); - battery->get_property = ds2782_battery_get_property; + battery->properties = ds278x_battery_props; + battery->num_properties = ARRAY_SIZE(ds278x_battery_props); + battery->get_property = ds278x_battery_get_property; battery->external_power_changed = NULL; } -static int ds2782_battery_remove(struct i2c_client *client) +static int ds278x_battery_remove(struct i2c_client *client) { - struct ds2782_info *info = i2c_get_clientdata(client); + struct ds278x_info *info = i2c_get_clientdata(client); power_supply_unregister(&info->battery); kfree(info->battery.name); @@ -237,19 +296,45 @@ static int ds2782_battery_remove(struct i2c_client *client) idr_remove(&battery_id, info->id); mutex_unlock(&battery_lock); - i2c_set_clientdata(client, info); - kfree(info); return 0; } -static int ds2782_battery_probe(struct i2c_client *client, +enum ds278x_num_id { + DS2782 = 0, +
DS2786, +}; + +static struct ds278x_battery_ops ds278x_ops[] = { + [DS2782] = { + .get_current = ds2782_get_current, + .get_voltage = ds2782_get_voltage, + .get_capacity = ds2782_get_capacity, + }, + [DS2786] = { + .get_current = ds2786_get_current, + .get_voltage = ds2786_get_voltage, + .get_capacity = ds2786_get_capacity, + } +}; + +static int ds278x_battery_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct ds2782_info *info; + struct ds278x_platform_data *pdata = client->dev.platform_data; + struct ds278x_info *info; int ret; int num; + /* + * ds2786 should have the sense resistor value set + * in the platform data + */ + if (id->driver_data == DS2786 && !pdata) { + dev_err(&client->dev, "missing platform data for ds2786\n"); + return -EINVAL; + } + /* Get an ID for this battery */ ret = idr_pre_get(&battery_id, GFP_KERNEL); if (ret == 0) { @@ -269,15 +354,20 @@ static int ds2782_battery_probe(struct i2c_client *client, goto fail_info; } - info->battery.name = kasprintf(GFP_KERNEL, "ds2782-%d", num); + info->battery.name = kasprintf(GFP_KERNEL, "%s-%d", client->name, num); if (!info->battery.name) { ret = -ENOMEM; goto fail_name; } + if (id->driver_data == DS2786) + info->rsns = pdata->rsns; + i2c_set_clientdata(client, info); info->client = client; - ds2782_power_supply_init(&info->battery); + info->id = num; + info->ops = &ds278x_ops[id->driver_data]; + ds278x_power_supply_init(&info->battery); ret = power_supply_register(&client->dev, &info->battery); if (ret) { @@ -290,7 +380,6 @@ static int ds2782_battery_probe(struct i2c_client *client, fail_register: kfree(info->battery.name); fail_name: - i2c_set_clientdata(client, info); kfree(info); fail_info: mutex_lock(&battery_lock); @@ -300,31 +389,32 @@ fail_id: return ret; } -static const struct i2c_device_id ds2782_id[] = { - {"ds2782", 0}, +static const struct i2c_device_id ds278x_id[] = { + {"ds2782", DS2782}, + {"ds2786", DS2786}, {}, }; -static struct i2c_driver ds2782_battery_driver = { +static struct i2c_driver ds278x_battery_driver = { .driver = { .name = "ds2782-battery", }, - .probe = ds2782_battery_probe, - .remove = ds2782_battery_remove, - .id_table = ds2782_id, + .probe = ds278x_battery_probe, + .remove = ds278x_battery_remove, + .id_table = ds278x_id, }; -static int __init ds2782_init(void) +static int __init ds278x_init(void) { - return i2c_add_driver(&ds2782_battery_driver); + return i2c_add_driver(&ds278x_battery_driver); } -module_init(ds2782_init); +module_init(ds278x_init); -static void __exit ds2782_exit(void) +static void __exit ds278x_exit(void) { - i2c_del_driver(&ds2782_battery_driver); + i2c_del_driver(&ds278x_battery_driver); } -module_exit(ds2782_exit); +module_exit(ds278x_exit); MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>"); MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver"); diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c index a232de6a5703..69f8aa3a6a4b 100644 --- a/drivers/power/pda_power.c +++ b/drivers/power/pda_power.c @@ -404,6 +404,13 @@ static int usb_wakeup_enabled; static int pda_power_suspend(struct platform_device *pdev, pm_message_t state) { + if (pdata->suspend) { + int ret = pdata->suspend(state); + + if (ret) + return ret; + } + if (device_may_wakeup(&pdev->dev)) { if (ac_irq) ac_wakeup_enabled = !enable_irq_wake(ac_irq->start); @@ -423,6 +430,9 @@ static int pda_power_resume(struct platform_device *pdev) disable_irq_wake(ac_irq->start); } + if (pdata->resume) + return pdata->resume(); + return 0; } #else diff --git 
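
The ds278x rework above is a standard strategy-table conversion: each supported chip gets an entry in ds278x_ops[], and probe() selects it through the driver_data field of the matching i2c_device_id. The idiom in general form, with entirely hypothetical "foo" names:

    #include <linux/i2c.h>
    #include <linux/slab.h>

    enum foo_chip { FOO_A, FOO_B };

    struct foo_dev;

    struct foo_ops {
            int (*read_temp)(struct foo_dev *foo, int *val);
    };

    struct foo_dev {
            const struct foo_ops *ops;
    };

    extern int foo_a_read_temp(struct foo_dev *foo, int *val);
    extern int foo_b_read_temp(struct foo_dev *foo, int *val);

    static const struct foo_ops foo_ops_tbl[] = {
            [FOO_A] = { .read_temp = foo_a_read_temp },
            [FOO_B] = { .read_temp = foo_b_read_temp },
    };

    static const struct i2c_device_id foo_ids[] = {
            { "foo-a", FOO_A },     /* driver_data picks the ops entry */
            { "foo-b", FOO_B },
            { }
    };

    static int foo_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
    {
            struct foo_dev *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

            if (!foo)
                    return -ENOMEM;
            foo->ops = &foo_ops_tbl[id->driver_data];
            i2c_set_clientdata(client, foo);
            return 0;
    }
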
a/drivers/power/power_supply.h b/drivers/power/power_supply.h index f38ba482be75..018de2b26998 100644 --- a/drivers/power/power_supply.h +++ b/drivers/power/power_supply.h @@ -12,15 +12,12 @@ #ifdef CONFIG_SYSFS -extern int power_supply_create_attrs(struct power_supply *psy); -extern void power_supply_remove_attrs(struct power_supply *psy); +extern void power_supply_init_attrs(struct device_type *dev_type); extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env); #else -static inline int power_supply_create_attrs(struct power_supply *psy) -{ return 0; } -static inline void power_supply_remove_attrs(struct power_supply *psy) {} +static inline void power_supply_init_attrs(struct device_type *dev_type) {} #define power_supply_uevent NULL #endif /* CONFIG_SYSFS */ diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index cce75b40b435..91606bb55318 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> +#include <linux/slab.h> #include <linux/device.h> #include <linux/err.h> #include <linux/power_supply.h> @@ -22,6 +23,8 @@ struct class *power_supply_class; EXPORT_SYMBOL_GPL(power_supply_class); +static struct device_type power_supply_dev_type; + static int __power_supply_changed_work(struct device *dev, void *data) { struct power_supply *psy = (struct power_supply *)data; @@ -144,22 +147,39 @@ struct power_supply *power_supply_get_by_name(char *name) } EXPORT_SYMBOL_GPL(power_supply_get_by_name); +static void power_supply_dev_release(struct device *dev) +{ + pr_debug("device: '%s': %s\n", dev_name(dev), __func__); + kfree(dev); +} + int power_supply_register(struct device *parent, struct power_supply *psy) { - int rc = 0; + struct device *dev; + int rc; - psy->dev = device_create(power_supply_class, parent, 0, psy, - "%s", psy->name); - if (IS_ERR(psy->dev)) { - rc = PTR_ERR(psy->dev); - goto dev_create_failed; - } + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; - INIT_WORK(&psy->changed_work, power_supply_changed_work); + device_initialize(dev); - rc = power_supply_create_attrs(psy); + dev->class = power_supply_class; + dev->type = &power_supply_dev_type; + dev->parent = parent; + dev->release = power_supply_dev_release; + dev_set_drvdata(dev, psy); + psy->dev = dev; + + rc = kobject_set_name(&dev->kobj, "%s", psy->name); + if (rc) + goto kobject_set_name_failed; + + rc = device_add(dev); if (rc) - goto create_attrs_failed; + goto device_add_failed; + + INIT_WORK(&psy->changed_work, power_supply_changed_work); rc = power_supply_create_triggers(psy); if (rc) @@ -170,10 +190,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy) goto success; create_triggers_failed: - power_supply_remove_attrs(psy); -create_attrs_failed: device_unregister(psy->dev); -dev_create_failed: +kobject_set_name_failed: +device_add_failed: + kfree(dev); success: return rc; } @@ -183,7 +203,6 @@ void power_supply_unregister(struct power_supply *psy) { flush_scheduled_work(); power_supply_remove_triggers(psy); - power_supply_remove_attrs(psy); device_unregister(psy->dev); } EXPORT_SYMBOL_GPL(power_supply_unregister); @@ -196,6 +215,7 @@ static int __init power_supply_class_init(void) return PTR_ERR(power_supply_class); power_supply_class->dev_uevent = power_supply_uevent; + power_supply_init_attrs(&power_supply_dev_type); return 0; } diff --git a/drivers/power/power_supply_sysfs.c 
b/drivers/power/power_supply_sysfs.c index 5b6e352ac7c1..9d30eeb8c810 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -31,9 +31,9 @@ #define POWER_SUPPLY_ATTR(_name) \ { \ - .attr = { .name = #_name, .mode = 0444 }, \ + .attr = { .name = #_name }, \ .show = power_supply_show_property, \ - .store = NULL, \ + .store = power_supply_store_property, \ } static struct device_attribute power_supply_attrs[]; @@ -41,6 +41,9 @@ static struct device_attribute power_supply_attrs[]; static ssize_t power_supply_show_property(struct device *dev, struct device_attribute *attr, char *buf) { + static char *type_text[] = { + "Battery", "UPS", "Mains", "USB" + }; static char *status_text[] = { "Unknown", "Charging", "Discharging", "Not charging", "Full" }; @@ -58,12 +61,15 @@ static ssize_t power_supply_show_property(struct device *dev, static char *capacity_level_text[] = { "Unknown", "Critical", "Low", "Normal", "High", "Full" }; - ssize_t ret; + ssize_t ret = 0; struct power_supply *psy = dev_get_drvdata(dev); const ptrdiff_t off = attr - power_supply_attrs; union power_supply_propval value; - ret = psy->get_property(psy, off, &value); + if (off == POWER_SUPPLY_PROP_TYPE) + value.intval = psy->type; + else + ret = psy->get_property(psy, off, &value); if (ret < 0) { if (ret == -ENODATA) @@ -85,12 +91,37 @@ static ssize_t power_supply_show_property(struct device *dev, return sprintf(buf, "%s\n", technology_text[value.intval]); else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL) return sprintf(buf, "%s\n", capacity_level_text[value.intval]); + else if (off == POWER_SUPPLY_PROP_TYPE) + return sprintf(buf, "%s\n", type_text[value.intval]); else if (off >= POWER_SUPPLY_PROP_MODEL_NAME) return sprintf(buf, "%s\n", value.strval); return sprintf(buf, "%d\n", value.intval); } +static ssize_t power_supply_store_property(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { + ssize_t ret; + struct power_supply *psy = dev_get_drvdata(dev); + const ptrdiff_t off = attr - power_supply_attrs; + union power_supply_propval value; + long long_val; + + /* TODO: support other types than int */ + ret = strict_strtol(buf, 10, &long_val); + if (ret < 0) + return ret; + + value.intval = long_val; + + ret = psy->set_property(psy, off, &value); + if (ret < 0) + return ret; + + return count; +} + /* Must be in the same order as POWER_SUPPLY_PROP_* */ static struct device_attribute power_supply_attrs[] = { /* Properties of type `int' */ @@ -132,67 +163,61 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(time_to_empty_avg), POWER_SUPPLY_ATTR(time_to_full_now), POWER_SUPPLY_ATTR(time_to_full_avg), + POWER_SUPPLY_ATTR(type), /* Properties of type `const char *' */ POWER_SUPPLY_ATTR(model_name), POWER_SUPPLY_ATTR(manufacturer), POWER_SUPPLY_ATTR(serial_number), }; -static ssize_t power_supply_show_static_attrs(struct device *dev, - struct device_attribute *attr, - char *buf) { - static char *type_text[] = { "Battery", "UPS", "Mains", "USB" }; +static struct attribute * +__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1]; + +static mode_t power_supply_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int attrno) +{ + struct device *dev = container_of(kobj, struct device, kobj); struct power_supply *psy = dev_get_drvdata(dev); + mode_t mode = S_IRUSR | S_IRGRP | S_IROTH; + int i; - return sprintf(buf, "%s\n", type_text[psy->type]); -} + if (attrno == POWER_SUPPLY_PROP_TYPE) + return mode; -static struct 
device_attribute power_supply_static_attrs[] = { - __ATTR(type, 0444, power_supply_show_static_attrs, NULL), -}; + for (i = 0; i < psy->num_properties; i++) { + int property = psy->properties[i]; -int power_supply_create_attrs(struct power_supply *psy) -{ - int rc = 0; - int i, j; - - for (i = 0; i < ARRAY_SIZE(power_supply_static_attrs); i++) { - rc = device_create_file(psy->dev, - &power_supply_static_attrs[i]); - if (rc) - goto statics_failed; - } + if (property == attrno) { + if (psy->property_is_writeable && + psy->property_is_writeable(psy, property) > 0) + mode |= S_IWUSR; - for (j = 0; j < psy->num_properties; j++) { - rc = device_create_file(psy->dev, - &power_supply_attrs[psy->properties[j]]); - if (rc) - goto dynamics_failed; + return mode; + } } - goto succeed; - -dynamics_failed: - while (j--) - device_remove_file(psy->dev, - &power_supply_attrs[psy->properties[j]]); -statics_failed: - while (i--) - device_remove_file(psy->dev, &power_supply_static_attrs[i]); -succeed: - return rc; + return 0; } -void power_supply_remove_attrs(struct power_supply *psy) +static struct attribute_group power_supply_attr_group = { + .attrs = __power_supply_attrs, + .is_visible = power_supply_attr_is_visible, +}; + +static const struct attribute_group *power_supply_attr_groups[] = { + &power_supply_attr_group, + NULL, +}; + +void power_supply_init_attrs(struct device_type *dev_type) { int i; - for (i = 0; i < ARRAY_SIZE(power_supply_static_attrs); i++) - device_remove_file(psy->dev, &power_supply_static_attrs[i]); + dev_type->groups = power_supply_attr_groups; - for (i = 0; i < psy->num_properties; i++) - device_remove_file(psy->dev, - &power_supply_attrs[psy->properties[i]]); + for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) + __power_supply_attrs[i] = &power_supply_attrs[i].attr; } static char *kstruprdup(const char *str, gfp_t gfp) @@ -236,36 +261,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) if (!prop_buf) return -ENOMEM; - for (j = 0; j < ARRAY_SIZE(power_supply_static_attrs); j++) { - struct device_attribute *attr; - char *line; - - attr = &power_supply_static_attrs[j]; - - ret = power_supply_show_static_attrs(dev, attr, prop_buf); - if (ret < 0) - goto out; - - line = strchr(prop_buf, '\n'); - if (line) - *line = 0; - - attrname = kstruprdup(attr->attr.name, GFP_KERNEL); - if (!attrname) { - ret = -ENOMEM; - goto out; - } - - dev_dbg(dev, "Static prop %s=%s\n", attrname, prop_buf); - - ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf); - kfree(attrname); - if (ret) - goto out; - } - - dev_dbg(dev, "%zd dynamic props\n", psy->num_properties); - for (j = 0; j < psy->num_properties; j++) { struct device_attribute *attr; char *line; diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c new file mode 100644 index 000000000000..0cd9f67d33e5 --- /dev/null +++ b/drivers/power/test_power.c @@ -0,0 +1,163 @@ +/* + * Power supply driver for testing. + * + * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
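
The sysfs rework above replaces per-device device_create_file() loops with one static attribute group: every possible property file is declared once, and the group's .is_visible callback decides, per device, which files appear and whether they carry S_IWUSR (granted when property_is_writeable() returns positive). The shape of the pattern as a sketch, all "foo" names hypothetical:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* dev_attr_alpha/dev_attr_beta would come from DEVICE_ATTR() macros
     * defined elsewhere; foo_supports() is a made-up capability test. */
    static struct attribute *foo_attrs[] = {
            &dev_attr_alpha.attr,
            &dev_attr_beta.attr,
            NULL,
    };

    static mode_t foo_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
    {
            struct device *dev = container_of(kobj, struct device, kobj);
            struct foo *foo = dev_get_drvdata(dev);

            if (!foo_supports(foo, n))
                    return 0;       /* returning 0 hides the file */
            return S_IRUGO;
    }

    static const struct attribute_group foo_group = {
            .attrs      = foo_attrs,
            .is_visible = foo_attr_is_visible,
    };
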
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/power_supply.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/vermagic.h> + +static int test_power_ac_online = 1; +static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING; + +static int test_power_get_ac_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + val->intval = test_power_ac_online; + break; + default: + return -EINVAL; + } + return 0; +} + +static int test_power_get_battery_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + switch (psp) { + case POWER_SUPPLY_PROP_MODEL_NAME: + val->strval = "Test battery"; + break; + case POWER_SUPPLY_PROP_MANUFACTURER: + val->strval = "Linux"; + break; + case POWER_SUPPLY_PROP_SERIAL_NUMBER: + val->strval = UTS_RELEASE; + break; + case POWER_SUPPLY_PROP_STATUS: + val->intval = test_power_battery_status; + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; + break; + case POWER_SUPPLY_PROP_HEALTH: + val->intval = POWER_SUPPLY_HEALTH_GOOD; + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = POWER_SUPPLY_TECHNOLOGY_LION; + break; + case POWER_SUPPLY_PROP_CAPACITY_LEVEL: + val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = 50; + break; + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: + case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: + val->intval = 3600; + break; + default: + pr_info("%s: some properties deliberately report errors.\n", + __func__); + return -EINVAL; + } + return 0; +} + +static enum power_supply_property test_power_ac_props[] = { + POWER_SUPPLY_PROP_ONLINE, +}; + +static enum power_supply_property test_power_battery_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_EMPTY, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_SERIAL_NUMBER, +}; + +static char *test_power_ac_supplied_to[] = { + "test_battery", +}; + +static struct power_supply test_power_supplies[] = { + { + .name = "test_ac", + .type = POWER_SUPPLY_TYPE_MAINS, + .supplied_to = test_power_ac_supplied_to, + .num_supplicants = ARRAY_SIZE(test_power_ac_supplied_to), + .properties = test_power_ac_props, + .num_properties = ARRAY_SIZE(test_power_ac_props), + .get_property = test_power_get_ac_property, + }, { + .name = "test_battery", + .type = POWER_SUPPLY_TYPE_BATTERY, + .properties = test_power_battery_props, + .num_properties = ARRAY_SIZE(test_power_battery_props), + .get_property = test_power_get_battery_property, + }, +}; + +static int __init test_power_init(void) +{ + int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) { + ret = power_supply_register(NULL, &test_power_supplies[i]); + if (ret) { + pr_err("%s: failed to register %s\n", __func__, + test_power_supplies[i].name); + goto failed; + } + } + + return 0; +failed: + while (--i >= 0) + power_supply_unregister(&test_power_supplies[i]); + return ret; +} +module_init(test_power_init); + +static void __exit test_power_exit(void) +{ + int i; + + /* Let's see how we handle changes... 
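
test_power registers a fake AC supply and battery purely so the sysfs and uevent plumbing can be exercised without hardware. Once the module is loaded the supplies should appear under /sys/class/power_supply/; a self-contained userspace check (the path follows from the .name fields above, and capacity is hard-coded to 50 in the driver):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/power_supply/test_battery/capacity",
                            "r");
            char buf[16];

            if (!f)
                    return 1;       /* module not loaded? */
            if (fgets(buf, sizeof(buf), f))
                    printf("test_battery capacity: %s", buf);
            fclose(f);
            return 0;
    }
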
*/ + test_power_ac_online = 0; + test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING; + for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) + power_supply_changed(&test_power_supplies[i]); + pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n", + __func__); + ssleep(10); + + for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) + power_supply_unregister(&test_power_supplies[i]); +} +module_exit(test_power_exit); + +MODULE_DESCRIPTION("Power supply driver for testing"); +MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c index 2eab35aab311..ee04936b2db5 100644 --- a/drivers/power/tosa_battery.c +++ b/drivers/power/tosa_battery.c @@ -61,7 +61,7 @@ static unsigned long tosa_read_bat(struct tosa_bat *bat) mutex_lock(&bat_lock); gpio_set_value(bat->gpio_bat, 1); msleep(5); - value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data, + value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy.dev->parent), bat->adc_bat); gpio_set_value(bat->gpio_bat, 0); mutex_unlock(&bat_lock); @@ -81,7 +81,7 @@ static unsigned long tosa_read_temp(struct tosa_bat *bat) mutex_lock(&bat_lock); gpio_set_value(bat->gpio_temp, 1); msleep(5); - value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data, + value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy.dev->parent), bat->adc_temp); gpio_set_value(bat->gpio_temp, 0); mutex_unlock(&bat_lock); diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c index 875c4d0f776b..fbcc36dae470 100644 --- a/drivers/power/wm831x_power.c +++ b/drivers/power/wm831x_power.c @@ -537,9 +537,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev) goto err_battery; irq = platform_get_irq_byname(pdev, "SYSLO"); - ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq, - IRQF_TRIGGER_RISING, "SYSLO", - power); + ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq, + IRQF_TRIGGER_RISING, "System power low", + power); if (ret != 0) { dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", irq, ret); @@ -547,9 +547,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev) } irq = platform_get_irq_byname(pdev, "PWR SRC"); - ret = wm831x_request_irq(wm831x, irq, wm831x_pwr_src_irq, - IRQF_TRIGGER_RISING, "Power source", - power); + ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq, + IRQF_TRIGGER_RISING, "Power source", + power); if (ret != 0) { dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n", irq, ret); @@ -558,10 +558,10 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); - ret = wm831x_request_irq(wm831x, irq, wm831x_bat_irq, - IRQF_TRIGGER_RISING, - wm831x_bat_irqs[i], - power); + ret = request_threaded_irq(irq, NULL, wm831x_bat_irq, + IRQF_TRIGGER_RISING, + wm831x_bat_irqs[i], + power); if (ret != 0) { dev_err(&pdev->dev, "Failed to request %s IRQ %d: %d\n", @@ -575,13 +575,13 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev) err_bat_irq: for (; i >= 0; i--) { irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); - wm831x_free_irq(wm831x, irq, power); + free_irq(irq, power); } irq = platform_get_irq_byname(pdev, "PWR SRC"); - wm831x_free_irq(wm831x, irq, power); + free_irq(irq, power); err_syslo: irq = platform_get_irq_byname(pdev, "SYSLO"); - wm831x_free_irq(wm831x, irq, power); + free_irq(irq, power); err_usb: 
power_supply_unregister(usb); err_battery: @@ -596,19 +596,18 @@ err_kmalloc: static __devexit int wm831x_power_remove(struct platform_device *pdev) { struct wm831x_power *wm831x_power = platform_get_drvdata(pdev); - struct wm831x *wm831x = wm831x_power->wm831x; int irq, i; for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); - wm831x_free_irq(wm831x, irq, wm831x_power); + free_irq(irq, wm831x_power); } irq = platform_get_irq_byname(pdev, "PWR SRC"); - wm831x_free_irq(wm831x, irq, wm831x_power); + free_irq(irq, wm831x_power); irq = platform_get_irq_byname(pdev, "SYSLO"); - wm831x_free_irq(wm831x, irq, wm831x_power); + free_irq(irq, wm831x_power); power_supply_unregister(&wm831x_power->battery); power_supply_unregister(&wm831x_power->wall); diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index 94c70650aafc..4e8afce0c818 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -308,6 +308,9 @@ static void __exit wm97xx_bat_exit(void) platform_driver_unregister(&wm97xx_bat_driver); } +/* The interface is deprecated, as well as linux/wm97xx_batt.h */ +void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); + void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) { gpdata = data; diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c new file mode 100644 index 000000000000..9cca465436e3 --- /dev/null +++ b/drivers/power/z2_battery.c @@ -0,0 +1,328 @@ +/* + * Battery measurement code for Zipit Z2 + * + * Copyright (C) 2009 Peter Edwards <sweetlilmre@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/power_supply.h> +#include <linux/i2c.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <asm/irq.h> +#include <asm/mach/irq.h> +#include <linux/z2_battery.h> + +#define Z2_DEFAULT_NAME "Z2" + +struct z2_charger { + struct z2_battery_info *info; + int bat_status; + struct i2c_client *client; + struct power_supply batt_ps; + struct mutex work_lock; + struct work_struct bat_work; +}; + +static unsigned long z2_read_bat(struct z2_charger *charger) +{ + int data; + data = i2c_smbus_read_byte_data(charger->client, + charger->info->batt_I2C_reg); + if (data < 0) + return 0; + + return data * charger->info->batt_mult / charger->info->batt_div; +} + +static int z2_batt_get_property(struct power_supply *batt_ps, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct z2_charger *charger = container_of(batt_ps, struct z2_charger, + batt_ps); + struct z2_battery_info *info = charger->info; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + val->intval = charger->bat_status; + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = info->batt_tech; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + if (info->batt_I2C_reg >= 0) + val->intval = z2_read_bat(charger); + else + return -EINVAL; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + if (info->max_voltage >= 0) + val->intval = info->max_voltage; + else + return -EINVAL; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + if (info->min_voltage >= 0) + val->intval = info->min_voltage; + else + return -EINVAL; + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = 1; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void z2_batt_ext_power_changed(struct power_supply *batt_ps) +{ + struct z2_charger *charger = container_of(batt_ps, struct z2_charger, + batt_ps); + schedule_work(&charger->bat_work); +} + +static void z2_batt_update(struct z2_charger *charger) +{ + int old_status = charger->bat_status; + struct z2_battery_info *info; + + info = charger->info; + + mutex_lock(&charger->work_lock); + + charger->bat_status = (info->charge_gpio >= 0) ? + (gpio_get_value(info->charge_gpio) ? 
+ POWER_SUPPLY_STATUS_CHARGING : + POWER_SUPPLY_STATUS_DISCHARGING) : + POWER_SUPPLY_STATUS_UNKNOWN; + + if (old_status != charger->bat_status) { + pr_debug("%s: %i -> %i\n", charger->batt_ps.name, old_status, + charger->bat_status); + power_supply_changed(&charger->batt_ps); + } + + mutex_unlock(&charger->work_lock); +} + +static void z2_batt_work(struct work_struct *work) +{ + struct z2_charger *charger; + charger = container_of(work, struct z2_charger, bat_work); + z2_batt_update(charger); +} + +static irqreturn_t z2_charge_switch_irq(int irq, void *devid) +{ + struct z2_charger *charger = devid; + schedule_work(&charger->bat_work); + return IRQ_HANDLED; +} + +static int z2_batt_ps_init(struct z2_charger *charger, int props) +{ + int i = 0; + enum power_supply_property *prop; + struct z2_battery_info *info = charger->info; + + if (info->batt_tech >= 0) + props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */ + if (info->batt_I2C_reg >= 0) + props++; /* POWER_SUPPLY_PROP_VOLTAGE_NOW */ + if (info->max_voltage >= 0) + props++; /* POWER_SUPPLY_PROP_VOLTAGE_MAX */ + if (info->min_voltage >= 0) + props++; /* POWER_SUPPLY_PROP_VOLTAGE_MIN */ + + prop = kzalloc(props * sizeof(*prop), GFP_KERNEL); + if (!prop) + return -ENOMEM; + + prop[i++] = POWER_SUPPLY_PROP_PRESENT; + if (info->charge_gpio >= 0) + prop[i++] = POWER_SUPPLY_PROP_STATUS; + if (info->batt_tech >= 0) + prop[i++] = POWER_SUPPLY_PROP_TECHNOLOGY; + if (info->batt_I2C_reg >= 0) + prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_NOW; + if (info->max_voltage >= 0) + prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MAX; + if (info->min_voltage >= 0) + prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MIN; + + if (!info->batt_name) { + dev_info(&charger->client->dev, + "Please consider setting proper battery " + "name in platform definition file, falling " + "back to name \"" Z2_DEFAULT_NAME "\"\n"); + charger->batt_ps.name = Z2_DEFAULT_NAME; + } else + charger->batt_ps.name = info->batt_name; + + charger->batt_ps.properties = prop; + charger->batt_ps.num_properties = props; + charger->batt_ps.type = POWER_SUPPLY_TYPE_BATTERY; + charger->batt_ps.get_property = z2_batt_get_property; + charger->batt_ps.external_power_changed = z2_batt_ext_power_changed; + charger->batt_ps.use_for_apm = 1; + + return 0; +} + +static int __devinit z2_batt_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret = 0; + int props = 1; /* POWER_SUPPLY_PROP_PRESENT */ + struct z2_charger *charger; + struct z2_battery_info *info = client->dev.platform_data; + + if (info == NULL) { + dev_err(&client->dev, + "Please set platform device platform_data" + " to a valid z2_battery_info pointer!\n"); + return -EINVAL; + } + + charger = kzalloc(sizeof(*charger), GFP_KERNEL); + if (charger == NULL) + return -ENOMEM; + + charger->bat_status = POWER_SUPPLY_STATUS_UNKNOWN; + charger->info = info; + charger->client = client; + i2c_set_clientdata(client, charger); + + mutex_init(&charger->work_lock); + + if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) { + ret = gpio_request(info->charge_gpio, "BATT CHRG"); + if (ret) + goto err; + + ret = gpio_direction_input(info->charge_gpio); + if (ret) + goto err2; + + set_irq_type(gpio_to_irq(info->charge_gpio), + IRQ_TYPE_EDGE_BOTH); + ret = request_irq(gpio_to_irq(info->charge_gpio), + z2_charge_switch_irq, IRQF_DISABLED, + "AC Detect", charger); + if (ret) + goto err2; /* IRQ was not acquired, free only the GPIO */ + } + + ret = z2_batt_ps_init(charger, props); + if (ret) + goto err3; + + INIT_WORK(&charger->bat_work, z2_batt_work); + + ret = power_supply_register(&client->dev,
&charger->batt_ps); + if (ret) + goto err4; + + schedule_work(&charger->bat_work); + + return 0; + +err4: + kfree(charger->batt_ps.properties); +err3: + if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) + free_irq(gpio_to_irq(info->charge_gpio), charger); +err2: + if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) + gpio_free(info->charge_gpio); +err: + kfree(charger); + return ret; +} + +static int __devexit z2_batt_remove(struct i2c_client *client) +{ + struct z2_charger *charger = i2c_get_clientdata(client); + struct z2_battery_info *info = charger->info; + + flush_scheduled_work(); + power_supply_unregister(&charger->batt_ps); + + kfree(charger->batt_ps.properties); + if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) { + free_irq(gpio_to_irq(info->charge_gpio), charger); + gpio_free(info->charge_gpio); + } + + kfree(charger); + + return 0; +} + +#ifdef CONFIG_PM +static int z2_batt_suspend(struct i2c_client *client, pm_message_t state) +{ + flush_scheduled_work(); + return 0; +} + +static int z2_batt_resume(struct i2c_client *client) +{ + struct z2_charger *charger = i2c_get_clientdata(client); + + schedule_work(&charger->bat_work); + return 0; +} +#else +#define z2_batt_suspend NULL +#define z2_batt_resume NULL +#endif + +static const struct i2c_device_id z2_batt_id[] = { + { "aer915", 0 }, + { } +}; + +static struct i2c_driver z2_batt_driver = { + .driver = { + .name = "z2-battery", + .owner = THIS_MODULE, + }, + .probe = z2_batt_probe, + .remove = z2_batt_remove, + .suspend = z2_batt_suspend, + .resume = z2_batt_resume, + .id_table = z2_batt_id, +}; + +static int __init z2_batt_init(void) +{ + return i2c_add_driver(&z2_batt_driver); +} + +static void __exit z2_batt_exit(void) +{ + i2c_del_driver(&z2_batt_driver); +} + +module_init(z2_batt_init); +module_exit(z2_batt_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Peter Edwards <sweetlilmre@gmail.com>"); +MODULE_DESCRIPTION("Zipit Z2 battery driver"); diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index c32822ad84a4..070211a5955c 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -8,3 +8,27 @@ config RAPIDIO_DISC_TIMEOUT ---help--- Amount of time a discovery node waits for a host to complete enumeration before giving up. + +config RAPIDIO_ENABLE_RX_TX_PORTS + bool "Enable RapidIO Input/Output Ports" + depends on RAPIDIO + ---help--- + The RapidIO specification describes an Output port transmit + enable and an Input port receive enable. The recommended state + for both Input and Output ports is disabled. When + this switch is set, the RapidIO subsystem will enable all + ports for Input/Output direction to allow traffic other + than Maintenance transfers. + +source "drivers/rapidio/switches/Kconfig" + +config RAPIDIO_DEBUG + bool "RapidIO subsystem debug messages" + depends on RAPIDIO + help + Say Y here if you want the RapidIO subsystem to produce a bunch of + debug messages to the system log. Select this if you are having a + problem with the RapidIO subsystem and want to see more of what is + going on. + + If you are unsure about this, say N here.
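For context, z2_batt_probe() above refuses to bind unless the I2C client carries platform data pointing at a valid struct z2_battery_info. A minimal board-file sketch of supplying that data is shown below; it is an illustration only — the register offset, scaling factors, GPIO number, and I2C address are hypothetical, and the field names follow the accessors used by the driver:

static struct z2_battery_info z2_batt_info = {
	.batt_I2C_reg	= 0x02,		/* hypothetical fuel-gauge register */
	.batt_mult	= 1000,		/* reading * batt_mult / batt_div */
	.batt_div	= 93,
	.batt_tech	= POWER_SUPPLY_TECHNOLOGY_LION,
	.charge_gpio	= 0,		/* hypothetical "charging" GPIO */
	.min_voltage	= 3300000,
	.max_voltage	= 4200000,
	.batt_name	= "Z2",
};

static struct i2c_board_info z2_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("aer915", 0x55),	/* address is hypothetical */
		.platform_data	= &z2_batt_info,
	},
};

/* Called from the machine init code, before the I2C bus is probed: */
i2c_register_board_info(0, z2_i2c_devices, ARRAY_SIZE(z2_i2c_devices));

The "aer915" string must match the driver's i2c_device_id table above so that the I2C core binds the two together.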
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile index 7c0e1818de51..b6139fe187bf 100644 --- a/drivers/rapidio/Makefile +++ b/drivers/rapidio/Makefile @@ -4,3 +4,7 @@ obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o obj-$(CONFIG_RAPIDIO) += switches/ + +ifeq ($(CONFIG_RAPIDIO_DEBUG),y) +EXTRA_CFLAGS += -DDEBUG +endif diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 45415096c294..8070e074c739 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -4,6 +4,14 @@ * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * + * Copyright 2009 Integrated Device Technology, Inc. + * Alex Bounine <alexandre.bounine@idt.com> + * - Added Port-Write/Error Management initialization and handling + * + * Copyright 2009 Sysgo AG + * Thomas Moll <thomas.moll@sysgo.com> + * - Added Input- Output- enable functionality, to allow full communication + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your @@ -31,15 +39,16 @@ LIST_HEAD(rio_devices); static LIST_HEAD(rio_switches); -#define RIO_ENUM_CMPL_MAGIC 0xdeadbeef - static void rio_enum_timeout(unsigned long); +static void rio_init_em(struct rio_dev *rdev); + DEFINE_SPINLOCK(rio_global_list_lock); static int next_destid = 0; static int next_switchid = 0; static int next_net = 0; +static int next_comptag; static struct timer_list rio_enum_timer = TIMER_INITIALIZER(rio_enum_timeout, 0, 0); @@ -52,12 +61,6 @@ static int rio_mport_phys_table[] = { -1, }; -static int rio_sport_phys_table[] = { - RIO_EFB_PAR_EP_FREE_ID, - RIO_EFB_SER_EP_FREE_ID, - -1, -}; - /** * rio_get_device_id - Get the base/extended device id for a device * @port: RIO master port @@ -118,12 +121,26 @@ static int rio_clear_locks(struct rio_mport *port) u32 result; int ret = 0; - /* Write component tag CSR magic complete value */ - rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, - RIO_ENUM_CMPL_MAGIC); - list_for_each_entry(rdev, &rio_devices, global_list) - rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, - RIO_ENUM_CMPL_MAGIC); + /* Assign component tag to all devices */ + next_comptag = 1; + rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++); + + list_for_each_entry(rdev, &rio_devices, global_list) { + /* Mark device as discovered */ + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR, + &result); + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR, + result | RIO_PORT_GEN_DISCOVERED); + + rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag); + rdev->comp_tag = next_comptag++; + if (next_comptag >= 0x10000) { + pr_err("RIO: Component Tag Counter Overflow\n"); + break; + } + } /* Release host device id locks */ rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, @@ -229,27 +246,37 @@ static int rio_is_switch(struct rio_dev *rdev) } /** - * rio_route_set_ops- Sets routing operations for a particular vendor switch + * rio_switch_init - Sets switch operations for a particular vendor switch * @rdev: RIO device + * @do_enum: Enumeration/Discovery mode flag * - * Searches the RIO route ops table for known switch types. If the vid - * and did match a switch table entry, then set the add_entry() and - * get_entry() ops to the table entry values. + * Searches the RIO switch ops table for known switch types. 
If the vid + and did match a switch table entry, then call the switch initialization + routine to set up switch-specific operations. */ -static void rio_route_set_ops(struct rio_dev *rdev) +static void rio_switch_init(struct rio_dev *rdev, int do_enum) { - struct rio_route_ops *cur = __start_rio_route_ops; - struct rio_route_ops *end = __end_rio_route_ops; + struct rio_switch_ops *cur = __start_rio_switch_ops; + struct rio_switch_ops *end = __end_rio_switch_ops; while (cur < end) { if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { - pr_debug("RIO: adding routing ops for %s\n", rio_name(rdev)); - rdev->rswitch->add_entry = cur->add_hook; - rdev->rswitch->get_entry = cur->get_hook; + pr_debug("RIO: calling init routine for %s\n", + rio_name(rdev)); + cur->init_hook(rdev, do_enum); + break; } cur++; } + if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { + pr_debug("RIO: adding STD routing ops for %s\n", + rio_name(rdev)); + rdev->rswitch->add_entry = rio_std_route_add_entry; + rdev->rswitch->get_entry = rio_std_route_get_entry; + rdev->rswitch->clr_table = rio_std_route_clr_table; + } + if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) printk(KERN_ERR "RIO: missing routing ops for %s\n", rio_name(rdev)); @@ -281,6 +308,65 @@ static int __devinit rio_add_device(struct rio_dev *rdev) } /** + * rio_enable_rx_tx_port - enable input receiver and output transmitter of + * given port + * @port: Master port associated with the RIO network + * @local: local=1 selects the local port, otherwise a far device is reached + * @destid: Destination ID of the device to check host bit + * @hopcount: Number of hops to reach the target + * @port_num: Port number (on a switch) to enable on a far-end device + * + * Returns %0 on success, or %-EIO if the General Control Command and Status + * Register (EXT_PTR+0x3C) cannot be accessed + */ +inline int rio_enable_rx_tx_port(struct rio_mport *port, + int local, u16 destid, + u8 hopcount, u8 port_num) { +#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS + u32 regval; + u32 ext_ftr_ptr; + + /* + * enable rx input tx output port + */ + pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " + "%d, port_num = %d)\n", local, destid, hopcount, port_num); + + ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); + + if (local) { + rio_local_read_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), + &regval); + } else { + if (rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0) + return -EIO; + } + + if (regval & RIO_PORT_N_CTL_P_TYP_SER) { + /* serial */ + regval = regval | RIO_PORT_N_CTL_EN_RX_SER + | RIO_PORT_N_CTL_EN_TX_SER; + } else { + /* parallel */ + regval = regval | RIO_PORT_N_CTL_EN_RX_PAR + | RIO_PORT_N_CTL_EN_TX_PAR; + } + + if (local) { + rio_local_write_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), regval); + } else { + if (rio_mport_write_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) + return -EIO; + } +#endif + return 0; +} + +/** * rio_setup_device- Allocates and sets up a RIO device * @net: RIO network * @port: Master port to send transactions @@ -325,8 +411,14 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, rdev->asm_rev = result >> 16; rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, &rdev->pef); - if (rdev->pef & RIO_PEF_EXT_FEATURES) + if (rdev->pef & RIO_PEF_EXT_FEATURES) { rdev->efptr = result & 0xffff; + rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid, + hopcount); + + rdev->em_efptr =
rio_mport_get_feature(port, 0, destid, + hopcount, RIO_EFB_ERR_MGMNT); + } rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, &rdev->src_ops); @@ -349,12 +441,13 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, if (rio_is_switch(rdev)) { rio_mport_read_config_32(port, destid, hopcount, RIO_SWP_INFO_CAR, &rdev->swpinfo); - rswitch = kmalloc(sizeof(struct rio_switch), GFP_KERNEL); + rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL); if (!rswitch) goto cleanup; rswitch->switchid = next_switchid; rswitch->hopcount = hopcount; rswitch->destid = destid; + rswitch->port_ok = 0; rswitch->route_table = kzalloc(sizeof(u8)* RIO_MAX_ROUTE_ENTRIES(port->sys_size), GFP_KERNEL); @@ -367,13 +460,22 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, rdev->rswitch = rswitch; dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, rdev->rswitch->switchid); - rio_route_set_ops(rdev); + rio_switch_init(rdev, do_enum); + + if (do_enum && rdev->rswitch->clr_table) + rdev->rswitch->clr_table(port, destid, hopcount, + RIO_GLOBAL_TABLE); list_add_tail(&rswitch->node, &rio_switches); - } else + } else { + if (do_enum) + /* Enable Input Output Port (transmitter receiver) */ + rio_enable_rx_tx_port(port, 0, destid, hopcount, 0); + dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, rdev->destid); + } rdev->dev.bus = &rio_bus_type; @@ -414,23 +516,29 @@ cleanup: * * Reads the port error status CSR for a particular switch port to * determine if the port has an active link. Returns - * %PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is + * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is * inactive. */ static int rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) { - u32 result; + u32 result = 0; u32 ext_ftr_ptr; - int *entry = rio_sport_phys_table; - - do { - if ((ext_ftr_ptr = - rio_mport_get_feature(port, 0, destid, hopcount, *entry))) + ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0); + while (ext_ftr_ptr) { + rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr, &result); + result = RIO_GET_BLOCK_ID(result); + if ((result == RIO_EFB_SER_EP_FREE_ID) || + (result == RIO_EFB_SER_EP_FREE_ID_V13P) || + (result == RIO_EFB_SER_EP_FREC_ID)) break; - } while (*++entry >= 0); + + ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, + ext_ftr_ptr); + } if (ext_ftr_ptr) rio_mport_read_config_32(port, destid, hopcount, @@ -438,7 +546,81 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) RIO_PORT_N_ERR_STS_CSR(sport), &result); - return (result & PORT_N_ERR_STS_PORT_OK); + return result & RIO_PORT_N_ERR_STS_PORT_OK; +} + +/** + * rio_lock_device - Acquires host device lock for specified device + * @port: Master port to send transaction + * @destid: Destination ID for device/switch + * @hopcount: Hopcount to reach switch + * @wait_ms: Max wait time in msec (0 = no timeout) + * + * Attempts to acquire the host device lock for the specified device. + * Returns %0 if the device lock was acquired or %-EINVAL if the timeout expires.
+ */ +static int +rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms) +{ + u32 result; + int tcnt = 0; + + /* Attempt to acquire device lock */ + rio_mport_write_config_32(port, destid, hopcount, + RIO_HOST_DID_LOCK_CSR, port->host_deviceid); + rio_mport_read_config_32(port, destid, hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + + while (result != port->host_deviceid) { + if (wait_ms != 0 && tcnt == wait_ms) { + pr_debug("RIO: timeout when locking device %x:%x\n", + destid, hopcount); + return -EINVAL; + } + + /* Delay a bit */ + mdelay(1); + tcnt++; + /* Try to acquire device lock again */ + rio_mport_write_config_32(port, destid, + hopcount, + RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + rio_mport_read_config_32(port, destid, + hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + } + + return 0; +} + +/** + * rio_unlock_device - Releases host device lock for specified device + * @port: Master port to send transaction + * @destid: Destination ID for device/switch + * @hopcount: Hopcount to reach switch + * + * Returns %0 if the device lock was released or %-EINVAL on failure. + */ +static int +rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) +{ + u32 result; + + /* Release device lock */ + rio_mport_write_config_32(port, destid, + hopcount, + RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + rio_mport_read_config_32(port, destid, hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + if ((result & 0xffff) != 0xffff) { + pr_debug("RIO: badness when releasing device lock %x:%x\n", + destid, hopcount); + return -EINVAL; + } + + return 0; } /** @@ -448,6 +630,7 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) * @table: Routing table ID * @route_destid: Destination ID to be routed * @route_port: Port number to be routed + * @lock: lock switch device flag * * Calls the switch specific add_entry() method to add a route entry * on a switch. The route table can be specified using the @table @@ -456,12 +639,26 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL * on failure. */ -static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, - u16 table, u16 route_destid, u8 route_port) +static int +rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, + u16 table, u16 route_destid, u8 route_port, int lock) { - return rswitch->add_entry(mport, rswitch->destid, + int rc; + + if (lock) { + rc = rio_lock_device(mport, rswitch->destid, + rswitch->hopcount, 1000); + if (rc) + return rc; + } + + rc = rswitch->add_entry(mport, rswitch->destid, rswitch->hopcount, table, route_destid, route_port); + if (lock) + rio_unlock_device(mport, rswitch->destid, rswitch->hopcount); + + return rc; } /** @@ -471,6 +668,7 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit * @table: Routing table ID * @route_destid: Destination ID to be routed * @route_port: Pointer to read port number into + * @lock: lock switch device flag * * Calls the switch specific get_entry() method to read a route entry * in a switch.
The route table can be specified using the @table @@ -481,11 +679,24 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit */ static int rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table, - u16 route_destid, u8 * route_port) + u16 route_destid, u8 *route_port, int lock) { - return rswitch->get_entry(mport, rswitch->destid, + int rc; + + if (lock) { + rc = rio_lock_device(mport, rswitch->destid, + rswitch->hopcount, 1000); + if (rc) + return rc; + } + + rc = rswitch->get_entry(mport, rswitch->destid, rswitch->hopcount, table, route_destid, route_port); + if (lock) + rio_unlock_device(mport, rswitch->destid, rswitch->hopcount); + + return rc; } /** @@ -625,14 +836,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, sw_inport = rio_get_swpinfo_inport(port, RIO_ANY_DESTID(port->sys_size), hopcount); rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, - port->host_deviceid, sw_inport); + port->host_deviceid, sw_inport, 0); rdev->rswitch->route_table[port->host_deviceid] = sw_inport; for (destid = 0; destid < next_destid; destid++) { if (destid == port->host_deviceid) continue; rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, - destid, sw_inport); + destid, sw_inport, 0); rdev->rswitch->route_table[destid] = sw_inport; } @@ -644,8 +855,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, rio_name(rdev), rdev->vid, rdev->did, num_ports); sw_destid = next_destid; for (port_num = 0; port_num < num_ports; port_num++) { - if (sw_inport == port_num) + /* Enable Input Output Port (transmitter receiver) */ + rio_enable_rx_tx_port(port, 0, + RIO_ANY_DESTID(port->sys_size), + hopcount, port_num); + + if (sw_inport == port_num) { + rdev->rswitch->port_ok |= (1 << port_num); continue; + } cur_destid = next_destid; @@ -655,10 +873,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, pr_debug( "RIO: scanning device on port %d\n", port_num); + rdev->rswitch->port_ok |= (1 << port_num); rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, RIO_ANY_DESTID(port->sys_size), - port_num); + port_num, 0); if (rio_enum_peer(net, port, hopcount + 1) < 0) return -1; @@ -672,15 +891,35 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, destid, - port_num); + port_num, + 0); rdev->rswitch-> route_table[destid] = port_num; } } + } else { + /* If switch supports Error Management, + * set PORT_LOCKOUT bit for unused port + */ + if (rdev->em_efptr) + rio_set_port_lockout(rdev, port_num, 1); + + rdev->rswitch->port_ok &= ~(1 << port_num); } } + /* Direct Port-write messages to the enumerating host */ + if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) && + (rdev->em_efptr)) { + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PW_TGT_DEVID, + (port->host_deviceid << 16) | + (port->sys_size << 15)); + } + + rio_init_em(rdev); + /* Check for empty switch */ if (next_destid == sw_destid) { next_destid++; @@ -700,21 +939,16 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, * rio_enum_complete- Tests if enumeration of a network is complete * @port: Master port to send transaction * - * Tests the Component Tag CSR for presence of the magic enumeration - * complete flag. Return %1 if enumeration is complete or %0 if + * Tests the Component Tag CSR for non-zero value (enumeration + * complete flag).
Return %1 if enumeration is complete or %0 if * enumeration is incomplete. */ static int rio_enum_complete(struct rio_mport *port) { u32 tag_csr; - int ret = 0; rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr); - - if (tag_csr == RIO_ENUM_CMPL_MAGIC) - ret = 1; - - return ret; + return (tag_csr & 0xffff) ? 1 : 0; } /** @@ -763,17 +997,21 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, pr_debug( "RIO: scanning device on port %d\n", port_num); + + rio_lock_device(port, destid, hopcount, 1000); + for (ndestid = 0; ndestid < RIO_ANY_DESTID(port->sys_size); ndestid++) { rio_route_get_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, ndestid, - &route_port); + &route_port, 0); if (route_port == port_num) break; } + rio_unlock_device(port, destid, hopcount); if (rio_disc_peer (net, port, ndestid, hopcount + 1) < 0) return -1; @@ -792,7 +1030,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, * * Reads the port error status CSR for the master port to * determine if the port has an active link. Returns - * %PORT_N_ERR_STS_PORT_OK if the master port is active + * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active * or %0 if it is inactive. */ static int rio_mport_is_active(struct rio_mport *port) @@ -813,7 +1051,7 @@ static int rio_mport_is_active(struct rio_mport *port) RIO_PORT_N_ERR_STS_CSR(port->index), &result); - return (result & PORT_N_ERR_STS_PORT_OK); + return result & RIO_PORT_N_ERR_STS_PORT_OK; } /** @@ -866,12 +1104,17 @@ static void rio_update_route_tables(struct rio_mport *port) continue; if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { + /* Skip if destid ends in empty switch */ + if (rswitch->destid == destid) + continue; sport = rio_get_swpinfo_inport(port, rswitch->destid, rswitch->hopcount); if (rswitch->add_entry) { - rio_route_add_entry(port, rswitch, RIO_GLOBAL_TABLE, destid, sport); + rio_route_add_entry(port, rswitch, + RIO_GLOBAL_TABLE, destid, + sport, 0); rswitch->route_table[destid] = sport; } } @@ -880,6 +1123,32 @@ static void rio_update_route_tables(struct rio_mport *port) } /** + * rio_init_em - Initializes RIO Error Management (for switches) + * @rdev: RIO device + * + * For each enumerated switch, call device-specific error management + * initialization routine (if supplied by the switch driver). + */ +static void rio_init_em(struct rio_dev *rdev) +{ + if (rio_is_switch(rdev) && (rdev->em_efptr) && + (rdev->rswitch->em_init)) { + rdev->rswitch->em_init(rdev); + } +} + +/** + * rio_pw_enable - Enables/disables port-write handling by a master port + * @port: Master port associated with port-write handling + * @enable: 1=enable, 0=disable + */ +static void rio_pw_enable(struct rio_mport *port, int enable) +{ + if (port->ops->pwenable) + port->ops->pwenable(port, enable); +} + +/** * rio_enum_mport- Start enumeration through a master port * @mport: Master port to send transactions * @@ -911,6 +1180,10 @@ int __devinit rio_enum_mport(struct rio_mport *mport) rc = -ENOMEM; goto out; } + + /* Enable Input Output Port (transmitter receiver) */ + rio_enable_rx_tx_port(mport, 1, 0, 0, 0); + if (rio_enum_peer(net, mport, 0) < 0) { /* A higher priority host won enumeration, bail.
*/ printk(KERN_INFO @@ -922,6 +1195,7 @@ int __devinit rio_enum_mport(struct rio_mport *mport) } rio_update_route_tables(mport); rio_clear_locks(mport); + rio_pw_enable(mport, 1); } else { printk(KERN_INFO "RIO: master port %d link inactive\n", mport->id); @@ -945,15 +1219,22 @@ static void rio_build_route_tables(void) u8 sport; list_for_each_entry(rdev, &rio_devices, global_list) - if (rio_is_switch(rdev)) - for (i = 0; - i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); - i++) { - if (rio_route_get_entry - (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE, - i, &sport) < 0) - continue; - rdev->rswitch->route_table[i] = sport; + if (rio_is_switch(rdev)) { + rio_lock_device(rdev->net->hport, rdev->rswitch->destid, + rdev->rswitch->hopcount, 1000); + for (i = 0; + i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); + i++) { + if (rio_route_get_entry + (rdev->net->hport, rdev->rswitch, + RIO_GLOBAL_TABLE, i, &sport, 0) < 0) + continue; + rdev->rswitch->route_table[i] = sport; + } + + rio_unlock_device(rdev->net->hport, + rdev->rswitch->destid, + rdev->rswitch->hopcount); } } @@ -1012,6 +1293,13 @@ int __devinit rio_disc_mport(struct rio_mport *mport) del_timer_sync(&rio_enum_timer); pr_debug("done\n"); + + /* Read DestID assigned by enumerator */ + rio_local_read_config_32(mport, RIO_DID_CSR, + &mport->host_deviceid); + mport->host_deviceid = RIO_GET_DID(mport->sys_size, + mport->host_deviceid); + if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), 0) < 0) { printk(KERN_INFO diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 6395c780008b..08fa453af974 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -5,6 +5,10 @@ * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * + * Copyright 2009 Integrated Device Technology, Inc. + * Alex Bounine <alexandre.bounine@idt.com> + * - Added Port-Write/Error Management initialization and handling + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your @@ -333,6 +337,331 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res) } /** + * rio_request_inb_pwrite - request inbound port-write message service + * @rdev: RIO device to which register inbound port-write callback routine + * @pwcback: Callback routine to execute when port-write is received + * + * Binds a port-write callback function to the RapidIO device. + * Returns 0 if the request has been satisfied. + */ +int rio_request_inb_pwrite(struct rio_dev *rdev, + int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step)) +{ + int rc = 0; + + spin_lock(&rio_global_list_lock); + if (rdev->pwcback != NULL) + rc = -ENOMEM; + else + rdev->pwcback = pwcback; + + spin_unlock(&rio_global_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(rio_request_inb_pwrite); + +/** + * rio_release_inb_pwrite - release inbound port-write message service + * @rdev: RIO device which registered for inbound port-write callback + * + * Removes callback from the rio_dev structure. Returns 0 if the request + * has been satisfied. 
+ */ +int rio_release_inb_pwrite(struct rio_dev *rdev) +{ + int rc = -ENOMEM; + + spin_lock(&rio_global_list_lock); + if (rdev->pwcback) { + rdev->pwcback = NULL; + rc = 0; + } + + spin_unlock(&rio_global_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); + +/** + * rio_mport_get_physefb - Helper function that returns register offset + * for Physical Layer Extended Features Block. + * @port: Master port to issue transaction + * @local: Indicate a local master port or remote device access + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + */ +u32 +rio_mport_get_physefb(struct rio_mport *port, int local, + u16 destid, u8 hopcount) +{ + u32 ext_ftr_ptr; + u32 ftr_header; + + ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0); + + while (ext_ftr_ptr) { + if (local) + rio_local_read_config_32(port, ext_ftr_ptr, + &ftr_header); + else + rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr, &ftr_header); + + ftr_header = RIO_GET_BLOCK_ID(ftr_header); + switch (ftr_header) { + + case RIO_EFB_SER_EP_ID_V13P: + case RIO_EFB_SER_EP_REC_ID_V13P: + case RIO_EFB_SER_EP_FREE_ID_V13P: + case RIO_EFB_SER_EP_ID: + case RIO_EFB_SER_EP_REC_ID: + case RIO_EFB_SER_EP_FREE_ID: + case RIO_EFB_SER_EP_FREC_ID: + + return ext_ftr_ptr; + + default: + break; + } + + ext_ftr_ptr = rio_mport_get_efb(port, local, destid, + hopcount, ext_ftr_ptr); + } + + return ext_ftr_ptr; +} + +/** + * rio_get_comptag - Begin or continue searching for a RIO device by component tag + * @comp_tag: RIO component tag to match + * @from: Previous RIO device found in search, or %NULL for new search + * + * Iterates through the list of known RIO devices. If a RIO device is + * found with a matching @comp_tag, a pointer to its device + * structure is returned. Otherwise, %NULL is returned. A new search + * is initiated by passing %NULL to the @from argument. Otherwise, if + * @from is not %NULL, searches continue from next device on the global + * list. + */ +static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from) +{ + struct list_head *n; + struct rio_dev *rdev; + + spin_lock(&rio_global_list_lock); + n = from ? from->global_list.next : rio_devices.next; + + while (n && (n != &rio_devices)) { + rdev = rio_dev_g(n); + if (rdev->comp_tag == comp_tag) + goto exit; + n = n->next; + } + rdev = NULL; +exit: + spin_unlock(&rio_global_list_lock); + return rdev; +} + +/** + * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. + * @rdev: Pointer to RIO device control structure + * @pnum: Switch port number to set LOCKOUT bit + * @lock: Operation : set (=1) or clear (=0) + */ +int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) +{ + u8 hopcount = 0xff; + u16 destid = rdev->destid; + u32 regval; + + if (rdev->rswitch) { + destid = rdev->rswitch->destid; + hopcount = rdev->rswitch->hopcount; + } + + rio_mport_read_config_32(rdev->net->hport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), + &regval); + if (lock) + regval |= RIO_PORT_N_CTL_LOCKOUT; + else + regval &= ~RIO_PORT_N_CTL_LOCKOUT; + + rio_mport_write_config_32(rdev->net->hport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), + regval); + return 0; +} + +/** + * rio_inb_pwrite_handler - process inbound port-write message + * @pw_msg: pointer to inbound port-write message + * + * Processes an inbound port-write message. Returns 0 if the request + * has been satisfied.
+ */ +int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg) +{ + struct rio_dev *rdev; + struct rio_mport *mport; + u8 hopcount; + u16 destid; + u32 err_status; + int rc, portnum; + + rdev = rio_get_comptag(pw_msg->em.comptag, NULL); + if (rdev == NULL) { + /* Something bad here (probably enumeration error) */ + pr_err("RIO: %s No matching device for CTag 0x%08x\n", + __func__, pw_msg->em.comptag); + return -EIO; + } + + pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev)); + +#ifdef DEBUG_PW + { + u32 i; + for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) { + pr_debug("0x%02x: %08x %08x %08x %08x", + i*4, pw_msg->raw[i], pw_msg->raw[i + 1], + pw_msg->raw[i + 2], pw_msg->raw[i + 3]); + i += 4; + } + pr_debug("\n"); + } +#endif + + /* Call an external service function (if such is registered + * for this device). This may be the service for endpoints that send + * device-specific port-write messages. End-point messages are expected + * to be handled completely by an EP-specific device driver. + * For switches, rc==0 signals that no standard processing is required. + */ + if (rdev->pwcback != NULL) { + rc = rdev->pwcback(rdev, pw_msg, 0); + if (rc == 0) + return 0; + } + + /* For End-point devices processing stops here */ + if (!(rdev->pef & RIO_PEF_SWITCH)) + return 0; + + if (rdev->phys_efptr == 0) { + pr_err("RIO_PW: Bad switch initialization for %s\n", + rio_name(rdev)); + return 0; + } + + mport = rdev->net->hport; + destid = rdev->rswitch->destid; + hopcount = rdev->rswitch->hopcount; + + /* + * Process the port-write notification from switch + */ + + portnum = pw_msg->em.is_port & 0xFF; + + if (rdev->rswitch->em_handle) + rdev->rswitch->em_handle(rdev, portnum); + + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + &err_status); + pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); + + if (pw_msg->em.errdetect) { + pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n", + portnum, pw_msg->em.errdetect); + /* Clear EM Port N Error Detect CSR */ + rio_mport_write_config_32(mport, destid, hopcount, + rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0); + } + + if (pw_msg->em.ltlerrdet) { + pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n", + pw_msg->em.ltlerrdet); + /* Clear EM L/T Layer Error Detect CSR */ + rio_mport_write_config_32(mport, destid, hopcount, + rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0); + } + + /* Clear Port Errors */ + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + err_status & RIO_PORT_N_ERR_STS_CLR_MASK); + + if (rdev->rswitch->port_ok & (1 << portnum)) { + if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) { + rdev->rswitch->port_ok &= ~(1 << portnum); + rio_set_port_lockout(rdev, portnum, 1); + + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + + RIO_PORT_N_ACK_STS_CSR(portnum), + RIO_PORT_N_ACK_CLEAR); + + /* Schedule Extraction Service */ + pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n", + rio_name(rdev), portnum); + } + } else { + if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) { + rdev->rswitch->port_ok |= (1 << portnum); + rio_set_port_lockout(rdev, portnum, 0); + + /* Schedule Insertion Service */ + pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n", + rio_name(rdev), portnum); + } + } + + /* Clear Port-Write Pending bit */ + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + RIO_PORT_N_ERR_STS_PW_PEND); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler); +
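As a usage sketch of the port-write interface above: an endpoint driver binds a callback with rio_request_inb_pwrite() and returns 0 once a message is fully consumed, which skips the standard switch processing. The handler shown here is hypothetical, but its signature and the message fields match the code just defined:

static int my_ep_pw_handler(struct rio_dev *rdev, union rio_pw_msg *msg,
			    int step)
{
	/* Device-specific inspection of the port-write payload */
	pr_debug("PW from %s: comptag 0x%08x errdetect 0x%08x\n",
		 rio_name(rdev), msg->em.comptag, msg->em.errdetect);
	return 0;	/* fully handled, no standard processing needed */
}

/* During the endpoint driver's probe: */
if (rio_request_inb_pwrite(rdev, my_ep_pw_handler))
	dev_warn(&rdev->dev, "port-write callback already registered\n");

/* ... and on removal: */
rio_release_inb_pwrite(rdev);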
+/** + * rio_mport_get_efb - get pointer to next extended features block + * @port: Master port to issue transaction + * @local: Indicate a local master port or remote device access + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @from: Offset of current Extended Feature block header (if 0 starts + * from ExtFeaturePtr) + */ +u32 +rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, + u8 hopcount, u32 from) +{ + u32 reg_val; + + if (from == 0) { + if (local) + rio_local_read_config_32(port, RIO_ASM_INFO_CAR, + &reg_val); + else + rio_mport_read_config_32(port, destid, hopcount, + RIO_ASM_INFO_CAR, &reg_val); + return reg_val & RIO_EXT_FTR_PTR_MASK; + } else { + if (local) + rio_local_read_config_32(port, from, &reg_val); + else + rio_mport_read_config_32(port, destid, hopcount, + from, &reg_val); + return RIO_GET_BLOCK_ID(reg_val); + } +} + +/** * rio_mport_get_feature - query for devices' extended features * @port: Master port to issue transaction * @local: Indicate a local master port or remote device access @@ -451,6 +780,110 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from) return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from); } +/** + * rio_std_route_add_entry - Add switch route table entry using standard + * registers defined in RIO specification rev.1.3 + * @mport: Master port to issue transaction + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @table: routing table ID (global or port-specific) + * @route_destid: destID entry in the RT + * @route_port: destination port for specified destID + */ +int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, + (u32)route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, + (u32)route_port); + } + + udelay(10); + return 0; +} + +/** + * rio_std_route_get_entry - Read switch route table entry (port number) + * associated with the specified destID using standard registers defined in RIO + * specification rev.1.3 + * @mport: Master port to issue transaction + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @table: routing table ID (global or port-specific) + * @route_destid: destID entry in the RT + * @route_port: returned destination port for specified destID + */ +int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); + + *route_port = (u8)result; + } + + return 0; +} + +/** + * rio_std_route_clr_table - Clear switch route table using standard registers + * defined in RIO specification rev.1.3.
+ * @mport: Master port to issue transaction + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @table: routing table ID (global or port-specific) + */ +int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 max_destid = 0xff; + u32 i, pef, id_inc = 1, ext_cfg = 0; + u32 port_sel = RIO_INVALID_ROUTE; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_read_config_32(mport, destid, hopcount, + RIO_PEF_CAR, &pef); + + if (mport->sys_size) { + rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWITCH_RT_LIMIT, + &max_destid); + max_destid &= RIO_RT_MAX_DESTID; + } + + if (pef & RIO_PEF_EXT_RT) { + ext_cfg = 0x80000000; + id_inc = 4; + port_sel = (RIO_INVALID_ROUTE << 24) | + (RIO_INVALID_ROUTE << 16) | + (RIO_INVALID_ROUTE << 8) | + RIO_INVALID_ROUTE; + } + + for (i = 0; i <= max_destid;) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, + ext_cfg | i); + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, + port_sel); + i += id_inc; + } + } + + udelay(10); + return 0; +} + static void rio_fixup_device(struct rio_dev *dev) { } diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index 7786d02581f2..f27b7a9c47d2 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h @@ -18,38 +18,50 @@ extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, u8 hopcount, int ftr); +extern u32 rio_mport_get_physefb(struct rio_mport *port, int local, + u16 destid, u8 hopcount); +extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, + u8 hopcount, u32 from); extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); extern int rio_enum_mport(struct rio_mport *mport); extern int rio_disc_mport(struct rio_mport *mport); +extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, + u8 hopcount, u16 table, u16 route_destid, + u8 route_port); +extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, + u8 hopcount, u16 table, u16 route_destid, + u8 *route_port); +extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, + u8 hopcount, u16 table); +extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); /* Structures internal to the RIO core code */ extern struct device_attribute rio_dev_attrs[]; extern spinlock_t rio_global_list_lock; -extern struct rio_route_ops __start_rio_route_ops[]; -extern struct rio_route_ops __end_rio_route_ops[]; +extern struct rio_switch_ops __start_rio_switch_ops[]; +extern struct rio_switch_ops __end_rio_switch_ops[]; /* Helpers internal to the RIO core code */ -#define DECLARE_RIO_ROUTE_SECTION(section, vid, did, add_hook, get_hook) \ - static struct rio_route_ops __rio_route_ops __used \ - __section(section)= { vid, did, add_hook, get_hook }; +#define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \ + static const struct rio_switch_ops __rio_switch_##name __used \ + __section(section) = { vid, did, init_hook }; /** - * DECLARE_RIO_ROUTE_OPS - Registers switch routing operations + * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine * @vid: RIO vendor ID * @did: RIO device ID - * @add_hook: Callback that adds a route entry - * @get_hook: Callback that gets a route entry + * @init_hook: Callback that performs switch-specific initialization * - * Manipulating switch route tables in RIO is switch specific. 
This - * registers a switch by vendor and device ID with two callbacks for - * modifying and retrieving route entries in a switch. A &struct - * rio_route_ops is initialized with the ops and placed into a - * RIO-specific kernel section. + * Manipulating switch route tables and error management in RIO + * is switch specific. This registers a switch by vendor and device ID with + * initialization callback for setting up switch operations and (if required) + * hardware initialization. A &struct rio_switch_ops is initialized with + * pointer to the init routine and placed into a RIO-specific kernel section. */ -#define DECLARE_RIO_ROUTE_OPS(vid, did, add_hook, get_hook) \ - DECLARE_RIO_ROUTE_SECTION(.rio_route_ops, \ - vid, did, add_hook, get_hook) +#define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook) \ + DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \ + vid, did, init_hook) #define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16)) #define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16)) diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig new file mode 100644 index 000000000000..2b4e9b2b6631 --- /dev/null +++ b/drivers/rapidio/switches/Kconfig @@ -0,0 +1,28 @@ +# +# RapidIO switches configuration +# +config RAPIDIO_TSI57X + bool "IDT Tsi57x SRIO switches support" + depends on RAPIDIO + ---help--- + Includes support for IDT Tsi57x family of serial RapidIO switches. + +config RAPIDIO_CPS_XX + bool "IDT CPS-xx SRIO switches support" + depends on RAPIDIO + ---help--- + Includes support for IDT CPS-16/12/10/8 serial RapidIO switches. + +config RAPIDIO_TSI568 + bool "Tsi568 SRIO switch support" + depends on RAPIDIO + default n + ---help--- + Includes support for IDT Tsi568 serial RapidIO switch. + +config RAPIDIO_TSI500 + bool "Tsi500 Parallel RapidIO switch support" + depends on RAPIDIO + default n + ---help--- + Includes support for IDT Tsi500 parallel RapidIO switch. diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile index b924f8301761..fe4adc3e8d5f 100644 --- a/drivers/rapidio/switches/Makefile +++ b/drivers/rapidio/switches/Makefile @@ -2,4 +2,11 @@ # Makefile for RIO switches # -obj-$(CONFIG_RAPIDIO) += tsi500.o +obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o +obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o +obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o +obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o + +ifeq ($(CONFIG_RAPIDIO_DEBUG),y) +EXTRA_CFLAGS += -DDEBUG +endif diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c new file mode 100644 index 000000000000..2c790c144f89 --- /dev/null +++ b/drivers/rapidio/switches/idtcps.c @@ -0,0 +1,137 @@ +/* + * IDT CPS RapidIO switches support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include "../rio.h" + +#define CPS_DEFAULT_ROUTE 0xde +#define CPS_NO_ROUTE 0xdf + +#define IDTCPS_RIO_DOMAIN 0xf20020 + +static int +idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); + + rio_mport_read_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); + + result = (0xffffff00 & result) | (u32)route_port; + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, result); + } + + return 0; +} + +static int +idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); + + rio_mport_read_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); + + if (CPS_DEFAULT_ROUTE == (u8)result || + CPS_NO_ROUTE == (u8)result) + *route_port = RIO_INVALID_ROUTE; + else + *route_port = (u8)result; + } + + return 0; +} + +static int +idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 i; + + if (table == RIO_GLOBAL_TABLE) { + for (i = 0x80000000; i <= 0x800000ff;) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, i); + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, + (CPS_DEFAULT_ROUTE << 24) | + (CPS_DEFAULT_ROUTE << 16) | + (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE); + i += 4; + } + } + + return 0; +} + +static int +idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain) +{ + /* + * Switch domain configuration operates only at global level + */ + rio_mport_write_config_32(mport, destid, hopcount, + IDTCPS_RIO_DOMAIN, (u32)sw_domain); + return 0; +} + +static int +idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + rio_mport_read_config_32(mport, destid, hopcount, + IDTCPS_RIO_DOMAIN, &regval); + + *sw_domain = (u8)(regval & 0xff); + + return 0; +} + +static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + rdev->rswitch->add_entry = idtcps_route_add_entry; + rdev->rswitch->get_entry = idtcps_route_get_entry; + rdev->rswitch->clr_table = idtcps_route_clr_table; + rdev->rswitch->set_domain = idtcps_set_domain; + rdev->rswitch->get_domain = idtcps_get_domain; + rdev->rswitch->em_init = NULL; + rdev->rswitch->em_handle = NULL; + + return 0; +} + +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init); diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c index c77c23bd9840..914eddd5aa42 100644 --- a/drivers/rapidio/switches/tsi500.c +++
b/drivers/rapidio/switches/tsi500.c @@ -1,6 +1,10 @@ /* * RapidIO Tsi500 switch support * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + * - Modified switch operations initialization. + * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * @@ -57,4 +61,18 @@ tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 tab return ret; } -DECLARE_RIO_ROUTE_OPS(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_route_add_entry, tsi500_route_get_entry); +static int tsi500_switch_init(struct rio_dev *rdev, int do_enum) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + rdev->rswitch->add_entry = tsi500_route_add_entry; + rdev->rswitch->get_entry = tsi500_route_get_entry; + rdev->rswitch->clr_table = NULL; + rdev->rswitch->set_domain = NULL; + rdev->rswitch->get_domain = NULL; + rdev->rswitch->em_init = NULL; + rdev->rswitch->em_handle = NULL; + + return 0; +} + +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init); diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c new file mode 100644 index 000000000000..f7fd7898606e --- /dev/null +++ b/drivers/rapidio/switches/tsi568.c @@ -0,0 +1,146 @@ +/* + * RapidIO Tsi568 switch support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + * - Added EM support + * - Modified switch operations initialization. + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/delay.h> +#include "../rio.h" + +/* Global (broadcast) route registers */ +#define SPBC_ROUTE_CFG_DESTID 0x10070 +#define SPBC_ROUTE_CFG_PORT 0x10074 + +/* Per port route registers */ +#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) +#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n) + +#define TSI568_SP_MODE_BC 0x10004 +#define TSI568_SP_MODE_PW_DIS 0x08000000 + +static int +tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, route_port); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), route_port); + } + + udelay(10); + + return 0; +} + +static int +tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + int ret = 0; + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, &result); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), &result); + } + + *route_port = result; + if (*route_port > 15) + ret = -1; + + return ret; +} + +static int +tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 route_idx; + u32 lut_size; + + lut_size = (mport->sys_size) ? 
0x1ff : 0xff; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, + RIO_INVALID_ROUTE); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), + RIO_INVALID_ROUTE); + } + + return 0; +} + +static int +tsi568_em_init(struct rio_dev *rdev) +{ + struct rio_mport *mport = rdev->net->hport; + u16 destid = rdev->rswitch->destid; + u8 hopcount = rdev->rswitch->hopcount; + u32 regval; + + pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount); + + /* Make sure that Port-Writes are disabled (for all ports) */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI568_SP_MODE_BC, &regval); + rio_mport_write_config_32(mport, destid, hopcount, + TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS); + + return 0; +} + +static int tsi568_switch_init(struct rio_dev *rdev, int do_enum) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + rdev->rswitch->add_entry = tsi568_route_add_entry; + rdev->rswitch->get_entry = tsi568_route_get_entry; + rdev->rswitch->clr_table = tsi568_route_clr_table; + rdev->rswitch->set_domain = NULL; + rdev->rswitch->get_domain = NULL; + rdev->rswitch->em_init = tsi568_em_init; + rdev->rswitch->em_handle = NULL; + + return 0; +} + +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init); diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c new file mode 100644 index 000000000000..d34df722d95f --- /dev/null +++ b/drivers/rapidio/switches/tsi57x.c @@ -0,0 +1,315 @@ +/* + * RapidIO Tsi57x switch family support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + * - Added EM support + * - Modified switch operations initialization. + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version.
+ */ + +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/rio_ids.h> +#include <linux/delay.h> +#include "../rio.h" + +/* Global (broadcast) route registers */ +#define SPBC_ROUTE_CFG_DESTID 0x10070 +#define SPBC_ROUTE_CFG_PORT 0x10074 + +/* Per port route registers */ +#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) +#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n) + +#define TSI578_SP_MODE(n) (0x11004 + n*0x100) +#define TSI578_SP_MODE_GLBL 0x10004 +#define TSI578_SP_MODE_PW_DIS 0x08000000 +#define TSI578_SP_MODE_LUT_512 0x01000000 + +#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100) +#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100) +#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100) +#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100) + +#define TSI578_GLBL_ROUTE_BASE 0x10078 + +static int +tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, route_port); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), route_port); + } + + udelay(10); + + return 0; +} + +static int +tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + int ret = 0; + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + /* Use local RT of the ingress port to avoid possible + race condition */ + rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &result); + table = (result & RIO_SWP_INFO_PORT_NUM_MASK); + } + + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), &result); + + *route_port = (u8)result; + if (*route_port > 15) + ret = -1; + + return ret; +} + +static int +tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 route_idx; + u32 lut_size; + + lut_size = (mport->sys_size) ? 
0x1ff : 0xff; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, + RIO_INVALID_ROUTE); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), RIO_INVALID_ROUTE); + } + + return 0; +} + +static int +tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + + /* Turn off flat (LUT_512) mode */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_MODE_GLBL, &regval); + rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL, + regval & ~TSI578_SP_MODE_LUT_512); + /* Set switch domain base */ + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_GLBL_ROUTE_BASE, + (u32)(sw_domain << 24)); + return 0; +} + +static int +tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_GLBL_ROUTE_BASE, &regval); + + *sw_domain = (u8)(regval >> 24); + + return 0; +} + +static int +tsi57x_em_init(struct rio_dev *rdev) +{ + struct rio_mport *mport = rdev->net->hport; + u16 destid = rdev->rswitch->destid; + u8 hopcount = rdev->rswitch->hopcount; + u32 regval; + int portnum; + + pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount); + + for (portnum = 0; portnum < 16; portnum++) { + /* Make sure that Port-Writes are enabled (for all ports) */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_MODE(portnum), &regval); + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_MODE(portnum), + regval & ~TSI578_SP_MODE_PW_DIS); + + /* Clear all pending interrupts */ + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + + RIO_PORT_N_ERR_STS_CSR(portnum), + &regval); + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + + RIO_PORT_N_ERR_STS_CSR(portnum), + regval & 0x07120214); + + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_INT_STATUS(portnum), &regval); + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_INT_STATUS(portnum), + regval & 0x000700bd); + + /* Enable all interrupts to allow ports to send a port-write */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_CTL_INDEP(portnum), &regval); + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_CTL_INDEP(portnum), + regval | 0x000b0000); + + /* Skip next (odd) port if the current port is in x4 mode */ + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + &regval); + if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4) + portnum++; + } + + return 0; +} + +static int +tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) +{ + struct rio_mport *mport = rdev->net->hport; + u16 destid = rdev->rswitch->destid; + u8 hopcount = rdev->rswitch->hopcount; + u32 intstat, err_status; + int sendcount, checkcount; + u8 route_port; + u32 regval; + + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + &err_status); + + if ((err_status &
RIO_PORT_N_ERR_STS_PORT_OK) && + (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | + RIO_PORT_N_ERR_STS_PW_INP_ES))) { + /* Remove any queued packets by locking/unlocking port */ + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + &regval); + if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) { + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + regval | RIO_PORT_N_CTL_LOCKOUT); + udelay(50); + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + regval); + } + + /* Read from link maintenance response register to clear + * valid bit + */ + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum), + &regval); + + /* Send a Packet-Not-Accepted/Link-Request-Input-Status control + * symbol to recover from IES/OES + */ + sendcount = 3; + while (sendcount) { + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_CS_TX(portnum), 0x40fc8000); + checkcount = 3; + while (checkcount--) { + udelay(50); + rio_mport_read_config_32( + mport, destid, hopcount, + rdev->phys_efptr + + RIO_PORT_N_MNT_RSP_CSR(portnum), + &regval); + if (regval & RIO_PORT_N_MNT_RSP_RVAL) + goto exit_es; + } + + sendcount--; + } + } + +exit_es: + /* Clear implementation specific error status bits */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_INT_STATUS(portnum), &intstat); + pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n", + destid, hopcount, portnum, intstat); + + if (intstat & 0x10000) { + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_LUT_PEINF(portnum), &regval); + regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24); + route_port = rdev->rswitch->route_table[regval]; + pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n", + rio_name(rdev), portnum, regval); + tsi57x_route_add_entry(mport, destid, hopcount, + RIO_GLOBAL_TABLE, regval, route_port); + } + + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_INT_STATUS(portnum), + intstat & 0x000700bd); + + return 0; +} + +static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + rdev->rswitch->add_entry = tsi57x_route_add_entry; + rdev->rswitch->get_entry = tsi57x_route_get_entry; + rdev->rswitch->clr_table = tsi57x_route_clr_table; + rdev->rswitch->set_domain = tsi57x_set_domain; + rdev->rswitch->get_domain = tsi57x_get_domain; + rdev->rswitch->em_init = tsi57x_em_init; + rdev->rswitch->em_handle = tsi57x_em_handler; + + return 0; +} + +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init); diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index 5fb83e2ced25..7d149a8d8d9b 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c @@ -23,9 +23,9 @@ struct pm8607_regulator_info { struct regulator_dev *regulator; struct i2c_client *i2c; - int min_uV; - int max_uV; - int step_uV; + unsigned int *vol_table; + unsigned int *vol_suspend; + int vol_reg; int vol_shift; int vol_nbits; @@ -36,83 +36,189 @@ struct pm8607_regulator_info { int slope_double; }; -static inline int check_range(struct pm8607_regulator_info *info, - int min_uV, int max_uV) -{ - if (max_uV < info->min_uV
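
The tsi57x error-management handler just shown recovers an input/output error-stopped port with a bounded retry: up to three control symbols, each followed by up to three polls of the maintenance-response valid bit. A stand-alone sketch of that retry/poll structure; the hardware is faked by a counter, and the actual register writes are indicated only in comments:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the hardware: pretend the link-response valid bit
 * appears only after a few polls. */
static int polls_needed = 4;
static bool response_valid(void) { return --polls_needed <= 0; }

/* Shape of the recovery loop in tsi57x_em_handler(). */
static bool recover_port(void)
{
    for (int send = 0; send < 3; send++) {
        /* here: write 0x40fc8000 to TSI578_SP_CS_TX(portnum) */
        for (int check = 0; check < 3; check++) {
            /* here: udelay(50), then read RIO_PORT_N_MNT_RSP_CSR */
            if (response_valid())
                return true;            /* goto exit_es in the driver */
        }
    }
    return false;   /* status bits get cleared either way */
}

int main(void)
{
    printf("recovered: %d\n", recover_port());
    return 0;
}
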
|| min_uV > info->max_uV || min_uV > max_uV) - return -EINVAL; +static const unsigned int BUCK1_table[] = { + 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, + 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, + 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, + 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, + 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, + 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, + 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, + 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, +}; - return 0; -} +static const unsigned int BUCK1_suspend_table[] = { + 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, + 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, + 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, + 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, + 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, + 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, + 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, + 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000, +}; + +static const unsigned int BUCK2_table[] = { + 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000, + 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000, + 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000, + 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000, + 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000, + 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000, + 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000, + 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000, +}; + +static const unsigned int BUCK2_suspend_table[] = { + 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000, + 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000, + 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000, + 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000, + 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000, + 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000, + 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000, + 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000, +}; + +static const unsigned int BUCK3_table[] = { + 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, + 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, + 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, + 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, + 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, + 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, + 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, + 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000, +}; + +static const unsigned int BUCK3_suspend_table[] = { + 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000, + 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000, + 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000, + 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, + 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, + 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 
1175000, + 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, + 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000, +}; + +static const unsigned int LDO1_table[] = { + 1800000, 1200000, 2800000, 0, +}; + +static const unsigned int LDO1_suspend_table[] = { + 1800000, 1200000, 0, 0, +}; + +static const unsigned int LDO2_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000, +}; + +static const unsigned int LDO2_suspend_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, +}; + +static const unsigned int LDO3_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000, +}; + +static const unsigned int LDO3_suspend_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, +}; + +static const unsigned int LDO4_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 3300000, +}; + +static const unsigned int LDO4_suspend_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 2900000, +}; + +static const unsigned int LDO5_table[] = { + 2900000, 3000000, 3100000, 3300000, +}; + +static const unsigned int LDO5_suspend_table[] = { + 2900000, 0, 0, 0, +}; + +static const unsigned int LDO6_table[] = { + 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 3300000, +}; + +static const unsigned int LDO6_suspend_table[] = { + 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 2900000, +}; + +static const unsigned int LDO7_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, +}; + +static const unsigned int LDO7_suspend_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, +}; + +static const unsigned int LDO8_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, +}; + +static const unsigned int LDO8_suspend_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, +}; + +static const unsigned int LDO9_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000, +}; + +static const unsigned int LDO9_suspend_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, +}; + +static const unsigned int LDO10_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000, + 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, +}; + +static const unsigned int LDO10_suspend_table[] = { + 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000, + 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, +}; + +static const unsigned int LDO12_table[] = { + 1800000, 1900000, 2700000, 2800000, 2900000, 3000000, 3100000, 3300000, + 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, +}; + +static const unsigned int LDO12_suspend_table[] = { + 1800000, 1900000, 2700000, 2800000, 2900000, 2900000, 2900000, 2900000, + 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, +}; + +static const unsigned int LDO13_table[] = { + 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, 0, +}; + +static const unsigned int LDO13_suspend_table[] = { + 0, +}; + +static const unsigned int LDO14_table[] = { + 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 3300000, +}; + +static const unsigned int LDO14_suspend_table[] = { + 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 2900000, +}; static int 
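
These tables replace the per-regulator voltage formulas, so the pm8607_list_voltage() that follows reduces to a table index plus an optional doubling for slope_double parts. A minimal model of that lookup; note the driver bounds the index with (2 << vol_nbits), which is twice the table length for an nbits-wide selector, while this sketch bounds by the table length directly:

#include <stdio.h>

static const unsigned int ldo5_table[] = { 2900000, 3000000, 3100000, 3300000 };

/* Model of pm8607_list_voltage(): a selector is just a table index;
 * slope_double regulators report twice the stored value. */
static int list_voltage(const unsigned int *table, unsigned len,
                        unsigned index, int slope_double)
{
    if (index >= len)
        return -1;                     /* -EINVAL in the driver */
    int uV = table[index];
    return slope_double ? uV << 1 : uV;
}

int main(void)
{
    printf("%d\n", list_voltage(ldo5_table, 4, 1, 0));  /* 3000000 */
    return 0;
}
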
pm8607_list_voltage(struct regulator_dev *rdev, unsigned index) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); int ret = -EINVAL; - switch (info->desc.id) { - case PM8607_ID_BUCK1: - ret = (index < 0x1d) ? (index * 25000 + 800000) : - ((index < 0x20) ? 1500000 : - ((index < 0x40) ? ((index - 0x20) * 25000) : - -EINVAL)); - break; - case PM8607_ID_BUCK3: - ret = (index < 0x3d) ? (index * 25000) : - ((index < 0x40) ? 1500000 : -EINVAL); - if (ret < 0) - break; + if (info->vol_table && (index < (2 << info->vol_nbits))) { + ret = info->vol_table[index]; if (info->slope_double) ret <<= 1; - break; - case PM8607_ID_LDO1: - ret = (index == 0) ? 1800000 : - ((index == 1) ? 1200000 : - ((index == 2) ? 2800000 : -EINVAL)); - break; - case PM8607_ID_LDO5: - ret = (index == 0) ? 2900000 : - ((index == 1) ? 3000000 : - ((index == 2) ? 3100000 : 3300000)); - break; - case PM8607_ID_LDO7: - case PM8607_ID_LDO8: - ret = (index < 3) ? (index * 50000 + 1800000) : - ((index < 8) ? (index * 50000 + 2550000) : - -EINVAL); - break; - case PM8607_ID_LDO12: - ret = (index < 2) ? (index * 100000 + 1800000) : - ((index < 7) ? (index * 100000 + 2500000) : - ((index == 7) ? 3300000 : 1200000)); - break; - case PM8607_ID_LDO2: - case PM8607_ID_LDO3: - case PM8607_ID_LDO9: - ret = (index < 3) ? (index * 50000 + 1800000) : - ((index < 7) ? (index * 50000 + 2550000) : - 3300000); - break; - case PM8607_ID_LDO4: - ret = (index < 3) ? (index * 50000 + 1800000) : - ((index < 6) ? (index * 50000 + 2550000) : - ((index == 6) ? 2900000 : 3300000)); - break; - case PM8607_ID_LDO6: - ret = (index < 2) ? (index * 50000 + 1800000) : - ((index < 7) ? (index * 50000 + 2500000) : - 3300000); - break; - case PM8607_ID_LDO10: - ret = (index < 3) ? (index * 50000 + 1800000) : - ((index < 7) ? (index * 50000 + 2550000) : - ((index == 7) ? 3300000 : 1200000)); - break; - case PM8607_ID_LDO14: - ret = (index < 2) ? (index * 50000 + 1800000) : - ((index < 7) ? 
(index * 50000 + 2600000) : - 3300000); - break; } return ret; } @@ -120,174 +226,26 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index) static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); - int val = -ENOENT; - int ret; + int i, ret = -ENOENT; - switch (info->desc.id) { - case PM8607_ID_BUCK1: - if (min_uV >= 800000) /* 800mV ~ 1500mV / 25mV */ - val = (min_uV - 775001) / 25000; - else { /* 25mV ~ 775mV / 25mV */ - val = (min_uV + 249999) / 25000; - val += 32; - } - break; - case PM8607_ID_BUCK3: - if (info->slope_double) - min_uV = min_uV >> 1; - val = (min_uV + 249999) / 25000; /* 0mV ~ 1500mV / 25mV */ - - break; - case PM8607_ID_LDO1: - if (min_uV > 1800000) - val = 2; - else if (min_uV > 1200000) - val = 0; - else - val = 1; - break; - case PM8607_ID_LDO5: - if (min_uV > 3100000) - val = 3; - else /* 2900mV ~ 3100mV / 100mV */ - val = (min_uV - 2800001) / 100000; - break; - case PM8607_ID_LDO7: - case PM8607_ID_LDO8: - if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */ - if (min_uV <= 1800000) - val = 0; /* 1800mv */ - else if (min_uV <= 1900000) - val = (min_uV - 1750001) / 50000; - else - val = 3; /* 2700mV */ - } else { /* 2700mV ~ 2900mV / 50mV */ - if (min_uV <= 2900000) { - val = (min_uV - 2650001) / 50000; - val += 3; - } else - val = -EINVAL; - } - break; - case PM8607_ID_LDO10: - if (min_uV > 2850000) - val = 7; - else if (min_uV <= 1200000) - val = 8; - else if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */ - val = (min_uV - 1750001) / 50000; - else { /* 2700mV ~ 2850mV / 50mV */ - val = (min_uV - 2650001) / 50000; - val += 3; - } - break; - case PM8607_ID_LDO12: - if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 100mV */ - if (min_uV <= 1200000) - val = 8; /* 1200mV */ - else if (min_uV <= 1800000) - val = 0; /* 1800mV */ - else if (min_uV <= 1900000) - val = (min_uV - 1700001) / 100000; - else - val = 2; /* 2700mV */ - } else { /* 2700mV ~ 3100mV / 100mV */ - if (min_uV <= 3100000) { - val = (min_uV - 2600001) / 100000; - val += 2; - } else if (min_uV <= 3300000) - val = 7; - else - val = -EINVAL; - } - break; - case PM8607_ID_LDO2: - case PM8607_ID_LDO3: - case PM8607_ID_LDO9: - if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */ - if (min_uV <= 1800000) - val = 0; - else if (min_uV <= 1900000) - val = (min_uV - 1750001) / 50000; - else - val = 3; /* 2700mV */ - } else { /* 2700mV ~ 2850mV / 50mV */ - if (min_uV <= 2850000) { - val = (min_uV - 2650001) / 50000; - val += 3; - } else if (min_uV <= 3300000) - val = 7; - else - val = -EINVAL; - } - break; - case PM8607_ID_LDO4: - if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */ - if (min_uV <= 1800000) - val = 0; - else if (min_uV <= 1900000) - val = (min_uV - 1750001) / 50000; - else - val = 3; /* 2700mV */ - } else { /* 2700mV ~ 2800mV / 50mV */ - if (min_uV <= 2850000) { - val = (min_uV - 2650001) / 50000; - val += 3; - } else if (min_uV <= 2900000) - val = 6; - else if (min_uV <= 3300000) - val = 7; - else - val = -EINVAL; - } - break; - case PM8607_ID_LDO6: - if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */ - if (min_uV <= 1800000) - val = 0; - else if (min_uV <= 1850000) - val = (min_uV - 1750001) / 50000; - else - val = 2; /* 2600mV */ - } else { /* 2600mV ~ 2800mV / 50mV */ - if (min_uV <= 2800000) { - val = (min_uV - 2550001) / 50000; - val += 2; - } else if (min_uV <= 3300000) - val = 7; - else - val = -EINVAL; - } - break; - case PM8607_ID_LDO14: - if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 
50mV */ - if (min_uV <= 1800000) - val = 0; - else if (min_uV <= 1850000) - val = (min_uV - 1750001) / 50000; - else - val = 2; /* 2700mV */ - } else { /* 2700mV ~ 2900mV / 50mV */ - if (min_uV <= 2900000) { - val = (min_uV - 2650001) / 50000; - val += 2; - } else if (min_uV <= 3300000) - val = 7; - else - val = -EINVAL; - } - break; + if (info->slope_double) { + min_uV = min_uV >> 1; + max_uV = max_uV >> 1; } - if (val >= 0) { - ret = pm8607_list_voltage(rdev, val); - if (ret > max_uV) { - pr_err("exceed voltage range (%d %d) uV", - min_uV, max_uV); - return -EINVAL; + if (info->vol_table) { + for (i = 0; i < (2 << info->vol_nbits); i++) { + if (!info->vol_table[i]) + break; + if ((min_uV <= info->vol_table[i]) + && (max_uV >= info->vol_table[i])) { + ret = i; + break; + } } - } else - pr_err("invalid voltage range (%d %d) uV", min_uV, max_uV); - return val; + } + if (ret < 0) + pr_err("invalid voltage range (%d %d) uV\n", min_uV, max_uV); + return ret; } static int pm8607_set_voltage(struct regulator_dev *rdev, @@ -297,7 +255,7 @@ static int pm8607_set_voltage(struct regulator_dev *rdev, uint8_t val, mask; int ret; - if (check_range(info, min_uV, max_uV)) { + if (min_uV > max_uV) { pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV); return -EINVAL; } @@ -375,18 +333,15 @@ static struct regulator_ops pm8607_regulator_ops = { .is_enabled = pm8607_is_enabled, }; -#define PM8607_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \ +#define PM8607_DVC(vreg, nbits, ureg, ubit, ereg, ebit) \ { \ .desc = { \ - .name = "BUCK" #_id, \ + .name = #vreg, \ .ops = &pm8607_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ - .id = PM8607_ID_BUCK##_id, \ + .id = PM8607_ID_##vreg, \ .owner = THIS_MODULE, \ }, \ - .min_uV = (min) * 1000, \ - .max_uV = (max) * 1000, \ - .step_uV = (step) * 1000, \ .vol_reg = PM8607_##vreg, \ .vol_shift = (0), \ .vol_nbits = (nbits), \ @@ -395,9 +350,11 @@ static struct regulator_ops pm8607_regulator_ops = { .enable_reg = PM8607_##ereg, \ .enable_bit = (ebit), \ .slope_double = (0), \ + .vol_table = (unsigned int *)&vreg##_table, \ + .vol_suspend = (unsigned int *)&vreg##_suspend_table, \ } -#define PM8607_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \ +#define PM8607_LDO(_id, vreg, shift, nbits, ereg, ebit) \ { \ .desc = { \ .name = "LDO" #_id, \ @@ -406,33 +363,34 @@ static struct regulator_ops pm8607_regulator_ops = { .id = PM8607_ID_LDO##_id, \ .owner = THIS_MODULE, \ }, \ - .min_uV = (min) * 1000, \ - .max_uV = (max) * 1000, \ - .step_uV = (step) * 1000, \ .vol_reg = PM8607_##vreg, \ .vol_shift = (shift), \ .vol_nbits = (nbits), \ .enable_reg = PM8607_##ereg, \ .enable_bit = (ebit), \ .slope_double = (0), \ + .vol_table = (unsigned int *)&LDO##_id##_table, \ + .vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \ } static struct pm8607_regulator_info pm8607_regulator_info[] = { - PM8607_DVC(1, 0, 1500, 25, BUCK1, 6, GO, 0, SUPPLIES_EN11, 0), - PM8607_DVC(3, 0, 1500, 25, BUCK3, 6, GO, 2, SUPPLIES_EN11, 2), - - PM8607_LDO(1 , 1200, 2800, 0, LDO1 , 0, 2, SUPPLIES_EN11, 3), - PM8607_LDO(2 , 1800, 3300, 0, LDO2 , 0, 3, SUPPLIES_EN11, 4), - PM8607_LDO(3 , 1800, 3300, 0, LDO3 , 0, 3, SUPPLIES_EN11, 5), - PM8607_LDO(4 , 1800, 3300, 0, LDO4 , 0, 3, SUPPLIES_EN11, 6), - PM8607_LDO(5 , 2900, 3300, 0, LDO5 , 0, 2, SUPPLIES_EN11, 7), - PM8607_LDO(6 , 1800, 3300, 0, LDO6 , 0, 3, SUPPLIES_EN12, 0), - PM8607_LDO(7 , 1800, 2900, 0, LDO7 , 0, 3, SUPPLIES_EN12, 1), - PM8607_LDO(8 , 1800, 2900, 0, LDO8 , 0, 3, SUPPLIES_EN12, 2), - PM8607_LDO(9 , 1800, 
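
The new choose_voltage() shown above is a linear scan: halve the requested range for slope_double parts, then take the first table entry inside [min_uV, max_uV], with a zero entry terminating the usable range. A self-contained model using one of the LDO tables:

#include <stdio.h>

static const unsigned int ldo14[] = {
    1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 3300000,
};

/* Model of the table scan in choose_voltage(). */
static int choose_voltage(const unsigned int *table, unsigned len,
                          int min_uV, int max_uV, int slope_double)
{
    if (slope_double) {
        min_uV >>= 1;   /* table stores pre-doubling values */
        max_uV >>= 1;
    }
    for (unsigned i = 0; i < len; i++) {
        if (!table[i])
            break;                     /* zero marks end of valid entries */
        if (min_uV <= (int)table[i] && (int)table[i] <= max_uV)
            return i;                  /* selector */
    }
    return -1;                         /* -ENOENT in the driver */
}

int main(void)
{
    printf("%d\n", choose_voltage(ldo14, 8, 2700000, 2800000, 0)); /* 2 */
    return 0;
}
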
3300, 0, LDO9 , 0, 3, SUPPLIES_EN12, 3), - PM8607_LDO(10, 1200, 3300, 0, LDO10, 0, 4, SUPPLIES_EN11, 4), - PM8607_LDO(12, 1200, 3300, 0, LDO12, 0, 4, SUPPLIES_EN11, 5), - PM8607_LDO(14, 1800, 3300, 0, LDO14, 0, 3, SUPPLIES_EN11, 6), + PM8607_DVC(BUCK1, 6, GO, 0, SUPPLIES_EN11, 0), + PM8607_DVC(BUCK2, 6, GO, 1, SUPPLIES_EN11, 1), + PM8607_DVC(BUCK3, 6, GO, 2, SUPPLIES_EN11, 2), + + PM8607_LDO( 1, LDO1, 0, 2, SUPPLIES_EN11, 3), + PM8607_LDO( 2, LDO2, 0, 3, SUPPLIES_EN11, 4), + PM8607_LDO( 3, LDO3, 0, 3, SUPPLIES_EN11, 5), + PM8607_LDO( 4, LDO4, 0, 3, SUPPLIES_EN11, 6), + PM8607_LDO( 5, LDO5, 0, 2, SUPPLIES_EN11, 7), + PM8607_LDO( 6, LDO6, 0, 3, SUPPLIES_EN12, 0), + PM8607_LDO( 7, LDO7, 0, 3, SUPPLIES_EN12, 1), + PM8607_LDO( 8, LDO8, 0, 3, SUPPLIES_EN12, 2), + PM8607_LDO( 9, LDO9, 0, 3, SUPPLIES_EN12, 3), + PM8607_LDO(10, LDO10, 0, 3, SUPPLIES_EN12, 4), + PM8607_LDO(12, LDO12, 0, 4, SUPPLIES_EN12, 5), + PM8607_LDO(13, VIBRATOR_SET, 1, 3, VIBRATOR_SET, 0), + PM8607_LDO(14, LDO14, 0, 4, SUPPLIES_EN12, 6), }; static inline struct pm8607_regulator_info *find_regulator_info(int id) @@ -484,60 +442,29 @@ static int __devexit pm8607_regulator_remove(struct platform_device *pdev) { struct pm8607_regulator_info *info = platform_get_drvdata(pdev); + platform_set_drvdata(pdev, NULL); regulator_unregister(info->regulator); return 0; } -#define PM8607_REGULATOR_DRIVER(_name) \ -{ \ - .driver = { \ - .name = "88pm8607-" #_name, \ - .owner = THIS_MODULE, \ - }, \ - .probe = pm8607_regulator_probe, \ - .remove = __devexit_p(pm8607_regulator_remove), \ -} - -static struct platform_driver pm8607_regulator_driver[] = { - PM8607_REGULATOR_DRIVER(buck1), - PM8607_REGULATOR_DRIVER(buck2), - PM8607_REGULATOR_DRIVER(buck3), - PM8607_REGULATOR_DRIVER(ldo1), - PM8607_REGULATOR_DRIVER(ldo2), - PM8607_REGULATOR_DRIVER(ldo3), - PM8607_REGULATOR_DRIVER(ldo4), - PM8607_REGULATOR_DRIVER(ldo5), - PM8607_REGULATOR_DRIVER(ldo6), - PM8607_REGULATOR_DRIVER(ldo7), - PM8607_REGULATOR_DRIVER(ldo8), - PM8607_REGULATOR_DRIVER(ldo9), - PM8607_REGULATOR_DRIVER(ldo10), - PM8607_REGULATOR_DRIVER(ldo12), - PM8607_REGULATOR_DRIVER(ldo14), +static struct platform_driver pm8607_regulator_driver = { + .driver = { + .name = "88pm860x-regulator", + .owner = THIS_MODULE, + }, + .probe = pm8607_regulator_probe, + .remove = __devexit_p(pm8607_regulator_remove), }; static int __init pm8607_regulator_init(void) { - int i, count, ret; - - count = ARRAY_SIZE(pm8607_regulator_driver); - for (i = 0; i < count; i++) { - ret = platform_driver_register(&pm8607_regulator_driver[i]); - if (ret != 0) - pr_err("Failed to register regulator driver: %d\n", - ret); - } - return 0; + return platform_driver_register(&pm8607_regulator_driver); } subsys_initcall(pm8607_regulator_init); static void __exit pm8607_regulator_exit(void) { - int i, count; - - count = ARRAY_SIZE(pm8607_regulator_driver); - for (i = 0; i < count; i++) - platform_driver_unregister(&pm8607_regulator_driver[i]); + platform_driver_unregister(&pm8607_regulator_driver); } module_exit(pm8607_regulator_exit); diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 7de950959ed2..7b14a67bdca2 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c @@ -16,7 +16,7 @@ #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> -#include <linux/mfd/ab3100.h> +#include <linux/mfd/abx500.h> /* LDO registers and some handy masking definitions for AB3100 */ #define AB3100_LDO_A 0x40 @@ -41,7 +41,7 @@ * struct ab3100_regulator * A struct 
passed around the individual regulator functions * @platform_device: platform device holding this regulator - * @ab3100: handle to the AB3100 parent chip + * @dev: handle to the device * @plfdata: AB3100 platform data passed in at probe time * @regreg: regulator register number in the AB3100 * @fixed_voltage: a fixed voltage for this regulator, if this @@ -52,7 +52,7 @@ */ struct ab3100_regulator { struct regulator_dev *rdev; - struct ab3100 *ab3100; + struct device *dev; struct ab3100_platform_data *plfdata; u8 regreg; int fixed_voltage; @@ -183,7 +183,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg) int err; u8 regval; - err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, &regval); if (err) { dev_warn(&reg->dev, "failed to get regid %d value\n", @@ -197,7 +197,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg) regval |= AB3100_REG_ON_MASK; - err = ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg, + err = abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg, regval); if (err) { dev_warn(&reg->dev, "failed to set regid %d value\n", @@ -245,14 +245,14 @@ static int ab3100_disable_regulator(struct regulator_dev *reg) if (abreg->regreg == AB3100_LDO_D) { dev_info(&reg->dev, "disabling LDO D - shut down system\n"); /* Setting LDO D to 0x00 cuts the power to the SoC */ - return ab3100_set_register_interruptible(abreg->ab3100, + return abx500_set_register_interruptible(abreg->dev, 0, AB3100_LDO_D, 0x00U); } /* * All other regulators are handled here */ - err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, &regval); if (err) { dev_err(&reg->dev, "unable to get register 0x%x\n", @@ -260,7 +260,7 @@ static int ab3100_disable_regulator(struct regulator_dev *reg) return err; } regval &= ~AB3100_REG_ON_MASK; - return ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg, + return abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg, regval); } @@ -270,7 +270,7 @@ static int ab3100_is_enabled_regulator(struct regulator_dev *reg) u8 regval; int err; - err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, &regval); if (err) { dev_err(&reg->dev, "unable to get register 0x%x\n", @@ -305,7 +305,7 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg) * For variable types, read out setting and index into * supplied voltage list.
*/ - err = ab3100_get_register_interruptible(abreg->ab3100, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, &regval); if (err) { dev_warn(&reg->dev, @@ -373,7 +373,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg, if (bestindex < 0) return bestindex; - err = ab3100_get_register_interruptible(abreg->ab3100, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, &regval); if (err) { dev_warn(&reg->dev, @@ -386,7 +386,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg, regval &= ~0xE0; regval |= (bestindex << 5); - err = ab3100_set_register_interruptible(abreg->ab3100, + err = abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg, regval); if (err) dev_warn(&reg->dev, "failed to set regulator register %02x\n", @@ -414,7 +414,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg, /* LDO E and BUCK have special suspend voltages you can set */ bestindex = ab3100_get_best_voltage_index(reg, uV, uV); - err = ab3100_get_register_interruptible(abreg->ab3100, + err = abx500_get_register_interruptible(abreg->dev, 0, targetreg, &regval); if (err) { dev_warn(&reg->dev, @@ -427,7 +427,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg, regval &= ~0xE0; regval |= (bestindex << 5); - err = ab3100_set_register_interruptible(abreg->ab3100, + err = abx500_set_register_interruptible(abreg->dev, 0, targetreg, regval); if (err) dev_warn(&reg->dev, "failed to set regulator register %02x\n", @@ -492,18 +492,21 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .id = AB3100_LDO_A, .ops = &regulator_ops_fixed, .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, { .name = "LDO_C", .id = AB3100_LDO_C, .ops = &regulator_ops_fixed, .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, { .name = "LDO_D", .id = AB3100_LDO_D, .ops = &regulator_ops_fixed, .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, { .name = "LDO_E", @@ -511,6 +514,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .ops = &regulator_ops_variable_sleepable, .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, { .name = "LDO_F", @@ -518,6 +522,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .ops = &regulator_ops_variable, .n_voltages = ARRAY_SIZE(ldo_f_typ_voltages), .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, { .name = "LDO_G", @@ -525,6 +530,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .ops = &regulator_ops_variable, .n_voltages = ARRAY_SIZE(ldo_g_typ_voltages), .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, { .name = "LDO_H", @@ -532,6 +538,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .ops = &regulator_ops_variable, .n_voltages = ARRAY_SIZE(ldo_h_typ_voltages), .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, { .name = "LDO_K", @@ -539,12 +546,14 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .ops = &regulator_ops_variable, .n_voltages = ARRAY_SIZE(ldo_k_typ_voltages), .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, { .name = "LDO_EXT", .id = AB3100_LDO_EXT, .ops = &regulator_ops_external, .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, { .name = "BUCK", @@ -552,6 +561,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .ops = &regulator_ops_variable_sleepable, .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, }, }; @@ -564,13 +574,12 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { static int __devinit ab3100_regulators_probe(struct platform_device *pdev) { struct
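
The conversion above swaps the ab3100-specific accessors for the generic abx500 ones, which take a struct device plus a bank number instead of the parent-chip handle. A user-space model of the enable path built on stand-in accessors with the same shape; the ON-mask value here is illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define REG_ON_MASK 0x40   /* illustrative enable bit */

/* Stand-ins for abx500_get/set_register_interruptible(dev, bank, reg, val);
 * a flat array plays the role of bank 0 of the chip. */
static uint8_t bank0[256];
static int get_reg(int bank, uint8_t reg, uint8_t *val)
{ (void)bank; *val = bank0[reg]; return 0; }
static int set_reg(int bank, uint8_t reg, uint8_t val)
{ (void)bank; bank0[reg] = val; return 0; }

/* Shape of ab3100_enable_regulator(): read-modify-write the ON bit. */
static int enable_regulator(uint8_t regreg)
{
    uint8_t v;
    int err = get_reg(0, regreg, &v);
    if (err)
        return err;
    return set_reg(0, regreg, v | REG_ON_MASK);
}

int main(void)
{
    enable_regulator(0x40);            /* AB3100_LDO_A lives at 0x40 */
    printf("reg 0x40 = 0x%02x\n", bank0[0x40]);
    return 0;
}
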
ab3100_platform_data *plfdata = pdev->dev.platform_data; - struct ab3100 *ab3100 = platform_get_drvdata(pdev); int err = 0; u8 data; int i; /* Check chip state */ - err = ab3100_get_register_interruptible(ab3100, + err = abx500_get_register_interruptible(&pdev->dev, 0, AB3100_LDO_D, &data); if (err) { dev_err(&pdev->dev, "could not read initial status of LDO_D\n"); @@ -585,7 +594,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev) /* Set up regulators */ for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) { - err = ab3100_set_register_interruptible(ab3100, + err = abx500_set_register_interruptible(&pdev->dev, 0, ab3100_reg_init_order[i], plfdata->reg_initvals[i]); if (err) { @@ -607,7 +616,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev) * see what it looks like for a certain machine, go * into the machine I2C setup. */ - reg->ab3100 = ab3100; + reg->dev = &pdev->dev; reg->plfdata = plfdata; /* diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c index d08cd9b66c6d..068d488a4f71 100644 --- a/drivers/regulator/bq24022.c +++ b/drivers/regulator/bq24022.c @@ -78,6 +78,7 @@ static struct regulator_desc bq24022_desc = { .name = "bq24022", .ops = &bq24022_ops, .type = REGULATOR_CURRENT, + .owner = THIS_MODULE, }; static int __init bq24022_probe(struct platform_device *pdev) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 51cf2bb37438..2248087b9be2 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -944,8 +944,13 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, has_dev = 0; list_for_each_entry(node, &regulator_map_list, list) { - if (consumer_dev_name != node->dev_name) + if (node->dev_name && consumer_dev_name) { + if (strcmp(node->dev_name, consumer_dev_name) != 0) + continue; + } else if (node->dev_name || consumer_dev_name) { continue; + } + if (strcmp(node->supply, supply) != 0) continue; @@ -976,29 +981,6 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, return 0; } -static void unset_consumer_device_supply(struct regulator_dev *rdev, - const char *consumer_dev_name, struct device *consumer_dev) -{ - struct regulator_map *node, *n; - - if (consumer_dev && !consumer_dev_name) - consumer_dev_name = dev_name(consumer_dev); - - list_for_each_entry_safe(node, n, &regulator_map_list, list) { - if (rdev != node->regulator) - continue; - - if (consumer_dev_name && node->dev_name && - strcmp(consumer_dev_name, node->dev_name)) - continue; - - list_del(&node->list); - kfree(node->dev_name); - kfree(node); - return; - } -} - static void unset_regulator_supplies(struct regulator_dev *rdev) { struct regulator_map *node, *n; @@ -1008,7 +990,6 @@ static void unset_regulator_supplies(struct regulator_dev *rdev) list_del(&node->list); kfree(node->dev_name); kfree(node); - return; } } } @@ -1764,6 +1745,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode) { struct regulator_dev *rdev = regulator->rdev; int ret; + int regulator_curr_mode; mutex_lock(&rdev->mutex); @@ -1773,6 +1755,15 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode) goto out; } + /* return if the same mode is requested */ + if (rdev->desc->ops->get_mode) { + regulator_curr_mode = rdev->desc->ops->get_mode(rdev); + if (regulator_curr_mode == mode) { + ret = 0; + goto out; + } + } + /* constraints check */ ret = regulator_check_mode(rdev, mode); if (ret < 0) @@ -2328,7 +2319,37 @@ struct regulator_dev *regulator_register(struct regulator_desc
*regulator_desc, goto scrub; /* set supply regulator if it exists */ + if (init_data->supply_regulator && init_data->supply_regulator_dev) { + dev_err(dev, + "Supply regulator specified by both name and dev\n"); + goto scrub; + } + + if (init_data->supply_regulator) { + struct regulator_dev *r; + int found = 0; + + list_for_each_entry(r, &regulator_list, list) { + if (strcmp(rdev_get_name(r), + init_data->supply_regulator) == 0) { + found = 1; + break; + } + } + + if (!found) { + dev_err(dev, "Failed to find supply %s\n", + init_data->supply_regulator); + goto scrub; + } + + ret = set_supply(rdev, r); + if (ret < 0) + goto scrub; + } + if (init_data->supply_regulator_dev) { + dev_warn(dev, "Uses supply_regulator_dev instead of regulator_supply\n"); ret = set_supply(rdev, dev_get_drvdata(init_data->supply_regulator_dev)); if (ret < 0) @@ -2341,13 +2362,8 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, init_data->consumer_supplies[i].dev, init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); - if (ret < 0) { - for (--i; i >= 0; i--) - unset_consumer_device_supply(rdev, - init_data->consumer_supplies[i].dev_name, - init_data->consumer_supplies[i].dev); - goto scrub; - } + if (ret < 0) + goto unset_supplies; } list_add(&rdev->list, &regulator_list); @@ -2355,6 +2371,9 @@ out: mutex_unlock(&regulator_list_mutex); return rdev; +unset_supplies: + unset_regulator_supplies(rdev); + scrub: device_unregister(&rdev->dev); /* device core frees rdev */ diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index ad036dd8da13..4597d508a229 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c @@ -440,8 +440,8 @@ static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev, dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", __func__, id, min_uV, max_uV); - if (min_uV > mc13783_regulators[id].voltages[0] && - max_uV < mc13783_regulators[id].voltages[0]) + if (min_uV >= mc13783_regulators[id].voltages[0] && + max_uV <= mc13783_regulators[id].voltages[0]) return 0; else return -EINVAL; @@ -649,6 +649,6 @@ static void __exit mc13783_regulator_exit(void) module_exit(mc13783_regulator_exit); MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de"); +MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC"); MODULE_ALIAS("platform:mc13783-regulator"); diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index 74841abcc9cc..14b4576281c5 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c @@ -22,68 +22,9 @@ #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> -#include <linux/i2c.h> #include <linux/delay.h> #include <linux/slab.h> - -/* Register definitions */ -#define TPS6507X_REG_PPATH1 0X01 -#define TPS6507X_REG_INT 0X02 -#define TPS6507X_REG_CHGCONFIG0 0X03 -#define TPS6507X_REG_CHGCONFIG1 0X04 -#define TPS6507X_REG_CHGCONFIG2 0X05 -#define TPS6507X_REG_CHGCONFIG3 0X06 -#define TPS6507X_REG_REG_ADCONFIG 0X07 -#define TPS6507X_REG_TSCMODE 0X08 -#define TPS6507X_REG_ADRESULT_1 0X09 -#define TPS6507X_REG_ADRESULT_2 0X0A -#define TPS6507X_REG_PGOOD 0X0B -#define TPS6507X_REG_PGOODMASK 0X0C -#define TPS6507X_REG_CON_CTRL1 0X0D -#define TPS6507X_REG_CON_CTRL2 0X0E -#define TPS6507X_REG_CON_CTRL3 0X0F -#define TPS6507X_REG_DEFDCDC1 0X10
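
The regulator_register() hunk above adds supply resolution by name: configurations giving both a name and a dev are rejected, the name is matched against the registered list via rdev_get_name(), and a failed consumer mapping now unwinds through the new unset_supplies label instead of an inline loop. A minimal model of the name lookup:

#include <stdio.h>
#include <string.h>

struct rdev { const char *name; struct rdev *supply; };

/* Model of the lookup added to regulator_register(): scan the list of
 * registered regulators for a matching name; failure aborts the
 * registration (the driver jumps to scrub). */
static int set_supply_by_name(struct rdev *rdev, const char *supply_name,
                              struct rdev **list, int n)
{
    for (int i = 0; i < n; i++) {
        if (strcmp(list[i]->name, supply_name) == 0) {
            rdev->supply = list[i];
            return 0;
        }
    }
    return -1;   /* "Failed to find supply %s" */
}

int main(void)
{
    struct rdev buck = { "BUCK1", NULL }, ldo = { "LDO1", NULL };
    struct rdev *list[] = { &buck };
    printf("ret=%d\n", set_supply_by_name(&ldo, "BUCK1", list, 1));
    printf("supply=%s\n", ldo.supply->name);
    return 0;
}
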
-#define TPS6507X_REG_DEFDCDC2_LOW 0X11 -#define TPS6507X_REG_DEFDCDC2_HIGH 0X12 -#define TPS6507X_REG_DEFDCDC3_LOW 0X13 -#define TPS6507X_REG_DEFDCDC3_HIGH 0X14 -#define TPS6507X_REG_DEFSLEW 0X15 -#define TPS6507X_REG_LDO_CTRL1 0X16 -#define TPS6507X_REG_DEFLDO2 0X17 -#define TPS6507X_REG_WLED_CTRL1 0X18 -#define TPS6507X_REG_WLED_CTRL2 0X19 - -/* CON_CTRL1 bitfields */ -#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4) -#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3) -#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2) -#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1) -#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0) - -/* DEFDCDC1 bitfields */ -#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7) -#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F - -/* DEFDCDC2_LOW bitfields */ -#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F - -/* DEFDCDC2_HIGH bitfields */ -#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F - -/* DEFDCDC3_LOW bitfields */ -#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F - -/* DEFDCDC3_HIGH bitfields */ -#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F - -/* TPS6507X_REG_LDO_CTRL1 bitfields */ -#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F - -/* TPS6507X_REG_DEFLDO2 bitfields */ -#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F - -/* VDCDC MASK */ -#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F +#include <linux/mfd/tps6507x.h> /* DCDC's */ #define TPS6507X_DCDC_1 0 @@ -162,101 +103,146 @@ struct tps_info { const u16 *table; }; -struct tps_pmic { +static const struct tps_info tps6507x_pmic_regs[] = { + { + .name = "VDCDC1", + .min_uV = 725000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), + .table = VDCDCx_VSEL_table, + }, + { + .name = "VDCDC2", + .min_uV = 725000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), + .table = VDCDCx_VSEL_table, + }, + { + .name = "VDCDC3", + .min_uV = 725000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), + .table = VDCDCx_VSEL_table, + }, + { + .name = "LDO1", + .min_uV = 1000000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(LDO1_VSEL_table), + .table = LDO1_VSEL_table, + }, + { + .name = "LDO2", + .min_uV = 725000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(LDO2_VSEL_table), + .table = LDO2_VSEL_table, + }, +}; + +struct tps6507x_pmic { struct regulator_desc desc[TPS6507X_NUM_REGULATOR]; - struct i2c_client *client; + struct tps6507x_dev *mfd; struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR]; const struct tps_info *info[TPS6507X_NUM_REGULATOR]; struct mutex io_lock; }; - -static inline int tps_6507x_read(struct tps_pmic *tps, u8 reg) +static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg) { - return i2c_smbus_read_byte_data(tps->client, reg); + u8 val; + int err; + + err = tps->mfd->read_dev(tps->mfd, reg, 1, &val); + + if (err) + return err; + + return val; } -static inline int tps_6507x_write(struct tps_pmic *tps, u8 reg, u8 val) +static inline int tps6507x_pmic_write(struct tps6507x_pmic *tps, u8 reg, u8 val) { - return i2c_smbus_write_byte_data(tps->client, reg, val); + return tps->mfd->write_dev(tps->mfd, reg, 1, &val); } -static int tps_6507x_set_bits(struct tps_pmic *tps, u8 reg, u8 mask) +static int tps6507x_pmic_set_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask) { int err, data; mutex_lock(&tps->io_lock); - data = tps_6507x_read(tps, reg); + data = tps6507x_pmic_read(tps, reg); if (data < 0) { - dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg); err = data; goto out; } data |= mask; - err = tps_6507x_write(tps, 
reg, data); + err = tps6507x_pmic_write(tps, reg, data); if (err) - dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg); out: mutex_unlock(&tps->io_lock); return err; } -static int tps_6507x_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask) +static int tps6507x_pmic_clear_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask) { int err, data; mutex_lock(&tps->io_lock); - data = tps_6507x_read(tps, reg); + data = tps6507x_pmic_read(tps, reg); if (data < 0) { - dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg); err = data; goto out; } data &= ~mask; - err = tps_6507x_write(tps, reg, data); + err = tps6507x_pmic_write(tps, reg, data); if (err) - dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg); out: mutex_unlock(&tps->io_lock); return err; } -static int tps_6507x_reg_read(struct tps_pmic *tps, u8 reg) +static int tps6507x_pmic_reg_read(struct tps6507x_pmic *tps, u8 reg) { int data; mutex_lock(&tps->io_lock); - data = tps_6507x_read(tps, reg); + data = tps6507x_pmic_read(tps, reg); if (data < 0) - dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg); mutex_unlock(&tps->io_lock); return data; } -static int tps_6507x_reg_write(struct tps_pmic *tps, u8 reg, u8 val) +static int tps6507x_pmic_reg_write(struct tps6507x_pmic *tps, u8 reg, u8 val) { int err; mutex_lock(&tps->io_lock); - err = tps_6507x_write(tps, reg, val); + err = tps6507x_pmic_write(tps, reg, val); if (err < 0) - dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg); mutex_unlock(&tps->io_lock); return err; } -static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev) +static int tps6507x_pmic_dcdc_is_enabled(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, dcdc = rdev_get_id(dev); u8 shift; @@ -264,7 +250,7 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev) return -EINVAL; shift = TPS6507X_MAX_REG_ID - dcdc; - data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1); + data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1); if (data < 0) return data; @@ -272,9 +258,9 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev) return (data & 1<<shift) ? 1 : 0; } -static int tps6507x_ldo_is_enabled(struct regulator_dev *dev) +static int tps6507x_pmic_ldo_is_enabled(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, ldo = rdev_get_id(dev); u8 shift; @@ -282,7 +268,7 @@ static int tps6507x_ldo_is_enabled(struct regulator_dev *dev) return -EINVAL; shift = TPS6507X_MAX_REG_ID - ldo; - data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1); + data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1); if (data < 0) return data; @@ -290,9 +276,9 @@ static int tps6507x_ldo_is_enabled(struct regulator_dev *dev) return (data & 1<<shift) ? 
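
The tps6507x helpers above keep the register read and the write-back of each read-modify-write under one io_lock, now routed through the MFD's read_dev/write_dev hooks instead of raw SMBus transfers. A user-space model of the set_bits path; the register array and accessor names are stand-ins:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t fake_regs[32];   /* stand-in for the PMIC register file */
static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;
static int read_dev(uint8_t reg, uint8_t *val) { *val = fake_regs[reg]; return 0; }
static int write_dev(uint8_t reg, uint8_t val) { fake_regs[reg] = val; return 0; }

/* Shape of tps6507x_pmic_set_bits(): the read and the write-back sit
 * under one lock so concurrent enable/disable calls cannot lose each
 * other's bits. */
static int set_bits(uint8_t reg, uint8_t mask)
{
    uint8_t v;
    int err;

    pthread_mutex_lock(&io_lock);
    err = read_dev(reg, &v);
    if (!err)
        err = write_dev(reg, v | mask);
    pthread_mutex_unlock(&io_lock);
    return err;
}

int main(void)
{
    set_bits(0x0D, 1 << 4);   /* CON_CTRL1 at 0x0D, DCDC1 enable bit */
    printf("ctrl1=0x%02x\n", fake_regs[0x0D]);
    return 0;
}
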
1 : 0; } -static int tps6507x_dcdc_enable(struct regulator_dev *dev) +static int tps6507x_pmic_dcdc_enable(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); u8 shift; @@ -300,12 +286,12 @@ static int tps6507x_dcdc_enable(struct regulator_dev *dev) return -EINVAL; shift = TPS6507X_MAX_REG_ID - dcdc; - return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); + return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); } -static int tps6507x_dcdc_disable(struct regulator_dev *dev) +static int tps6507x_pmic_dcdc_disable(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); u8 shift; @@ -313,12 +299,13 @@ static int tps6507x_dcdc_disable(struct regulator_dev *dev) return -EINVAL; shift = TPS6507X_MAX_REG_ID - dcdc; - return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); + return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1, + 1 << shift); } -static int tps6507x_ldo_enable(struct regulator_dev *dev) +static int tps6507x_pmic_ldo_enable(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev); u8 shift; @@ -326,12 +313,12 @@ static int tps6507x_ldo_enable(struct regulator_dev *dev) return -EINVAL; shift = TPS6507X_MAX_REG_ID - ldo; - return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); + return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); } -static int tps6507x_ldo_disable(struct regulator_dev *dev) +static int tps6507x_pmic_ldo_disable(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev); u8 shift; @@ -339,12 +326,13 @@ static int tps6507x_ldo_disable(struct regulator_dev *dev) return -EINVAL; shift = TPS6507X_MAX_REG_ID - ldo; - return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); + return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1, + 1 << shift); } -static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev) +static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, dcdc = rdev_get_id(dev); u8 reg; @@ -362,7 +350,7 @@ static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev) return -EINVAL; } - data = tps_6507x_reg_read(tps, reg); + data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; @@ -370,10 +358,10 @@ static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev) return tps->info[dcdc]->table[data] * 1000; } -static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev, +static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, vsel, dcdc = rdev_get_id(dev); u8 reg; @@ -411,19 +399,19 @@ static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev, if (vsel == tps->info[dcdc]->table_len) return -EINVAL; - data = tps_6507x_reg_read(tps, reg); + data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; data &= ~TPS6507X_DEFDCDCX_DCDC_MASK; data |= vsel; - return tps_6507x_reg_write(tps, reg, data); + return tps6507x_pmic_reg_write(tps, reg, data); } -static int 
tps6507x_ldo_get_voltage(struct regulator_dev *dev) +static int tps6507x_pmic_ldo_get_voltage(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, ldo = rdev_get_id(dev); u8 reg, mask; @@ -437,7 +425,7 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev) TPS6507X_REG_DEFLDO2_LDO2_MASK); } - data = tps_6507x_reg_read(tps, reg); + data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; @@ -445,10 +433,10 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev) return tps->info[ldo]->table[data] * 1000; } -static int tps6507x_ldo_set_voltage(struct regulator_dev *dev, +static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, vsel, ldo = rdev_get_id(dev); u8 reg, mask; @@ -479,20 +467,20 @@ static int tps6507x_ldo_set_voltage(struct regulator_dev *dev, if (vsel == tps->info[ldo]->table_len) return -EINVAL; - data = tps_6507x_reg_read(tps, reg); + data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; data &= ~mask; data |= vsel; - return tps_6507x_reg_write(tps, reg, data); + return tps6507x_pmic_reg_write(tps, reg, data); } -static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev, +static int tps6507x_pmic_dcdc_list_voltage(struct regulator_dev *dev, unsigned selector) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3) @@ -504,10 +492,10 @@ static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev, return tps->info[dcdc]->table[selector] * 1000; } -static int tps6507x_ldo_list_voltage(struct regulator_dev *dev, +static int tps6507x_pmic_ldo_list_voltage(struct regulator_dev *dev, unsigned selector) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev); if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2) @@ -520,47 +508,54 @@ static int tps6507x_ldo_list_voltage(struct regulator_dev *dev, } /* Operations permitted on VDCDCx */ -static struct regulator_ops tps6507x_dcdc_ops = { - .is_enabled = tps6507x_dcdc_is_enabled, - .enable = tps6507x_dcdc_enable, - .disable = tps6507x_dcdc_disable, - .get_voltage = tps6507x_dcdc_get_voltage, - .set_voltage = tps6507x_dcdc_set_voltage, - .list_voltage = tps6507x_dcdc_list_voltage, +static struct regulator_ops tps6507x_pmic_dcdc_ops = { + .is_enabled = tps6507x_pmic_dcdc_is_enabled, + .enable = tps6507x_pmic_dcdc_enable, + .disable = tps6507x_pmic_dcdc_disable, + .get_voltage = tps6507x_pmic_dcdc_get_voltage, + .set_voltage = tps6507x_pmic_dcdc_set_voltage, + .list_voltage = tps6507x_pmic_dcdc_list_voltage, }; /* Operations permitted on LDOx */ -static struct regulator_ops tps6507x_ldo_ops = { - .is_enabled = tps6507x_ldo_is_enabled, - .enable = tps6507x_ldo_enable, - .disable = tps6507x_ldo_disable, - .get_voltage = tps6507x_ldo_get_voltage, - .set_voltage = tps6507x_ldo_set_voltage, - .list_voltage = tps6507x_ldo_list_voltage, +static struct regulator_ops tps6507x_pmic_ldo_ops = { + .is_enabled = tps6507x_pmic_ldo_is_enabled, + .enable = tps6507x_pmic_ldo_enable, + .disable = tps6507x_pmic_ldo_disable, + .get_voltage = tps6507x_pmic_ldo_get_voltage, + .set_voltage = tps6507x_pmic_ldo_set_voltage, + .list_voltage = tps6507x_pmic_ldo_list_voltage, }; -static int 
__devinit tps_6507x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static __devinit +int tps6507x_pmic_probe(struct platform_device *pdev) { + struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); static int desc_id; - const struct tps_info *info = (void *)id->driver_data; + const struct tps_info *info = &tps6507x_pmic_regs[0]; struct regulator_init_data *init_data; struct regulator_dev *rdev; - struct tps_pmic *tps; + struct tps6507x_pmic *tps; + struct tps6507x_board *tps_board; int i; int error; - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE_DATA)) - return -EIO; + /** + * tps_board points to pmic related constants + * coming from the board-evm file. + */ + + tps_board = dev_get_platdata(tps6507x_dev->dev); + if (!tps_board) + return -EINVAL; /** * init_data points to array of regulator_init structures * coming from the board-evm file. */ - init_data = client->dev.platform_data; + init_data = tps_board->tps6507x_pmic_init_data; if (!init_data) - return -EIO; + return -EINVAL; tps = kzalloc(sizeof(*tps), GFP_KERNEL); if (!tps) @@ -569,7 +564,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client, mutex_init(&tps->io_lock); /* common for all regulators */ - tps->client = client; + tps->mfd = tps6507x_dev; for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) { /* Register the regulators */ @@ -578,15 +573,16 @@ static int __devinit tps_6507x_probe(struct i2c_client *client, tps->desc[i].id = desc_id++; tps->desc[i].n_voltages = num_voltages[i]; tps->desc[i].ops = (i > TPS6507X_DCDC_3 ? - &tps6507x_ldo_ops : &tps6507x_dcdc_ops); + &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops); tps->desc[i].type = REGULATOR_VOLTAGE; tps->desc[i].owner = THIS_MODULE; rdev = regulator_register(&tps->desc[i], - &client->dev, init_data, tps); + tps6507x_dev->dev, init_data, tps); if (IS_ERR(rdev)) { - dev_err(&client->dev, "failed to register %s\n", - id->name); + dev_err(tps6507x_dev->dev, + "failed to register %s regulator\n", + pdev->name); error = PTR_ERR(rdev); goto fail; } @@ -595,7 +591,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client, tps->rdev[i] = rdev; } - i2c_set_clientdata(client, tps); + tps6507x_dev->pmic = tps; return 0; @@ -608,19 +604,17 @@ fail: } /** - * tps_6507x_remove - TPS6507x driver i2c remove handler + * tps6507x_remove - TPS6507x driver i2c remove handler * @client: i2c driver client device structure * * Unregister TPS driver as an i2c client device driver */ -static int __devexit tps_6507x_remove(struct i2c_client *client) +static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) { - struct tps_pmic *tps = i2c_get_clientdata(client); + struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); + struct tps6507x_pmic *tps = tps6507x_dev->pmic; int i; - /* clear the client data in i2c */ - i2c_set_clientdata(client, NULL); - for (i = 0; i < TPS6507X_NUM_REGULATOR; i++) regulator_unregister(tps->rdev[i]); @@ -629,83 +623,38 @@ static int __devexit tps_6507x_remove(struct i2c_client *client) return 0; } -static const struct tps_info tps6507x_regs[] = { - { - .name = "VDCDC1", - .min_uV = 725000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), - .table = VDCDCx_VSEL_table, - }, - { - .name = "VDCDC2", - .min_uV = 725000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), - .table = VDCDCx_VSEL_table, - }, - { - .name = "VDCDC3", - .min_uV = 725000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), - .table = 
VDCDCx_VSEL_table, - }, - { - .name = "LDO1", - .min_uV = 1000000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(LDO1_VSEL_table), - .table = LDO1_VSEL_table, - }, - { - .name = "LDO2", - .min_uV = 725000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(LDO2_VSEL_table), - .table = LDO2_VSEL_table, - }, -}; - -static const struct i2c_device_id tps_6507x_id[] = { - {.name = "tps6507x", - .driver_data = (unsigned long) tps6507x_regs,}, - { }, -}; -MODULE_DEVICE_TABLE(i2c, tps_6507x_id); - -static struct i2c_driver tps_6507x_i2c_driver = { +static struct platform_driver tps6507x_pmic_driver = { .driver = { - .name = "tps6507x", + .name = "tps6507x-pmic", .owner = THIS_MODULE, }, - .probe = tps_6507x_probe, - .remove = __devexit_p(tps_6507x_remove), - .id_table = tps_6507x_id, + .probe = tps6507x_pmic_probe, + .remove = __devexit_p(tps6507x_pmic_remove), }; /** - * tps_6507x_init + * tps6507x_pmic_init * * Module init function */ -static int __init tps_6507x_init(void) +static int __init tps6507x_pmic_init(void) { - return i2c_add_driver(&tps_6507x_i2c_driver); + return platform_driver_register(&tps6507x_pmic_driver); } -subsys_initcall(tps_6507x_init); +subsys_initcall(tps6507x_pmic_init); /** - * tps_6507x_cleanup + * tps6507x_pmic_cleanup * * Module exit function */ -static void __exit tps_6507x_cleanup(void) +static void __exit tps6507x_pmic_cleanup(void) { - i2c_del_driver(&tps_6507x_i2c_driver); + platform_driver_unregister(&tps6507x_pmic_driver); } -module_exit(tps_6507x_cleanup); +module_exit(tps6507x_pmic_cleanup); MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("TPS6507x voltage regulator driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:tps6507x-pmic"); diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 9729d760fb4d..7e5892efc437 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -49,6 +49,7 @@ struct twlreg_info { /* chip constraints on regulator behavior */ u16 min_mV; + u16 max_mV; /* used by regulator core */ struct regulator_desc desc; @@ -318,31 +319,8 @@ static const u16 VIO_VSEL_table[] = { static const u16 VINTANA2_VSEL_table[] = { 2500, 2750, }; -static const u16 VAUX1_6030_VSEL_table[] = { - 1000, 1300, 1800, 2500, - 2800, 2900, 3000, 3000, -}; -static const u16 VAUX2_6030_VSEL_table[] = { - 1200, 1800, 2500, 2750, - 2800, 2800, 2800, 2800, -}; -static const u16 VAUX3_6030_VSEL_table[] = { - 1000, 1200, 1300, 1800, - 2500, 2800, 3000, 3000, -}; -static const u16 VMMC_VSEL_table[] = { - 1200, 1800, 2800, 2900, - 3000, 3000, 3000, 3000, -}; -static const u16 VPP_VSEL_table[] = { - 1800, 1900, 2000, 2100, - 2200, 2300, 2400, 2500, -}; -static const u16 VUSIM_VSEL_table[] = { - 1200, 1800, 2500, 2900, -}; -static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index) +static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) { struct twlreg_info *info = rdev_get_drvdata(rdev); int mV = info->table[index]; @@ -351,7 +329,7 @@ static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index) } static int -twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) +twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { struct twlreg_info *info = rdev_get_drvdata(rdev); int vsel; @@ -375,7 +353,7 @@ twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) return -EDOM; } -static int twlldo_get_voltage(struct regulator_dev *rdev) +static int twl4030ldo_get_voltage(struct regulator_dev *rdev) { 
struct twlreg_info *info = rdev_get_drvdata(rdev); int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, @@ -388,11 +366,67 @@ static int twlldo_get_voltage(struct regulator_dev *rdev) return LDO_MV(info->table[vsel]) * 1000; } -static struct regulator_ops twlldo_ops = { - .list_voltage = twlldo_list_voltage, +static struct regulator_ops twl4030ldo_ops = { + .list_voltage = twl4030ldo_list_voltage, - .set_voltage = twlldo_set_voltage, - .get_voltage = twlldo_get_voltage, + .set_voltage = twl4030ldo_set_voltage, + .get_voltage = twl4030ldo_get_voltage, + + .enable = twlreg_enable, + .disable = twlreg_disable, + .is_enabled = twlreg_is_enabled, + + .set_mode = twlreg_set_mode, + + .get_status = twlreg_get_status, +}; + +static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + return ((info->min_mV + (index * 100)) * 1000); +} + +static int +twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel; + + if ((min_uV/1000 < info->min_mV) || (max_uV/1000 > info->max_mV)) + return -EDOM; + + /* + * Use the below formula to calculate vsel + * mV = 1000mv + 100mv * (vsel - 1) + */ + vsel = (min_uV/1000 - 1000)/100 + 1; + return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel); + +} + +static int twl6030ldo_get_voltage(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, + VREG_VOLTAGE); + + if (vsel < 0) + return vsel; + + /* + * Use the below formula to calculate vsel + * mV = 1000mv + 100mv * (vsel - 1) + */ + return (1000 + (100 * (vsel - 1))) * 1000; +} + +static struct regulator_ops twl6030ldo_ops = { + .list_voltage = twl6030ldo_list_voltage, + + .set_voltage = twl6030ldo_set_voltage, + .get_voltage = twl6030ldo_get_voltage, .enable = twlreg_enable, .disable = twlreg_disable, @@ -438,24 +472,16 @@ static struct regulator_ops twlfixed_ops = { /*----------------------------------------------------------------------*/ -#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \ - TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \ - remap_conf, TWL4030) #define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ remap_conf) \ TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ remap_conf, TWL4030) -#define TWL6030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \ - remap_conf) \ - TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \ - remap_conf, TWL6030) #define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ remap_conf) \ TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ remap_conf, TWL6030) -#define TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf, \ - family) { \ +#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \ .base = offset, \ .id = num, \ .table_len = ARRAY_SIZE(label##_VSEL_table), \ @@ -464,14 +490,32 @@ static struct regulator_ops twlfixed_ops = { .remap = remap_conf, \ .desc = { \ .name = #label, \ - .id = family##_REG_##label, \ + .id = TWL4030_REG_##label, \ .n_voltages = ARRAY_SIZE(label##_VSEL_table), \ - .ops = &twlldo_ops, \ + .ops = &twl4030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ }, \ } +#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \ + remap_conf) { \ + .base = offset, \ + .id = num, \ + .min_mV = min_mVolts, \ + .max_mV = max_mVolts, \ + .remap = 
remap_conf, \ + .desc = { \ + .name = #label, \ + .id = TWL6030_REG_##label, \ + .n_voltages = (max_mVolts - min_mVolts)/100, \ + .ops = &twl6030ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + } + + #define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \ family) { \ .base = offset, \ @@ -519,12 +563,12 @@ static struct twlreg_info twl_regs[] = { /* 6030 REG with base as PMC Slave Misc : 0x0030 */ /* Turnon-delay and remap configuration values for 6030 are not verified since the specification is not public */ - TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x21), - TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x21), - TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x21), - TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x21), - TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x21), - TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x21), + TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1, 0x21), + TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2, 0x21), + TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3, 0x21), + TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4, 0x21), + TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5, 0x21), + TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7, 0x21), TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21), TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21), TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21), diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 50ac047cd136..10ba12c8c5e0 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -611,6 +611,13 @@ config RTC_DRV_AB3100 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC support. This chip contains a battery- and capacitor-backed RTC. +config RTC_DRV_AB8500 + tristate "ST-Ericsson AB8500 RTC" + depends on AB8500_CORE + help + Select this to enable the ST-Ericsson AB8500 power management IC RTC + support. This chip contains a battery- and capacitor-backed RTC. + config RTC_DRV_NUC900 tristate "NUC910/NUC920 RTC driver" depends on RTC_CLASS && ARCH_W90X900 @@ -640,7 +647,7 @@ config RTC_DRV_OMAP config RTC_DRV_S3C tristate "Samsung S3C series SoC RTC" - depends on ARCH_S3C2410 + depends on ARCH_S3C2410 || ARCH_S3C64XX help RTC (Realtime Clock) driver for the clock inbuilt into the Samsung S3C24XX series of SoCs. This can provide periodic diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 245311a1348f..5adbba7cf89c 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -18,6 +18,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o # Keep the list ordered. 
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o +obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c index 4704aac2b5af..d26780ea254b 100644 --- a/drivers/rtc/rtc-ab3100.c +++ b/drivers/rtc/rtc-ab3100.c @@ -9,7 +9,7 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/rtc.h> -#include <linux/mfd/ab3100.h> +#include <linux/mfd/abx500.h> /* Clock rate in Hz */ #define AB3100_RTC_CLOCK_RATE 32768 @@ -45,7 +45,6 @@ */ static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2, AB3100_TI3, AB3100_TI4, AB3100_TI5}; unsigned char buf[6]; @@ -61,27 +60,26 @@ static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs) buf[5] = (fat_time >> 40) & 0xFF; for (i = 0; i < 6; i++) { - err = ab3100_set_register_interruptible(ab3100_data, + err = abx500_set_register_interruptible(dev, 0, regs[i], buf[i]); if (err) return err; } /* Set the flag to mark that the clock is now set */ - return ab3100_mask_and_set_register_interruptible(ab3100_data, + return abx500_mask_and_set_register_interruptible(dev, 0, AB3100_RTC, - 0xFE, 0x01); + 0x01, 0x01); } static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); unsigned long time; u8 rtcval; int err; - err = ab3100_get_register_interruptible(ab3100_data, + err = abx500_get_register_interruptible(dev, 0, AB3100_RTC, &rtcval); if (err) return err; @@ -94,7 +92,7 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm) u8 buf[6]; /* Read out time registers */ - err = ab3100_get_register_page_interruptible(ab3100_data, + err = abx500_get_register_page_interruptible(dev, 0, AB3100_TI0, buf, 6); if (err != 0) @@ -114,7 +112,6 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm) static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); unsigned long time; u64 fat_time; u8 buf[6]; @@ -122,7 +119,7 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) int err; /* Figure out if alarm is enabled or not */ - err = ab3100_get_register_interruptible(ab3100_data, + err = abx500_get_register_interruptible(dev, 0, AB3100_RTC, &rtcval); if (err) return err; @@ -133,7 +130,7 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) /* No idea how this could be represented */ alarm->pending = 0; /* Read out alarm registers, only 4 bytes */ - err = ab3100_get_register_page_interruptible(ab3100_data, + err = abx500_get_register_page_interruptible(dev, 0, AB3100_AL0, buf, 4); if (err) return err; @@ -148,7 +145,6 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); u8 regs[] = {AB3100_AL0, AB3100_AL1, AB3100_AL2, AB3100_AL3}; unsigned char buf[4]; unsigned long secs; @@ -165,21 +161,19 @@ static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) /* Set the alarm */ for (i = 0; i < 4; i++) { - err = ab3100_set_register_interruptible(ab3100_data, + err = abx500_set_register_interruptible(dev, 0, regs[i], buf[i]); if (err) 
return err; } /* Then enable the alarm */ - return ab3100_mask_and_set_register_interruptible(ab3100_data, - AB3100_RTC, ~(1 << 2), + return abx500_mask_and_set_register_interruptible(dev, 0, + AB3100_RTC, (1 << 2), alarm->enabled << 2); } static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); - /* * It's not possible to enable/disable the alarm IRQ for this RTC. * It does not actually trigger any IRQ: instead its only function is @@ -188,12 +182,12 @@ static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled) * and need to be handled there instead. */ if (enabled) - return ab3100_mask_and_set_register_interruptible(ab3100_data, - AB3100_RTC, ~(1 << 2), + return abx500_mask_and_set_register_interruptible(dev, 0, + AB3100_RTC, (1 << 2), 1 << 2); else - return ab3100_mask_and_set_register_interruptible(ab3100_data, - AB3100_RTC, ~(1 << 2), + return abx500_mask_and_set_register_interruptible(dev, 0, + AB3100_RTC, (1 << 2), 0); } @@ -210,10 +204,9 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev) int err; u8 regval; struct rtc_device *rtc; - struct ab3100 *ab3100_data = platform_get_drvdata(pdev); /* The first RTC register needs special treatment */ - err = ab3100_get_register_interruptible(ab3100_data, + err = abx500_get_register_interruptible(&pdev->dev, 0, AB3100_RTC, ®val); if (err) { dev_err(&pdev->dev, "unable to read RTC register\n"); @@ -231,7 +224,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev) * This bit remains until RTC power is lost. */ regval = 1 | RTC_SETTING; - err = ab3100_set_register_interruptible(ab3100_data, + err = abx500_set_register_interruptible(&pdev->dev, 0, AB3100_RTC, regval); /* Ignore any error on this write */ } diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c new file mode 100644 index 000000000000..2fda03125e55 --- /dev/null +++ b/drivers/rtc/rtc-ab8500.c @@ -0,0 +1,363 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License terms: GNU General Public License (GPL) version 2 + * Author: Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com> + * + * RTC clock driver for the RTC part of the AB8500 Power management chip. 
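The ab3100 to abx500 conversion in the rtc-ab3100.c hunks above is not a pure rename: the new calls gain a leading bank argument (0 here), and the mask argument flips polarity. Judging from the converted call sites alone (0xFE becomes 0x01, ~(1 << 2) becomes (1 << 2)), the old interface appears to take the bits to preserve, while the new one takes the bits to modify. A minimal sketch of the two read-modify-write conventions, assuming exactly that semantics (this is inferred, not taken from the mfd implementations):

/* sketch only: semantics inferred from the converted call sites above */
static u8 rmw_ab3100_style(u8 old, u8 keepmask, u8 setbits)
{
	return (old & keepmask) | setbits;	/* mask = bits to keep */
}

static u8 rmw_abx500_style(u8 old, u8 mask, u8 value)
{
	return (old & ~mask) | (value & mask);	/* mask = bits to change */
}

Under that reading, both forms set bit 0 of AB3100_RTC when called with (0xFE, 0x01) and (0x01, 0x01) respectively, which is why the constants change in lockstep with the function names.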
+ * Based on RTC clock driver for the AB3100 Analog Baseband Chip by + * Linus Walleij <linus.walleij@stericsson.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/mfd/ab8500.h> +#include <linux/delay.h> + +#define AB8500_RTC_SOFF_STAT_REG 0x0F00 +#define AB8500_RTC_CC_CONF_REG 0x0F01 +#define AB8500_RTC_READ_REQ_REG 0x0F02 +#define AB8500_RTC_WATCH_TSECMID_REG 0x0F03 +#define AB8500_RTC_WATCH_TSECHI_REG 0x0F04 +#define AB8500_RTC_WATCH_TMIN_LOW_REG 0x0F05 +#define AB8500_RTC_WATCH_TMIN_MID_REG 0x0F06 +#define AB8500_RTC_WATCH_TMIN_HI_REG 0x0F07 +#define AB8500_RTC_ALRM_MIN_LOW_REG 0x0F08 +#define AB8500_RTC_ALRM_MIN_MID_REG 0x0F09 +#define AB8500_RTC_ALRM_MIN_HI_REG 0x0F0A +#define AB8500_RTC_STAT_REG 0x0F0B +#define AB8500_RTC_BKUP_CHG_REG 0x0F0C +#define AB8500_RTC_FORCE_BKUP_REG 0x0F0D +#define AB8500_RTC_CALIB_REG 0x0F0E +#define AB8500_RTC_SWITCH_STAT_REG 0x0F0F +#define AB8500_REV_REG 0x1080 + +/* RtcReadRequest bits */ +#define RTC_READ_REQUEST 0x01 +#define RTC_WRITE_REQUEST 0x02 + +/* RtcCtrl bits */ +#define RTC_ALARM_ENA 0x04 +#define RTC_STATUS_DATA 0x01 + +#define COUNTS_PER_SEC (0xF000 / 60) +#define AB8500_RTC_EPOCH 2000 + +static const unsigned long ab8500_rtc_time_regs[] = { + AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG, + AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG, + AB8500_RTC_WATCH_TSECMID_REG +}; + +static const unsigned long ab8500_rtc_alarm_regs[] = { + AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG, + AB8500_RTC_ALRM_MIN_LOW_REG +}; + +/* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */ +static unsigned long get_elapsed_seconds(int year) +{ + unsigned long secs; + struct rtc_time tm = { + .tm_year = year - 1900, + .tm_mday = 1, + }; + + /* + * This function calculates secs from 1970 and not from + * 1900, even if we supply the offset from year 1900. 
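get_elapsed_seconds() rebases the hardware counter, which starts counting at 01-01-2000, onto the 1970 epoch the RTC core expects. A quick worked check of the constant involved (editor's arithmetic, assuming rtc_tm_to_time() performs the standard Gregorian conversion): 1970-01-01 to 2000-01-01 spans 30 * 365 days plus 7 leap days (1972 through 1996), i.e. 10957 days.

/* hypothetical self-check, not part of the patch */
static void ab8500_epoch_selftest(void)
{
	WARN_ON(get_elapsed_seconds(2000) != 10957UL * 86400);	/* 946684800 */
}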
+ */ + rtc_tm_to_time(&tm, &secs); + return secs; +} + +static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + unsigned long timeout = jiffies + HZ; + int retval, i; + unsigned long mins, secs; + unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)]; + + /* Request a data read */ + retval = ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, + RTC_READ_REQUEST); + if (retval < 0) + return retval; + + /* Early AB8500 chips will not clear the rtc read request bit */ + if (ab8500->revision == 0) { + msleep(1); + } else { + /* Wait for some cycles after enabling the rtc read in ab8500 */ + while (time_before(jiffies, timeout)) { + retval = ab8500_read(ab8500, AB8500_RTC_READ_REQ_REG); + if (retval < 0) + return retval; + + if (!(retval & RTC_READ_REQUEST)) + break; + + msleep(1); + } + } + + /* Read the Watchtime registers */ + for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) { + retval = ab8500_read(ab8500, ab8500_rtc_time_regs[i]); + if (retval < 0) + return retval; + buf[i] = retval; + } + + mins = (buf[0] << 16) | (buf[1] << 8) | buf[2]; + + secs = (buf[3] << 8) | buf[4]; + secs = secs / COUNTS_PER_SEC; + secs = secs + (mins * 60); + + /* Add back the initially subtracted number of seconds */ + secs += get_elapsed_seconds(AB8500_RTC_EPOCH); + + rtc_time_to_tm(secs, tm); + return rtc_valid_tm(tm); +} + +static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + int retval, i; + unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)]; + unsigned long no_secs, no_mins, secs = 0; + + if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) { + dev_dbg(dev, "year should be equal to or greater than %d\n", + AB8500_RTC_EPOCH); + return -EINVAL; + } + + /* Get the number of seconds since 1970 */ + rtc_tm_to_time(tm, &secs); + + /* + * Convert it to the number of seconds since 01-01-2000 00:00:00, since + * we only have a small counter in the RTC. 
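The "small counter" is spread over five registers: 24 bits of minutes plus a 16-bit sub-minute count. Note that COUNTS_PER_SEC is 0xF000 / 60 = 1024, so the sub-minute register ticks at 1024 Hz (presumably the 32.768 kHz crystal divided down) and wraps at 0xF000 after one full minute. A consolidated decode of that layout, restating the read path above as one helper (a sketch; the layout is assumed from ab8500_rtc_read_time()):

/* assumed layout: buf[0..2] minutes big-endian, buf[3..4] 1/1024 s ticks */
static unsigned long ab8500_buf_to_secs(const u8 buf[5])
{
	unsigned long mins = (buf[0] << 16) | (buf[1] << 8) | buf[2];
	unsigned long ticks = (buf[3] << 8) | buf[4];	/* always < 0xF000 */

	return mins * 60 + ticks / COUNTS_PER_SEC;
}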
+ */ + secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); + + no_mins = secs / 60; + + no_secs = secs % 60; + /* Make the seconds count as per the RTC resolution */ + no_secs = no_secs * COUNTS_PER_SEC; + + buf[4] = no_secs & 0xFF; + buf[3] = (no_secs >> 8) & 0xFF; + + buf[2] = no_mins & 0xFF; + buf[1] = (no_mins >> 8) & 0xFF; + buf[0] = (no_mins >> 16) & 0xFF; + + for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) { + retval = ab8500_write(ab8500, ab8500_rtc_time_regs[i], buf[i]); + if (retval < 0) + return retval; + } + + /* Request a data write */ + return ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST); +} + +static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + int retval, i; + int rtc_ctrl; + unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; + unsigned long secs, mins; + + /* Check if the alarm is enabled or not */ + rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG); + if (rtc_ctrl < 0) + return rtc_ctrl; + + if (rtc_ctrl & RTC_ALARM_ENA) + alarm->enabled = 1; + else + alarm->enabled = 0; + + alarm->pending = 0; + + for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) { + retval = ab8500_read(ab8500, ab8500_rtc_alarm_regs[i]); + if (retval < 0) + return retval; + buf[i] = retval; + } + + mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]); + secs = mins * 60; + + /* Add back the initially subtracted number of seconds */ + secs += get_elapsed_seconds(AB8500_RTC_EPOCH); + + rtc_time_to_tm(secs, &alarm->time); + + return rtc_valid_tm(&alarm->time); +} + +static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + + return ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_ALARM_ENA, + enabled ? RTC_ALARM_ENA : 0); +} + +static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + int retval, i; + unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; + unsigned long mins, secs = 0; + + if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) { + dev_dbg(dev, "year should be equal to or greater than %d\n", + AB8500_RTC_EPOCH); + return -EINVAL; + } + + /* Get the number of seconds since 1970 */ + rtc_tm_to_time(&alarm->time, &secs); + + /* + * Convert it to the number of seconds since 01-01-2000 00:00:00, since + * we only have a small counter in the RTC. 
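For alarms only the three minute registers exist, so the hardware resolution is one minute: the mins = secs / 60 division below silently discards the seconds, and the alarm can fire up to 59 seconds before the requested time. A variant that never fires early would round up instead (a hypothetical alternative, not what the patch does):

	/* hypothetical: round up so the alarm is never early, only late */
	mins = DIV_ROUND_UP(secs, 60);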
+ */ + secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); + + mins = secs / 60; + + buf[2] = mins & 0xFF; + buf[1] = (mins >> 8) & 0xFF; + buf[0] = (mins >> 16) & 0xFF; + + /* Set the alarm time */ + for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) { + retval = ab8500_write(ab8500, ab8500_rtc_alarm_regs[i], buf[i]); + if (retval < 0) + return retval; + } + + return ab8500_rtc_irq_enable(dev, alarm->enabled); +} + +static irqreturn_t rtc_alarm_handler(int irq, void *data) +{ + struct rtc_device *rtc = data; + unsigned long events = RTC_IRQF | RTC_AF; + + dev_dbg(&rtc->dev, "%s\n", __func__); + rtc_update_irq(rtc, 1, events); + + return IRQ_HANDLED; +} + +static const struct rtc_class_ops ab8500_rtc_ops = { + .read_time = ab8500_rtc_read_time, + .set_time = ab8500_rtc_set_time, + .read_alarm = ab8500_rtc_read_alarm, + .set_alarm = ab8500_rtc_set_alarm, + .alarm_irq_enable = ab8500_rtc_irq_enable, +}; + +static int __devinit ab8500_rtc_probe(struct platform_device *pdev) +{ + struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); + int err; + struct rtc_device *rtc; + int rtc_ctrl; + int irq; + + irq = platform_get_irq_byname(pdev, "ALARM"); + if (irq < 0) + return irq; + + /* For RTC supply test */ + err = ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_STATUS_DATA, + RTC_STATUS_DATA); + if (err < 0) + return err; + + /* Wait for reset by the PorRtc */ + msleep(1); + + rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG); + if (rtc_ctrl < 0) + return rtc_ctrl; + + /* Check if the RTC Supply fails */ + if (!(rtc_ctrl & RTC_STATUS_DATA)) { + dev_err(&pdev->dev, "RTC supply failure\n"); + return -ENODEV; + } + + rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) { + dev_err(&pdev->dev, "Registration failed\n"); + err = PTR_ERR(rtc); + return err; + } + + err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0, + "ab8500-rtc", rtc); + if (err < 0) { + rtc_device_unregister(rtc); + return err; + } + + platform_set_drvdata(pdev, rtc); + + return 0; +} + +static int __devexit ab8500_rtc_remove(struct platform_device *pdev) +{ + struct rtc_device *rtc = platform_get_drvdata(pdev); + int irq = platform_get_irq_byname(pdev, "ALARM"); + + free_irq(irq, rtc); + rtc_device_unregister(rtc); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver ab8500_rtc_driver = { + .driver = { + .name = "ab8500-rtc", + .owner = THIS_MODULE, + }, + .probe = ab8500_rtc_probe, + .remove = __devexit_p(ab8500_rtc_remove), +}; + +static int __init ab8500_rtc_init(void) +{ + return platform_driver_register(&ab8500_rtc_driver); +} + +static void __exit ab8500_rtc_exit(void) +{ + platform_driver_unregister(&ab8500_rtc_driver); +} + +module_init(ab8500_rtc_init); +module_exit(ab8500_rtc_exit); +MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>"); +MODULE_DESCRIPTION("AB8500 RTC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 96e8e70fbf1e..11b8ea29d2b7 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) } } + cmos_rtc.dev = dev; + dev_set_drvdata(dev, &cmos_rtc); + cmos_rtc.rtc = rtc_device_register(driver_name, dev, &cmos_rtc_ops, THIS_MODULE); if (IS_ERR(cmos_rtc.rtc)) { @@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) goto cleanup0; } - cmos_rtc.dev = dev; - dev_set_drvdata(dev, &cmos_rtc); 
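The rtc-cmos hunk above is a pure reordering: dev_set_drvdata() now runs before rtc_device_register(). The point, presumably, is that registration can already invoke the driver's rtc_class_ops (the core may read the clock as part of registering the device), and those ops fetch their state through drvdata, so the state must be published first. The general shape of the fix, with state/ops as placeholder names:

	/* publish driver state before anything can call back into it */
	dev_set_drvdata(dev, &state);

	rtc = rtc_device_register(driver_name, dev, &ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		dev_set_drvdata(dev, NULL);	/* unwind on failure */
		return PTR_ERR(rtc);
	}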
rename_region(ports, dev_name(&cmos_rtc.rtc->dev)); spin_lock_irq(&rtc_lock); diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index 532acf9b05d8..359d1e04626c 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c @@ -16,7 +16,6 @@ #include <linux/rtc.h> #include <linux/io.h> #include <linux/bcd.h> -#include <asm/rtc.h> #define DRV_NAME "rtc-ds1302" #define DRV_VERSION "0.1.1" @@ -34,14 +33,55 @@ #define RTC_ADDR_MIN 0x01 /* Address of minute register */ #define RTC_ADDR_SEC 0x00 /* Address of second register */ +#ifdef CONFIG_SH_SECUREEDGE5410 +#include <asm/rtc.h> +#include <mach/snapgear.h> + #define RTC_RESET 0x1000 #define RTC_IODATA 0x0800 #define RTC_SCLK 0x0400 -#ifdef CONFIG_SH_SECUREEDGE5410 -#include <mach/snapgear.h> #define set_dp(x) SECUREEDGE_WRITE_IOPORT(x, 0x1c00) #define get_dp() SECUREEDGE_READ_IOPORT() +#define ds1302_set_tx() +#define ds1302_set_rx() + +static inline int ds1302_hw_init(void) +{ + return 0; +} + +static inline void ds1302_reset(void) +{ + set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK)); +} + +static inline void ds1302_clock(void) +{ + set_dp(get_dp() | RTC_SCLK); /* clock high */ + set_dp(get_dp() & ~RTC_SCLK); /* clock low */ +} + +static inline void ds1302_start(void) +{ + set_dp(get_dp() | RTC_RESET); +} + +static inline void ds1302_stop(void) +{ + set_dp(get_dp() & ~RTC_RESET); +} + +static inline void ds1302_txbit(int bit) +{ + set_dp((get_dp() & ~RTC_IODATA) | (bit ? RTC_IODATA : 0)); +} + +static inline int ds1302_rxbit(void) +{ + return !!(get_dp() & RTC_IODATA); +} + #else #error "Add support for your platform" #endif @@ -50,11 +90,11 @@ static void ds1302_sendbits(unsigned int val) { int i; + ds1302_set_tx(); + for (i = 8; (i); i--, val >>= 1) { - set_dp((get_dp() & ~RTC_IODATA) | ((val & 0x1) ? - RTC_IODATA : 0)); - set_dp(get_dp() | RTC_SCLK); /* clock high */ - set_dp(get_dp() & ~RTC_SCLK); /* clock low */ + ds1302_txbit(val & 0x1); + ds1302_clock(); } } @@ -63,10 +103,11 @@ static unsigned int ds1302_recvbits(void) unsigned int val; int i; + ds1302_set_rx(); + for (i = 0, val = 0; (i < 8); i++) { - val |= (((get_dp() & RTC_IODATA) ? 1 : 0) << i); - set_dp(get_dp() | RTC_SCLK); /* clock high */ - set_dp(get_dp() & ~RTC_SCLK); /* clock low */ + val |= (ds1302_rxbit() << i); + ds1302_clock(); } return val; @@ -76,23 +117,24 @@ static unsigned int ds1302_readbyte(unsigned int addr) { unsigned int val; - set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK)); + ds1302_reset(); - set_dp(get_dp() | RTC_RESET); + ds1302_start(); ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_READ); val = ds1302_recvbits(); - set_dp(get_dp() & ~RTC_RESET); + ds1302_stop(); return val; } static void ds1302_writebyte(unsigned int addr, unsigned int val) { - set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK)); - set_dp(get_dp() | RTC_RESET); + ds1302_reset(); + + ds1302_start(); ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_WRITE); ds1302_sendbits(val); - set_dp(get_dp() & ~RTC_RESET); + ds1302_stop(); } static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm) @@ -167,13 +209,20 @@ static int __init ds1302_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; + if (ds1302_hw_init()) { + dev_err(&pdev->dev, "Failed to init communication channel"); + return -EINVAL; + } + /* Reset */ - set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK)); + ds1302_reset(); /* Write a magic value to the DS1302 RAM, and see if it sticks. 
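The rtc-ds1302 changes above factor every board-specific access behind a small set of inline hooks, so the #error branch can be satisfied by adding a second platform block instead of editing the protocol code. The contract a new platform would have to supply, as inferred from the SECUREEDGE5410 block (prototypes only, a sketch):

static inline int  ds1302_hw_init(void);	/* claim pins, 0 on success */
static inline void ds1302_reset(void);		/* CE, clock and data all low */
static inline void ds1302_start(void);		/* raise CE: begin a transfer */
static inline void ds1302_stop(void);		/* drop CE: end the transfer */
static inline void ds1302_clock(void);		/* one SCLK high/low pulse */
static inline void ds1302_set_tx(void);		/* data pin to output mode */
static inline void ds1302_set_rx(void);		/* data pin to input mode */
static inline void ds1302_txbit(int bit);	/* drive one bit on the data pin */
static inline int  ds1302_rxbit(void);		/* sample one bit, 0 or 1 */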
*/ ds1302_writebyte(RTC_ADDR_RAM0, 0x42); - if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42) + if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42) { + dev_err(&pdev->dev, "Failed to probe"); return -ENODEV; + } rtc = rtc_device_register("ds1302", &pdev->dev, &ds1302_rtc_ops, THIS_MODULE); diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index 054e05294af8..468200c38ecb 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -462,39 +462,16 @@ isl1208_sysfs_store_usr(struct device *dev, static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr, isl1208_sysfs_store_usr); -static int -isl1208_sysfs_register(struct device *dev) -{ - int err; - - err = device_create_file(dev, &dev_attr_atrim); - if (err) - return err; - - err = device_create_file(dev, &dev_attr_dtrim); - if (err) { - device_remove_file(dev, &dev_attr_atrim); - return err; - } - - err = device_create_file(dev, &dev_attr_usr); - if (err) { - device_remove_file(dev, &dev_attr_atrim); - device_remove_file(dev, &dev_attr_dtrim); - } - - return 0; -} - -static int -isl1208_sysfs_unregister(struct device *dev) -{ - device_remove_file(dev, &dev_attr_dtrim); - device_remove_file(dev, &dev_attr_atrim); - device_remove_file(dev, &dev_attr_usr); +static struct attribute *isl1208_rtc_attrs[] = { + &dev_attr_atrim.attr, + &dev_attr_dtrim.attr, + &dev_attr_usr.attr, + NULL +}; - return 0; -} +static const struct attribute_group isl1208_rtc_sysfs_files = { + .attrs = isl1208_rtc_attrs, +}; static int isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) @@ -529,7 +506,7 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) dev_warn(&client->dev, "rtc power failure detected, " "please set clock.\n"); - rc = isl1208_sysfs_register(&client->dev); + rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); if (rc) goto exit_unregister; @@ -546,7 +523,7 @@ isl1208_remove(struct i2c_client *client) { struct rtc_device *rtc = i2c_get_clientdata(client); - isl1208_sysfs_unregister(&client->dev); + sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); rtc_device_unregister(rtc); return 0; diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 60fe266f0f49..6dc4e6241418 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -595,10 +595,6 @@ static void wdt_disable(void) static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - /* Can't seek (pwrite) on this device - if (ppos != &file->f_pos) - return -ESPIPE; - */ if (count) { wdt_ping(); return 1; @@ -623,7 +619,7 @@ static ssize_t wdt_read(struct file *file, char __user *buf, * according to their available features. We only actually usefully support * querying capabilities and current status. 
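The watchdog conversion that follows, like the openprom and 3ware ones further down, is the standard BKL push-down: the .ioctl file operation, which the VFS called with the big kernel lock held, becomes .unlocked_ioctl, the inode argument disappears from the signature, and the handler takes lock_kernel() itself, either inline in its body or via a thin wrapper. The wrapper form, with placeholder foo_* names:

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	/* recover the inode only if the old handler actually needed it */
	struct inode *inode = file->f_path.dentry->d_inode;
	long ret;

	lock_kernel();
	ret = foo_ioctl(inode, file, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,	/* was .ioctl = foo_ioctl */
};

This keeps behavior identical while taking the locking out of the VFS's hands, which is the precondition for later shrinking or removing it per driver.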
*/ -static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, +static int wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_margin, rv; @@ -676,6 +672,18 @@ static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, return -ENOTTY; } +static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = wdt_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + /** * wdt_open: * @inode: inode of device @@ -695,7 +703,7 @@ static int wdt_open(struct inode *inode, struct file *file) */ wdt_is_open = 1; unlock_kernel(); - return 0; + return nonseekable_open(inode, file); } return -ENODEV; } @@ -736,7 +744,7 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code, static const struct file_operations wdt_fops = { .owner = THIS_MODULE, .read = wdt_read, - .ioctl = wdt_ioctl, + .unlocked_ioctl = wdt_unlocked_ioctl, .write = wdt_write, .open = wdt_open, .release = wdt_release, diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index d71fe61db1d6..25ec921db07c 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -379,7 +379,6 @@ static struct rtc_class_ops mxc_rtc_ops = { static int __init mxc_rtc_probe(struct platform_device *pdev) { - struct clk *clk; struct resource *res; struct rtc_device *rtc; struct rtc_plat_data *pdata = NULL; @@ -402,14 +401,15 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - clk = clk_get(&pdev->dev, "ckil"); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); + pdata->clk = clk_get(&pdev->dev, "rtc"); + if (IS_ERR(pdata->clk)) { + dev_err(&pdev->dev, "unable to get clock!\n"); + ret = PTR_ERR(pdata->clk); goto exit_free_pdata; } - rate = clk_get_rate(clk); - clk_put(clk); + clk_enable(pdata->clk); + rate = clk_get_rate(pdata->clk); if (rate == 32768) reg = RTC_INPUT_CLK_32768HZ; @@ -420,7 +420,7 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) else { dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate); ret = -EINVAL; - goto exit_free_pdata; + goto exit_put_clk; } reg |= RTC_ENABLE_BIT; @@ -428,18 +428,9 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) { dev_err(&pdev->dev, "hardware module can't be enabled!\n"); ret = -EIO; - goto exit_free_pdata; - } - - pdata->clk = clk_get(&pdev->dev, "rtc"); - if (IS_ERR(pdata->clk)) { - dev_err(&pdev->dev, "unable to get clock!\n"); - ret = PTR_ERR(pdata->clk); - goto exit_free_pdata; + goto exit_put_clk; } - clk_enable(pdata->clk); - rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4969b6059c89..e5972b2c17b7 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -29,6 +29,11 @@ #include <asm/irq.h> #include <plat/regs-rtc.h> +enum s3c_cpu_type { + TYPE_S3C2410, + TYPE_S3C64XX, +}; + /* I have yet to find an S3C implementation with more than one * of these rtc blocks in */ @@ -37,6 +42,7 @@ static struct resource *s3c_rtc_mem; static void __iomem *s3c_rtc_base; static int s3c_rtc_alarmno = NO_IRQ; static int s3c_rtc_tickno = NO_IRQ; +static enum s3c_cpu_type s3c_rtc_cpu_type; static DEFINE_SPINLOCK(s3c_rtc_pie_lock); @@ -80,12 +86,25 @@ static int s3c_rtc_setpie(struct device *dev, int enabled) pr_debug("%s: pie=%d\n", __func__, enabled); 
spin_lock_irq(&s3c_rtc_pie_lock); - tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; - if (enabled) - tmp |= S3C2410_TICNT_ENABLE; + if (s3c_rtc_cpu_type == TYPE_S3C64XX) { + tmp = readb(s3c_rtc_base + S3C2410_RTCCON); + tmp &= ~S3C64XX_RTCCON_TICEN; + + if (enabled) + tmp |= S3C64XX_RTCCON_TICEN; + + writeb(tmp, s3c_rtc_base + S3C2410_RTCCON); + } else { + tmp = readb(s3c_rtc_base + S3C2410_TICNT); + tmp &= ~S3C2410_TICNT_ENABLE; + + if (enabled) + tmp |= S3C2410_TICNT_ENABLE; + + writeb(tmp, s3c_rtc_base + S3C2410_TICNT); + } - writeb(tmp, s3c_rtc_base + S3C2410_TICNT); spin_unlock_irq(&s3c_rtc_pie_lock); return 0; @@ -93,15 +112,21 @@ static int s3c_rtc_setpie(struct device *dev, int enabled) static int s3c_rtc_setfreq(struct device *dev, int freq) { - unsigned int tmp; + struct platform_device *pdev = to_platform_device(dev); + struct rtc_device *rtc_dev = platform_get_drvdata(pdev); + unsigned int tmp = 0; if (!is_power_of_2(freq)) return -EINVAL; spin_lock_irq(&s3c_rtc_pie_lock); - tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE; - tmp |= (128 / freq)-1; + if (s3c_rtc_cpu_type == TYPE_S3C2410) { + tmp = readb(s3c_rtc_base + S3C2410_TICNT); + tmp &= S3C2410_TICNT_ENABLE; + } + + tmp |= (rtc_dev->max_user_freq / freq)-1; writeb(tmp, s3c_rtc_base + S3C2410_TICNT); spin_unlock_irq(&s3c_rtc_pie_lock); @@ -283,10 +308,17 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) { - unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT); + unsigned int ticnt; - seq_printf(seq, "periodic_IRQ\t: %s\n", - (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" ); + if (s3c_rtc_cpu_type == TYPE_S3C64XX) { + ticnt = readb(s3c_rtc_base + S3C2410_RTCCON); + ticnt &= S3C64XX_RTCCON_TICEN; + } else { + ticnt = readb(s3c_rtc_base + S3C2410_TICNT); + ticnt &= S3C2410_TICNT_ENABLE; + } + + seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? 
"yes" : "no"); return 0; } @@ -353,10 +385,16 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en) if (!en) { tmp = readb(base + S3C2410_RTCCON); - writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON); - - tmp = readb(base + S3C2410_TICNT); - writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT); + if (s3c_rtc_cpu_type == TYPE_S3C64XX) + tmp &= ~S3C64XX_RTCCON_TICEN; + tmp &= ~S3C2410_RTCCON_RTCEN; + writeb(tmp, base + S3C2410_RTCCON); + + if (s3c_rtc_cpu_type == TYPE_S3C2410) { + tmp = readb(base + S3C2410_TICNT); + tmp &= ~S3C2410_TICNT_ENABLE; + writeb(tmp, base + S3C2410_TICNT); + } } else { /* re-enable the device, and check it is ok */ @@ -472,7 +510,12 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) goto err_nortc; } - rtc->max_user_freq = 128; + if (s3c_rtc_cpu_type == TYPE_S3C64XX) + rtc->max_user_freq = 32768; + else + rtc->max_user_freq = 128; + + s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data; platform_set_drvdata(pdev, rtc); return 0; @@ -492,20 +535,30 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) /* RTC Power management control */ -static int ticnt_save; +static int ticnt_save, ticnt_en_save; static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) { /* save TICNT for anyone using periodic interrupts */ ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); + if (s3c_rtc_cpu_type == TYPE_S3C64XX) { + ticnt_en_save = readb(s3c_rtc_base + S3C2410_RTCCON); + ticnt_en_save &= S3C64XX_RTCCON_TICEN; + } s3c_rtc_enable(pdev, 0); return 0; } static int s3c_rtc_resume(struct platform_device *pdev) { + unsigned int tmp; + s3c_rtc_enable(pdev, 1); writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); + if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) { + tmp = readb(s3c_rtc_base + S3C2410_RTCCON); + writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); + } return 0; } #else @@ -513,13 +566,27 @@ static int s3c_rtc_resume(struct platform_device *pdev) #define s3c_rtc_resume NULL #endif -static struct platform_driver s3c2410_rtc_driver = { +static struct platform_device_id s3c_rtc_driver_ids[] = { + { + .name = "s3c2410-rtc", + .driver_data = TYPE_S3C2410, + }, { + .name = "s3c64xx-rtc", + .driver_data = TYPE_S3C64XX, + }, + { } +}; + +MODULE_DEVICE_TABLE(platform, s3c_rtc_driver_ids); + +static struct platform_driver s3c_rtc_driver = { .probe = s3c_rtc_probe, .remove = __devexit_p(s3c_rtc_remove), .suspend = s3c_rtc_suspend, .resume = s3c_rtc_resume, + .id_table = s3c_rtc_driver_ids, .driver = { - .name = "s3c2410-rtc", + .name = "s3c-rtc", .owner = THIS_MODULE, }, }; @@ -529,12 +596,12 @@ static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics static int __init s3c_rtc_init(void) { printk(banner); - return platform_driver_register(&s3c2410_rtc_driver); + return platform_driver_register(&s3c_rtc_driver); } static void __exit s3c_rtc_exit(void) { - platform_driver_unregister(&s3c2410_rtc_driver); + platform_driver_unregister(&s3c_rtc_driver); } module_init(s3c_rtc_init); diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c index b16cfe57a484..82931dc65c0b 100644 --- a/drivers/rtc/rtc-wm831x.c +++ b/drivers/rtc/rtc-wm831x.c @@ -449,17 +449,17 @@ static int wm831x_rtc_probe(struct platform_device *pdev) goto err; } - ret = wm831x_request_irq(wm831x, per_irq, wm831x_per_irq, - IRQF_TRIGGER_RISING, "wm831x_rtc_per", - wm831x_rtc); + ret = request_threaded_irq(per_irq, NULL, wm831x_per_irq, + IRQF_TRIGGER_RISING, "RTC period", + wm831x_rtc); if (ret != 
0) { dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n", per_irq, ret); } - ret = wm831x_request_irq(wm831x, alm_irq, wm831x_alm_irq, - IRQF_TRIGGER_RISING, "wm831x_rtc_alm", - wm831x_rtc); + ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq, + IRQF_TRIGGER_RISING, "RTC alarm", + wm831x_rtc); if (ret != 0) { dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n", alm_irq, ret); @@ -478,8 +478,8 @@ static int __devexit wm831x_rtc_remove(struct platform_device *pdev) int per_irq = platform_get_irq_byname(pdev, "PER"); int alm_irq = platform_get_irq_byname(pdev, "ALM"); - wm831x_free_irq(wm831x_rtc->wm831x, alm_irq, wm831x_rtc); - wm831x_free_irq(wm831x_rtc->wm831x, per_irq, wm831x_rtc); + free_irq(alm_irq, wm831x_rtc); + free_irq(per_irq, wm831x_rtc); rtc_device_unregister(wm831x_rtc->rtc); kfree(wm831x_rtc); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 0e86247d791e..33975e922d65 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1186,6 +1186,29 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, dasd_schedule_device_bh(device); } +enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) +{ + struct dasd_device *device; + + device = dasd_device_from_cdev_locked(cdev); + + if (IS_ERR(device)) + goto out; + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || + device->state != device->target || + !device->discipline->handle_unsolicited_interrupt){ + dasd_put_device(device); + goto out; + } + + dasd_device_clear_timer(device); + device->discipline->handle_unsolicited_interrupt(device, irb); + dasd_put_device(device); +out: + return UC_TODO_RETRY; +} +EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); + /* * If we have an error on a dasd_block layer request then we cancel * and return all further requests from the same dasd_block as well. 
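dasd_generic_uc_handler() above is the first user of a new hook: a ccw_driver can now be consulted when a unit check arrives, and its enum uc_todo verdict steers the common I/O layer. Per the switch added to ccwreq_status() below, UC_TODO_RETRY maps to IO_STATUS_ERROR (retry on the same path), UC_TODO_RETRY_ON_NEW_PATH to IO_PATH_ERROR, and UC_TODO_STOP to IO_REJECTED. A minimal sketch of wiring such a handler, with my_* standing in for a real driver:

/* sketch; my_handle_irb is a placeholder for driver-specific handling */
static enum uc_todo my_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	my_handle_irb(cdev, irb);

	return UC_TODO_RETRY;	/* becomes IO_STATUS_ERROR in ccwreq_status() */
}

static struct ccw_driver my_ccw_driver = {
	/* ... probe, remove, set_online, ... */
	.uc_handler = my_uc_handler,
};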
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 5b1cd8d6e971..ab84da5592e8 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -3436,6 +3436,7 @@ static struct ccw_driver dasd_eckd_driver = { .freeze = dasd_generic_pm_freeze, .thaw = dasd_generic_restore_device, .restore = dasd_generic_restore_device, + .uc_handler = dasd_generic_uc_handler, }; /* diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 32fac186ba3f..49b431d135e0 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -617,6 +617,7 @@ int dasd_generic_notify(struct ccw_device *, int); void dasd_generic_handle_state_change(struct dasd_device *); int dasd_generic_pm_freeze(struct ccw_device *); int dasd_generic_restore_device(struct ccw_device *); +enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); char *dasd_get_sense(struct irb *); diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 5f97ea2ee6b1..97b25d68e3e7 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -123,8 +123,10 @@ ccwgroup_release (struct device *dev) for (i = 0; i < gdev->count; i++) { if (gdev->cdev[i]) { + spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) dev_set_drvdata(&gdev->cdev[i]->dev, NULL); + spin_unlock_irq(gdev->cdev[i]->ccwlock); put_device(&gdev->cdev[i]->dev); } } @@ -262,11 +264,14 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, goto error; } /* Don't allow a device to belong to more than one group. */ + spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev)) { + spin_unlock_irq(gdev->cdev[i]->ccwlock); rc = -EINVAL; goto error; } dev_set_drvdata(&gdev->cdev[i]->dev, gdev); + spin_unlock_irq(gdev->cdev[i]->ccwlock); } /* Check for sufficient number of bus ids. */ if (i < num_devices && !curr_buf) { @@ -303,8 +308,10 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, error: for (i = 0; i < num_devices; i++) if (gdev->cdev[i]) { + spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) dev_set_drvdata(&gdev->cdev[i]->dev, NULL); + spin_unlock_irq(gdev->cdev[i]->ccwlock); put_device(&gdev->cdev[i]->dev); gdev->cdev[i] = NULL; } diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c index 37df42af05ec..7f206ed44fdf 100644 --- a/drivers/s390/cio/ccwreq.c +++ b/drivers/s390/cio/ccwreq.c @@ -159,6 +159,7 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb) { struct irb *irb = &cdev->private->irb; struct cmd_scsw *scsw = &irb->scsw.cmd; + enum uc_todo todo; /* Perform BASIC SENSE if needed. */ if (ccw_device_accumulate_and_sense(cdev, lcirb)) @@ -178,6 +179,20 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb) /* Check for command reject. */ if (irb->ecw[0] & SNS0_CMD_REJECT) return IO_REJECTED; + /* Ask the driver what to do */ + if (cdev->drv && cdev->drv->uc_handler) { + todo = cdev->drv->uc_handler(cdev, lcirb); + switch (todo) { + case UC_TODO_RETRY: + return IO_STATUS_ERROR; + case UC_TODO_RETRY_ON_NEW_PATH: + return IO_PATH_ERROR; + case UC_TODO_STOP: + return IO_REJECTED; + default: + return IO_STATUS_ERROR; + } + } /* Assume that unexpected SENSE data implies an error. 
*/ return IO_STATUS_ERROR; } diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 759262792633..fac06155773f 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -23,21 +23,6 @@ struct tpi_info { * Some S390 specific IO instructions as inline */ -static inline int stsch(struct subchannel_id schid, struct schib *addr) -{ - register struct subchannel_id reg1 asm ("1") = schid; - int ccode; - - asm volatile( - " stsch 0(%3)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode), "=m" (*addr) - : "d" (reg1), "a" (addr) - : "cc"); - return ccode; -} - static inline int stsch_err(struct subchannel_id schid, struct schib *addr) { register struct subchannel_id reg1 asm ("1") = schid; diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c index b4951eb0358e..103fdf6b0b89 100644 --- a/drivers/sbus/char/bbc_envctrl.c +++ b/drivers/sbus/char/bbc_envctrl.c @@ -565,9 +565,9 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp) int devidx = 0; while ((op = bbc_i2c_getdev(bp, devidx++)) != NULL) { - if (!strcmp(op->node->name, "temperature")) + if (!strcmp(op->dev.of_node->name, "temperature")) attach_one_temp(bp, op, temp_index++); - if (!strcmp(op->node->name, "fan-control")) + if (!strcmp(op->dev.of_node->name, "fan-control")) attach_one_fan(bp, op, fan_index++); } if (temp_index != 0 && fan_index != 0) { diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index 7e30e5f6e032..8bfdd63a1fcb 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c @@ -97,7 +97,7 @@ struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct of_device * client->bp = bp; client->op = op; - reg = of_get_property(op->node, "reg", NULL); + reg = of_get_property(op->dev.of_node, "reg", NULL); if (!reg) { kfree(client); return NULL; @@ -327,7 +327,7 @@ static struct bbc_i2c_bus * __init attach_one_i2c(struct of_device *op, int inde spin_lock_init(&bp->lock); entry = 0; - for (dp = op->node->child; + for (dp = op->dev.of_node->child; dp && entry < 8; dp = dp->sibling, entry++) { struct of_device *child_op; @@ -414,8 +414,11 @@ static const struct of_device_id bbc_i2c_match[] = { MODULE_DEVICE_TABLE(of, bbc_i2c_match); static struct of_platform_driver bbc_i2c_driver = { - .name = "bbc_i2c", - .match_table = bbc_i2c_match, + .driver = { + .name = "bbc_i2c", + .owner = THIS_MODULE, + .of_match_table = bbc_i2c_match, + }, .probe = bbc_i2c_probe, .remove = __devexit_p(bbc_i2c_remove), }; diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 3e59189f4137..7baf1b644039 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c @@ -216,7 +216,7 @@ static int __devinit d7s_probe(struct of_device *op, writeb(regs, p->regs); printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n", - op->node->full_name, + op->dev.of_node->full_name, (regs & D7S_FLIP) ? " (FLIPPED)" : "", op->resource[0].start, sol_compat ? 
"in sol_compat mode" : ""); @@ -266,8 +266,11 @@ static const struct of_device_id d7s_match[] = { MODULE_DEVICE_TABLE(of, d7s_match); static struct of_platform_driver d7s_driver = { - .name = DRIVER_NAME, - .match_table = d7s_match, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = d7s_match, + }, .probe = d7s_probe, .remove = __devexit_p(d7s_remove), }; diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index c6e2eff19409..c8166ecf5276 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c @@ -1043,7 +1043,7 @@ static int __devinit envctrl_probe(struct of_device *op, return -ENOMEM; index = 0; - dp = op->node->child; + dp = op->dev.of_node->child; while (dp) { if (!strcmp(dp->name, "gpio")) { i2c_childlist[index].i2ctype = I2C_GPIO; @@ -1131,8 +1131,11 @@ static const struct of_device_id envctrl_match[] = { MODULE_DEVICE_TABLE(of, envctrl_match); static struct of_platform_driver envctrl_driver = { - .name = DRIVER_NAME, - .match_table = envctrl_match, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = envctrl_match, + }, .probe = envctrl_probe, .remove = __devexit_p(envctrl_remove), }; diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index d3b62eb0fba7..368d66294d83 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c @@ -162,7 +162,7 @@ static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops }; static int __devinit flash_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; struct device_node *parent; parent = dp->parent; @@ -184,7 +184,7 @@ static int __devinit flash_probe(struct of_device *op, flash.busy = 0; printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n", - op->node->full_name, + op->dev.of_node->full_name, flash.read_base, flash.read_size, flash.write_base, flash.write_size); @@ -207,8 +207,11 @@ static const struct of_device_id flash_match[] = { MODULE_DEVICE_TABLE(of, flash_match); static struct of_platform_driver flash_driver = { - .name = "flash", - .match_table = flash_match, + .driver = { + .name = "flash", + .owner = THIS_MODULE, + .of_match_table = flash_match, + }, .probe = flash_probe, .remove = __devexit_p(flash_remove), }; diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index fc2f676e984d..d53e62ab09da 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c @@ -298,9 +298,9 @@ static int opromgetbootargs(void __user *argp, struct openpromio *op, int bufsiz /* * SunOS and Solaris /dev/openprom ioctl calls. 
*/ -static int openprom_sunos_ioctl(struct inode * inode, struct file * file, - unsigned int cmd, unsigned long arg, - struct device_node *dp) +static long openprom_sunos_ioctl(struct file * file, + unsigned int cmd, unsigned long arg, + struct device_node *dp) { DATA *data = file->private_data; struct openpromio *opp = NULL; @@ -316,6 +316,8 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file, if (bufsize < 0) return bufsize; + lock_kernel(); + switch (cmd) { case OPROMGETOPT: case OPROMGETPROP: @@ -365,6 +367,8 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file, } kfree(opp); + unlock_kernel(); + return error; } @@ -547,13 +551,14 @@ static int opiocgetnext(unsigned int cmd, void __user *argp) return 0; } -static int openprom_bsd_ioctl(struct inode * inode, struct file * file, +static int openprom_bsd_ioctl(struct file * file, unsigned int cmd, unsigned long arg) { DATA *data = (DATA *) file->private_data; void __user *argp = (void __user *)arg; int err; + lock_kernel(); switch (cmd) { case OPIOCGET: err = opiocget(argp, data); @@ -570,10 +575,10 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file, case OPIOCGETOPTNODE: BUILD_BUG_ON(sizeof(phandle) != sizeof(int)); + err = 0; if (copy_to_user(argp, &options_node->phandle, sizeof(phandle))) - return -EFAULT; - - return 0; + err = -EFAULT; + break; case OPIOCGETNEXT: case OPIOCGETCHILD: @@ -581,9 +586,10 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file, break; default: - return -EINVAL; - + err = -EINVAL; + break; }; + unlock_kernel(); return err; } @@ -592,8 +598,8 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file, /* * Handoff control to the correct ioctl handler. */ -static int openprom_ioctl(struct inode * inode, struct file * file, - unsigned int cmd, unsigned long arg) +static long openprom_ioctl(struct file * file, + unsigned int cmd, unsigned long arg) { DATA *data = (DATA *) file->private_data; @@ -602,14 +608,14 @@ static int openprom_ioctl(struct inode * inode, struct file * file, case OPROMNXTOPT: if ((file->f_mode & FMODE_READ) == 0) return -EPERM; - return openprom_sunos_ioctl(inode, file, cmd, arg, + return openprom_sunos_ioctl(file, cmd, arg, options_node); case OPROMSETOPT: case OPROMSETOPT2: if ((file->f_mode & FMODE_WRITE) == 0) return -EPERM; - return openprom_sunos_ioctl(inode, file, cmd, arg, + return openprom_sunos_ioctl(file, cmd, arg, options_node); case OPROMNEXT: @@ -618,7 +624,7 @@ static int openprom_ioctl(struct inode * inode, struct file * file, case OPROMNXTPROP: if ((file->f_mode & FMODE_READ) == 0) return -EPERM; - return openprom_sunos_ioctl(inode, file, cmd, arg, + return openprom_sunos_ioctl(file, cmd, arg, data->current_node); case OPROMU2P: @@ -630,7 +636,7 @@ static int openprom_ioctl(struct inode * inode, struct file * file, case OPROMPATH2NODE: if ((file->f_mode & FMODE_READ) == 0) return -EPERM; - return openprom_sunos_ioctl(inode, file, cmd, arg, NULL); + return openprom_sunos_ioctl(file, cmd, arg, NULL); case OPIOCGET: case OPIOCNEXTPROP: @@ -639,12 +645,12 @@ static int openprom_ioctl(struct inode * inode, struct file * file, case OPIOCGETCHILD: if ((file->f_mode & FMODE_READ) == 0) return -EBADF; - return openprom_bsd_ioctl(inode,file,cmd,arg); + return openprom_bsd_ioctl(file,cmd,arg); case OPIOCSET: if ((file->f_mode & FMODE_WRITE) == 0) return -EBADF; - return openprom_bsd_ioctl(inode,file,cmd,arg); + return openprom_bsd_ioctl(file,cmd,arg); default: return -EINVAL; @@ 
-676,7 +682,7 @@ static long openprom_compat_ioctl(struct file *file, unsigned int cmd, case OPROMSETCUR: case OPROMPCI2NODE: case OPROMPATH2NODE: - rval = openprom_ioctl(file->f_path.dentry->d_inode, file, cmd, arg); + rval = openprom_ioctl(file, cmd, arg); break; } @@ -709,7 +715,7 @@ static int openprom_release(struct inode * inode, struct file * file) static const struct file_operations openprom_fops = { .owner = THIS_MODULE, .llseek = no_llseek, - .ioctl = openprom_ioctl, + .unlocked_ioctl = openprom_ioctl, .compat_ioctl = openprom_compat_ioctl, .open = openprom_open, .release = openprom_release, diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index 2c56fd56ec63..5f253665a1da 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c @@ -382,7 +382,7 @@ static int __devinit uctrl_probe(struct of_device *op, sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr); printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n", - op->node->full_name, p->regs, p->irq); + op->dev.of_node->full_name, p->regs, p->irq); uctrl_get_event_status(p); uctrl_get_external_status(p); @@ -425,8 +425,11 @@ static const struct of_device_id uctrl_match[] = { MODULE_DEVICE_TABLE(of, uctrl_match); static struct of_platform_driver uctrl_driver = { - .name = "uctrl", - .match_table = uctrl_match, + .driver = { + .name = "uctrl", + .owner = THIS_MODULE, + .of_match_table = uctrl_match, + }, .probe = uctrl_probe, .remove = __devexit_p(uctrl_remove), }; diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 1bb774becf25..e20b7bdd4c78 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -125,7 +125,7 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); static char *twa_aen_severity_lookup(unsigned char severity_code); static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id); -static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); +static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static int twa_chrdev_open(struct inode *inode, struct file *file); static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host); static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id); @@ -220,7 +220,7 @@ static struct device_attribute *twa_host_attrs[] = { /* File operations struct for character device */ static const struct file_operations twa_fops = { .owner = THIS_MODULE, - .ioctl = twa_chrdev_ioctl, + .unlocked_ioctl = twa_chrdev_ioctl, .open = twa_chrdev_open, .release = NULL }; @@ -637,8 +637,9 @@ out: } /* End twa_check_srl() */ /* This function handles ioctl for the character device */ -static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { + struct inode *inode = file->f_path.dentry->d_inode; long timeout; unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; dma_addr_t dma_handle; @@ -657,6 +658,8 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int int retval = TW_IOCTL_ERROR_OS_EFAULT; void __user *argp = (void __user *)arg; + lock_kernel(); + /* Only let one of these through at a time */ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { retval = TW_IOCTL_ERROR_OS_EINTR; @@ -876,6 +879,7 @@ out3: out2: 
mutex_unlock(&tw_dev->ioctl_lock); out: + unlock_kernel(); return retval; } /* End twa_chrdev_ioctl() */ diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index d38000db9237..f481e734aad4 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -750,19 +750,22 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm /* This function handles ioctl for the character device This interface is used by smartmontools open source software */ -static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long timeout; unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; dma_addr_t dma_handle; int request_id = 0; TW_Ioctl_Driver_Command driver_command; + struct inode *inode = file->f_dentry->d_inode; TW_Ioctl_Buf_Apache *tw_ioctl; TW_Command_Full *full_command_packet; TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)]; int retval = -EFAULT; void __user *argp = (void __user *)arg; + lock_kernel(); + /* Only let one of these through at a time */ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { retval = -EINTR; @@ -858,6 +861,7 @@ out3: out2: mutex_unlock(&tw_dev->ioctl_lock); out: + unlock_kernel(); return retval; } /* End twl_chrdev_ioctl() */ @@ -884,7 +888,7 @@ out: /* File operations struct for character device */ static const struct file_operations twl_fops = { .owner = THIS_MODULE, - .ioctl = twl_chrdev_ioctl, + .unlocked_ioctl = twl_chrdev_ioctl, .open = twl_chrdev_open, .release = NULL }; diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index d119a614bf7d..30d735ad35b5 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -881,7 +881,7 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) } /* End tw_allocate_memory() */ /* This function handles ioctl for the character device */ -static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int request_id; dma_addr_t dma_handle; @@ -889,6 +889,7 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int unsigned long flags; unsigned int data_buffer_length = 0; unsigned long data_buffer_length_adjusted = 0; + struct inode *inode = file->f_dentry->d_inode; unsigned long *cpu_addr; long timeout; TW_New_Ioctl *tw_ioctl; @@ -899,9 +900,12 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl()\n"); + lock_kernel(); /* Only let one of these through at a time */ - if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) + if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { + unlock_kernel(); return -EINTR; + } /* First copy down the buffer length */ if (copy_from_user(&data_buffer_length, argp, sizeof(unsigned int))) @@ -1030,6 +1034,7 @@ out2: dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle); out: mutex_unlock(&tw_dev->ioctl_lock); + unlock_kernel(); return retval; } /* End tw_chrdev_ioctl() */ @@ -1052,7 +1057,7 @@ static int tw_chrdev_open(struct inode *inode, struct file *file) /* File operations struct for character device */ static const struct file_operations tw_fops = { .owner = THIS_MODULE, - .ioctl = tw_chrdev_ioctl, + .unlocked_ioctl = tw_chrdev_ioctl, .open = tw_chrdev_open, .release 
= NULL }; diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c index 308541ff85cf..1bb5d3f0e260 100644 --- a/drivers/scsi/a2091.c +++ b/drivers/scsi/a2091.c @@ -1,34 +1,31 @@ #include <linux/types.h> -#include <linux/mm.h> -#include <linux/slab.h> -#include <linux/blkdev.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/zorro.h> -#include <asm/setup.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/amigaints.h> #include <asm/amigahw.h> -#include <linux/zorro.h> -#include <asm/irq.h> -#include <linux/spinlock.h> #include "scsi.h" -#include <scsi/scsi_host.h> #include "wd33c93.h" #include "a2091.h" -#include <linux/stat.h> - -static int a2091_release(struct Scsi_Host *instance); +struct a2091_hostdata { + struct WD33C93_hostdata wh; + struct a2091_scsiregs *regs; +}; static irqreturn_t a2091_intr(int irq, void *data) { struct Scsi_Host *instance = data; - a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); - unsigned int status = regs->ISTR; + struct a2091_hostdata *hdata = shost_priv(instance); + unsigned int status = hdata->regs->ISTR; unsigned long flags; if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) @@ -43,38 +40,39 @@ static irqreturn_t a2091_intr(int irq, void *data) static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct Scsi_Host *instance = cmd->device->host; - struct WD33C93_hostdata *hdata = shost_priv(instance); - a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); + struct a2091_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a2091_scsiregs *regs = hdata->regs; unsigned short cntr = CNTR_PDMD | CNTR_INTEN; unsigned long addr = virt_to_bus(cmd->SCp.ptr); /* don't allow DMA if the physical address is bad */ if (addr & A2091_XFER_MASK) { - hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; - hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, - GFP_KERNEL); + wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; + wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, + GFP_KERNEL); /* can't allocate memory; use PIO */ - if (!hdata->dma_bounce_buffer) { - hdata->dma_bounce_len = 0; + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; return 1; } /* get the physical address of the bounce buffer */ - addr = virt_to_bus(hdata->dma_bounce_buffer); + addr = virt_to_bus(wh->dma_bounce_buffer); /* the bounce buffer may not be in the first 16M of physmem */ if (addr & A2091_XFER_MASK) { /* we could use chipmem... 
maybe later */ - kfree(hdata->dma_bounce_buffer); - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; return 1; } if (!dir_in) { /* copy to bounce buffer for a write */ - memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, + memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, cmd->SCp.this_residual); } } @@ -84,7 +82,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) cntr |= CNTR_DDIR; /* remember direction */ - hdata->dma_dir = dir_in; + wh->dma_dir = dir_in; regs->CNTR = cntr; @@ -108,20 +106,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { - struct WD33C93_hostdata *hdata = shost_priv(instance); - a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); + struct a2091_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a2091_scsiregs *regs = hdata->regs; /* disable SCSI interrupts */ unsigned short cntr = CNTR_PDMD; - if (!hdata->dma_dir) + if (!wh->dma_dir) cntr |= CNTR_DDIR; /* disable SCSI interrupts */ regs->CNTR = cntr; /* flush if we were reading */ - if (hdata->dma_dir) { + if (wh->dma_dir) { regs->FLUSH = 1; while (!(regs->ISTR & ISTR_FE_FLG)) ; @@ -137,95 +136,37 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, regs->CNTR = CNTR_PDMD | CNTR_INTEN; /* copy from a bounce buffer, if necessary */ - if (status && hdata->dma_bounce_buffer) { - if (hdata->dma_dir) - memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, + if (status && wh->dma_bounce_buffer) { + if (wh->dma_dir) + memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, SCpnt->SCp.this_residual); - kfree(hdata->dma_bounce_buffer); - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; - } -} - -static int __init a2091_detect(struct scsi_host_template *tpnt) -{ - static unsigned char called = 0; - struct Scsi_Host *instance; - unsigned long address; - struct zorro_dev *z = NULL; - wd33c93_regs wdregs; - a2091_scsiregs *regs; - struct WD33C93_hostdata *hdata; - int num_a2091 = 0; - - if (!MACH_IS_AMIGA || called) - return 0; - called = 1; - - tpnt->proc_name = "A2091"; - tpnt->proc_info = &wd33c93_proc_info; - - while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { - if (z->id != ZORRO_PROD_CBM_A590_A2091_1 && - z->id != ZORRO_PROD_CBM_A590_A2091_2) - continue; - address = z->resource.start; - if (!request_mem_region(address, 256, "wd33c93")) - continue; - - instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); - if (instance == NULL) - goto release; - instance->base = ZTWO_VADDR(address); - instance->irq = IRQ_AMIGA_PORTS; - instance->unique_id = z->slotaddr; - regs = (a2091_scsiregs *)(instance->base); - regs->DAWR = DAWR_A2091; - wdregs.SASR = ®s->SASR; - wdregs.SCMD = ®s->SCMD; - hdata = shost_priv(instance); - hdata->no_sync = 0xff; - hdata->fast = 0; - hdata->dma_mode = CTRL_DMA; - wd33c93_init(instance, wdregs, dma_setup, dma_stop, - WD33C93_FS_8_10); - if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, - "A2091 SCSI", instance)) - goto unregister; - regs->CNTR = CNTR_PDMD | CNTR_INTEN; - num_a2091++; - continue; - -unregister: - scsi_unregister(instance); -release: - release_mem_region(address, 256); + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; } - - return num_a2091; } static int a2091_bus_reset(struct scsi_cmnd *cmd) { + struct Scsi_Host *instance = cmd->device->host; + /* FIXME perform 
bus-specific reset */ /* FIXME 2: kill this function, and let midlayer fall back to the same action, calling wd33c93_host_reset() */ - spin_lock_irq(cmd->device->host->host_lock); + spin_lock_irq(instance->host_lock); wd33c93_host_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); + spin_unlock_irq(instance->host_lock); return SUCCESS; } -#define HOSTS_C - -static struct scsi_host_template driver_template = { - .proc_name = "A2901", +static struct scsi_host_template a2091_scsi_template = { + .module = THIS_MODULE, .name = "Commodore A2091/A590 SCSI", - .detect = a2091_detect, - .release = a2091_release, + .proc_info = wd33c93_proc_info, + .proc_name = "A2901", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_bus_reset_handler = a2091_bus_reset, @@ -237,19 +178,103 @@ static struct scsi_host_template driver_template = { .use_clustering = DISABLE_CLUSTERING }; +static int __devinit a2091_probe(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + struct Scsi_Host *instance; + int error; + struct a2091_scsiregs *regs; + wd33c93_regs wdregs; + struct a2091_hostdata *hdata; -#include "scsi_module.c" + if (!request_mem_region(z->resource.start, 256, "wd33c93")) + return -EBUSY; -static int a2091_release(struct Scsi_Host *instance) + instance = scsi_host_alloc(&a2091_scsi_template, + sizeof(struct a2091_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + + instance->irq = IRQ_AMIGA_PORTS; + instance->unique_id = z->slotaddr; + + regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start); + regs->DAWR = DAWR_A2091; + + wdregs.SASR = ®s->SASR; + wdregs.SCMD = ®s->SCMD; + + hdata = shost_priv(instance); + hdata->wh.no_sync = 0xff; + hdata->wh.fast = 0; + hdata->wh.dma_mode = CTRL_DMA; + hdata->regs = regs; + + wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10); + error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, + "A2091 SCSI", instance); + if (error) + goto fail_irq; + + regs->CNTR = CNTR_PDMD | CNTR_INTEN; + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + zorro_set_drvdata(z, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + free_irq(IRQ_AMIGA_PORTS, instance); +fail_irq: + scsi_host_put(instance); +fail_alloc: + release_mem_region(z->resource.start, 256); + return error; +} + +static void __devexit a2091_remove(struct zorro_dev *z) { -#ifdef MODULE - a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); + struct Scsi_Host *instance = zorro_get_drvdata(z); + struct a2091_hostdata *hdata = shost_priv(instance); - regs->CNTR = 0; - release_mem_region(ZTWO_PADDR(instance->base), 256); + hdata->regs->CNTR = 0; + scsi_remove_host(instance); free_irq(IRQ_AMIGA_PORTS, instance); -#endif - return 1; + scsi_host_put(instance); + release_mem_region(z->resource.start, 256); +} + +static struct zorro_device_id a2091_zorro_tbl[] __devinitdata = { + { ZORRO_PROD_CBM_A590_A2091_1 }, + { ZORRO_PROD_CBM_A590_A2091_2 }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl); + +static struct zorro_driver a2091_driver = { + .name = "a2091", + .id_table = a2091_zorro_tbl, + .probe = a2091_probe, + .remove = __devexit_p(a2091_remove), +}; + +static int __init a2091_init(void) +{ + return zorro_register_driver(&a2091_driver); +} +module_init(a2091_init); + +static void __exit a2091_exit(void) +{ + zorro_unregister_driver(&a2091_driver); } +module_exit(a2091_exit); +MODULE_DESCRIPTION("Commodore A2091/A590 SCSI"); MODULE_LICENSE("GPL"); diff --git 
a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h index 1c3daa1fd754..794b8e65c711 100644 --- a/drivers/scsi/a2091.h +++ b/drivers/scsi/a2091.h @@ -25,7 +25,7 @@ */ #define A2091_XFER_MASK (0xff000001) -typedef struct { +struct a2091_scsiregs { unsigned char pad1[64]; volatile unsigned short ISTR; volatile unsigned short CNTR; @@ -44,7 +44,7 @@ typedef struct { volatile unsigned short CINT; unsigned char pad7[2]; volatile unsigned short FLUSH; -} a2091_scsiregs; +}; #define DAWR_A2091 (3) diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index bc6eb69f5fd0..d9468027fb61 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -1,53 +1,52 @@ #include <linux/types.h> #include <linux/mm.h> -#include <linux/slab.h> -#include <linux/blkdev.h> #include <linux/ioport.h> #include <linux/init.h> +#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/platform_device.h> -#include <asm/setup.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/amigaints.h> #include <asm/amigahw.h> -#include <asm/irq.h> #include "scsi.h" -#include <scsi/scsi_host.h> #include "wd33c93.h" #include "a3000.h" -#include <linux/stat.h> - -#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base)) - -static struct Scsi_Host *a3000_host = NULL; - -static int a3000_release(struct Scsi_Host *instance); +struct a3000_hostdata { + struct WD33C93_hostdata wh; + struct a3000_scsiregs *regs; +}; -static irqreturn_t a3000_intr(int irq, void *dummy) +static irqreturn_t a3000_intr(int irq, void *data) { + struct Scsi_Host *instance = data; + struct a3000_hostdata *hdata = shost_priv(instance); + unsigned int status = hdata->regs->ISTR; unsigned long flags; - unsigned int status = DMA(a3000_host)->ISTR; if (!(status & ISTR_INT_P)) return IRQ_NONE; if (status & ISTR_INTS) { - spin_lock_irqsave(a3000_host->host_lock, flags); - wd33c93_intr(a3000_host); - spin_unlock_irqrestore(a3000_host->host_lock, flags); + spin_lock_irqsave(instance->host_lock, flags); + wd33c93_intr(instance); + spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } - printk("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status); + pr_warning("Non-serviced A3000 SCSI-interrupt? 
ISTR = %02x\n", status); return IRQ_NONE; } static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { - struct WD33C93_hostdata *hdata = shost_priv(a3000_host); + struct Scsi_Host *instance = cmd->device->host; + struct a3000_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a3000_scsiregs *regs = hdata->regs; unsigned short cntr = CNTR_PDMD | CNTR_INTEN; unsigned long addr = virt_to_bus(cmd->SCp.ptr); @@ -58,23 +57,23 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) * buffer */ if (addr & A3000_XFER_MASK) { - hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; - hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, - GFP_KERNEL); + wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; + wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, + GFP_KERNEL); /* can't allocate memory; use PIO */ - if (!hdata->dma_bounce_buffer) { - hdata->dma_bounce_len = 0; + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; return 1; } if (!dir_in) { /* copy to bounce buffer for a write */ - memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, + memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, cmd->SCp.this_residual); } - addr = virt_to_bus(hdata->dma_bounce_buffer); + addr = virt_to_bus(wh->dma_bounce_buffer); } /* setup dma direction */ @@ -82,12 +81,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) cntr |= CNTR_DDIR; /* remember direction */ - hdata->dma_dir = dir_in; + wh->dma_dir = dir_in; - DMA(a3000_host)->CNTR = cntr; + regs->CNTR = cntr; /* setup DMA *physical* address */ - DMA(a3000_host)->ACR = addr; + regs->ACR = addr; if (dir_in) { /* invalidate any cache */ @@ -99,7 +98,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) /* start DMA */ mb(); /* make sure setup is completed */ - DMA(a3000_host)->ST_DMA = 1; + regs->ST_DMA = 1; mb(); /* make sure DMA has started before next IO */ /* return success */ @@ -109,22 +108,24 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { - struct WD33C93_hostdata *hdata = shost_priv(instance); + struct a3000_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a3000_scsiregs *regs = hdata->regs; /* disable SCSI interrupts */ unsigned short cntr = CNTR_PDMD; - if (!hdata->dma_dir) + if (!wh->dma_dir) cntr |= CNTR_DDIR; - DMA(instance)->CNTR = cntr; + regs->CNTR = cntr; mb(); /* make sure CNTR is updated before next IO */ /* flush if we were reading */ - if (hdata->dma_dir) { - DMA(instance)->FLUSH = 1; + if (wh->dma_dir) { + regs->FLUSH = 1; mb(); /* don't allow prefetch */ - while (!(DMA(instance)->ISTR & ISTR_FE_FLG)) + while (!(regs->ISTR & ISTR_FE_FLG)) barrier(); mb(); /* no IO until FLUSH is done */ } @@ -133,96 +134,54 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, /* I think that this CINT is only necessary if you are * using the terminal count features. 
HM 7 Mar 1994 */ - DMA(instance)->CINT = 1; + regs->CINT = 1; /* stop DMA */ - DMA(instance)->SP_DMA = 1; + regs->SP_DMA = 1; mb(); /* make sure DMA is stopped before next IO */ /* restore the CONTROL bits (minus the direction flag) */ - DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; + regs->CNTR = CNTR_PDMD | CNTR_INTEN; mb(); /* make sure CNTR is updated before next IO */ /* copy from a bounce buffer, if necessary */ - if (status && hdata->dma_bounce_buffer) { + if (status && wh->dma_bounce_buffer) { if (SCpnt) { - if (hdata->dma_dir && SCpnt) - memcpy(SCpnt->SCp.ptr, - hdata->dma_bounce_buffer, + if (wh->dma_dir && SCpnt) + memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, SCpnt->SCp.this_residual); - kfree(hdata->dma_bounce_buffer); - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; } else { - kfree(hdata->dma_bounce_buffer); - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; } } } -static int __init a3000_detect(struct scsi_host_template *tpnt) -{ - wd33c93_regs regs; - struct WD33C93_hostdata *hdata; - - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI)) - return 0; - if (!request_mem_region(0xDD0000, 256, "wd33c93")) - return 0; - - tpnt->proc_name = "A3000"; - tpnt->proc_info = &wd33c93_proc_info; - - a3000_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); - if (a3000_host == NULL) - goto fail_register; - - a3000_host->base = ZTWO_VADDR(0xDD0000); - a3000_host->irq = IRQ_AMIGA_PORTS; - DMA(a3000_host)->DAWR = DAWR_A3000; - regs.SASR = &(DMA(a3000_host)->SASR); - regs.SCMD = &(DMA(a3000_host)->SCMD); - hdata = shost_priv(a3000_host); - hdata->no_sync = 0xff; - hdata->fast = 0; - hdata->dma_mode = CTRL_DMA; - wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15); - if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI", - a3000_intr)) - goto fail_irq; - DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN; - - return 1; - -fail_irq: - scsi_unregister(a3000_host); -fail_register: - release_mem_region(0xDD0000, 256); - return 0; -} - static int a3000_bus_reset(struct scsi_cmnd *cmd) { + struct Scsi_Host *instance = cmd->device->host; + /* FIXME perform bus-specific reset */ /* FIXME 2: kill this entire function, which should cause mid-layer to call wd33c93_host_reset anyway? 
*/ - spin_lock_irq(cmd->device->host->host_lock); + spin_lock_irq(instance->host_lock); wd33c93_host_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); + spin_unlock_irq(instance->host_lock); return SUCCESS; } -#define HOSTS_C - -static struct scsi_host_template driver_template = { - .proc_name = "A3000", +static struct scsi_host_template amiga_a3000_scsi_template = { + .module = THIS_MODULE, .name = "Amiga 3000 built-in SCSI", - .detect = a3000_detect, - .release = a3000_release, + .proc_info = wd33c93_proc_info, + .proc_name = "A3000", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_bus_reset_handler = a3000_bus_reset, @@ -234,15 +193,104 @@ static struct scsi_host_template driver_template = { .use_clustering = ENABLE_CLUSTERING }; +static int __init amiga_a3000_scsi_probe(struct platform_device *pdev) +{ + struct resource *res; + struct Scsi_Host *instance; + int error; + struct a3000_scsiregs *regs; + wd33c93_regs wdregs; + struct a3000_hostdata *hdata; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (!request_mem_region(res->start, resource_size(res), "wd33c93")) + return -EBUSY; + + instance = scsi_host_alloc(&amiga_a3000_scsi_template, + sizeof(struct a3000_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + + instance->irq = IRQ_AMIGA_PORTS; -#include "scsi_module.c" + regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start); + regs->DAWR = DAWR_A3000; + + wdregs.SASR = ®s->SASR; + wdregs.SCMD = ®s->SCMD; + + hdata = shost_priv(instance); + hdata->wh.no_sync = 0xff; + hdata->wh.fast = 0; + hdata->wh.dma_mode = CTRL_DMA; + hdata->regs = regs; + + wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15); + error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, + "A3000 SCSI", instance); + if (error) + goto fail_irq; + + regs->CNTR = CNTR_PDMD | CNTR_INTEN; + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + platform_set_drvdata(pdev, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + free_irq(IRQ_AMIGA_PORTS, instance); +fail_irq: + scsi_host_put(instance); +fail_alloc: + release_mem_region(res->start, resource_size(res)); + return error; +} + +static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *instance = platform_get_drvdata(pdev); + struct a3000_hostdata *hdata = shost_priv(instance); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + hdata->regs->CNTR = 0; + scsi_remove_host(instance); + free_irq(IRQ_AMIGA_PORTS, instance); + scsi_host_put(instance); + release_mem_region(res->start, resource_size(res)); + return 0; +} + +static struct platform_driver amiga_a3000_scsi_driver = { + .remove = __exit_p(amiga_a3000_scsi_remove), + .driver = { + .name = "amiga-a3000-scsi", + .owner = THIS_MODULE, + }, +}; + +static int __init amiga_a3000_scsi_init(void) +{ + return platform_driver_probe(&amiga_a3000_scsi_driver, + amiga_a3000_scsi_probe); +} +module_init(amiga_a3000_scsi_init); -static int a3000_release(struct Scsi_Host *instance) +static void __exit amiga_a3000_scsi_exit(void) { - DMA(instance)->CNTR = 0; - release_mem_region(0xDD0000, 256); - free_irq(IRQ_AMIGA_PORTS, a3000_intr); - return 1; + platform_driver_unregister(&amiga_a3000_scsi_driver); } +module_exit(amiga_a3000_scsi_exit); +MODULE_DESCRIPTION("Amiga 3000 built-in SCSI"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-a3000-scsi"); diff --git a/drivers/scsi/a3000.h 
b/drivers/scsi/a3000.h index 684813ee378c..49db4a335aab 100644 --- a/drivers/scsi/a3000.h +++ b/drivers/scsi/a3000.h @@ -25,7 +25,7 @@ */ #define A3000_XFER_MASK (0x00000003) -typedef struct { +struct a3000_scsiregs { unsigned char pad1[2]; volatile unsigned short DAWR; volatile unsigned int WTC; @@ -46,7 +46,7 @@ typedef struct { volatile unsigned char SASR; unsigned char pad9; volatile unsigned char SCMD; -} a3000_scsiregs; +}; #define DAWR_A3000 (3) diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c index 11ae6be8aeaf..23c76f41883c 100644 --- a/drivers/scsi/a4000t.c +++ b/drivers/scsi/a4000t.c @@ -20,10 +20,6 @@ #include "53c700.h" -MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>"); -MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver"); -MODULE_LICENSE("GPL"); - static struct scsi_host_template a4000t_scsi_driver_template = { .name = "A4000T builtin SCSI", @@ -32,30 +28,35 @@ static struct scsi_host_template a4000t_scsi_driver_template = { .module = THIS_MODULE, }; -static struct platform_device *a4000t_scsi_device; -#define A4000T_SCSI_ADDR 0xdd0040 +#define A4000T_SCSI_OFFSET 0x40 -static int __devinit a4000t_probe(struct platform_device *dev) +static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev) { - struct Scsi_Host *host; + struct resource *res; + phys_addr_t scsi_addr; struct NCR_700_Host_Parameters *hostdata; + struct Scsi_Host *host; - if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI))) - goto out; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; - if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000, + if (!request_mem_region(res->start, resource_size(res), "A4000T builtin SCSI")) - goto out; + return -EBUSY; - hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); + hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), + GFP_KERNEL); if (!hostdata) { - printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n"); + dev_err(&pdev->dev, "Failed to allocate host data\n"); goto out_release; } + scsi_addr = res->start + A4000T_SCSI_OFFSET; + /* Fill in the required pieces of hostdata */ - hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR); + hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr); hostdata->clock = 50; hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; @@ -63,26 +64,25 @@ static int __devinit a4000t_probe(struct platform_device *dev) /* and register the chip */ host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, - &dev->dev); + &pdev->dev); if (!host) { - printk(KERN_ERR "a4000t-scsi: No host detected; " - "board configuration problem?\n"); + dev_err(&pdev->dev, + "No host detected; board configuration problem?\n"); goto out_free; } host->this_id = 7; - host->base = A4000T_SCSI_ADDR; + host->base = scsi_addr; host->irq = IRQ_AMIGA_PORTS; if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", host)) { - printk(KERN_ERR "a4000t-scsi: request_irq failed\n"); + dev_err(&pdev->dev, "request_irq failed\n"); goto out_put_host; } - platform_set_drvdata(dev, host); + platform_set_drvdata(pdev, host); scsi_scan_host(host); - return 0; out_put_host: @@ -90,58 +90,49 @@ static int __devinit a4000t_probe(struct platform_device *dev) out_free: kfree(hostdata); out_release: - release_mem_region(A4000T_SCSI_ADDR, 0x1000); - out: + release_mem_region(res->start, resource_size(res)); return -ENODEV; } -static __devexit int a4000t_device_remove(struct platform_device *dev) +static int __exit 
amiga_a4000t_scsi_remove(struct platform_device *pdev) { - struct Scsi_Host *host = platform_get_drvdata(dev); + struct Scsi_Host *host = platform_get_drvdata(pdev); struct NCR_700_Host_Parameters *hostdata = shost_priv(host); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); scsi_remove_host(host); - NCR_700_release(host); kfree(hostdata); free_irq(host->irq, host); - release_mem_region(A4000T_SCSI_ADDR, 0x1000); - + release_mem_region(res->start, resource_size(res)); return 0; } -static struct platform_driver a4000t_scsi_driver = { - .driver = { - .name = "a4000t-scsi", - .owner = THIS_MODULE, +static struct platform_driver amiga_a4000t_scsi_driver = { + .remove = __exit_p(amiga_a4000t_scsi_remove), + .driver = { + .name = "amiga-a4000t-scsi", + .owner = THIS_MODULE, }, - .probe = a4000t_probe, - .remove = __devexit_p(a4000t_device_remove), }; -static int __init a4000t_scsi_init(void) +static int __init amiga_a4000t_scsi_init(void) { - int err; - - err = platform_driver_register(&a4000t_scsi_driver); - if (err) - return err; - - a4000t_scsi_device = platform_device_register_simple("a4000t-scsi", - -1, NULL, 0); - if (IS_ERR(a4000t_scsi_device)) { - platform_driver_unregister(&a4000t_scsi_driver); - return PTR_ERR(a4000t_scsi_device); - } - - return err; + return platform_driver_probe(&amiga_a4000t_scsi_driver, + amiga_a4000t_scsi_probe); } -static void __exit a4000t_scsi_exit(void) +module_init(amiga_a4000t_scsi_init); + +static void __exit amiga_a4000t_scsi_exit(void) { - platform_device_unregister(a4000t_scsi_device); - platform_driver_unregister(&a4000t_scsi_driver); + platform_driver_unregister(&amiga_a4000t_scsi_driver); } -module_init(a4000t_scsi_init); -module_exit(a4000t_scsi_exit); +module_exit(amiga_a4000t_scsi_exit); + +MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / " + "Kars de Jong <jongk@linux-m68k.org>"); +MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-a4000t-scsi"); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 9c0c91178538..1a5bf5724750 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) /* Does this really need to be GFP_DMA? 
*/ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { - kfree (usg); - dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", + dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", usg->sg[i].count,i,usg->count)); + kfree(usg); rcode = -ENOMEM; goto cleanup; } diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index e9373a2d14fa..33898b61fdb5 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -705,12 +705,17 @@ static int aac_cfg_open(struct inode *inode, struct file *file) * Bugs: Needs to handle hot plugging */ -static int aac_cfg_ioctl(struct inode *inode, struct file *file, +static long aac_cfg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { + int ret; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - return aac_do_ioctl(file->private_data, cmd, (void __user *)arg); + lock_kernel(); + ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg); + unlock_kernel(); + + return ret; } #ifdef CONFIG_COMPAT @@ -1029,7 +1034,7 @@ ssize_t aac_get_serial_number(struct device *device, char *buf) static const struct file_operations aac_cfg_fops = { .owner = THIS_MODULE, - .ioctl = aac_cfg_ioctl, + .unlocked_ioctl = aac_cfg_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = aac_compat_cfg_ioctl, #endif diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index ab646e580d64..ce5371b3cdd5 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -48,7 +48,7 @@ struct device_attribute; /*The limit of outstanding scsi command that firmware can handle*/ #define ARCMSR_MAX_OUTSTANDING_CMD 256 #define ARCMSR_MAX_FREECCB_NUM 320 -#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27" +#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/11/03" #define ARCMSR_SCSI_INITIATOR_ID 255 #define ARCMSR_MAX_XFER_SECTORS 512 #define ARCMSR_MAX_XFER_SECTORS_B 4096 @@ -110,6 +110,8 @@ struct CMD_MESSAGE_FIELD #define FUNCTION_SAY_HELLO 0x0807 #define FUNCTION_SAY_GOODBYE 0x0808 #define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 +#define FUNCTION_GET_FIRMWARE_STATUS 0x080A +#define FUNCTION_HARDWARE_RESET 0x080B /* ARECA IO CONTROL CODE*/ #define ARCMSR_MESSAGE_READ_RQBUFFER \ ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER @@ -133,6 +135,7 @@ struct CMD_MESSAGE_FIELD #define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 #define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 #define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F +#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088 /* ************************************************************* ** structure for holding DMA address data @@ -341,13 +344,13 @@ struct MessageUnit_B uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; uint32_t postq_index; uint32_t doneq_index; - void __iomem *drv2iop_doorbell_reg; - void __iomem *drv2iop_doorbell_mask_reg; - void __iomem *iop2drv_doorbell_reg; - void __iomem *iop2drv_doorbell_mask_reg; - void __iomem *msgcode_rwbuffer_reg; - void __iomem *ioctl_wbuffer_reg; - void __iomem *ioctl_rbuffer_reg; + uint32_t __iomem *drv2iop_doorbell_reg; + uint32_t __iomem *drv2iop_doorbell_mask_reg; + uint32_t __iomem *iop2drv_doorbell_reg; + uint32_t __iomem *iop2drv_doorbell_mask_reg; + uint32_t __iomem *msgcode_rwbuffer_reg; + uint32_t __iomem *ioctl_wbuffer_reg; + uint32_t __iomem *ioctl_rbuffer_reg; }; /* @@ -375,6 +378,7 @@ struct AdapterControlBlock /* message unit ATU inbound base address0 */ uint32_t acb_flags; + uint8_t adapter_index; #define 
ACB_F_SCSISTOPADAPTER 0x0001 #define ACB_F_MSG_STOP_BGRB 0x0002 /* stop RAID background rebuild */ @@ -390,7 +394,7 @@ struct AdapterControlBlock #define ACB_F_BUS_RESET 0x0080 #define ACB_F_IOP_INITED 0x0100 /* iop init */ - + #define ACB_F_FIRMWARE_TRAP 0x0400 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; /* used for memory free */ struct list_head ccb_free_list; @@ -423,12 +427,19 @@ struct AdapterControlBlock #define ARECA_RAID_GOOD 0xaa uint32_t num_resets; uint32_t num_aborts; + uint32_t signature; uint32_t firm_request_len; uint32_t firm_numbers_queue; uint32_t firm_sdram_size; uint32_t firm_hd_channels; char firm_model[12]; char firm_version[20]; + char device_map[20]; /*21,84-99*/ + struct work_struct arcmsr_do_message_isr_bh; + struct timer_list eternal_timer; + unsigned short fw_state; + atomic_t rq_map_token; + int ante_token_value; };/* HW_DEVICE_EXTENSION */ /* ******************************************************************************* diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index a4e04c50c436..07fdfe57e38e 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -192,6 +192,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = { .attr = { .name = "mu_read", .mode = S_IRUSR , + .owner = THIS_MODULE, }, .size = 1032, .read = arcmsr_sysfs_iop_message_read, @@ -201,6 +202,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = { .attr = { .name = "mu_write", .mode = S_IWUSR, + .owner = THIS_MODULE, }, .size = 1032, .write = arcmsr_sysfs_iop_message_write, @@ -210,6 +212,7 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = { .attr = { .name = "mu_clear", .mode = S_IWUSR, + .owner = THIS_MODULE, }, .size = 1, .write = arcmsr_sysfs_iop_message_clear, diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index ffbe2192da3c..ffa54792bb33 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -72,8 +72,16 @@ #include <scsi/scsicam.h> #include "arcmsr.h" +#ifdef CONFIG_SCSI_ARCMSR_RESET + static int sleeptime = 20; + static int retrycount = 12; + module_param(sleeptime, int, S_IRUGO|S_IWUSR); + MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset"); + module_param(retrycount, int, S_IRUGO|S_IWUSR); + MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset"); +#endif MODULE_AUTHOR("Erich Chen <support@areca.com.tw>"); -MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter"); +MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(ARCMSR_DRIVER_VERSION); @@ -96,6 +104,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); +static void arcmsr_request_device_map(unsigned long pacb); +static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb); +static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb); +static void arcmsr_message_isr_bh_fn(struct work_struct *work); +static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode); +static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); + static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t 
arcmsr_interrupt(struct AdapterControlBlock *acb); static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, @@ -112,7 +127,7 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, static struct scsi_host_template arcmsr_scsi_host_template = { .module = THIS_MODULE, - .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter" + .name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter" ARCMSR_DRIVER_VERSION, .info = arcmsr_info, .queuecommand = arcmsr_queue_command, @@ -128,16 +143,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = arcmsr_host_attrs, }; -#ifdef CONFIG_SCSI_ARCMSR_AER -static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev); -static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state); - -static struct pci_error_handlers arcmsr_pci_error_handlers = { - .error_detected = arcmsr_pci_error_detected, - .slot_reset = arcmsr_pci_slot_reset, -}; -#endif static struct pci_device_id arcmsr_device_id_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, @@ -166,9 +171,6 @@ static struct pci_driver arcmsr_pci_driver = { .probe = arcmsr_probe, .remove = arcmsr_remove, .shutdown = arcmsr_shutdown, - #ifdef CONFIG_SCSI_ARCMSR_AER - .err_handler = &arcmsr_pci_error_handlers, - #endif }; static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) @@ -236,10 +238,9 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) void *dma_coherent; dma_addr_t dma_coherent_handle, dma_addr; struct CommandControlBlock *ccb_tmp; - uint32_t intmask_org; int i, j; - acb->pmuA = pci_ioremap_bar(pdev, 0); + acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!acb->pmuA) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); @@ -281,12 +282,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) for (i = 0; i < ARCMSR_MAX_TARGETID; i++) for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) acb->devstate[i][j] = ARECA_RAID_GONE; - - /* - ** here we need to tell iop 331 our ccb_tmp.HighPart - ** if ccb_tmp.HighPart is not zero - */ - intmask_org = arcmsr_disable_outbound_ints(acb); } break; @@ -297,7 +292,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) void __iomem *mem_base0, *mem_base1; void *dma_coherent; dma_addr_t dma_coherent_handle, dma_addr; - uint32_t intmask_org; struct CommandControlBlock *ccb_tmp; int i, j; @@ -333,11 +327,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) reg = (struct MessageUnit_B *)(dma_coherent + ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); acb->pmuB = reg; - mem_base0 = pci_ioremap_bar(pdev, 0); + mem_base0 = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); if (!mem_base0) goto out; - mem_base1 = pci_ioremap_bar(pdev, 2); + mem_base1 = ioremap(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); if (!mem_base1) { iounmap(mem_base0); goto out; @@ -357,12 +353,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) for (i = 0; i < ARCMSR_MAX_TARGETID; i++) for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) acb->devstate[i][j] = ARECA_RAID_GOOD; - - /* - ** here we need to tell iop 331 our ccb_tmp.HighPart - ** if ccb_tmp.HighPart is not zero - */ - intmask_org = arcmsr_disable_outbound_ints(acb); } break; } @@ -374,6 +364,88 @@ out: sizeof(struct MessageUnit_B)), acb->dma_coherent, 
acb->dma_coherent_handle); return -ENOMEM; } +static void arcmsr_message_isr_bh_fn(struct work_struct *work) +{ + struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh); + + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + + struct MessageUnit_A __iomem *reg = acb->pmuA; + char *acb_dev_map = (char *)acb->device_map; + uint32_t __iomem *signature = (uint32_t __iomem *) (®->message_rwbuffer[0]); + char __iomem *devicemap = (char __iomem *) (®->message_rwbuffer[21]); + int target, lun; + struct scsi_device *psdev; + char diff; + + atomic_inc(&acb->rq_map_token); + if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { + for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { + diff = (*acb_dev_map)^readb(devicemap); + if (diff != 0) { + char temp; + *acb_dev_map = readb(devicemap); + temp = *acb_dev_map; + for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { + if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { + scsi_add_device(acb->host, 0, target, lun); + } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { + psdev = scsi_device_lookup(acb->host, 0, target, lun); + if (psdev != NULL) { + scsi_remove_device(psdev); + scsi_device_put(psdev); + } + } + temp >>= 1; + diff >>= 1; + } + } + devicemap++; + acb_dev_map++; + } + } + break; + } + + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + char *acb_dev_map = (char *)acb->device_map; + uint32_t __iomem *signature = (uint32_t __iomem *)(®->msgcode_rwbuffer_reg[0]); + char __iomem *devicemap = (char __iomem *)(®->msgcode_rwbuffer_reg[21]); + int target, lun; + struct scsi_device *psdev; + char diff; + + atomic_inc(&acb->rq_map_token); + if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { + for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { + diff = (*acb_dev_map)^readb(devicemap); + if (diff != 0) { + char temp; + *acb_dev_map = readb(devicemap); + temp = *acb_dev_map; + for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { + if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { + scsi_add_device(acb->host, 0, target, lun); + } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { + psdev = scsi_device_lookup(acb->host, 0, target, lun); + if (psdev != NULL) { + scsi_remove_device(psdev); + scsi_device_put(psdev); + } + } + temp >>= 1; + diff >>= 1; + } + } + devicemap++; + acb_dev_map++; + } + } + } + } +} static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -432,17 +504,17 @@ static int arcmsr_probe(struct pci_dev *pdev, ACB_F_MESSAGE_WQBUFFER_READED); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; INIT_LIST_HEAD(&acb->ccb_free_list); - + INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); error = arcmsr_alloc_ccb_pool(acb); if (error) goto out_release_regions; + arcmsr_iop_init(acb); error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb); if (error) goto out_free_ccb_pool; - arcmsr_iop_init(acb); pci_set_drvdata(pdev, host); if (strncmp(acb->firm_version, "V1.42", 5) >= 0) host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B; @@ -459,6 +531,14 @@ static int arcmsr_probe(struct pci_dev *pdev, #ifdef CONFIG_SCSI_ARCMSR_AER pci_enable_pcie_error_reporting(pdev); #endif + atomic_set(&acb->rq_map_token, 16); + acb->fw_state = true; + init_timer(&acb->eternal_timer); + acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ); + acb->eternal_timer.data = (unsigned long) acb; + acb->eternal_timer.function = &arcmsr_request_device_map; + add_timer(&acb->eternal_timer); + return 0; out_free_sysfs: 
out_free_irq: @@ -518,40 +598,48 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) return 0xff; } -static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) +static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); - if (arcmsr_hba_wait_msgint_ready(acb)) + if (arcmsr_hba_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout \n" , acb->host->host_no); + return 0xff; + } + return 0x00; } -static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) +static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); - if (arcmsr_hbb_wait_msgint_ready(acb)) + if (arcmsr_hbb_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout \n" , acb->host->host_no); + return 0xff; + } + return 0x00; } -static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) +static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { + uint8_t rtnval = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { - arcmsr_abort_hba_allcmd(acb); + rtnval = arcmsr_abort_hba_allcmd(acb); } break; case ACB_ADAPTER_TYPE_B: { - arcmsr_abort_hbb_allcmd(acb); + rtnval = arcmsr_abort_hbb_allcmd(acb); } } + return rtnval; } static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) @@ -649,8 +737,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_A : { struct MessageUnit_A __iomem *reg = acb->pmuA; - orig_mask = readl(®->outbound_intmask)|\ - ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; + orig_mask = readl(®->outbound_intmask); writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ ®->outbound_intmask); } @@ -658,8 +745,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_B : { struct MessageUnit_B *reg = acb->pmuB; - orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ - (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); + orig_mask = readl(reg->iop2drv_doorbell_mask_reg); writel(0, reg->iop2drv_doorbell_mask_reg); } break; @@ -795,12 +881,13 @@ static void arcmsr_remove(struct pci_dev *pdev) struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; int poll_count = 0; - arcmsr_free_sysfs_attr(acb); scsi_remove_host(host); + flush_scheduled_work(); + del_timer_sync(&acb->eternal_timer); + arcmsr_disable_outbound_ints(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); - arcmsr_disable_outbound_ints(acb); acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; @@ -841,7 +928,9 @@ static void arcmsr_shutdown(struct pci_dev *pdev) struct Scsi_Host *host = pci_get_drvdata(pdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; - + del_timer_sync(&acb->eternal_timer); + arcmsr_disable_outbound_ints(acb); + flush_scheduled_work(); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); } @@ -861,7 +950,7 @@ static void arcmsr_module_exit(void) module_init(arcmsr_module_init); module_exit(arcmsr_module_exit); -static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ +static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, u32 intmask_org) { u32 mask; @@ -871,7 +960,8 @@ static void arcmsr_enable_outbound_ints(struct 
AdapterControlBlock *acb, \ case ACB_ADAPTER_TYPE_A : { struct MessageUnit_A __iomem *reg = acb->pmuA; mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | - ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); + ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE| + ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); writel(mask, ®->outbound_intmask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; } @@ -879,8 +969,10 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ case ACB_ADAPTER_TYPE_B : { struct MessageUnit_B *reg = acb->pmuB; - mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ - ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); + mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | + ARCMSR_IOP2DRV_DATA_READ_OK | + ARCMSR_IOP2DRV_CDB_DONE | + ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); writel(mask, reg->iop2drv_doorbell_mask_reg); acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; } @@ -1048,8 +1140,8 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) } case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; - iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); - iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); + iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); + iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); dma_free_coherent(&acb->pdev->dev, (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); @@ -1249,13 +1341,36 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) reg->doneq_index = index; } } +/* +********************************************************************************** +** Handle a message interrupt +** +** The only message interrupt we expect is in response to a query for the current adapter config. +** We want this in order to compare the drivemap so that we can detect newly-attached drives. 
+********************************************************************************** +*/ +static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A *reg = acb->pmuA; + + /*clear interrupt and message state*/ + writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus); + schedule_work(&acb->arcmsr_do_message_isr_bh); +} +static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + /*clear interrupt and message state*/ + writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); + schedule_work(&acb->arcmsr_do_message_isr_bh); +} static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) { uint32_t outbound_intstatus; struct MessageUnit_A __iomem *reg = acb->pmuA; - outbound_intstatus = readl(®->outbound_intstatus) & \ + outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable; if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { return 1; @@ -1267,6 +1382,10 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hba_postqueue_isr(acb); } + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { + /* messenger of "driver to iop commands" */ + arcmsr_hba_message_isr(acb); + } return 0; } @@ -1275,13 +1394,14 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) uint32_t outbound_doorbell; struct MessageUnit_B *reg = acb->pmuB; - outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ + outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & acb->outbound_int_enable; if (!outbound_doorbell) return 1; writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); - /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/ + /*in case the last action of doorbell interrupt clearance is cached, + this action can push HW to write down the clear bit*/ readl(reg->iop2drv_doorbell_reg); writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { @@ -1293,6 +1413,10 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { arcmsr_hbb_postqueue_isr(acb); } + if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { + /* messenger of "driver to iop commands" */ + arcmsr_hbb_message_isr(acb); + } return 0; } @@ -1360,7 +1484,7 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) } } -static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ +static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd) { struct CMD_MESSAGE_FIELD *pcmdmessagefld; @@ -1398,6 +1522,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } + + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } + ptmpQbuffer = ver_addr; while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { @@ -1444,6 +1575,12 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } + ptmpuserbuffer = ver_addr; user_len = pcmdmessagefld->cmdmessage.Length; memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); 
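The bottom half added for the message interrupt (arcmsr_message_isr_bh_fn, a few hunks up) implements hot-plug detection by XOR-ing the driver's cached per-target LUN bitmap against the copy the firmware deposited in message_rwbuffer: a changed bit that is now set triggers scsi_add_device(), a changed bit that is now clear looks the device up and removes it. The walk, condensed into a sketch; sync_device_map() and its cached/fresh parameters are hypothetical names introduced here for illustration, and the real driver iterates ARCMSR_MAX_TARGETLUN bits per target:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Reconcile one target's cached LUN bitmap with the firmware's fresh copy. */
static void sync_device_map(struct Scsi_Host *host, int target,
			    unsigned char *cached, unsigned char fresh)
{
	unsigned char diff = *cached ^ fresh;	/* bits that changed since the last poll */
	unsigned char temp = fresh;
	int lun;

	*cached = fresh;
	for (lun = 0; lun < 8; lun++) {		/* ARCMSR_MAX_TARGETLUN in the driver */
		if ((diff & 0x01) && (temp & 0x01)) {
			/* bit flipped on: a drive appeared at this LUN */
			scsi_add_device(host, 0, target, lun);
		} else if ((diff & 0x01) && !(temp & 0x01)) {
			/* bit flipped off: the drive went away */
			struct scsi_device *sdev =
				scsi_device_lookup(host, 0, target, lun);
			if (sdev) {
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
			}
		}
		temp >>= 1;
		diff >>= 1;
	}
}

Running this from schedule_work() rather than in the interrupt handler itself matters, because scsi_add_device() and scsi_remove_device() can sleep.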
@@ -1496,6 +1633,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { uint8_t *pQbuffer = acb->rqbuffer; + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; @@ -1511,6 +1653,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { uint8_t *pQbuffer = acb->wqbuffer; + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; @@ -1529,6 +1676,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { uint8_t *pQbuffer; + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; @@ -1551,13 +1703,22 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ break; case ARCMSR_MESSAGE_RETURN_CODE_3F: { + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; } break; case ARCMSR_MESSAGE_SAY_HELLO: { int8_t *hello_string = "Hello! I am ARCMSR"; - + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } memcpy(pcmdmessagefld->messagedatabuffer, hello_string , (int16_t)strlen(hello_string)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; @@ -1565,10 +1726,20 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ break; case ARCMSR_MESSAGE_SAY_GOODBYE: + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } arcmsr_iop_parking(acb); break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } arcmsr_flush_adapter_cache(acb); break; @@ -1651,16 +1822,57 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, struct CommandControlBlock *ccb; int target = cmd->device->id; int lun = cmd->device->lun; - + uint8_t scsicmd = cmd->cmnd[0]; cmd->scsi_done = done; cmd->host_scribble = NULL; cmd->result = 0; + + if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) { + if (acb->devstate[target][lun] == ARECA_RAID_GONE) { + cmd->result = (DID_NO_CONNECT << 16); + } + cmd->scsi_done(cmd); + return 0; + } + if (acb->acb_flags & ACB_F_BUS_RESET) { - printk(KERN_NOTICE "arcmsr%d: bus reset" - " and return busy \n" - , acb->host->host_no); + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + uint32_t intmask_org, outbound_doorbell; + + if ((readl(®->outbound_msgaddr1) & + ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { + printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n", + acb->host->host_no); return SCSI_MLQUEUE_HOST_BUSY; } + + acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP; + printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n", + acb->host->host_no); + /* disable all outbound interrupt */ + intmask_org = 
arcmsr_disable_outbound_ints(acb); + arcmsr_get_firmware_spec(acb, 1); + /*start background rebuild*/ + arcmsr_start_adapter_bgrb(acb); + /* clear Qbuffer if door bell ringed */ + outbound_doorbell = readl(®->outbound_doorbell); + /*clear interrupt */ + writel(outbound_doorbell, ®->outbound_doorbell); + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, + ®->inbound_doorbell); + /* enable outbound Post Queue,outbound doorbell Interrupt */ + arcmsr_enable_outbound_ints(acb, intmask_org); + acb->acb_flags |= ACB_F_IOP_INITED; + acb->acb_flags &= ~ACB_F_BUS_RESET; + } + break; + case ACB_ADAPTER_TYPE_B: { + } + } + } + if (target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, cmd); @@ -1699,21 +1911,25 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, return 0; } -static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) +static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode) { struct MessageUnit_A __iomem *reg = acb->pmuA; char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; + char *acb_device_map = acb->device_map; char __iomem *iop_firm_model = (char __iomem *)(®->message_rwbuffer[15]); char __iomem *iop_firm_version = (char __iomem *)(®->message_rwbuffer[17]); + char __iomem *iop_device_map = (char __iomem *) (®->message_rwbuffer[21]); int count; writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); if (arcmsr_hba_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ miscellaneous data' timeout \n", acb->host->host_no); + return NULL; } + if (mode == 1) { count = 8; while (count) { *acb_firm_model = readb(iop_firm_model); @@ -1730,34 +1946,48 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) count--; } + count = 16; + while (count) { + *acb_device_map = readb(iop_device_map); + acb_device_map++; + iop_device_map++; + count--; + } + printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" , acb->host->host_no , acb->firm_version); - + acb->signature = readl(®->message_rwbuffer[0]); acb->firm_request_len = readl(®->message_rwbuffer[1]); acb->firm_numbers_queue = readl(®->message_rwbuffer[2]); acb->firm_sdram_size = readl(®->message_rwbuffer[3]); acb->firm_hd_channels = readl(®->message_rwbuffer[4]); } - -static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) + return reg->message_rwbuffer; +} +static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode) { struct MessageUnit_B *reg = acb->pmuB; uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; + char *acb_device_map = acb->device_map; char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); /*firm_model,15,60-67*/ char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); /*firm_version,17,68-83*/ + char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]); + /*firm_version,21,84-99*/ int count; writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); if (arcmsr_hbb_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ miscellaneous data' timeout \n", acb->host->host_no); + return NULL; } + if (mode == 1) { count = 8; while (count) { @@ -1776,11 +2006,20 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) count--; } + count = 16; + while (count) { + *acb_device_map = readb(iop_device_map); + acb_device_map++; + iop_device_map++; + count--; + } + printk(KERN_INFO "ARECA 
RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->host->host_no, acb->firm_version); - lrwbuffer++; + acb->signature = readl(lrwbuffer++); + /*firm_signature,1,00-03*/ acb->firm_request_len = readl(lrwbuffer++); /*firm_request_len,1,04-07*/ acb->firm_numbers_queue = readl(lrwbuffer++); @@ -1790,20 +2029,23 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) acb->firm_hd_channels = readl(lrwbuffer); /*firm_ide_channels,4,16-19*/ } - -static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) + return reg->msgcode_rwbuffer_reg; +} +static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode) { + void *rtnval = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { - arcmsr_get_hba_config(acb); + rtnval = arcmsr_get_hba_config(acb, mode); } break; case ACB_ADAPTER_TYPE_B: { - arcmsr_get_hbb_config(acb); + rtnval = arcmsr_get_hbb_config(acb, mode); } break; } + return rtnval; } static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, @@ -2043,6 +2285,66 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) } } +static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + + if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { + acb->fw_state = false; + } else { + /*to prevent rq_map_token from changing by other interrupt, then + avoid the dead-lock*/ + acb->fw_state = true; + atomic_dec(&acb->rq_map_token); + if (!(acb->fw_state) || + (acb->ante_token_value == atomic_read(&acb->rq_map_token))) { + atomic_set(&acb->rq_map_token, 16); + } + acb->ante_token_value = atomic_read(&acb->rq_map_token); + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + } + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000)); + return; +} + +static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B __iomem *reg = acb->pmuB; + + if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { + acb->fw_state = false; + } else { + /*to prevent rq_map_token from changing by other interrupt, then + avoid the dead-lock*/ + acb->fw_state = true; + atomic_dec(&acb->rq_map_token); + if (!(acb->fw_state) || + (acb->ante_token_value == atomic_read(&acb->rq_map_token))) { + atomic_set(&acb->rq_map_token, 16); + } + acb->ante_token_value = atomic_read(&acb->rq_map_token); + writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); + } + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000)); + return; +} + +static void arcmsr_request_device_map(unsigned long pacb) +{ + struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb; + + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + arcmsr_request_hba_device_map(acb); + } + break; + case ACB_ADAPTER_TYPE_B: { + arcmsr_request_hbb_device_map(acb); + } + break; + } +} + static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -2121,6 +2423,60 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) return; } +static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) +{ + uint8_t value[64]; + int i; + + /* backup pci config data */ + for (i = 0; i < 64; i++) { + pci_read_config_byte(acb->pdev, i, &value[i]); + } + /* hardware reset signal */ + pci_write_config_byte(acb->pdev, 0x84, 0x20); + msleep(1000); + /* write back pci config data */ + for (i = 0; i < 64; i++) { + pci_write_config_byte(acb->pdev, i, value[i]); + } + msleep(1000); + return; +} +/* 
+**************************************************************************** +**************************************************************************** +*/ +#ifdef CONFIG_SCSI_ARCMSR_RESET + int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd) + { + struct Scsi_Host *shost = NULL; + spinlock_t *host_lock = NULL; + int i, isleep; + + shost = cmd->device->host; + host_lock = shost->host_lock; + + printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n", + shost->host_no, sleeptime, shost->host_busy, shost->can_queue); + isleep = sleeptime / 10; + spin_unlock_irq(host_lock); + if (isleep > 0) { + for (i = 0; i < isleep; i++) { + msleep(10000); + printk(KERN_NOTICE "^%d^\n", i); + } + } + + isleep = sleeptime % 10; + if (isleep > 0) { + msleep(isleep * 1000); + printk(KERN_NOTICE "^v^\n"); + } + spin_lock_irq(host_lock); + printk(KERN_NOTICE "***** wake up *****\n"); + return 0; + } +#endif static void arcmsr_iop_init(struct AdapterControlBlock *acb) { uint32_t intmask_org; @@ -2129,7 +2485,7 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb) intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); - arcmsr_get_firmware_spec(acb); + arcmsr_get_firmware_spec(acb, 1); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if door bell ringed */ @@ -2140,51 +2496,110 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb) acb->acb_flags |= ACB_F_IOP_INITED; } -static void arcmsr_iop_reset(struct AdapterControlBlock *acb) +static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) { struct CommandControlBlock *ccb; uint32_t intmask_org; + uint8_t rtnval = 0x00; int i = 0; if (atomic_read(&acb->ccboutstandingcount) != 0) { + /* disable all outbound interrupt */ + intmask_org = arcmsr_disable_outbound_ints(acb); /* talk to iop 331 outstanding command aborted */ - arcmsr_abort_allcmd(acb); - + rtnval = arcmsr_abort_allcmd(acb); /* wait for 3 sec for all command aborted*/ ssleep(3); - - /* disable all outbound interrupt */ - intmask_org = arcmsr_disable_outbound_ints(acb); /* clear all outbound posted Q */ arcmsr_done4abort_postqueue(acb); for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START) { - ccb->startdone = ARCMSR_CCB_ABORTED; arcmsr_ccb_complete(ccb, 1); } } + atomic_set(&acb->ccboutstandingcount, 0); /* enable all outbound interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); + return rtnval; } + return rtnval; } static int arcmsr_bus_reset(struct scsi_cmnd *cmd) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)cmd->device->host->hostdata; - int i; + int retry = 0; - acb->num_resets++; + if (acb->acb_flags & ACB_F_BUS_RESET) + return SUCCESS; + + printk(KERN_NOTICE "arcmsr%d: bus reset ..... 
\n", acb->adapter_index); acb->acb_flags |= ACB_F_BUS_RESET; - for (i = 0; i < 400; i++) { - if (!atomic_read(&acb->ccboutstandingcount)) + acb->num_resets++; + while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) { + arcmsr_interrupt(acb); + retry++; + } + + if (arcmsr_iop_reset(acb)) { + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n", + acb->adapter_index, acb->num_resets, acb->num_aborts); + arcmsr_hardware_reset(acb); + acb->acb_flags |= ACB_F_FIRMWARE_TRAP; + acb->acb_flags &= ~ACB_F_IOP_INITED; + #ifdef CONFIG_SCSI_ARCMSR_RESET + struct MessageUnit_A __iomem *reg = acb->pmuA; + uint32_t intmask_org, outbound_doorbell; + int retry_count = 0; +sleep_again: + arcmsr_sleep_for_bus_reset(cmd); + if ((readl(®->outbound_msgaddr1) & + ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { + printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n", + acb->host->host_no, retry_count); + if (retry_count > retrycount) { + printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n", + acb->host->host_no); + return SUCCESS; + } + retry_count++; + goto sleep_again; + } + acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP; + acb->acb_flags |= ACB_F_IOP_INITED; + acb->acb_flags &= ~ACB_F_BUS_RESET; + printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n", + acb->host->host_no); + /* disable all outbound interrupt */ + intmask_org = arcmsr_disable_outbound_ints(acb); + arcmsr_get_firmware_spec(acb, 1); + /*start background rebuild*/ + arcmsr_start_adapter_bgrb(acb); + /* clear Qbuffer if door bell ringed */ + outbound_doorbell = readl(®->outbound_doorbell); + writel(outbound_doorbell, ®->outbound_doorbell); /*clear interrupt */ + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); + /* enable outbound Post Queue,outbound doorbell Interrupt */ + arcmsr_enable_outbound_ints(acb, intmask_org); + atomic_set(&acb->rq_map_token, 16); + init_timer(&acb->eternal_timer); + acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ); + acb->eternal_timer.data = (unsigned long) acb; + acb->eternal_timer.function = &arcmsr_request_device_map; + add_timer(&acb->eternal_timer); + #endif + } break; - arcmsr_interrupt(acb);/* FIXME: need spinlock */ - msleep(25); + case ACB_ADAPTER_TYPE_B: { } - arcmsr_iop_reset(acb); + } + } else { acb->acb_flags &= ~ACB_F_BUS_RESET; + } return SUCCESS; } @@ -2277,98 +2692,3 @@ static const char *arcmsr_info(struct Scsi_Host *host) ARCMSR_DRIVER_VERSION); return buf; } -#ifdef CONFIG_SCSI_ARCMSR_AER -static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev) -{ - struct Scsi_Host *host = pci_get_drvdata(pdev); - struct AdapterControlBlock *acb = - (struct AdapterControlBlock *) host->hostdata; - uint32_t intmask_org; - int i, j; - - if (pci_enable_device(pdev)) { - return PCI_ERS_RESULT_DISCONNECT; - } - pci_set_master(pdev); - intmask_org = arcmsr_disable_outbound_ints(acb); - acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | - ACB_F_MESSAGE_RQBUFFER_CLEARED | - ACB_F_MESSAGE_WQBUFFER_READED); - acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; - for (i = 0; i < ARCMSR_MAX_TARGETID; i++) - for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) - acb->devstate[i][j] = ARECA_RAID_GONE; - - arcmsr_wait_firmware_ready(acb); - arcmsr_iop_confirm(acb); - /* disable all outbound interrupt */ - arcmsr_get_firmware_spec(acb); - /*start background rebuild*/ - arcmsr_start_adapter_bgrb(acb); - /* empty doorbell Qbuffer if door bell ringed */ 
- arcmsr_clear_doorbell_queue_buffer(acb); - arcmsr_enable_eoi_mode(acb); - /* enable outbound Post Queue,outbound doorbell Interrupt */ - arcmsr_enable_outbound_ints(acb, intmask_org); - acb->acb_flags |= ACB_F_IOP_INITED; - - pci_enable_pcie_error_reporting(pdev); - return PCI_ERS_RESULT_RECOVERED; -} - -static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev) -{ - struct Scsi_Host *host = pci_get_drvdata(pdev); - struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; - struct CommandControlBlock *ccb; - uint32_t intmask_org; - int i = 0; - - if (atomic_read(&acb->ccboutstandingcount) != 0) { - /* talk to iop 331 outstanding command aborted */ - arcmsr_abort_allcmd(acb); - /* wait for 3 sec for all command aborted*/ - ssleep(3); - /* disable all outbound interrupt */ - intmask_org = arcmsr_disable_outbound_ints(acb); - /* clear all outbound posted Q */ - arcmsr_done4abort_postqueue(acb); - for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { - ccb = acb->pccb_pool[i]; - if (ccb->startdone == ARCMSR_CCB_START) { - ccb->startdone = ARCMSR_CCB_ABORTED; - arcmsr_ccb_complete(ccb, 1); - } - } - /* enable all outbound interrupt */ - arcmsr_enable_outbound_ints(acb, intmask_org); - } - pci_disable_device(pdev); -} - -static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev) -{ - struct Scsi_Host *host = pci_get_drvdata(pdev); - struct AdapterControlBlock *acb = \ - (struct AdapterControlBlock *)host->hostdata; - - arcmsr_stop_adapter_bgrb(acb); - arcmsr_flush_adapter_cache(acb); -} - -static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - switch (state) { - case pci_channel_io_frozen: - arcmsr_pci_ers_need_reset_forepart(pdev); - return PCI_ERS_RESULT_NEED_RESET; - case pci_channel_io_perm_failure: - arcmsr_pci_ers_disconnect_forepart(pdev); - return PCI_ERS_RESULT_DISCONNECT; - break; - default: - return PCI_ERS_RESULT_NEED_RESET; - } -} -#endif diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index e641922f20bc..350cbeaae160 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -167,10 +167,9 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, &nonemb_cmd.dma); if (nonemb_cmd.va == NULL) { SE_DEBUG(DBG_LVL_1, - "Failed to allocate memory for" - "mgmt_invalidate_icds \n"); + "Failed to allocate memory for mgmt_invalidate_icds\n"); spin_unlock(&ctrl->mbox_lock); - return -1; + return 0; } nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); req = nonemb_cmd.va; diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 0c08e185a766..3a7b3f88932f 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -84,11 +84,32 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) for (i = 0; hal_mods[i]; i++) hal_mods[i]->meminfo(cfg, &km_len, &dm_len); + dm_len += bfa_port_meminfo(); meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; } +static void +bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) +{ + struct bfa_port_s *port = &bfa->modules.port; + uint32_t dm_len; + uint8_t *dm_kva; + uint64_t dm_pa; + + dm_len = bfa_port_meminfo(); + dm_kva = bfa_meminfo_dma_virt(mi); + dm_pa = bfa_meminfo_dma_phys(mi); + + memset(port, 0, sizeof(struct bfa_port_s)); + bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm); + bfa_port_mem_claim(port, dm_kva, dm_pa); + + 
bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; + bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; +} + /** * Use this function to do attach the driver instance with the BFA * library. This function will not trigger any HW initialization @@ -140,6 +161,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, for (i = 0; hal_mods[i]; i++) hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); + bfa_com_port_attach(bfa, meminfo); } /** diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 0435d044c9da..b0c576f84b28 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -114,12 +114,13 @@ static int hba_count = 0; static struct class *adpt_sysfs_class; +static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long); #ifdef CONFIG_COMPAT static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long); #endif static const struct file_operations adpt_fops = { - .ioctl = adpt_ioctl, + .unlocked_ioctl = adpt_unlocked_ioctl, .open = adpt_open, .release = adpt_close, #ifdef CONFIG_COMPAT @@ -2069,8 +2070,7 @@ static int adpt_system_info(void __user *buffer) return 0; } -static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, - ulong arg) +static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) { int minor; int error = 0; @@ -2153,6 +2153,20 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, return error; } +static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg) +{ + struct inode *inode; + long ret; + + inode = file->f_dentry->d_inode; + + lock_kernel(); + ret = adpt_ioctl(inode, file, cmd, arg); + unlock_kernel(); + + return ret; +} + #ifdef CONFIG_COMPAT static long compat_adpt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 9276121db1ef..44a07593de56 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -688,7 +688,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev) } if (!lport->vport) - fc_host_max_npiv_vports(lport->host) = USHORT_MAX; + fc_host_max_npiv_vports(lport->host) = USHRT_MAX; snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, "%s v%s over %s", FCOE_NAME, FCOE_VERSION, diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index a765fe7a55c3..f672d6213eea 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -180,8 +180,8 @@ static const char *gdth_ctr_name(gdth_ha_str *ha); static int gdth_open(struct inode *inode, struct file *filep); static int gdth_close(struct inode *inode, struct file *filep); -static int gdth_ioctl(struct inode *inode, struct file *filep, - unsigned int cmd, unsigned long arg); +static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg); static void gdth_flush(gdth_ha_str *ha); static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); @@ -369,7 +369,7 @@ MODULE_LICENSE("GPL"); /* ioctl interface */ static const struct file_operations gdth_fops = { - .ioctl = gdth_ioctl, + .unlocked_ioctl = gdth_unlocked_ioctl, .open = gdth_open, .release = gdth_close, }; @@ -4462,8 +4462,7 @@ free_fail: return rc; } -static int gdth_ioctl(struct inode *inode, struct file *filep, - unsigned int cmd, unsigned long arg) +static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { gdth_ha_str *ha; Scsi_Cmnd *scp; @@ -4611,6 +4610,17 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, return 0; } +static long 
gdth_unlocked_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = gdth_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} /* flush routine */ static void gdth_flush(gdth_ha_str *ha) diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c index 18b7102bb80e..2ce26eb7a1ec 100644 --- a/drivers/scsi/gvp11.c +++ b/drivers/scsi/gvp11.c @@ -1,36 +1,35 @@ #include <linux/types.h> -#include <linux/mm.h> -#include <linux/slab.h> -#include <linux/blkdev.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/zorro.h> -#include <asm/setup.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/amigaints.h> #include <asm/amigahw.h> -#include <linux/zorro.h> -#include <asm/irq.h> -#include <linux/spinlock.h> #include "scsi.h" -#include <scsi/scsi_host.h> #include "wd33c93.h" #include "gvp11.h" -#include <linux/stat.h> +#define CHECK_WD33C93 -#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base)) +struct gvp11_hostdata { + struct WD33C93_hostdata wh; + struct gvp11_scsiregs *regs; +}; -static irqreturn_t gvp11_intr(int irq, void *_instance) +static irqreturn_t gvp11_intr(int irq, void *data) { + struct Scsi_Host *instance = data; + struct gvp11_hostdata *hdata = shost_priv(instance); + unsigned int status = hdata->regs->CNTR; unsigned long flags; - unsigned int status; - struct Scsi_Host *instance = (struct Scsi_Host *)_instance; - status = DMA(instance)->CNTR; if (!(status & GVP11_DMAC_INT_PENDING)) return IRQ_NONE; @@ -50,64 +49,66 @@ void gvp11_setup(char *str, int *ints) static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct Scsi_Host *instance = cmd->device->host; - struct WD33C93_hostdata *hdata = shost_priv(instance); + struct gvp11_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct gvp11_scsiregs *regs = hdata->regs; unsigned short cntr = GVP11_DMAC_INT_ENABLE; unsigned long addr = virt_to_bus(cmd->SCp.ptr); int bank_mask; static int scsi_alloc_out_of_range = 0; /* use bounce buffer if the physical address is bad */ - if (addr & hdata->dma_xfer_mask) { - hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; + if (addr & wh->dma_xfer_mask) { + wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; if (!scsi_alloc_out_of_range) { - hdata->dma_bounce_buffer = - kmalloc(hdata->dma_bounce_len, GFP_KERNEL); - hdata->dma_buffer_pool = BUF_SCSI_ALLOCED; + wh->dma_bounce_buffer = + kmalloc(wh->dma_bounce_len, GFP_KERNEL); + wh->dma_buffer_pool = BUF_SCSI_ALLOCED; } if (scsi_alloc_out_of_range || - !hdata->dma_bounce_buffer) { - hdata->dma_bounce_buffer = - amiga_chip_alloc(hdata->dma_bounce_len, + !wh->dma_bounce_buffer) { + wh->dma_bounce_buffer = + amiga_chip_alloc(wh->dma_bounce_len, "GVP II SCSI Bounce Buffer"); - if (!hdata->dma_bounce_buffer) { - hdata->dma_bounce_len = 0; + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; return 1; } - hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; + wh->dma_buffer_pool = BUF_CHIP_ALLOCED; } /* check if the address of the bounce buffer is OK */ - addr = virt_to_bus(hdata->dma_bounce_buffer); + addr = virt_to_bus(wh->dma_bounce_buffer); - if (addr & hdata->dma_xfer_mask) { + if (addr & wh->dma_xfer_mask) { /* fall back to Chip RAM if address out of range */ - if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) { - kfree(hdata->dma_bounce_buffer); + if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) { + kfree(wh->dma_bounce_buffer); 
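Before the dma_setup() conversion continues directly below, the bounce-buffer policy it preserves is worth restating: any payload whose bus address carries bits the board's DMA engine cannot drive is staged through a bounce buffer, tried first from kmalloc() and then from Amiga Chip RAM, with writes copied in up front and reads copied back in dma_stop(). A reduced sketch of just the staging decision (the context struct and function name are illustrative; the ALIGN(len, 512) mirrors the driver's (this_residual + 511) & ~0x1ff rounding):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

struct bounce_ctx {
	unsigned long dma_xfer_mask;	/* address bits the DMAC can't drive */
	void *bounce;
	size_t bounce_len;
};

static int stage_for_dma(struct bounce_ctx *c, void *buf,
			 unsigned long bus_addr, size_t len, int dir_in)
{
	if (!(bus_addr & c->dma_xfer_mask))
		return 0;			/* direct DMA is fine */

	c->bounce_len = ALIGN(len, 512);	/* rounded as the driver does */
	c->bounce = kmalloc(c->bounce_len, GFP_KERNEL);
	if (!c->bounce)
		return -ENOMEM;	/* the real driver then tries Chip RAM */
	if (!dir_in)
		memcpy(c->bounce, buf, len);	/* stage writes up front */
	return 0;
}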
scsi_alloc_out_of_range = 1; } else { - amiga_chip_free(hdata->dma_bounce_buffer); + amiga_chip_free(wh->dma_bounce_buffer); } - hdata->dma_bounce_buffer = - amiga_chip_alloc(hdata->dma_bounce_len, + wh->dma_bounce_buffer = + amiga_chip_alloc(wh->dma_bounce_len, "GVP II SCSI Bounce Buffer"); - if (!hdata->dma_bounce_buffer) { - hdata->dma_bounce_len = 0; + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; return 1; } - addr = virt_to_bus(hdata->dma_bounce_buffer); - hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; + addr = virt_to_bus(wh->dma_bounce_buffer); + wh->dma_buffer_pool = BUF_CHIP_ALLOCED; } if (!dir_in) { /* copy to bounce buffer for a write */ - memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, + memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, cmd->SCp.this_residual); } } @@ -116,11 +117,11 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) if (!dir_in) cntr |= GVP11_DMAC_DIR_WRITE; - hdata->dma_dir = dir_in; - DMA(cmd->device->host)->CNTR = cntr; + wh->dma_dir = dir_in; + regs->CNTR = cntr; /* setup DMA *physical* address */ - DMA(cmd->device->host)->ACR = addr; + regs->ACR = addr; if (dir_in) { /* invalidate any cache */ @@ -130,12 +131,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) cache_push(addr, cmd->SCp.this_residual); } - bank_mask = (~hdata->dma_xfer_mask >> 18) & 0x01c0; + bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0; if (bank_mask) - DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18); + regs->BANK = bank_mask & (addr >> 18); /* start DMA */ - DMA(cmd->device->host)->ST_DMA = 1; + regs->ST_DMA = 1; /* return success */ return 0; @@ -144,236 +145,53 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { - struct WD33C93_hostdata *hdata = shost_priv(instance); + struct gvp11_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct gvp11_scsiregs *regs = hdata->regs; /* stop DMA */ - DMA(instance)->SP_DMA = 1; + regs->SP_DMA = 1; /* remove write bit from CONTROL bits */ - DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; + regs->CNTR = GVP11_DMAC_INT_ENABLE; /* copy from a bounce buffer, if necessary */ - if (status && hdata->dma_bounce_buffer) { - if (hdata->dma_dir && SCpnt) - memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, + if (status && wh->dma_bounce_buffer) { + if (wh->dma_dir && SCpnt) + memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, SCpnt->SCp.this_residual); - if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) - kfree(hdata->dma_bounce_buffer); - else - amiga_chip_free(hdata->dma_bounce_buffer); - - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; - } -} - -#define CHECK_WD33C93 - -int __init gvp11_detect(struct scsi_host_template *tpnt) -{ - static unsigned char called = 0; - struct Scsi_Host *instance; - unsigned long address; - unsigned int epc; - struct zorro_dev *z = NULL; - unsigned int default_dma_xfer_mask; - struct WD33C93_hostdata *hdata; - wd33c93_regs regs; - int num_gvp11 = 0; -#ifdef CHECK_WD33C93 - volatile unsigned char *sasr_3393, *scmd_3393; - unsigned char save_sasr; - unsigned char q, qq; -#endif - - if (!MACH_IS_AMIGA || called) - return 0; - called = 1; - - tpnt->proc_name = "GVP11"; - tpnt->proc_info = &wd33c93_proc_info; - - while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { - /* - * This should (hopefully) be the correct way to identify - * all the different GVP SCSI controllers (except for the - * SERIES I though). 
- */ - - if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI || - z->id == ZORRO_PROD_GVP_SERIES_II) - default_dma_xfer_mask = ~0x00ffffff; - else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI || - z->id == ZORRO_PROD_GVP_A530_SCSI || - z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI) - default_dma_xfer_mask = ~0x01ffffff; - else if (z->id == ZORRO_PROD_GVP_A1291 || - z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1) - default_dma_xfer_mask = ~0x07ffffff; + if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) + kfree(wh->dma_bounce_buffer); else - continue; - - /* - * Rumors state that some GVP ram boards use the same product - * code as the SCSI controllers. Therefore if the board-size - * is not 64KB we asume it is a ram board and bail out. - */ - if (z->resource.end - z->resource.start != 0xffff) - continue; + amiga_chip_free(wh->dma_bounce_buffer); - address = z->resource.start; - if (!request_mem_region(address, 256, "wd33c93")) - continue; - -#ifdef CHECK_WD33C93 - - /* - * These darn GVP boards are a problem - it can be tough to tell - * whether or not they include a SCSI controller. This is the - * ultimate Yet-Another-GVP-Detection-Hack in that it actually - * probes for a WD33c93 chip: If we find one, it's extremely - * likely that this card supports SCSI, regardless of Product_ - * Code, Board_Size, etc. - */ - - /* Get pointers to the presumed register locations and save contents */ - - sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR); - scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD); - save_sasr = *sasr_3393; - - /* First test the AuxStatus Reg */ - - q = *sasr_3393; /* read it */ - if (q & 0x08) /* bit 3 should always be clear */ - goto release; - *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ - if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ - *sasr_3393 = save_sasr; /* Oops - restore this byte */ - goto release; - } - if (*sasr_3393 != q) { /* should still read the same */ - *sasr_3393 = save_sasr; /* Oops - restore this byte */ - goto release; - } - if (*scmd_3393 != q) /* and so should the image at 0x1f */ - goto release; - - /* - * Ok, we probably have a wd33c93, but let's check a few other places - * for good measure. Make sure that this works for both 'A and 'B - * chip versions. 
- */ - - *sasr_3393 = WD_SCSI_STATUS; - q = *scmd_3393; - *sasr_3393 = WD_SCSI_STATUS; - *scmd_3393 = ~q; - *sasr_3393 = WD_SCSI_STATUS; - qq = *scmd_3393; - *sasr_3393 = WD_SCSI_STATUS; - *scmd_3393 = q; - if (qq != q) /* should be read only */ - goto release; - *sasr_3393 = 0x1e; /* this register is unimplemented */ - q = *scmd_3393; - *sasr_3393 = 0x1e; - *scmd_3393 = ~q; - *sasr_3393 = 0x1e; - qq = *scmd_3393; - *sasr_3393 = 0x1e; - *scmd_3393 = q; - if (qq != q || qq != 0xff) /* should be read only, all 1's */ - goto release; - *sasr_3393 = WD_TIMEOUT_PERIOD; - q = *scmd_3393; - *sasr_3393 = WD_TIMEOUT_PERIOD; - *scmd_3393 = ~q; - *sasr_3393 = WD_TIMEOUT_PERIOD; - qq = *scmd_3393; - *sasr_3393 = WD_TIMEOUT_PERIOD; - *scmd_3393 = q; - if (qq != (~q & 0xff)) /* should be read/write */ - goto release; -#endif - - instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); - if (instance == NULL) - goto release; - instance->base = ZTWO_VADDR(address); - instance->irq = IRQ_AMIGA_PORTS; - instance->unique_id = z->slotaddr; - - hdata = shost_priv(instance); - if (gvp11_xfer_mask) - hdata->dma_xfer_mask = gvp11_xfer_mask; - else - hdata->dma_xfer_mask = default_dma_xfer_mask; - - DMA(instance)->secret2 = 1; - DMA(instance)->secret1 = 0; - DMA(instance)->secret3 = 15; - while (DMA(instance)->CNTR & GVP11_DMAC_BUSY) - ; - DMA(instance)->CNTR = 0; - - DMA(instance)->BANK = 0; - - epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); - - /* - * Check for 14MHz SCSI clock - */ - regs.SASR = &(DMA(instance)->SASR); - regs.SCMD = &(DMA(instance)->SCMD); - hdata->no_sync = 0xff; - hdata->fast = 0; - hdata->dma_mode = CTRL_DMA; - wd33c93_init(instance, regs, dma_setup, dma_stop, - (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 - : WD33C93_FS_12_15); - - if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, - "GVP11 SCSI", instance)) - goto unregister; - DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; - num_gvp11++; - continue; - -unregister: - scsi_unregister(instance); -release: - release_mem_region(address, 256); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; } - - return num_gvp11; } static int gvp11_bus_reset(struct scsi_cmnd *cmd) { + struct Scsi_Host *instance = cmd->device->host; + /* FIXME perform bus-specific reset */ /* FIXME 2: shouldn't we no-op this function (return FAILED), and fall back to host reset function, wd33c93_host_reset ? */ - spin_lock_irq(cmd->device->host->host_lock); + spin_lock_irq(instance->host_lock); wd33c93_host_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); + spin_unlock_irq(instance->host_lock); return SUCCESS; } - -#define HOSTS_C - -#include "gvp11.h" - -static struct scsi_host_template driver_template = { - .proc_name = "GVP11", +static struct scsi_host_template gvp11_scsi_template = { + .module = THIS_MODULE, .name = "GVP Series II SCSI", - .detect = gvp11_detect, - .release = gvp11_release, + .proc_info = wd33c93_proc_info, + .proc_name = "GVP11", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_bus_reset_handler = gvp11_bus_reset, @@ -385,17 +203,230 @@ static struct scsi_host_template driver_template = { .use_clustering = DISABLE_CLUSTERING }; +static int __devinit check_wd33c93(struct gvp11_scsiregs *regs) +{ +#ifdef CHECK_WD33C93 + volatile unsigned char *sasr_3393, *scmd_3393; + unsigned char save_sasr; + unsigned char q, qq; -#include "scsi_module.c" + /* + * These darn GVP boards are a problem - it can be tough to tell + * whether or not they include a SCSI controller. 
This is the + * ultimate Yet-Another-GVP-Detection-Hack in that it actually + * probes for a WD33c93 chip: If we find one, it's extremely + * likely that this card supports SCSI, regardless of Product_ + * Code, Board_Size, etc. + */ + + /* Get pointers to the presumed register locations and save contents */ + + sasr_3393 = ®s->SASR; + scmd_3393 = ®s->SCMD; + save_sasr = *sasr_3393; + + /* First test the AuxStatus Reg */ + + q = *sasr_3393; /* read it */ + if (q & 0x08) /* bit 3 should always be clear */ + return -ENODEV; + *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ + if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ + *sasr_3393 = save_sasr; /* Oops - restore this byte */ + return -ENODEV; + } + if (*sasr_3393 != q) { /* should still read the same */ + *sasr_3393 = save_sasr; /* Oops - restore this byte */ + return -ENODEV; + } + if (*scmd_3393 != q) /* and so should the image at 0x1f */ + return -ENODEV; + + /* + * Ok, we probably have a wd33c93, but let's check a few other places + * for good measure. Make sure that this works for both 'A and 'B + * chip versions. + */ + + *sasr_3393 = WD_SCSI_STATUS; + q = *scmd_3393; + *sasr_3393 = WD_SCSI_STATUS; + *scmd_3393 = ~q; + *sasr_3393 = WD_SCSI_STATUS; + qq = *scmd_3393; + *sasr_3393 = WD_SCSI_STATUS; + *scmd_3393 = q; + if (qq != q) /* should be read only */ + return -ENODEV; + *sasr_3393 = 0x1e; /* this register is unimplemented */ + q = *scmd_3393; + *sasr_3393 = 0x1e; + *scmd_3393 = ~q; + *sasr_3393 = 0x1e; + qq = *scmd_3393; + *sasr_3393 = 0x1e; + *scmd_3393 = q; + if (qq != q || qq != 0xff) /* should be read only, all 1's */ + return -ENODEV; + *sasr_3393 = WD_TIMEOUT_PERIOD; + q = *scmd_3393; + *sasr_3393 = WD_TIMEOUT_PERIOD; + *scmd_3393 = ~q; + *sasr_3393 = WD_TIMEOUT_PERIOD; + qq = *scmd_3393; + *sasr_3393 = WD_TIMEOUT_PERIOD; + *scmd_3393 = q; + if (qq != (~q & 0xff)) /* should be read/write */ + return -ENODEV; +#endif /* CHECK_WD33C93 */ -int gvp11_release(struct Scsi_Host *instance) + return 0; +} + +static int __devinit gvp11_probe(struct zorro_dev *z, + const struct zorro_device_id *ent) { -#ifdef MODULE - DMA(instance)->CNTR = 0; - release_mem_region(ZTWO_PADDR(instance->base), 256); + struct Scsi_Host *instance; + unsigned long address; + int error; + unsigned int epc; + unsigned int default_dma_xfer_mask; + struct gvp11_hostdata *hdata; + struct gvp11_scsiregs *regs; + wd33c93_regs wdregs; + + default_dma_xfer_mask = ent->driver_data; + + /* + * Rumors state that some GVP ram boards use the same product + * code as the SCSI controllers. Therefore if the board-size + * is not 64KB we asume it is a ram board and bail out. 
+ */ + if (zorro_resource_len(z) != 0x10000) + return -ENODEV; + + address = z->resource.start; + if (!request_mem_region(address, 256, "wd33c93")) + return -EBUSY; + + regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address)); + + error = check_wd33c93(regs); + if (error) + goto fail_check_or_alloc; + + instance = scsi_host_alloc(&gvp11_scsi_template, + sizeof(struct gvp11_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_check_or_alloc; + } + + instance->irq = IRQ_AMIGA_PORTS; + instance->unique_id = z->slotaddr; + + regs->secret2 = 1; + regs->secret1 = 0; + regs->secret3 = 15; + while (regs->CNTR & GVP11_DMAC_BUSY) + ; + regs->CNTR = 0; + regs->BANK = 0; + + wdregs.SASR = ®s->SASR; + wdregs.SCMD = ®s->SCMD; + + hdata = shost_priv(instance); + if (gvp11_xfer_mask) + hdata->wh.dma_xfer_mask = gvp11_xfer_mask; + else + hdata->wh.dma_xfer_mask = default_dma_xfer_mask; + + hdata->wh.no_sync = 0xff; + hdata->wh.fast = 0; + hdata->wh.dma_mode = CTRL_DMA; + hdata->regs = regs; + + /* + * Check for 14MHz SCSI clock + */ + epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); + wd33c93_init(instance, wdregs, dma_setup, dma_stop, + (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 + : WD33C93_FS_12_15); + + error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, + "GVP11 SCSI", instance); + if (error) + goto fail_irq; + + regs->CNTR = GVP11_DMAC_INT_ENABLE; + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + zorro_set_drvdata(z, instance); + scsi_scan_host(instance); + return 0; + +fail_host: free_irq(IRQ_AMIGA_PORTS, instance); -#endif - return 1; +fail_irq: + scsi_host_put(instance); +fail_check_or_alloc: + release_mem_region(address, 256); + return error; +} + +static void __devexit gvp11_remove(struct zorro_dev *z) +{ + struct Scsi_Host *instance = zorro_get_drvdata(z); + struct gvp11_hostdata *hdata = shost_priv(instance); + + hdata->regs->CNTR = 0; + scsi_remove_host(instance); + free_irq(IRQ_AMIGA_PORTS, instance); + scsi_host_put(instance); + release_mem_region(z->resource.start, 256); +} + + /* + * This should (hopefully) be the correct way to identify + * all the different GVP SCSI controllers (except for the + * SERIES I though). 
+ */ + +static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = { + { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff }, + { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff }, + { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff }, + { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff }, + { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff }, + { ZORRO_PROD_GVP_A1291, ~0x07ffffff }, + { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl); + +static struct zorro_driver gvp11_driver = { + .name = "gvp11", + .id_table = gvp11_zorro_tbl, + .probe = gvp11_probe, + .remove = __devexit_p(gvp11_remove), +}; + +static int __init gvp11_init(void) +{ + return zorro_register_driver(&gvp11_driver); +} +module_init(gvp11_init); + +static void __exit gvp11_exit(void) +{ + zorro_unregister_driver(&gvp11_driver); } +module_exit(gvp11_exit); +MODULE_DESCRIPTION("GVP Series II SCSI"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h index e2efdf9601ef..852913cde5dd 100644 --- a/drivers/scsi/gvp11.h +++ b/drivers/scsi/gvp11.h @@ -11,9 +11,6 @@ #include <linux/types.h> -int gvp11_detect(struct scsi_host_template *); -int gvp11_release(struct Scsi_Host *); - #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 #endif @@ -22,15 +19,13 @@ int gvp11_release(struct Scsi_Host *); #define CAN_QUEUE 16 #endif -#ifndef HOSTS_C - /* * if the transfer address ANDed with this results in a non-zero * result, then we can't use DMA. */ #define GVP11_XFER_MASK (0xff000001) -typedef struct { +struct gvp11_scsiregs { unsigned char pad1[64]; volatile unsigned short CNTR; unsigned char pad2[31]; @@ -46,7 +41,7 @@ typedef struct { volatile unsigned short SP_DMA; volatile unsigned short secret2; /* store 1 here */ volatile unsigned short secret3; /* store 15 here */ -} gvp11_scsiregs; +}; /* bits in CNTR */ #define GVP11_DMAC_BUSY (1<<0) @@ -54,6 +49,4 @@ typedef struct { #define GVP11_DMAC_INT_ENABLE (1<<3) #define GVP11_DMAC_DIR_WRITE (1<<4) -#endif /* else def HOSTS_C */ - #endif /* GVP11_H */ diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 3eb2b7b3d8b0..fef49521cbc3 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1157,7 +1157,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost) static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) { struct ibmvfc_npiv_login *login_info = &vhost->login_info; - struct device_node *of_node = vhost->dev->archdata.of_node; + struct device_node *of_node = vhost->dev->of_node; const char *location; memset(login_info, 0, sizeof(*login_info)); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 88bad0e81bdd..aad35cc41e49 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -932,7 +932,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) struct viosrp_capabilities *req; struct srp_event_struct *evt_struct; unsigned long flags; - struct device_node *of_node = hostdata->dev->archdata.of_node; + struct device_node *of_node = hostdata->dev->of_node; const char *location; evt_struct = get_event_struct(&hostdata->pool); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 6a6661c35b2f..82ea4a8226b0 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -567,7 +567,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioasa *ioasa = 
&ipr_cmd->ioasa; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; dma_addr_t dma_addr = ipr_cmd->dma_addr; memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); @@ -576,19 +577,19 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) ioarcb->ioadl_len = 0; ioarcb->read_ioadl_len = 0; - if (ipr_cmd->ioa_cfg->sis64) + if (ipr_cmd->ioa_cfg->sis64) { ioarcb->u.sis64_addr_data.data_ioadl_addr = cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); - else { + ioasa64->u.gata.status = 0; + } else { ioarcb->write_ioadl_addr = cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + ioasa->u.gata.status = 0; } - ioasa->ioasc = 0; - ioasa->residual_data_len = 0; - ioasa->u.gata.status = 0; - + ioasa->hdr.ioasc = 0; + ioasa->hdr.residual_data_len = 0; ipr_cmd->scsi_cmd = NULL; ipr_cmd->qc = NULL; ipr_cmd->sense_buffer[0] = 0; @@ -768,8 +769,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { list_del(&ipr_cmd->queue); - ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); - ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID); + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); + ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID); if (ipr_cmd->scsi_cmd) ipr_cmd->done = ipr_scsi_eh_done; @@ -1040,7 +1041,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, proto = cfgtew->u.cfgte64->proto; res->res_flags = cfgtew->u.cfgte64->res_flags; res->qmodel = IPR_QUEUEING_MODEL64(res); - res->type = cfgtew->u.cfgte64->res_type & 0x0f; + res->type = cfgtew->u.cfgte64->res_type; memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, sizeof(res->res_path)); @@ -1319,7 +1320,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); list_del(&hostrcb->queue); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); @@ -2354,7 +2355,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); u32 fd_ioasc; if (ioa_cfg->sis64) @@ -4509,11 +4510,16 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, } ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); - ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); - if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) - memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata, - sizeof(struct ipr_ioasa_gata)); + if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { + if (ipr_cmd->ioa_cfg->sis64) + memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, + sizeof(struct ipr_ioasa_gata)); + else + memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, + sizeof(struct ipr_ioasa_gata)); + } LEAVE; return (IPR_IOASC_SENSE_KEY(ioasc) ? 
-EIO : 0); @@ -4768,7 +4774,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", scsi_cmd->cmnd[0]); ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); - ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); /* * If the abort task timed out and we sent a bus reset, we will get @@ -4812,15 +4818,39 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) /** * ipr_handle_other_interrupt - Handle "other" interrupts * @ioa_cfg: ioa config struct - * @int_reg: interrupt register * * Return value: * IRQ_NONE / IRQ_HANDLED **/ -static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, - volatile u32 int_reg) +static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg) { irqreturn_t rc = IRQ_HANDLED; + volatile u32 int_reg, int_mask_reg; + + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; + + /* If an interrupt on the adapter did not occur, ignore it. + * Or in the case of SIS 64, check for a stage change interrupt. + */ + if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { + if (ioa_cfg->sis64) { + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { + + /* clear stage change */ + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + list_del(&ioa_cfg->reset_cmd->queue); + del_timer(&ioa_cfg->reset_cmd->timer); + ipr_reset_ioa_job(ioa_cfg->reset_cmd); + return IRQ_HANDLED; + } + } + + return IRQ_NONE; + } if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { /* Mask the interrupt */ @@ -4881,7 +4911,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; unsigned long lock_flags = 0; - volatile u32 int_reg, int_mask_reg; + volatile u32 int_reg; u32 ioasc; u16 cmd_index; int num_hrrq = 0; @@ -4896,33 +4926,6 @@ static irqreturn_t ipr_isr(int irq, void *devp) return IRQ_NONE; } - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; - - /* If an interrupt on the adapter did not occur, ignore it. - * Or in the case of SIS 64, check for a stage change interrupt. 
- */ - if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { - if (ioa_cfg->sis64) { - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; - if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { - - /* clear stage change */ - writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; - list_del(&ioa_cfg->reset_cmd->queue); - del_timer(&ioa_cfg->reset_cmd->timer); - ipr_reset_ioa_job(ioa_cfg->reset_cmd); - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return IRQ_HANDLED; - } - } - - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return IRQ_NONE; - } - while (1) { ipr_cmd = NULL; @@ -4940,7 +4943,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; - ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); @@ -4962,7 +4965,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) /* Clear the PCI interrupt */ do { writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); } while (int_reg & IPR_PCII_HRRQ_UPDATED && num_hrrq++ < IPR_MAX_HRRQ_RETRIES); @@ -4977,7 +4980,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) } if (unlikely(rc == IRQ_NONE)) - rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); + rc = ipr_handle_other_interrupt(ioa_cfg); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return rc; @@ -5014,6 +5017,10 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, ipr_cmd->dma_use_sg = nseg; + ioarcb->data_transfer_length = cpu_to_be32(length); + ioarcb->ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { ioadl_flags = IPR_IOADL_FLAGS_WRITE; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; @@ -5135,7 +5142,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_resource_entry *res = scsi_cmd->device->hostdata; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { scsi_cmd->result |= (DID_ERROR << 16); @@ -5166,7 +5173,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; dma_addr_t dma_addr = ipr_cmd->dma_addr; memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); @@ -5174,8 +5181,8 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) ioarcb->read_data_transfer_length = 0; ioarcb->ioadl_len = 0; ioarcb->read_ioadl_len = 0; - ioasa->ioasc = 0; - ioasa->residual_data_len = 0; + ioasa->hdr.ioasc = 0; + ioasa->hdr.residual_data_len = 0; if (ipr_cmd->ioa_cfg->sis64) ioarcb->u.sis64_addr_data.data_ioadl_addr = @@ -5200,7 +5207,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) { struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if 
(IPR_IOASC_SENSE_KEY(ioasc) > 0) { ipr_erp_done(ipr_cmd); @@ -5277,12 +5284,12 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, int i; u16 data_len; u32 ioasc, fd_ioasc; - struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; __be32 *ioasa_data = (__be32 *)ioasa; int error_index; - ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; - fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK; + ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; + fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; if (0 == ioasc) return; @@ -5297,7 +5304,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { /* Don't log an error if the IOA already logged one */ - if (ioasa->ilid != 0) + if (ioasa->hdr.ilid != 0) return; if (!ipr_is_gscsi(res)) @@ -5309,10 +5316,11 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); - if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) + data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); + if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) + data_len = sizeof(struct ipr_ioasa64); + else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) data_len = sizeof(struct ipr_ioasa); - else - data_len = be16_to_cpu(ioasa->ret_stat_len); ipr_err("IOASA Dump:\n"); @@ -5338,8 +5346,8 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) u32 failing_lba; u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; - struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; - u32 ioasc = be32_to_cpu(ioasa->ioasc); + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); @@ -5382,7 +5390,7 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) /* Illegal request */ if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && - (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) { + (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { sense_buf[7] = 10; /* additional length */ /* IOARCB was in error */ @@ -5393,10 +5401,10 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) sense_buf[16] = ((IPR_FIELD_POINTER_MASK & - be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff; + be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; sense_buf[17] = (IPR_FIELD_POINTER_MASK & - be32_to_cpu(ioasa->ioasc_specific)) & 0xff; + be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; } else { if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { if (ipr_is_vset_device(res)) @@ -5428,14 +5436,20 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) **/ static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) { - struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; - if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) + if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) return 0; - memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, - min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), - SCSI_SENSE_BUFFERSIZE)); + if (ipr_cmd->ioa_cfg->sis64) + memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, + min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), + SCSI_SENSE_BUFFERSIZE)); + else + memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, + min_t(u16, 
be16_to_cpu(ioasa->auto_sense.auto_sense_len), + SCSI_SENSE_BUFFERSIZE)); return 1; } @@ -5455,7 +5469,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, { struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_resource_entry *res = scsi_cmd->device->hostdata; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; if (!res) { @@ -5547,9 +5561,9 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); - scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len)); + scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { scsi_dma_unmap(ipr_cmd->scsi_cmd); @@ -5839,19 +5853,23 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) struct ata_queued_cmd *qc = ipr_cmd->qc; struct ipr_sata_port *sata_port = qc->ap->private_data; struct ipr_resource_entry *res = sata_port->res; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); - memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata, - sizeof(struct ipr_ioasa_gata)); + if (ipr_cmd->ioa_cfg->sis64) + memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, + sizeof(struct ipr_ioasa_gata)); + else + memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, + sizeof(struct ipr_ioasa_gata)); ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); - if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) + if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) - qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); + qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); else - qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status); + qc->err_mask |= ac_err_mask(sata_port->ioasa.status); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); ata_qc_complete(qc); } @@ -6520,7 +6538,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); dev_err(&ioa_cfg->pdev->dev, "0x%02X failed with IOASC: 0x%08X\n", @@ -6544,7 +6562,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { ipr_cmd->job_step = ipr_set_supported_devs; @@ -6634,7 +6652,7 @@ static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) **/ static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) { - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; @@ -6706,7 +6724,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) list_move_tail(&res->queue, &old_res); if (ioa_cfg->sis64) - entries = ioa_cfg->u.cfg_table64->hdr64.num_entries; + entries = 
be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); else entries = ioa_cfg->u.cfg_table->hdr.num_entries; @@ -6792,6 +6810,7 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; + ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; @@ -7122,7 +7141,9 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); /* sanity check the stage_time value */ - if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) + if (stage_time == 0) + stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; + else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; @@ -7165,13 +7186,14 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; volatile u32 int_reg; + volatile u64 maskval; ENTER; ipr_cmd->job_step = ipr_ioafp_identify_hrrq; ipr_init_ioa_mem(ioa_cfg); ioa_cfg->allow_interrupts = 1; - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), @@ -7183,9 +7205,12 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) /* Enable destructive diagnostics on IOA */ writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); - writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); - if (ioa_cfg->sis64) - writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg); + if (ioa_cfg->sis64) { + maskval = IPR_PCII_IPL_STAGE_CHANGE; + maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; + writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); + } else + writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); @@ -7332,12 +7357,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) rc = pci_restore_state(ioa_cfg->pdev); if (rc != PCIBIOS_SUCCESSFUL) { - ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); return IPR_RC_JOB_CONTINUE; } if (ipr_set_pcix_cmd_reg(ioa_cfg)) { - ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); return IPR_RC_JOB_CONTINUE; } @@ -7364,7 +7389,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) } } - ENTER; + LEAVE; return IPR_RC_JOB_CONTINUE; } @@ -7406,7 +7431,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) if (rc != PCIBIOS_SUCCESSFUL) { pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); - ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); rc = IPR_RC_JOB_CONTINUE; } else { ipr_cmd->job_step = ipr_reset_bist_done; @@ -7665,7 +7690,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; do { - ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioa_cfg->reset_cmd != ipr_cmd) { /* @@ -8048,13 +8073,13 @@ static int __devinit 
ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) ioarcb->u.sis64_addr_data.data_ioadl_addr = cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = - cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa)); + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)); } else { ioarcb->write_ioadl_addr = cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; ioarcb->ioasa_host_pci_addr = - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa)); } ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); ipr_cmd->cmd_index = i; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 4c267b5e0b96..9ecd2259eb39 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -244,6 +244,7 @@ #define IPR_RUNTIME_RESET 0x40000000 #define IPR_IPL_INIT_MIN_STAGE_TIME 5 +#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15 #define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 #define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 #define IPR_IPL_INIT_STAGE_MASK 0xff000000 @@ -613,7 +614,7 @@ struct ipr_auto_sense { __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)]; }; -struct ipr_ioasa { +struct ipr_ioasa_hdr { __be32 ioasc; #define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) #define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) @@ -645,6 +646,25 @@ struct ipr_ioasa { #define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) #define IPR_FIELD_POINTER_MASK 0x0000ffff +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa { + struct ipr_ioasa_hdr hdr; + + union { + struct ipr_ioasa_vset vset; + struct ipr_ioasa_af_dasd dasd; + struct ipr_ioasa_gpdd gpdd; + struct ipr_ioasa_gata gata; + } u; + + struct ipr_auto_sense auto_sense; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa64 { + struct ipr_ioasa_hdr hdr; + u8 fd_res_path[8]; + union { struct ipr_ioasa_vset vset; struct ipr_ioasa_af_dasd dasd; @@ -804,7 +824,7 @@ struct ipr_hostrcb_array_data_entry_enhanced { }__attribute__((packed, aligned (4))); struct ipr_hostrcb_type_ff_error { - __be32 ioa_data[502]; + __be32 ioa_data[758]; }__attribute__((packed, aligned (4))); struct ipr_hostrcb_type_01_error { @@ -1181,7 +1201,7 @@ struct ipr_resource_entry { u8 flags; __be16 res_flags; - __be32 type; + u8 type; u8 qmodel; struct ipr_std_inq_data std_inq_data; @@ -1464,7 +1484,10 @@ struct ipr_cmnd { struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; struct ipr_ata64_ioadl ata_ioadl; } i; - struct ipr_ioasa ioasa; + union { + struct ipr_ioasa ioasa; + struct ipr_ioasa64 ioasa64; + } s; struct list_head queue; struct scsi_cmnd *scsi_cmd; struct ata_queued_cmd *qc; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index bf55d3057413..fec47de72535 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -601,10 +601,8 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); - if (sk_sleep(sock->sk)) { - sock->sk->sk_err = EIO; - wake_up_interruptible(sk_sleep(sock->sk)); - } + sock->sk->sk_err = EIO; + wake_up_interruptible(sk_sleep(sock->sk)); iscsi_conn_stop(cls_conn, flag); iscsi_sw_tcp_release_conn(conn); diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 4bf7edca9e69..0b6e3228610a 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -91,12 +91,15 @@ static struct proc_dir_entry *mega_proc_dir_entry; 
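The ipr changes above hinge on one layout decision: every field shared by the legacy 32-bit and the new sis64 status formats lives in a common header at offset zero (struct ipr_ioasa_hdr), and the two full layouts overlay each other in a union, so a single DMA buffer and one set of header accessors serve both adapter generations. A minimal standalone sketch of that pattern, using illustrative names rather than the driver's real structures:

#include <stdint.h>

/* Fields common to both hardware status formats, always at offset 0. */
struct status_hdr {
        uint32_t ioasc;
        uint16_t ret_stat_len;
};

struct status32 {                       /* legacy 32-bit layout */
        struct status_hdr hdr;
        uint8_t data[16];
};

struct status64 {                       /* 64-bit layout: extra routing bytes */
        struct status_hdr hdr;
        uint8_t res_path[8];
        uint8_t data[16];
};

struct cmd {
        int is64;                       /* which format the adapter writes */
        union {                         /* one buffer, two possible layouts */
                struct status32 s32;
                struct status64 s64;
        } s;
};

/* Header fields can be read without knowing which format arrived. */
static uint32_t cmd_ioasc(const struct cmd *c)
{
        return c->s.s32.hdr.ioasc;      /* hdr is at offset 0 in both members */
}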
/* For controller re-ordering */ static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; +static long +megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); + /* * The File Operations structure for the serial/ioctl interface of the driver */ static const struct file_operations megadev_fops = { .owner = THIS_MODULE, - .ioctl = megadev_ioctl, + .unlocked_ioctl = megadev_unlocked_ioctl, .open = megadev_open, }; @@ -3302,8 +3305,7 @@ megadev_open (struct inode *inode, struct file *filep) * controller. */ static int -megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, - unsigned long arg) +megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { adapter_t *adapter; nitioctl_t uioc; @@ -3694,6 +3696,18 @@ freemem_and_return: return 0; } +static long +megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = megadev_ioctl(filep, cmd, arg); + unlock_kernel(); + + return ret; +} + /** * mega_m_to_n() * @arg - user address diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index d310f49d077e..2b4a048cadf1 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h @@ -1013,8 +1013,7 @@ static void mega_8_to_40ld (mraid_inquiry *inquiry, mega_inquiry3 *enquiry3, mega_product_info *); static int megadev_open (struct inode *, struct file *); -static int megadev_ioctl (struct inode *, struct file *, unsigned int, - unsigned long); +static int megadev_ioctl (struct file *, unsigned int, unsigned long); static int mega_m_to_n(void __user *, nitioctl_t *); static int mega_n_to_m(void __user *, megacmd_t *); diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 36e0b7d05c1d..41f82f76d884 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -22,7 +22,7 @@ // Entry points for char node driver static int mraid_mm_open(struct inode *, struct file *); -static int mraid_mm_ioctl(struct inode *, struct file *, uint, unsigned long); +static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long); // routines to convert to and from the old format @@ -70,7 +70,7 @@ static wait_queue_head_t wait_q; static const struct file_operations lsi_fops = { .open = mraid_mm_open, - .ioctl = mraid_mm_ioctl, + .unlocked_ioctl = mraid_mm_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mraid_mm_compat_ioctl, #endif @@ -110,8 +110,7 @@ mraid_mm_open(struct inode *inode, struct file *filep) * @arg : user ioctl packet */ static int -mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, - unsigned long arg) +mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { uioc_t *kioc; char signature[EXT_IOCTL_SIGN_SZ] = {0}; @@ -218,6 +217,19 @@ mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, return rval; } +static long +mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int err; + + /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */ + lock_kernel(); + err = mraid_mm_ioctl(filep, cmd, arg); + unlock_kernel(); + + return err; +} /** * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet @@ -1225,7 +1237,7 @@ mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd, { int err; - err = mraid_mm_ioctl(NULL, filep, cmd, arg); + err = mraid_mm_ioctl(filep, cmd, arg); return err; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 
b830d61684dd..0ec1ed389c20 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -3757,7 +3757,7 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) if (ioc->config_cmds.status & MPT2_CMD_PENDING) { ioc->config_cmds.status |= MPT2_CMD_RESET; mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); - ioc->config_cmds.smid = USHORT_MAX; + ioc->config_cmds.smid = USHRT_MAX; complete(&ioc->config_cmds.done); } break; diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index e762dd3e2fcb..c65442982d7b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c @@ -258,7 +258,7 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, #ifdef CONFIG_SCSI_MPT2SAS_LOGGING _config_display_some_debug(ioc, smid, "config_done", mpi_reply); #endif - ioc->config_cmds.smid = USHORT_MAX; + ioc->config_cmds.smid = USHRT_MAX; complete(&ioc->config_cmds.done); return 1; } diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c index 716d1785cda7..c29d0dbb9660 100644 --- a/drivers/scsi/mvme147.c +++ b/drivers/scsi/mvme147.c @@ -16,12 +16,12 @@ #include <linux/stat.h> -static struct Scsi_Host *mvme147_host = NULL; - -static irqreturn_t mvme147_intr(int irq, void *dummy) +static irqreturn_t mvme147_intr(int irq, void *data) { + struct Scsi_Host *instance = data; + if (irq == MVME147_IRQ_SCSI_PORT) - wd33c93_intr(mvme147_host); + wd33c93_intr(instance); else m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ return IRQ_HANDLED; @@ -29,7 +29,8 @@ static irqreturn_t mvme147_intr(int irq, void *dummy) static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { - struct WD33C93_hostdata *hdata = shost_priv(mvme147_host); + struct Scsi_Host *instance = cmd->device->host; + struct WD33C93_hostdata *hdata = shost_priv(instance); unsigned char flags = 0x01; unsigned long addr = virt_to_bus(cmd->SCp.ptr); @@ -66,6 +67,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int mvme147_detect(struct scsi_host_template *tpnt) { static unsigned char called = 0; + struct Scsi_Host *instance; wd33c93_regs regs; struct WD33C93_hostdata *hdata; @@ -76,25 +78,25 @@ int mvme147_detect(struct scsi_host_template *tpnt) tpnt->proc_name = "MVME147"; tpnt->proc_info = &wd33c93_proc_info; - mvme147_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); - if (!mvme147_host) + instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); + if (!instance) goto err_out; - mvme147_host->base = 0xfffe4000; - mvme147_host->irq = MVME147_IRQ_SCSI_PORT; + instance->base = 0xfffe4000; + instance->irq = MVME147_IRQ_SCSI_PORT; regs.SASR = (volatile unsigned char *)0xfffe4000; regs.SCMD = (volatile unsigned char *)0xfffe4001; - hdata = shost_priv(mvme147_host); + hdata = shost_priv(instance); hdata->no_sync = 0xff; hdata->fast = 0; hdata->dma_mode = CTRL_DMA; - wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10); + wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10); if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, - "MVME147 SCSI PORT", mvme147_intr)) + "MVME147 SCSI PORT", instance)) goto err_unregister; if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, - "MVME147 SCSI DMA", mvme147_intr)) + "MVME147 SCSI DMA", instance)) goto err_free_irq; #if 0 /* Disabled; causes problems booting */ m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ @@ -113,7 +115,7 @@ int mvme147_detect(struct scsi_host_template *tpnt) 
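The megaraid conversions above are one instance of the era's standard .ioctl to .unlocked_ioctl migration: the old hook ran with the Big Kernel Lock held implicitly, so a thin wrapper now takes lock_kernel() explicitly, preserving the old serialization until each handler is audited for finer-grained locking. A sketch of the pattern against kernel APIs of this vintage; the foo_* names are placeholders, not code from these drivers:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* Former .ioctl handler, minus the now-unused inode argument. */
static int foo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        return 0;       /* device-specific command dispatch would go here */
}

/* .unlocked_ioctl is called without the BKL; take it ourselves. */
static long foo_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        long ret;

        lock_kernel();
        ret = foo_ioctl(filp, cmd, arg);
        unlock_kernel();

        return ret;
}

static const struct file_operations foo_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = foo_unlocked_ioctl,
};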
err_free_irq: free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr); err_unregister: - scsi_unregister(mvme147_host); + scsi_unregister(instance); err_out: return 0; } @@ -132,9 +134,6 @@ static int mvme147_bus_reset(struct scsi_cmnd *cmd) return SUCCESS; } -#define HOSTS_C - -#include "mvme147.h" static struct scsi_host_template driver_template = { .proc_name = "MVME147", diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index b219118f8bd6..d64b7178fa08 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -3587,7 +3587,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name if (i == (-ENOSPC)) { transfer = STp->buffer->writing; /* FIXME -- check this logic */ if (transfer <= do_count) { - filp->f_pos += do_count - transfer; + *ppos += do_count - transfer; count -= do_count - transfer; if (STps->drv_block >= 0) { STps->drv_block += (do_count - transfer) / STp->block_size; @@ -3625,7 +3625,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name goto out; } - filp->f_pos += do_count; + *ppos += do_count; b_point += do_count; count -= do_count; if (STps->drv_block >= 0) { @@ -3647,7 +3647,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name if (STps->drv_block >= 0) { STps->drv_block += blks; } - filp->f_pos += count; + *ppos += count; count = 0; } @@ -3823,7 +3823,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo } STp->logical_blk_num += transfer / STp->block_size; STps->drv_block += transfer / STp->block_size; - filp->f_pos += transfer; + *ppos += transfer; buf += transfer; total += transfer; } @@ -4932,7 +4932,7 @@ static int os_scsi_tape_close(struct inode * inode, struct file * filp) /* The ioctl command */ -static int osst_ioctl(struct inode * inode,struct file * file, +static long osst_ioctl(struct file * file, unsigned int cmd_in, unsigned long arg) { int i, cmd_nr, cmd_type, blk, retval = 0; @@ -4943,8 +4943,11 @@ static int osst_ioctl(struct inode * inode,struct file * file, char * name = tape_name(STp); void __user * p = (void __user *)arg; - if (mutex_lock_interruptible(&STp->lock)) + lock_kernel(); + if (mutex_lock_interruptible(&STp->lock)) { + unlock_kernel(); return -ERESTARTSYS; + } #if DEBUG if (debugging && !STp->in_use) { @@ -5256,12 +5259,15 @@ static int osst_ioctl(struct inode * inode,struct file * file, mutex_unlock(&STp->lock); - return scsi_ioctl(STp->device, cmd_in, p); + retval = scsi_ioctl(STp->device, cmd_in, p); + unlock_kernel(); + return retval; out: if (SRpnt) osst_release_request(SRpnt); mutex_unlock(&STp->lock); + unlock_kernel(); return retval; } @@ -5613,13 +5619,14 @@ static const struct file_operations osst_fops = { .owner = THIS_MODULE, .read = osst_read, .write = osst_write, - .ioctl = osst_ioctl, + .unlocked_ioctl = osst_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = osst_compat_ioctl, #endif .open = os_scsi_tape_open, .flush = os_scsi_tape_flush, .release = os_scsi_tape_close, + .llseek = noop_llseek, }; static int osst_supports(struct scsi_device * SDp) diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index aa406497eebc..ca5c15c779cf 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -755,7 +755,7 @@ static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti) struct of_device *op = qpti->op; struct device_node *dp; - dp = op->node; + dp = op->dev.of_node; qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1); if (qpti->scsi_id == -1) @@ -776,8 +776,8 @@ static void 
qpti_get_bursts(struct qlogicpti *qpti) struct of_device *op = qpti->op; u8 bursts, bmask; - bursts = of_getintprop_default(op->node, "burst-sizes", 0xff); - bmask = of_getintprop_default(op->node->parent, "burst-sizes", 0xff); + bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff); + bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff); if (bmask != 0xff) bursts &= bmask; if (bursts == 0xff || @@ -1293,7 +1293,7 @@ static struct scsi_host_template qpti_template = { static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match) { struct scsi_host_template *tpnt = match->data; - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; struct Scsi_Host *host; struct qlogicpti *qpti; static int nqptis; @@ -1315,7 +1315,7 @@ static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_devic qpti->qhost = host; qpti->op = op; qpti->qpti_id = nqptis; - strcpy(qpti->prom_name, op->node->name); + strcpy(qpti->prom_name, op->dev.of_node->name); qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp"); if (qpti_map_regs(qpti) < 0) @@ -1456,8 +1456,11 @@ static const struct of_device_id qpti_match[] = { MODULE_DEVICE_TABLE(of, qpti_match); static struct of_platform_driver qpti_sbus_driver = { - .name = "qpti", - .match_table = qpti_match, + .driver = { + .name = "qpti", + .owner = THIS_MODULE, + .of_match_table = qpti_match, + }, .probe = qpti_sbus_probe, .remove = __devexit_p(qpti_sbus_remove), }; diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index c992ecf4e372..1c027a97d8b9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -492,19 +492,20 @@ void scsi_target_reap(struct scsi_target *starget) struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); unsigned long flags; enum scsi_target_state state; - int empty; + int empty = 0; spin_lock_irqsave(shost->host_lock, flags); state = starget->state; - empty = --starget->reap_ref == 0 && - list_empty(&starget->devices) ? 1 : 0; + if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { + empty = 1; + starget->state = STARGET_DEL; + } spin_unlock_irqrestore(shost->host_lock, flags); if (!empty) return; BUG_ON(state == STARGET_DEL); - starget->state = STARGET_DEL; if (state == STARGET_CREATED) scsi_target_destroy(starget); else @@ -1220,7 +1221,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, } /** - * scsilun_to_int: convert a scsi_lun to an int + * scsilun_to_int - convert a scsi_lun to an int * @scsilun: struct scsi_lun to be converted. * * Description: @@ -1252,7 +1253,7 @@ int scsilun_to_int(struct scsi_lun *scsilun) EXPORT_SYMBOL(scsilun_to_int); /** - * int_to_scsilun: reverts an int into a scsi_lun + * int_to_scsilun - reverts an int into a scsi_lun * @lun: integer to be reverted * @scsilun: struct scsi_lun to be set. * @@ -1876,12 +1877,9 @@ void scsi_forget_host(struct Scsi_Host *shost) spin_unlock_irqrestore(shost->host_lock, flags); } -/* - * Function: scsi_get_host_dev() - * - * Purpose: Create a scsi_device that points to the host adapter itself. - * - * Arguments: SHpnt - Host that needs a scsi_device +/** + * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself + * @shost: Host that needs a scsi_device * * Lock status: None assumed. * @@ -1894,7 +1892,7 @@ void scsi_forget_host(struct Scsi_Host *shost) * * Note - this device is not accessible from any high-level * drivers (including generics), which is probably not - * optimal. 
We can add hooks later to attach + * optimal. We can add hooks later to attach. */ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) { @@ -1920,18 +1918,13 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) } EXPORT_SYMBOL(scsi_get_host_dev); -/* - * Function: scsi_free_host_dev() - * - * Purpose: Free a scsi_device that points to the host adapter itself. - * - * Arguments: SHpnt - Host that needs a scsi_device +/** + * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself + * @sdev: Host device to be freed * * Lock status: None assumed. * * Returns: Nothing - * - * Notes: */ void scsi_free_host_dev(struct scsi_device *sdev) { diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index dee1c96288d4..ef752b248c4d 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -758,8 +758,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, } static int -sg_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd_in, unsigned long arg) +sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { void __user *p = (void __user *)arg; int __user *ip = p; @@ -1078,6 +1077,18 @@ sg_ioctl(struct inode *inode, struct file *filp, } } +static long +sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = sg_ioctl(filp, cmd_in, arg); + unlock_kernel(); + + return ret; +} + #ifdef CONFIG_COMPAT static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { @@ -1322,7 +1333,7 @@ static const struct file_operations sg_fops = { .read = sg_read, .write = sg_write, .poll = sg_poll, - .ioctl = sg_ioctl, + .unlocked_ioctl = sg_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sg_compat_ioctl, #endif diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 3ea1a713ef25..24211d0efa6d 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -3962,6 +3962,7 @@ static const struct file_operations st_fops = .open = st_open, .flush = st_flush, .release = st_release, + .llseek = noop_llseek, }; static int st_probe(struct device *dev) diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index fc23d273fb1a..386dd9d602b6 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -125,7 +125,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma) struct of_device *op = esp->dev; struct device_node *dp; - dp = op->node; + dp = op->dev.of_node; esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); if (esp->scsi_id != 0xff) goto done; @@ -134,7 +134,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma) if (esp->scsi_id != 0xff) goto done; - esp->scsi_id = of_getintprop_default(espdma->node, + esp->scsi_id = of_getintprop_default(espdma->dev.of_node, "scsi-initiator-id", 7); done: @@ -147,7 +147,7 @@ static void __devinit esp_get_differential(struct esp *esp) struct of_device *op = esp->dev; struct device_node *dp; - dp = op->node; + dp = op->dev.of_node; if (of_find_property(dp, "differential", NULL)) esp->flags |= ESP_FLAG_DIFFERENTIAL; else @@ -160,7 +160,7 @@ static void __devinit esp_get_clock_params(struct esp *esp) struct device_node *bus_dp, *dp; int fmhz; - dp = op->node; + dp = op->dev.of_node; bus_dp = dp->parent; fmhz = of_getintprop_default(dp, "clock-frequency", 0); @@ -172,12 +172,12 @@ static void __devinit esp_get_clock_params(struct esp *esp) static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of) { - struct device_node *dma_dp = dma_of->node; + 
struct device_node *dma_dp = dma_of->dev.of_node; struct of_device *op = esp->dev; struct device_node *dp; u8 bursts, val; - dp = op->node; + dp = op->dev.of_node; bursts = of_getintprop_default(dp, "burst-sizes", 0xff); val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); if (val != 0xff) @@ -565,7 +565,7 @@ fail: static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match) { struct device_node *dma_node = NULL; - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; struct of_device *dma_of = NULL; int hme = 0; @@ -574,7 +574,7 @@ static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device !strcmp(dp->parent->name, "dma"))) dma_node = dp->parent; else if (!strcmp(dp->name, "SUNW,fas")) { - dma_node = op->node; + dma_node = op->dev.of_node; hme = 1; } if (dma_node) @@ -633,8 +633,11 @@ static const struct of_device_id esp_match[] = { MODULE_DEVICE_TABLE(of, esp_match); static struct of_platform_driver esp_sbus_driver = { - .name = "esp", - .match_table = esp_match, + .driver = { + .name = "esp", + .owner = THIS_MODULE, + .of_match_table = esp_match, + }, .probe = esp_sbus_probe, .remove = __devexit_p(esp_sbus_remove), }; diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c index 78ed24bb6a35..30463862603b 100644 --- a/drivers/serial/68328serial.c +++ b/drivers/serial/68328serial.c @@ -1437,7 +1437,7 @@ int m68328_console_setup(struct console *cp, char *arg) for (i = 0; i < ARRAY_SIZE(baud_table); i++) if (baud_table[i] == n) break; - if (i < BAUD_TABLE_SIZE) { + if (i < ARRAY_SIZE(baud_table)) { m68328_console_baud = n; m68328_console_cbaud = 0; if (i > 15) { diff --git a/drivers/serial/apbuart.c b/drivers/serial/apbuart.c index fe91319b5f65..0099b8692b60 100644 --- a/drivers/serial/apbuart.c +++ b/drivers/serial/apbuart.c @@ -559,7 +559,7 @@ static int __devinit apbuart_probe(struct of_device *op, i = 0; for (i = 0; i < grlib_apbuart_port_nr; i++) { - if (op->node == grlib_apbuart_nodes[i]) + if (op->dev.of_node == grlib_apbuart_nodes[i]) break; } @@ -584,12 +584,12 @@ static struct of_device_id __initdata apbuart_match[] = { }; static struct of_platform_driver grlib_apbuart_of_driver = { - .match_table = apbuart_match, .probe = apbuart_probe, .driver = { - .owner = THIS_MODULE, - .name = "grlib-apbuart", - }, + .owner = THIS_MODULE, + .name = "grlib-apbuart", + .of_match_table = apbuart_match, + }, }; diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c index 300cea768d74..9eb62a256e9a 100644 --- a/drivers/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/serial/cpm_uart/cpm_uart_core.c @@ -1342,7 +1342,7 @@ static int __devinit cpm_uart_probe(struct of_device *ofdev, /* initialize the device pointer for the port */ pinfo->port.dev = &ofdev->dev; - ret = cpm_uart_init_port(ofdev->node, pinfo); + ret = cpm_uart_init_port(ofdev->dev.of_node, pinfo); if (ret) return ret; @@ -1372,8 +1372,11 @@ static struct of_device_id cpm_uart_match[] = { }; static struct of_platform_driver cpm_uart_driver = { - .name = "cpm_uart", - .match_table = cpm_uart_match, + .driver = { + .name = "cpm_uart", + .owner = THIS_MODULE, + .of_match_table = cpm_uart_match, + }, .probe = cpm_uart_probe, .remove = cpm_uart_remove, }; diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index 02469c31bf0b..84a35f699016 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -397,34 +397,10 @@ static unsigned long mpc512x_getuartclk(void 
*p) return mpc5xxx_get_bus_frequency(p); } -#define DEFAULT_FIFO_SIZE 16 - -static unsigned int __init get_fifo_size(struct device_node *np, - char *fifo_name) -{ - const unsigned int *fp; - - fp = of_get_property(np, fifo_name, NULL); - if (fp) - return *fp; - - pr_warning("no %s property in %s node, defaulting to %d\n", - fifo_name, np->full_name, DEFAULT_FIFO_SIZE); - - return DEFAULT_FIFO_SIZE; -} - -#define FIFOC(_base) ((struct mpc512x_psc_fifo __iomem *) \ - ((u32)(_base) + sizeof(struct mpc52xx_psc))) - /* Init PSC FIFO Controller */ static int __init mpc512x_psc_fifoc_init(void) { struct device_node *np; - void __iomem *psc; - unsigned int tx_fifo_size; - unsigned int rx_fifo_size; - int fifobase = 0; /* current fifo address in 32 bit words */ np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-psc-fifo"); @@ -447,51 +423,6 @@ static int __init mpc512x_psc_fifoc_init(void) return -ENODEV; } - for_each_compatible_node(np, NULL, "fsl,mpc5121-psc-uart") { - tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size"); - rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size"); - - /* size in register is in 4 byte units */ - tx_fifo_size /= 4; - rx_fifo_size /= 4; - if (!tx_fifo_size) - tx_fifo_size = 1; - if (!rx_fifo_size) - rx_fifo_size = 1; - - psc = of_iomap(np, 0); - if (!psc) { - pr_err("%s: Can't map %s device\n", - __func__, np->full_name); - continue; - } - - /* FIFO space is 4KiB, check if requested size is available */ - if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) { - pr_err("%s: no fifo space available for %s\n", - __func__, np->full_name); - iounmap(psc); - /* - * chances are that another device requests less - * fifo space, so we continue. - */ - continue; - } - /* set tx and rx fifo size registers */ - out_be32(&FIFOC(psc)->txsz, (fifobase << 16) | tx_fifo_size); - fifobase += tx_fifo_size; - out_be32(&FIFOC(psc)->rxsz, (fifobase << 16) | rx_fifo_size); - fifobase += rx_fifo_size; - - /* reset and enable the slices */ - out_be32(&FIFOC(psc)->txcmd, 0x80); - out_be32(&FIFOC(psc)->txcmd, 0x01); - out_be32(&FIFOC(psc)->rxcmd, 0x80); - out_be32(&FIFOC(psc)->rxcmd, 0x01); - - iounmap(psc); - } - return 0; } @@ -1295,14 +1226,14 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match) /* Check validity & presence */ for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++) - if (mpc52xx_uart_nodes[idx] == op->node) + if (mpc52xx_uart_nodes[idx] == op->dev.of_node) break; if (idx >= MPC52xx_PSC_MAXNUM) return -EINVAL; pr_debug("Found %s assigned to ttyPSC%x\n", mpc52xx_uart_nodes[idx]->full_name, idx); - uartclk = psc_ops->getuartclk(op->node); + uartclk = psc_ops->getuartclk(op->dev.of_node); if (uartclk == 0) { dev_dbg(&op->dev, "Could not find uart clock frequency!\n"); return -EINVAL; @@ -1322,7 +1253,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match) port->dev = &op->dev; /* Search for IRQ and mapbase */ - ret = of_address_to_resource(op->node, 0, &res); + ret = of_address_to_resource(op->dev.of_node, 0, &res); if (ret) return ret; @@ -1332,7 +1263,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match) return -EINVAL; } - psc_ops->get_irq(port, op->node); + psc_ops->get_irq(port, op->dev.of_node); if (port->irq == NO_IRQ) { dev_dbg(&op->dev, "Could not get irq\n"); return -EINVAL; @@ -1431,15 +1362,16 @@ mpc52xx_uart_of_enumerate(void) MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match); static struct of_platform_driver mpc52xx_uart_of_driver = { - .match_table = mpc52xx_uart_of_match, .probe = 
mpc52xx_uart_of_probe, .remove = mpc52xx_uart_of_remove, #ifdef CONFIG_PM .suspend = mpc52xx_uart_of_suspend, .resume = mpc52xx_uart_of_resume, #endif - .driver = { - .name = "mpc52xx-psc-uart", + .driver = { + .name = "mpc52xx-psc-uart", + .owner = THIS_MODULE, + .of_match_table = mpc52xx_uart_of_match, }, }; diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c index e1ab8ec0a4a6..3c02fa96f282 100644 --- a/drivers/serial/nwpserial.c +++ b/drivers/serial/nwpserial.c @@ -344,7 +344,7 @@ int nwpserial_register_port(struct uart_port *port) mutex_lock(&nwpserial_mutex); - dn = to_of_device(port->dev)->node; + dn = to_of_device(port->dev)->dev.of_node; if (dn == NULL) goto out; diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index 4abfebdb0fcc..a48d9080f552 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c @@ -31,7 +31,7 @@ static int __devinit of_platform_serial_setup(struct of_device *ofdev, int type, struct uart_port *port) { struct resource resource; - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; const unsigned int *clk, *spd; const u32 *prop; int ret, prop_size; @@ -88,7 +88,7 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev, int port_type; int ret; - if (of_find_property(ofdev->node, "used-by-rtas", NULL)) + if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) return -EBUSY; info = kmalloc(sizeof(*info), GFP_KERNEL); @@ -175,11 +175,13 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = { }; static struct of_platform_driver of_platform_serial_driver = { - .owner = THIS_MODULE, - .name = "of_serial", + .driver = { + .name = "of_serial", + .owner = THIS_MODULE, + .of_match_table = of_platform_serial_table, + }, .probe = of_platform_serial_probe, .remove = of_platform_serial_remove, - .match_table = of_platform_serial_table, }; static int __init of_platform_serial_init(void) diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c index 700e10833bf9..cabbdc7ba583 100644 --- a/drivers/serial/pmac_zilog.c +++ b/drivers/serial/pmac_zilog.c @@ -1611,7 +1611,7 @@ static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match) /* Iterate the pmz_ports array to find a matching entry */ for (i = 0; i < MAX_ZS_PORTS; i++) - if (pmz_ports[i].node == mdev->ofdev.node) { + if (pmz_ports[i].node == mdev->ofdev.dev.of_node) { struct uart_pmac_port *uap = &pmz_ports[i]; uap->dev = mdev; diff --git a/drivers/serial/s5pv210.c b/drivers/serial/s5pv210.c index 8dc03837617b..4a789e5361a4 100644 --- a/drivers/serial/s5pv210.c +++ b/drivers/serial/s5pv210.c @@ -119,7 +119,7 @@ static int s5p_serial_probe(struct platform_device *pdev) return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]); } -static struct platform_driver s5p_serial_drv = { +static struct platform_driver s5p_serial_driver = { .probe = s5p_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { @@ -130,19 +130,19 @@ static struct platform_driver s5p_serial_drv = { static int __init s5pv210_serial_console_init(void) { - return s3c24xx_serial_initconsole(&s5p_serial_drv, s5p_uart_inf); + return s3c24xx_serial_initconsole(&s5p_serial_driver, s5p_uart_inf); } console_initcall(s5pv210_serial_console_init); static int __init s5p_serial_init(void) { - return s3c24xx_serial_init(&s5p_serial_drv, *s5p_uart_inf); + return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf); } static void __exit s5p_serial_exit(void) { - 
platform_driver_unregister(&s5p_serial_drv); + platform_driver_unregister(&s5p_serial_driver); } module_init(s5p_serial_init); diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index aaef223df676..c291b3add1d2 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -1025,8 +1025,9 @@ static void sci_rx_dma_release(struct sci_port *s, bool enable_pio) s->chan_rx = NULL; s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; dma_release_channel(chan); - dma_free_coherent(port->dev, s->buf_len_rx * 2, - sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); + if (sg_dma_address(&s->sg_rx[0])) + dma_free_coherent(port->dev, s->buf_len_rx * 2, + sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); if (enable_pio) sci_start_rx(port); } diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c index d14cca7fb88d..890f91742962 100644 --- a/drivers/serial/sunhv.c +++ b/drivers/serial/sunhv.c @@ -565,7 +565,7 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m if (err) goto out_free_con_read_page; - sunserial_console_match(&sunhv_console, op->node, + sunserial_console_match(&sunhv_console, op->dev.of_node, &sunhv_reg, port->line, false); err = uart_add_one_port(&sunhv_reg, port); @@ -630,8 +630,11 @@ static const struct of_device_id hv_match[] = { MODULE_DEVICE_TABLE(of, hv_match); static struct of_platform_driver hv_driver = { - .name = "hv", - .match_table = hv_match, + .driver = { + .name = "hv", + .owner = THIS_MODULE, + .of_match_table = hv_match, + }, .probe = hv_probe, .remove = __devexit_p(hv_remove), }; diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index d2e0321049e2..5e81bc6b48b0 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c @@ -883,7 +883,7 @@ static int sunsab_console_setup(struct console *con, char *options) printk("Console: ttyS%d (SAB82532)\n", (sunsab_reg.minor - 64) + con->index); - sunserial_console_termios(con, to_of_device(up->port.dev)->node); + sunserial_console_termios(con, to_of_device(up->port.dev)->dev.of_node); switch (con->cflag & CBAUD) { case B150: baud = 150; break; @@ -1026,11 +1026,11 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id * if (err) goto out1; - sunserial_console_match(SUNSAB_CONSOLE(), op->node, + sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node, &sunsab_reg, up[0].port.line, false); - sunserial_console_match(SUNSAB_CONSOLE(), op->node, + sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node, &sunsab_reg, up[1].port.line, false); @@ -1093,8 +1093,11 @@ static const struct of_device_id sab_match[] = { MODULE_DEVICE_TABLE(of, sab_match); static struct of_platform_driver sab_driver = { - .name = "sab", - .match_table = sab_match, + .driver = { + .name = "sab", + .owner = THIS_MODULE, + .of_match_table = sab_match, + }, .probe = sab_probe, .remove = __devexit_p(sab_remove), }; diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 01f7731e59b8..234459c2f012 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -1200,7 +1200,7 @@ static int __devinit sunsu_kbd_ms_init(struct uart_sunsu_port *up) return -ENODEV; printk("%s: %s port at %llx, irq %u\n", - to_of_device(up->port.dev)->node->full_name, + to_of_device(up->port.dev)->dev.of_node->full_name, (up->su_type == SU_PORT_KBD) ? 
"Keyboard" : "Mouse", (unsigned long long) up->port.mapbase, up->port.irq); @@ -1352,7 +1352,7 @@ static int __init sunsu_console_setup(struct console *co, char *options) spin_lock_init(&port->lock); /* Get firmware console settings. */ - sunserial_console_termios(co, to_of_device(port->dev)->node); + sunserial_console_termios(co, to_of_device(port->dev)->dev.of_node); memset(&termios, 0, sizeof(struct ktermios)); termios.c_cflag = co->cflag; @@ -1409,7 +1409,7 @@ static enum su_type __devinit su_get_type(struct device_node *dp) static int __devinit su_probe(struct of_device *op, const struct of_device_id *match) { static int inst; - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; struct uart_sunsu_port *up; struct resource *rp; enum su_type type; @@ -1539,8 +1539,11 @@ static const struct of_device_id su_match[] = { MODULE_DEVICE_TABLE(of, su_match); static struct of_platform_driver su_driver = { - .name = "su", - .match_table = su_match, + .driver = { + .name = "su", + .owner = THIS_MODULE, + .of_match_table = su_match, + }, .probe = su_probe, .remove = __devexit_p(su_remove), }; diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index 978b3cee02d7..f9a24f4ebb34 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c @@ -1230,7 +1230,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options) (sunzilog_reg.minor - 64) + con->index, con->index); /* Get firmware console settings. */ - sunserial_console_termios(con, to_of_device(up->port.dev)->node); + sunserial_console_termios(con, to_of_device(up->port.dev)->dev.of_node); /* Firmware console speed is limited to 150-->38400 baud so * this hackish cflag thing is OK. @@ -1408,7 +1408,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m int keyboard_mouse = 0; int err; - if (of_find_property(op->node, "keyboard", NULL)) + if (of_find_property(op->dev.of_node, "keyboard", NULL)) keyboard_mouse = 1; /* uarts must come before keyboards/mice */ @@ -1465,7 +1465,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m sunzilog_init_hw(&up[1]); if (!keyboard_mouse) { - if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node, + if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node, &sunzilog_reg, up[0].port.line, false)) up->flags |= SUNZILOG_FLAG_IS_CONS; @@ -1475,7 +1475,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m rp, sizeof(struct zilog_layout)); return err; } - if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node, + if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node, &sunzilog_reg, up[1].port.line, false)) up->flags |= SUNZILOG_FLAG_IS_CONS; @@ -1541,8 +1541,11 @@ static const struct of_device_id zs_match[] = { MODULE_DEVICE_TABLE(of, zs_match); static struct of_platform_driver zs_driver = { - .name = "zs", - .match_table = zs_match, + .driver = { + .name = "zs", + .owner = THIS_MODULE, + .of_match_table = zs_match, + }, .probe = zs_probe, .remove = __devexit_p(zs_remove), }; diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c index e6639a95d276..8acccd564378 100644 --- a/drivers/serial/uartlite.c +++ b/drivers/serial/uartlite.c @@ -591,15 +591,15 @@ ulite_of_probe(struct of_device *op, const struct of_device_id *match) dev_dbg(&op->dev, "%s(%p, %p)\n", __func__, op, match); - rc = of_address_to_resource(op->node, 0, &res); + rc = of_address_to_resource(op->dev.of_node, 0, &res); if (rc) { dev_err(&op->dev, "invalid 
address\n"); return rc; } - irq = irq_of_parse_and_map(op->node, 0); + irq = irq_of_parse_and_map(op->dev.of_node, 0); - id = of_get_property(op->node, "port-number", NULL); + id = of_get_property(op->dev.of_node, "port-number", NULL); return ulite_assign(&op->dev, id ? *id : -1, res.start, irq); } @@ -610,13 +610,12 @@ static int __devexit ulite_of_remove(struct of_device *op) } static struct of_platform_driver ulite_of_driver = { - .owner = THIS_MODULE, - .name = "uartlite", - .match_table = ulite_of_match, .probe = ulite_of_probe, .remove = __devexit_p(ulite_of_remove), .driver = { .name = "uartlite", + .owner = THIS_MODULE, + .of_match_table = ulite_of_match, }, }; diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c index 074904912f64..907b06f5c447 100644 --- a/drivers/serial/ucc_uart.c +++ b/drivers/serial/ucc_uart.c @@ -1197,7 +1197,7 @@ static void uart_firmware_cont(const struct firmware *fw, void *context) static int ucc_uart_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; const unsigned int *iprop; /* Integer OF properties */ const char *sprop; /* String OF properties */ struct uart_qe_port *qe_port = NULL; @@ -1486,9 +1486,11 @@ static struct of_device_id ucc_uart_match[] = { MODULE_DEVICE_TABLE(of, ucc_uart_match); static struct of_platform_driver ucc_uart_of_driver = { - .owner = THIS_MODULE, - .name = "ucc_uart", - .match_table = ucc_uart_match, + .driver = { + .name = "ucc_uart", + .owner = THIS_MODULE, + .of_match_table = ucc_uart_match, + }, .probe = ucc_uart_probe, .remove = ucc_uart_remove, }; diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c index 34aba30eb84b..f5b4ca581541 100644 --- a/drivers/sfi/sfi_acpi.c +++ b/drivers/sfi/sfi_acpi.c @@ -173,3 +173,44 @@ int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id, sfi_acpi_put_table(table); return ret; } + +static ssize_t sfi_acpi_table_show(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t offset, size_t count) +{ + struct sfi_table_attr *tbl_attr = + container_of(bin_attr, struct sfi_table_attr, attr); + struct acpi_table_header *th = NULL; + struct sfi_table_key key; + ssize_t cnt; + + key.sig = tbl_attr->name; + key.oem_id = NULL; + key.oem_table_id = NULL; + + th = sfi_acpi_get_table(&key); + if (!th) + return 0; + + cnt = memory_read_from_buffer(buf, count, &offset, + th, th->length); + sfi_acpi_put_table(th); + + return cnt; +} + + +void __init sfi_acpi_sysfs_init(void) +{ + u32 tbl_cnt, i; + struct sfi_table_attr *tbl_attr; + + tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64); + for (i = 0; i < tbl_cnt; i++) { + tbl_attr = + sfi_sysfs_install_table(xsdt_va->table_offset_entry[i]); + tbl_attr->attr.read = sfi_acpi_table_show; + } + + return; +} diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c index b204a0929139..005195958647 100644 --- a/drivers/sfi/sfi_core.c +++ b/drivers/sfi/sfi_core.c @@ -67,6 +67,7 @@ #include <linux/acpi.h> #include <linux/init.h> #include <linux/sfi.h> +#include <linux/slab.h> #include "sfi_core.h" @@ -382,6 +383,102 @@ static __init int sfi_find_syst(void) return -1; } +static struct kobject *sfi_kobj; +static struct kobject *tables_kobj; + +static ssize_t sfi_table_show(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t offset, size_t count) +{ + struct sfi_table_attr *tbl_attr = + container_of(bin_attr, struct sfi_table_attr, attr); + struct 
sfi_table_header *th = NULL; + struct sfi_table_key key; + ssize_t cnt; + + key.sig = tbl_attr->name; + key.oem_id = NULL; + key.oem_table_id = NULL; + + if (strncmp(SFI_SIG_SYST, tbl_attr->name, SFI_SIGNATURE_SIZE)) { + th = sfi_get_table(&key); + if (!th) + return 0; + + cnt = memory_read_from_buffer(buf, count, &offset, + th, th->len); + sfi_put_table(th); + } else + cnt = memory_read_from_buffer(buf, count, &offset, + syst_va, syst_va->header.len); + + return cnt; +} + +struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa) +{ + struct sfi_table_attr *tbl_attr; + struct sfi_table_header *th; + int ret; + + tbl_attr = kzalloc(sizeof(struct sfi_table_attr), GFP_KERNEL); + if (!tbl_attr) + return NULL; + + th = sfi_map_table(pa); + if (!th || !th->sig[0]) { + kfree(tbl_attr); + return NULL; + } + + sysfs_attr_init(&tbl_attr->attr.attr); + memcpy(tbl_attr->name, th->sig, SFI_SIGNATURE_SIZE); + + tbl_attr->attr.size = 0; + tbl_attr->attr.read = sfi_table_show; + tbl_attr->attr.attr.name = tbl_attr->name; + tbl_attr->attr.attr.mode = 0400; + + ret = sysfs_create_bin_file(tables_kobj, + &tbl_attr->attr); + if (ret) + kfree(tbl_attr); + + sfi_unmap_table(th); + return tbl_attr; +} + +static int __init sfi_sysfs_init(void) +{ + int tbl_cnt, i; + + if (sfi_disabled) + return 0; + + sfi_kobj = kobject_create_and_add("sfi", firmware_kobj); + if (!sfi_kobj) + return 0; + + tables_kobj = kobject_create_and_add("tables", sfi_kobj); + if (!tables_kobj) { + kobject_put(sfi_kobj); + return 0; + } + + sfi_sysfs_install_table(syst_pa); + + tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64); + + for (i = 0; i < tbl_cnt; i++) + sfi_sysfs_install_table(syst_va->pentry[i]); + + sfi_acpi_sysfs_init(); + kobject_uevent(sfi_kobj, KOBJ_ADD); + kobject_uevent(tables_kobj, KOBJ_ADD); + pr_info("SFI sysfs interfaces init success\n"); + return 0; +} + void __init sfi_init(void) { if (!acpi_disabled) @@ -390,7 +487,7 @@ void __init sfi_init(void) if (sfi_disabled) return; - pr_info("Simple Firmware Interface v0.7 http://simplefirmware.org\n"); + pr_info("Simple Firmware Interface v0.81 http://simplefirmware.org\n"); if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init()) disable_sfi(); @@ -414,3 +511,9 @@ void __init sfi_init_late(void) sfi_acpi_init(); } + +/* + * This is put here because we need to wait until /sys/firmware is set up; + * only then can our interface be registered in /sys/firmware/sfi + */ +core_initcall(sfi_sysfs_init); diff --git a/drivers/sfi/sfi_core.h b/drivers/sfi/sfi_core.h index da82d39e104d..b7cf220d44ec 100644 --- a/drivers/sfi/sfi_core.h +++ b/drivers/sfi/sfi_core.h @@ -61,6 +61,12 @@ struct sfi_table_key{ char *oem_table_id; }; +/* sysfs interface */ +struct sfi_table_attr { + struct bin_attribute attr; + char name[8]; +}; + #define SFI_ANY_KEY { .sig = NULL, .oem_id = NULL, .oem_table_id = NULL } extern int __init sfi_acpi_init(); @@ -68,3 +74,5 @@ extern struct sfi_table_header *sfi_check_table(u64 paddr, struct sfi_table_key *key); struct sfi_table_header *sfi_get_table(struct sfi_table_key *key); extern void sfi_put_table(struct sfi_table_header *table); +extern struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa); +extern void __init sfi_acpi_sysfs_init(void); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f950b6316949..91c2f4f3af10 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -117,6 +117,16 @@ config SPI_DAVINCI help SPI master controller for DaVinci and DA8xx SPI modules.
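The SFI code above exports each firmware table through a sysfs binary attribute, embedding the bin_attribute in a small wrapper (struct sfi_table_attr) so the read callback can recover its per-table context with container_of(). A minimal sketch of that embedding pattern, assuming the sysfs API of this kernel generation; the demo_* names are placeholders:

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysfs.h>

struct demo_table_attr {
        struct bin_attribute attr;      /* embedded, not pointed to */
        char name[8];
};

static ssize_t demo_show(struct file *filp, struct kobject *kobj,
                         struct bin_attribute *bin_attr, char *buf,
                         loff_t off, size_t count)
{
        /* Recover the wrapper from the embedded member. */
        struct demo_table_attr *t =
                container_of(bin_attr, struct demo_table_attr, attr);

        return memory_read_from_buffer(buf, count, &off,
                                       t->name, strlen(t->name));
}

static int demo_install(struct kobject *parent, const char *sig)
{
        struct demo_table_attr *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        sysfs_attr_init(&t->attr.attr); /* lockdep key for dynamic attrs */
        strlcpy(t->name, sig, sizeof(t->name));
        t->attr.attr.name = t->name;
        t->attr.attr.mode = 0400;
        t->attr.size = 0;               /* size unknown until read */
        t->attr.read = demo_show;

        return sysfs_create_bin_file(parent, &t->attr);
}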
+config SPI_EP93XX + tristate "Cirrus Logic EP93xx SPI controller" + depends on ARCH_EP93XX + help + This enables using the Cirrus EP93xx SPI controller in master + mode. + + To compile this driver as a module, choose M here. The module will be + called ep93xx_spi. + config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" depends on GENERIC_GPIO @@ -165,6 +175,13 @@ config SPI_MPC52xx_PSC This enables using the Freescale MPC52xx Programmable Serial Controller in master SPI mode. +config SPI_MPC512x_PSC + tristate "Freescale MPC512x PSC SPI controller" + depends on SPI_MASTER && PPC_MPC512x + help + This enables using the Freescale MPC5121 Programmable Serial + Controller in SPI master mode. + config SPI_MPC8xxx tristate "Freescale MPC8xxx SPI controller" depends on FSL_SOC diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index d7d0f89b797b..e9cbd18217a0 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o +obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o obj-$(CONFIG_SPI_GPIO) += spi_gpio.o obj-$(CONFIG_SPI_IMX) += spi_imx.o obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o @@ -30,6 +31,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o obj-$(CONFIG_SPI_ORION) += orion_spi.o obj-$(CONFIG_SPI_PL022) += amba-pl022.o +obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index e9aeee16d922..f0a1418ce660 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -102,13 +102,21 @@ /* * SSP Control Register 0 - SSP_CR0 */ -#define SSP_CR0_MASK_DSS (0x1FUL << 0) -#define SSP_CR0_MASK_HALFDUP (0x1UL << 5) +#define SSP_CR0_MASK_DSS (0x0FUL << 0) +#define SSP_CR0_MASK_FRF (0x3UL << 4) #define SSP_CR0_MASK_SPO (0x1UL << 6) #define SSP_CR0_MASK_SPH (0x1UL << 7) #define SSP_CR0_MASK_SCR (0xFFUL << 8) -#define SSP_CR0_MASK_CSS (0x1FUL << 16) -#define SSP_CR0_MASK_FRF (0x3UL << 21) + +/* + * The ST version of this block moves some bits + * in SSP_CR0 and extends it to 32 bits */ +#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0) +#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5) +#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16) +#define SSP_CR0_MASK_FRF_ST (0x3UL << 21) + /* * SSP Control Register 0 - SSP_CR1 @@ -117,16 +125,18 @@ #define SSP_CR1_MASK_SSE (0x1UL << 1) #define SSP_CR1_MASK_MS (0x1UL << 2) #define SSP_CR1_MASK_SOD (0x1UL << 3) -#define SSP_CR1_MASK_RENDN (0x1UL << 4) -#define SSP_CR1_MASK_TENDN (0x1UL << 5) -#define SSP_CR1_MASK_MWAIT (0x1UL << 6) -#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7) -#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10) /* - * SSP Data Register - SSP_DR + * The ST version of this block adds some bits + * in SSP_CR1 */ -#define SSP_DR_MASK_DATA 0xFFFFFFFF +#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4) +#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5) +#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6) +#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7) +#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10) +/* This one is only in the PL023 variant */ +#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13) /* * SSP Status Register - SSP_SR @@ -134,7 +144,7 @@ #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
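The amba-pl022 register definitions above split into plain ARM and _ST variants because the ST derivative widens CR0 to 32 bits and rearranges its fields; the driver keys every register store off a per-variant vendor data flag and picks writew() or writel() accordingly. A reduced sketch of that dispatch, with illustrative demo_* types rather than the driver's full structures:

#include <linux/io.h>
#include <linux/types.h>

struct demo_vendor_data {
        bool extended_cr;       /* CR0 is 32 bits wide on ST variants */
};

struct demo_ssp {
        const struct demo_vendor_data *vendor;
        void __iomem *virtbase;
        u32 cr0;                /* wide enough for either layout */
};

#define DEMO_SSP_CR0(base)      ((base) + 0x000)

static void demo_restore_cr0(struct demo_ssp *ssp)
{
        if (ssp->vendor->extended_cr)
                writel(ssp->cr0, DEMO_SSP_CR0(ssp->virtbase));
        else
                writew(ssp->cr0, DEMO_SSP_CR0(ssp->virtbase));
}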
#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ -#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ +#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ /* @@ -227,7 +237,7 @@ /* * SSP Test Data Register - SSP_TDR */ -#define TDR_MASK_TESTDATA (0xFFFFFFFF) +#define TDR_MASK_TESTDATA (0xFFFFFFFF) /* * Message State @@ -235,33 +245,33 @@ * hold a single state value, that's why all this * (void *) casting is done here. */ -#define STATE_START ((void *) 0) -#define STATE_RUNNING ((void *) 1) -#define STATE_DONE ((void *) 2) -#define STATE_ERROR ((void *) -1) +#define STATE_START ((void *) 0) +#define STATE_RUNNING ((void *) 1) +#define STATE_DONE ((void *) 2) +#define STATE_ERROR ((void *) -1) /* * Queue State */ -#define QUEUE_RUNNING (0) -#define QUEUE_STOPPED (1) +#define QUEUE_RUNNING (0) +#define QUEUE_STOPPED (1) /* * SSP State - Whether Enabled or Disabled */ -#define SSP_DISABLED (0) -#define SSP_ENABLED (1) +#define SSP_DISABLED (0) +#define SSP_ENABLED (1) /* * SSP DMA State - Whether DMA Enabled or Disabled */ -#define SSP_DMA_DISABLED (0) -#define SSP_DMA_ENABLED (1) +#define SSP_DMA_DISABLED (0) +#define SSP_DMA_ENABLED (1) /* * SSP Clock Defaults */ -#define NMDK_SSP_DEFAULT_CLKRATE 0x2 -#define NMDK_SSP_DEFAULT_PRESCALE 0x40 +#define SSP_DEFAULT_CLKRATE 0x2 +#define SSP_DEFAULT_PRESCALE 0x40 /* * SSP Clock Parameter ranges @@ -307,16 +317,22 @@ enum ssp_writing { * @fifodepth: depth of FIFOs (both) * @max_bpw: maximum number of bits per word * @unidir: supports unidirection transfers + * @extended_cr: 32 bit wide control register 0 with extra + * features and extra features in CR1 as found in the ST variants + * @pl023: supports a subset of the ST extensions called "PL023" */ struct vendor_data { int fifodepth; int max_bpw; bool unidir; + bool extended_cr; + bool pl023; }; /** * struct pl022 - This is the private SSP driver data structure * @adev: AMBA device model hookup + * @vendor: Vendor data for the IP block * @phybase: The physical memory where the SSP device resides * @virtbase: The virtual memory where the SSP is mapped * @master: SPI framework hookup @@ -369,7 +385,8 @@ struct pl022 { /** * struct chip_data - To maintain runtime state of SSP for each client chip - * @cr0: Value of control register CR0 of SSP + * @cr0: Value of control register CR0 of SSP - on later ST variants this + * register is 32 bits wide rather than just 16 * @cr1: Value of control register CR1 of SSP * @dmacr: Value of DMA control Register of SSP * @cpsr: Value of Clock prescale register @@ -384,7 +401,7 @@ struct pl022 { * This would be set according to the current message that would be served */ struct chip_data { - u16 cr0; + u32 cr0; u16 cr1; u16 dmacr; u16 cpsr; @@ -517,7 +534,10 @@ static void restore_state(struct pl022 *pl022) { struct chip_data *chip = pl022->cur_chip; - writew(chip->cr0, SSP_CR0(pl022->virtbase)); + if (pl022->vendor->extended_cr) + writel(chip->cr0, SSP_CR0(pl022->virtbase)); + else + writew(chip->cr0, SSP_CR0(pl022->virtbase)); writew(chip->cr1, SSP_CR1(pl022->virtbase)); writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); @@ -525,38 +545,70 @@ static void restore_state(struct pl022 *pl022) writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); } -/** - * load_ssp_default_config - Load default configuration for SSP - * @pl022: SSP driver private data structure - */ - /* * Default SSP Register Values */ #define DEFAULT_SSP_REG_CR0 ( \ 
GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ - GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ - GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ - GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ - GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ +) + +/* ST versions have slightly different bit layout */ +#define DEFAULT_SSP_REG_CR0_ST ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ + GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ + GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \ +) + +/* The PL023 version is slightly different again */ +#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \ + GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ ) #define DEFAULT_SSP_REG_CR1 ( \ GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ + GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ +) + +/* ST versions extend this register to use all 16 bits */ +#define DEFAULT_SSP_REG_CR1_ST ( \ + DEFAULT_SSP_REG_CR1 | \ + GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ + GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ + GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\ + GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ + GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \ +) + +/* + * The PL023 variant has further differences: no loopback mode, no microwire + * support, and a new clock feedback delay setting. 
+ */
+#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
+	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
+	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
 	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
-	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
-	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
-	GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
-	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
-	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
+	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
+	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
+	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
+	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
+	GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
 )

 #define DEFAULT_SSP_REG_CPSR ( \
-	GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
+	GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
 )

 #define DEFAULT_SSP_REG_DMACR (\
@@ -564,11 +616,22 @@ static void restore_state(struct pl022 *pl022)
 	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
 )

-
+/**
+ * load_ssp_default_config - Load default configuration for SSP
+ * @pl022: SSP driver private data structure
+ */
 static void load_ssp_default_config(struct pl022 *pl022)
 {
-	writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
-	writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
+	if (pl022->vendor->pl023) {
+		writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
+		writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
+	} else if (pl022->vendor->extended_cr) {
+		writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
+		writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
+	} else {
+		writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
+		writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
+	}
 	writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
 	writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
 	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
@@ -1008,7 +1071,7 @@ static void do_polling_transfer(void *data)
 	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
 	       SSP_CR1(pl022->virtbase));

-	dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n");
+	dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
 	/* FIXME: insert a timeout so we don't hang here indefinitely */
 	while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
 		readwriter(pl022);
@@ -1148,7 +1211,6 @@ static int stop_queue(struct pl022 *pl022)
 	 * A wait_queue on the pl022->busy could be used, but then the common
 	 * execution path (pump_messages) would be required to call wake_up or
 	 * friends on every SPI message.
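	 * That would burden the hot path just to make this rare teardown
	 * event-driven.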
Do this instead */
-	pl022->run = QUEUE_STOPPED;
 	while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
 		spin_unlock_irqrestore(&pl022->queue_lock, flags);
 		msleep(10);
@@ -1157,6 +1219,7 @@ static int stop_queue(struct pl022 *pl022)

 	if (!list_empty(&pl022->queue) || pl022->busy)
 		status = -EBUSY;
+	else pl022->run = QUEUE_STOPPED;

 	spin_unlock_irqrestore(&pl022->queue_lock, flags);
@@ -1280,11 +1343,21 @@ static int verify_controller_parameters(struct pl022 *pl022,
 			"Wait State is configured incorrectly\n");
 		return -EINVAL;
 	}
-	if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
-	    && (chip_info->duplex !=
-		SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
-		dev_err(chip_info->dev,
-			"DUPLEX is configured incorrectly\n");
+	/* Half duplex is only available in the ST Micro version */
+	if (pl022->vendor->extended_cr) {
+		if ((chip_info->duplex !=
+		     SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
+		    && (chip_info->duplex !=
+			SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
+			dev_err(chip_info->dev,
+				"Microwire duplex mode is configured incorrectly\n");
+			return -EINVAL;
+		}
+	} else {
+		if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
+			dev_err(chip_info->dev,
+				"Microwire half duplex mode requested,"
+				" but this is only available in the"
+				" ST version of PL022\n");
+			return -EINVAL;
+		}
 	}
 }
@@ -1581,22 +1654,49 @@ static int pl022_setup(struct spi_device *spi)

 	chip->cpsr = chip_info->clk_freq.cpsdvsr;

-	SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
-	SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
+	/* Special setup for the ST micro extended control registers */
+	if (pl022->vendor->extended_cr) {
+		if (pl022->vendor->pl023) {
+			/* These bits are only in the PL023 */
+			SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
+				       SSP_CR1_MASK_FBCLKDEL_ST, 13);
+		} else {
+			/* These bits are in the PL022 but not PL023 */
+			SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
+				       SSP_CR0_MASK_HALFDUP_ST, 5);
+			SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
+				       SSP_CR0_MASK_CSS_ST, 16);
+			SSP_WRITE_BITS(chip->cr0, chip_info->iface,
+				       SSP_CR0_MASK_FRF_ST, 21);
+			SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
+				       SSP_CR1_MASK_MWAIT_ST, 6);
+		}
+		SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
+			       SSP_CR0_MASK_DSS_ST, 0);
+		SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx,
+			       SSP_CR1_MASK_RENDN_ST, 4);
+		SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx,
+			       SSP_CR1_MASK_TENDN_ST, 5);
+		SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
+			       SSP_CR1_MASK_RXIFLSEL_ST, 7);
+		SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
+			       SSP_CR1_MASK_TXIFLSEL_ST, 10);
+	} else {
+		SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
+			       SSP_CR0_MASK_DSS, 0);
+		SSP_WRITE_BITS(chip->cr0, chip_info->iface,
+			       SSP_CR0_MASK_FRF, 4);
+	}
+	/* Stuff that is common for all versions */
 	SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
 	SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
 	SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
-	SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
-	SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
-	SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
+	/* Loopback is available on all versions except PL023 */
+	if (!pl022->vendor->pl023)
+		SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
 	SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
 	SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
 	SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable,
SSP_CR1_MASK_SOD, 3); - SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4); - SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5); - SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6); - SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7); - SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10); /* Save controller_state */ spi_set_ctldata(spi, chip); @@ -1809,6 +1909,8 @@ static struct vendor_data vendor_arm = { .fifodepth = 8, .max_bpw = 16, .unidir = false, + .extended_cr = false, + .pl023 = false, }; @@ -1816,6 +1918,16 @@ static struct vendor_data vendor_st = { .fifodepth = 32, .max_bpw = 32, .unidir = false, + .extended_cr = true, + .pl023 = false, +}; + +static struct vendor_data vendor_st_pl023 = { + .fifodepth = 32, + .max_bpw = 32, + .unidir = false, + .extended_cr = true, + .pl023 = true, }; static struct amba_id pl022_ids[] = { @@ -1837,6 +1949,18 @@ static struct amba_id pl022_ids[] = { .mask = 0xffffffff, .data = &vendor_st, }, + { + /* + * ST-Ericsson derivative "PL023" (this is not + * an official ARM number), this is a PL022 SSP block + * stripped to SPI mode only, it has 32bit wide + * and 32 locations deep TX/RX FIFO but no extended + * CR0/CR1 register + */ + .id = 0x00080023, + .mask = 0xffffffff, + .data = &vendor_st_pl023, + }, { 0, 0 }, }; diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c index 95afb6b77395..b85090caf7cf 100644 --- a/drivers/spi/davinci_spi.c +++ b/drivers/spi/davinci_spi.c @@ -301,7 +301,7 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, struct davinci_spi *davinci_spi; struct davinci_spi_platform_data *pdata; u8 bits_per_word = 0; - u32 hz = 0, prescale; + u32 hz = 0, prescale = 0, clkspeed; davinci_spi = spi_master_get_devdata(spi->master); pdata = davinci_spi->pdata; @@ -338,10 +338,16 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, spi->chip_select); - prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff; + clkspeed = clk_get_rate(davinci_spi->clk); + if (hz > clkspeed / 2) + prescale = 1 << 8; + if (hz < clkspeed / 256) + prescale = 255 << 8; + if (!prescale) + prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00; clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); - set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select); + set_fmt_bits(davinci_spi->base, prescale, spi->chip_select); return 0; } diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c new file mode 100644 index 000000000000..0ba35df9a6df --- /dev/null +++ b/drivers/spi/ep93xx_spi.c @@ -0,0 +1,938 @@ +/* + * Driver for Cirrus Logic EP93xx SPI controller. + * + * Copyright (c) 2010 Mika Westerberg + * + * Explicit FIFO handling code was inspired by amba-pl022 driver. + * + * Chip select support using other than built-in GPIOs by H. Hartley Sweeten. + * + * For more information about the SPI controller see documentation on Cirrus + * Logic web site: + * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> +#include <linux/sched.h> +#include <linux/spi/spi.h> + +#include <mach/ep93xx_spi.h> + +#define SSPCR0 0x0000 +#define SSPCR0_MODE_SHIFT 6 +#define SSPCR0_SCR_SHIFT 8 + +#define SSPCR1 0x0004 +#define SSPCR1_RIE BIT(0) +#define SSPCR1_TIE BIT(1) +#define SSPCR1_RORIE BIT(2) +#define SSPCR1_LBM BIT(3) +#define SSPCR1_SSE BIT(4) +#define SSPCR1_MS BIT(5) +#define SSPCR1_SOD BIT(6) + +#define SSPDR 0x0008 + +#define SSPSR 0x000c +#define SSPSR_TFE BIT(0) +#define SSPSR_TNF BIT(1) +#define SSPSR_RNE BIT(2) +#define SSPSR_RFF BIT(3) +#define SSPSR_BSY BIT(4) +#define SSPCPSR 0x0010 + +#define SSPIIR 0x0014 +#define SSPIIR_RIS BIT(0) +#define SSPIIR_TIS BIT(1) +#define SSPIIR_RORIS BIT(2) +#define SSPICR SSPIIR + +/* timeout in milliseconds */ +#define SPI_TIMEOUT 5 +/* maximum depth of RX/TX FIFO */ +#define SPI_FIFO_SIZE 8 + +/** + * struct ep93xx_spi - EP93xx SPI controller structure + * @lock: spinlock that protects concurrent accesses to fields @running, + * @current_msg and @msg_queue + * @pdev: pointer to platform device + * @clk: clock for the controller + * @regs_base: pointer to ioremap()'d registers + * @irq: IRQ number used by the driver + * @min_rate: minimum clock rate (in Hz) supported by the controller + * @max_rate: maximum clock rate (in Hz) supported by the controller + * @running: is the queue running + * @wq: workqueue used by the driver + * @msg_work: work that is queued for the driver + * @wait: wait here until given transfer is completed + * @msg_queue: queue for the messages + * @current_msg: message that is currently processed (or %NULL if none) + * @tx: current byte in transfer to transmit + * @rx: current byte in transfer to receive + * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one + * frame decreases this level and sending one frame increases it. + * + * This structure holds EP93xx SPI controller specific information. When + * @running is %true, driver accepts transfer requests from protocol drivers. + * @current_msg is used to hold pointer to the message that is currently + * processed. If @current_msg is %NULL, it means that no processing is going + * on. + * + * Most of the fields are only written once and they can be accessed without + * taking the @lock. Fields that are accessed concurrently are: @current_msg, + * @running, and @msg_queue. + */ +struct ep93xx_spi { + spinlock_t lock; + const struct platform_device *pdev; + struct clk *clk; + void __iomem *regs_base; + int irq; + unsigned long min_rate; + unsigned long max_rate; + bool running; + struct workqueue_struct *wq; + struct work_struct msg_work; + struct completion wait; + struct list_head msg_queue; + struct spi_message *current_msg; + size_t tx; + size_t rx; + size_t fifo_level; +}; + +/** + * struct ep93xx_spi_chip - SPI device hardware settings + * @spi: back pointer to the SPI device + * @rate: max rate in hz this chip supports + * @div_cpsr: cpsr (pre-scaler) divider + * @div_scr: scr divider + * @dss: bits per word (4 - 16 bits) + * @ops: private chip operations + * + * This structure is used to store hardware register specific settings for each + * SPI device. Settings are written to hardware by function + * ep93xx_spi_chip_setup(). 
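+ *
+ * As a worked example (the clock figure is illustrative, not taken from the
+ * manual): with a 14.7456 MHz sspclk and a device asking for 1 MHz,
+ * ep93xx_spi_calc_divisors() below settles on @div_cpsr = 2 and @div_scr = 7,
+ * since 14745600 / (2 * (1 + 7)) = 921600 Hz is the fastest rate not above
+ * the request; an 8 bits-per-word device then gets @dss = 7.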
+ */
+struct ep93xx_spi_chip {
+	const struct spi_device		*spi;
+	unsigned long			rate;
+	u8				div_cpsr;
+	u8				div_scr;
+	u8				dss;
+	struct ep93xx_spi_chip_ops	*ops;
+};
+
+/* converts bits per word to CR0.DSS value */
+#define bits_per_word_to_dss(bpw)	((bpw) - 1)
+
+static inline void
+ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
+{
+	__raw_writeb(value, espi->regs_base + reg);
+}
+
+static inline u8
+ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
+{
+	return __raw_readb(spi->regs_base + reg);
+}
+
+static inline void
+ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
+{
+	__raw_writew(value, espi->regs_base + reg);
+}
+
+static inline u16
+ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
+{
+	return __raw_readw(spi->regs_base + reg);
+}
+
+static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
+{
+	u8 regval;
+	int err;
+
+	err = clk_enable(espi->clk);
+	if (err)
+		return err;
+
+	regval = ep93xx_spi_read_u8(espi, SSPCR1);
+	regval |= SSPCR1_SSE;
+	ep93xx_spi_write_u8(espi, SSPCR1, regval);
+
+	return 0;
+}
+
+static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
+{
+	u8 regval;
+
+	regval = ep93xx_spi_read_u8(espi, SSPCR1);
+	regval &= ~SSPCR1_SSE;
+	ep93xx_spi_write_u8(espi, SSPCR1, regval);
+
+	clk_disable(espi->clk);
+}
+
+static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
+{
+	u8 regval;
+
+	regval = ep93xx_spi_read_u8(espi, SSPCR1);
+	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
+	ep93xx_spi_write_u8(espi, SSPCR1, regval);
+}
+
+static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
+{
+	u8 regval;
+
+	regval = ep93xx_spi_read_u8(espi, SSPCR1);
+	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
+	ep93xx_spi_write_u8(espi, SSPCR1, regval);
+}
+
+/**
+ * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
+ * @espi: ep93xx SPI controller struct
+ * @chip: divisors are calculated for this chip
+ * @rate: desired SPI output clock rate
+ *
+ * Function calculates cpsr (clock pre-scaler) and scr divisors based on
+ * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
+ * for some reason, divisors cannot be calculated nothing is stored and
+ * %-EINVAL is returned.
+ */
+static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
+				    struct ep93xx_spi_chip *chip,
+				    unsigned long rate)
+{
+	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
+	int cpsr, scr;
+
+	/*
+	 * Make sure that max value is between values supported by the
+	 * controller. Note that minimum value is already checked in
+	 * ep93xx_spi_transfer().
+	 */
+	rate = clamp(rate, espi->min_rate, espi->max_rate);
+
+	/*
+	 * Calculate divisors so that we can get speed according to the
+	 * following formula:
+	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
+	 *
+	 * cpsr must be an even number and start from 2, scr can be any number
+	 * between 0 and 255.
+	 */
+	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
+		for (scr = 0; scr <= 255; scr++) {
+			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
+				chip->div_scr = (u8)scr;
+				chip->div_cpsr = (u8)cpsr;
+				return 0;
+			}
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
+{
+	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
+	int value = (spi->mode & SPI_CS_HIGH) ?
control : !control; + + if (chip->ops && chip->ops->cs_control) + chip->ops->cs_control(spi, value); +} + +/** + * ep93xx_spi_setup() - setup an SPI device + * @spi: SPI device to setup + * + * This function sets up SPI device mode, speed etc. Can be called multiple + * times for a single device. Returns %0 in case of success, negative error in + * case of failure. When this function returns success, the device is + * deselected. + */ +static int ep93xx_spi_setup(struct spi_device *spi) +{ + struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); + struct ep93xx_spi_chip *chip; + + if (spi->bits_per_word < 4 || spi->bits_per_word > 16) { + dev_err(&espi->pdev->dev, "invalid bits per word %d\n", + spi->bits_per_word); + return -EINVAL; + } + + chip = spi_get_ctldata(spi); + if (!chip) { + dev_dbg(&espi->pdev->dev, "initial setup for %s\n", + spi->modalias); + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->spi = spi; + chip->ops = spi->controller_data; + + if (chip->ops && chip->ops->setup) { + int ret = chip->ops->setup(spi); + if (ret) { + kfree(chip); + return ret; + } + } + + spi_set_ctldata(spi, chip); + } + + if (spi->max_speed_hz != chip->rate) { + int err; + + err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz); + if (err != 0) { + spi_set_ctldata(spi, NULL); + kfree(chip); + return err; + } + chip->rate = spi->max_speed_hz; + } + + chip->dss = bits_per_word_to_dss(spi->bits_per_word); + + ep93xx_spi_cs_control(spi, false); + return 0; +} + +/** + * ep93xx_spi_transfer() - queue message to be transferred + * @spi: target SPI device + * @msg: message to be transferred + * + * This function is called by SPI device drivers when they are going to transfer + * a new message. It simply puts the message in the queue and schedules + * workqueue to perform the actual transfer later on. + * + * Returns %0 on success and negative error in case of failure. + */ +static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); + struct spi_transfer *t; + unsigned long flags; + + if (!msg || !msg->complete) + return -EINVAL; + + /* first validate each transfer */ + list_for_each_entry(t, &msg->transfers, transfer_list) { + if (t->bits_per_word) { + if (t->bits_per_word < 4 || t->bits_per_word > 16) + return -EINVAL; + } + if (t->speed_hz && t->speed_hz < espi->min_rate) + return -EINVAL; + } + + /* + * Now that we own the message, let's initialize it so that it is + * suitable for us. We use @msg->status to signal whether there was + * error in transfer and @msg->state is used to hold pointer to the + * current transfer (or %NULL if no active current transfer). + */ + msg->state = NULL; + msg->status = 0; + msg->actual_length = 0; + + spin_lock_irqsave(&espi->lock, flags); + if (!espi->running) { + spin_unlock_irqrestore(&espi->lock, flags); + return -ESHUTDOWN; + } + list_add_tail(&msg->queue, &espi->msg_queue); + queue_work(espi->wq, &espi->msg_work); + spin_unlock_irqrestore(&espi->lock, flags); + + return 0; +} + +/** + * ep93xx_spi_cleanup() - cleans up master controller specific state + * @spi: SPI device to cleanup + * + * This function releases master controller specific state for given @spi + * device. 
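+ * It is safe to call even if setup never completed for @spi: the ctldata
+ * check below makes it a no-op in that case.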
+ */ +static void ep93xx_spi_cleanup(struct spi_device *spi) +{ + struct ep93xx_spi_chip *chip; + + chip = spi_get_ctldata(spi); + if (chip) { + if (chip->ops && chip->ops->cleanup) + chip->ops->cleanup(spi); + spi_set_ctldata(spi, NULL); + kfree(chip); + } +} + +/** + * ep93xx_spi_chip_setup() - configures hardware according to given @chip + * @espi: ep93xx SPI controller struct + * @chip: chip specific settings + * + * This function sets up the actual hardware registers with settings given in + * @chip. Note that no validation is done so make sure that callers validate + * settings before calling this. + */ +static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi, + const struct ep93xx_spi_chip *chip) +{ + u16 cr0; + + cr0 = chip->div_scr << SSPCR0_SCR_SHIFT; + cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT; + cr0 |= chip->dss; + + dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", + chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss); + dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0); + + ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr); + ep93xx_spi_write_u16(espi, SSPCR0, cr0); +} + +static inline int bits_per_word(const struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct spi_transfer *t = msg->state; + + return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word; +} + +static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) +{ + if (bits_per_word(espi) > 8) { + u16 tx_val = 0; + + if (t->tx_buf) + tx_val = ((u16 *)t->tx_buf)[espi->tx]; + ep93xx_spi_write_u16(espi, SSPDR, tx_val); + espi->tx += sizeof(tx_val); + } else { + u8 tx_val = 0; + + if (t->tx_buf) + tx_val = ((u8 *)t->tx_buf)[espi->tx]; + ep93xx_spi_write_u8(espi, SSPDR, tx_val); + espi->tx += sizeof(tx_val); + } +} + +static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t) +{ + if (bits_per_word(espi) > 8) { + u16 rx_val; + + rx_val = ep93xx_spi_read_u16(espi, SSPDR); + if (t->rx_buf) + ((u16 *)t->rx_buf)[espi->rx] = rx_val; + espi->rx += sizeof(rx_val); + } else { + u8 rx_val; + + rx_val = ep93xx_spi_read_u8(espi, SSPDR); + if (t->rx_buf) + ((u8 *)t->rx_buf)[espi->rx] = rx_val; + espi->rx += sizeof(rx_val); + } +} + +/** + * ep93xx_spi_read_write() - perform next RX/TX transfer + * @espi: ep93xx SPI controller struct + * + * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If + * called several times, the whole transfer will be completed. Returns + * %-EINPROGRESS when current transfer was not yet completed otherwise %0. + * + * When this function is finished, RX FIFO should be empty and TX FIFO should be + * full. + */ +static int ep93xx_spi_read_write(struct ep93xx_spi *espi) +{ + struct spi_message *msg = espi->current_msg; + struct spi_transfer *t = msg->state; + + /* read as long as RX FIFO has frames in it */ + while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) { + ep93xx_do_read(espi, t); + espi->fifo_level--; + } + + /* write as long as TX FIFO has room */ + while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) { + ep93xx_do_write(espi, t); + espi->fifo_level++; + } + + if (espi->rx == t->len) { + msg->actual_length += t->len; + return 0; + } + + return -EINPROGRESS; +} + +/** + * ep93xx_spi_process_transfer() - processes one SPI transfer + * @espi: ep93xx SPI controller struct + * @msg: current message + * @t: transfer to process + * + * This function processes one SPI transfer given in @t. 
Function waits until + * transfer is complete (may sleep) and updates @msg->status based on whether + * transfer was succesfully processed or not. + */ +static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, + struct spi_message *msg, + struct spi_transfer *t) +{ + struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi); + + msg->state = t; + + /* + * Handle any transfer specific settings if needed. We use + * temporary chip settings here and restore original later when + * the transfer is finished. + */ + if (t->speed_hz || t->bits_per_word) { + struct ep93xx_spi_chip tmp_chip = *chip; + + if (t->speed_hz) { + int err; + + err = ep93xx_spi_calc_divisors(espi, &tmp_chip, + t->speed_hz); + if (err) { + dev_err(&espi->pdev->dev, + "failed to adjust speed\n"); + msg->status = err; + return; + } + } + + if (t->bits_per_word) + tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word); + + /* + * Set up temporary new hw settings for this transfer. + */ + ep93xx_spi_chip_setup(espi, &tmp_chip); + } + + espi->rx = 0; + espi->tx = 0; + + /* + * Now everything is set up for the current transfer. We prime the TX + * FIFO, enable interrupts, and wait for the transfer to complete. + */ + if (ep93xx_spi_read_write(espi)) { + ep93xx_spi_enable_interrupts(espi); + wait_for_completion(&espi->wait); + } + + /* + * In case of error during transmit, we bail out from processing + * the message. + */ + if (msg->status) + return; + + /* + * After this transfer is finished, perform any possible + * post-transfer actions requested by the protocol driver. + */ + if (t->delay_usecs) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(t->delay_usecs)); + } + if (t->cs_change) { + if (!list_is_last(&t->transfer_list, &msg->transfers)) { + /* + * In case protocol driver is asking us to drop the + * chipselect briefly, we let the scheduler to handle + * any "delay" here. + */ + ep93xx_spi_cs_control(msg->spi, false); + cond_resched(); + ep93xx_spi_cs_control(msg->spi, true); + } + } + + if (t->speed_hz || t->bits_per_word) + ep93xx_spi_chip_setup(espi, chip); +} + +/* + * ep93xx_spi_process_message() - process one SPI message + * @espi: ep93xx SPI controller struct + * @msg: message to process + * + * This function processes a single SPI message. We go through all transfers in + * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is + * asserted during the whole message (unless per transfer cs_change is set). + * + * @msg->status contains %0 in case of success or negative error code in case of + * failure. + */ +static void ep93xx_spi_process_message(struct ep93xx_spi *espi, + struct spi_message *msg) +{ + unsigned long timeout; + struct spi_transfer *t; + int err; + + /* + * Enable the SPI controller and its clock. + */ + err = ep93xx_spi_enable(espi); + if (err) { + dev_err(&espi->pdev->dev, "failed to enable SPI controller\n"); + msg->status = err; + return; + } + + /* + * Just to be sure: flush any data from RX FIFO. + */ + timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT); + while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) { + if (time_after(jiffies, timeout)) { + dev_warn(&espi->pdev->dev, + "timeout while flushing RX FIFO\n"); + msg->status = -ETIMEDOUT; + return; + } + ep93xx_spi_read_u16(espi, SSPDR); + } + + /* + * We explicitly handle FIFO level. This way we don't have to check TX + * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns. 
+ */ + espi->fifo_level = 0; + + /* + * Update SPI controller registers according to spi device and assert + * the chipselect. + */ + ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi)); + ep93xx_spi_cs_control(msg->spi, true); + + list_for_each_entry(t, &msg->transfers, transfer_list) { + ep93xx_spi_process_transfer(espi, msg, t); + if (msg->status) + break; + } + + /* + * Now the whole message is transferred (or failed for some reason). We + * deselect the device and disable the SPI controller. + */ + ep93xx_spi_cs_control(msg->spi, false); + ep93xx_spi_disable(espi); +} + +#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work)) + +/** + * ep93xx_spi_work() - EP93xx SPI workqueue worker function + * @work: work struct + * + * Workqueue worker function. This function is called when there are new + * SPI messages to be processed. Message is taken out from the queue and then + * passed to ep93xx_spi_process_message(). + * + * After message is transferred, protocol driver is notified by calling + * @msg->complete(). In case of error, @msg->status is set to negative error + * number, otherwise it contains zero (and @msg->actual_length is updated). + */ +static void ep93xx_spi_work(struct work_struct *work) +{ + struct ep93xx_spi *espi = work_to_espi(work); + struct spi_message *msg; + + spin_lock_irq(&espi->lock); + if (!espi->running || espi->current_msg || + list_empty(&espi->msg_queue)) { + spin_unlock_irq(&espi->lock); + return; + } + msg = list_first_entry(&espi->msg_queue, struct spi_message, queue); + list_del_init(&msg->queue); + espi->current_msg = msg; + spin_unlock_irq(&espi->lock); + + ep93xx_spi_process_message(espi, msg); + + /* + * Update the current message and re-schedule ourselves if there are + * more messages in the queue. + */ + spin_lock_irq(&espi->lock); + espi->current_msg = NULL; + if (espi->running && !list_empty(&espi->msg_queue)) + queue_work(espi->wq, &espi->msg_work); + spin_unlock_irq(&espi->lock); + + /* notify the protocol driver that we are done with this message */ + msg->complete(msg->context); +} + +static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) +{ + struct ep93xx_spi *espi = dev_id; + u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR); + + /* + * If we got ROR (receive overrun) interrupt we know that something is + * wrong. Just abort the message. + */ + if (unlikely(irq_status & SSPIIR_RORIS)) { + /* clear the overrun interrupt */ + ep93xx_spi_write_u8(espi, SSPICR, 0); + dev_warn(&espi->pdev->dev, + "receive overrun, aborting the message\n"); + espi->current_msg->status = -EIO; + } else { + /* + * Interrupt is either RX (RIS) or TX (TIS). For both cases we + * simply execute next data transfer. + */ + if (ep93xx_spi_read_write(espi)) { + /* + * In normal case, there still is some processing left + * for current transfer. Let's wait for the next + * interrupt then. + */ + return IRQ_HANDLED; + } + } + + /* + * Current transfer is finished, either with error or with success. In + * any case we disable interrupts and notify the worker to handle + * any post-processing of the message. 
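+	 * (The worker is sleeping in wait_for_completion() inside
+	 * ep93xx_spi_process_transfer() and takes over from here.)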
+ */ + ep93xx_spi_disable_interrupts(espi); + complete(&espi->wait); + return IRQ_HANDLED; +} + +static int __init ep93xx_spi_probe(struct platform_device *pdev) +{ + struct spi_master *master; + struct ep93xx_spi_info *info; + struct ep93xx_spi *espi; + struct resource *res; + int error; + + info = pdev->dev.platform_data; + + master = spi_alloc_master(&pdev->dev, sizeof(*espi)); + if (!master) { + dev_err(&pdev->dev, "failed to allocate spi master\n"); + return -ENOMEM; + } + + master->setup = ep93xx_spi_setup; + master->transfer = ep93xx_spi_transfer; + master->cleanup = ep93xx_spi_cleanup; + master->bus_num = pdev->id; + master->num_chipselect = info->num_chipselect; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + platform_set_drvdata(pdev, master); + + espi = spi_master_get_devdata(master); + + espi->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(espi->clk)) { + dev_err(&pdev->dev, "unable to get spi clock\n"); + error = PTR_ERR(espi->clk); + goto fail_release_master; + } + + spin_lock_init(&espi->lock); + init_completion(&espi->wait); + + /* + * Calculate maximum and minimum supported clock rates + * for the controller. + */ + espi->max_rate = clk_get_rate(espi->clk) / 2; + espi->min_rate = clk_get_rate(espi->clk) / (254 * 256); + espi->pdev = pdev; + + espi->irq = platform_get_irq(pdev, 0); + if (espi->irq < 0) { + error = -EBUSY; + dev_err(&pdev->dev, "failed to get irq resources\n"); + goto fail_put_clock; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "unable to get iomem resource\n"); + error = -ENODEV; + goto fail_put_clock; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (!res) { + dev_err(&pdev->dev, "unable to request iomem resources\n"); + error = -EBUSY; + goto fail_put_clock; + } + + espi->regs_base = ioremap(res->start, resource_size(res)); + if (!espi->regs_base) { + dev_err(&pdev->dev, "failed to map resources\n"); + error = -ENODEV; + goto fail_free_mem; + } + + error = request_irq(espi->irq, ep93xx_spi_interrupt, 0, + "ep93xx-spi", espi); + if (error) { + dev_err(&pdev->dev, "failed to request irq\n"); + goto fail_unmap_regs; + } + + espi->wq = create_singlethread_workqueue("ep93xx_spid"); + if (!espi->wq) { + dev_err(&pdev->dev, "unable to create workqueue\n"); + goto fail_free_irq; + } + INIT_WORK(&espi->msg_work, ep93xx_spi_work); + INIT_LIST_HEAD(&espi->msg_queue); + espi->running = true; + + /* make sure that the hardware is disabled */ + ep93xx_spi_write_u8(espi, SSPCR1, 0); + + error = spi_register_master(master); + if (error) { + dev_err(&pdev->dev, "failed to register SPI master\n"); + goto fail_free_queue; + } + + dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n", + (unsigned long)res->start, espi->irq); + + return 0; + +fail_free_queue: + destroy_workqueue(espi->wq); +fail_free_irq: + free_irq(espi->irq, espi); +fail_unmap_regs: + iounmap(espi->regs_base); +fail_free_mem: + release_mem_region(res->start, resource_size(res)); +fail_put_clock: + clk_put(espi->clk); +fail_release_master: + spi_master_put(master); + platform_set_drvdata(pdev, NULL); + + return error; +} + +static int __exit ep93xx_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct ep93xx_spi *espi = spi_master_get_devdata(master); + struct resource *res; + + spin_lock_irq(&espi->lock); + espi->running = false; + spin_unlock_irq(&espi->lock); + + destroy_workqueue(espi->wq); + + /* + * Complete remaining messages with 
%-ESHUTDOWN status.
+	 */
+	spin_lock_irq(&espi->lock);
+	while (!list_empty(&espi->msg_queue)) {
+		struct spi_message *msg;
+
+		msg = list_first_entry(&espi->msg_queue,
+				       struct spi_message, queue);
+		list_del_init(&msg->queue);
+		msg->status = -ESHUTDOWN;
+		spin_unlock_irq(&espi->lock);
+		msg->complete(msg->context);
+		spin_lock_irq(&espi->lock);
+	}
+	spin_unlock_irq(&espi->lock);
+
+	free_irq(espi->irq, espi);
+	iounmap(espi->regs_base);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+	clk_put(espi->clk);
+	platform_set_drvdata(pdev, NULL);
+
+	spi_unregister_master(master);
+	return 0;
+}
+
+static struct platform_driver ep93xx_spi_driver = {
+	.driver		= {
+		.name	= "ep93xx-spi",
+		.owner	= THIS_MODULE,
+	},
+	.remove		= __exit_p(ep93xx_spi_remove),
+};
+
+static int __init ep93xx_spi_init(void)
+{
+	return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe);
+}
+module_init(ep93xx_spi_init);
+
+static void __exit ep93xx_spi_exit(void)
+{
+	platform_driver_unregister(&ep93xx_spi_driver);
+}
+module_exit(ep93xx_spi_exit);
+
+MODULE_DESCRIPTION("EP93xx SPI Controller driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ep93xx-spi");
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c
new file mode 100644
index 000000000000..28a126d2742b
--- /dev/null
+++ b/drivers/spi/mpc512x_psc_spi.c
@@ -0,0 +1,576 @@
+/*
+ * MPC512x PSC in SPI mode driver.
+ *
+ * Copyright (C) 2007,2008 Freescale Semiconductor Inc.
+ * Original port from 52xx driver:
+ *	Hongjun Chen <hong-jun.chen@freescale.com>
+ *
+ * Fork of mpc52xx_psc_spi.c:
+ *	Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/fsl_devices.h>
+#include <asm/mpc52xx_psc.h>
+
+struct mpc512x_psc_spi {
+	void (*cs_control)(struct spi_device *spi, bool on);
+	u32 sysclk;
+
+	/* driver internal data */
+	struct mpc52xx_psc __iomem *psc;
+	struct mpc512x_psc_fifo __iomem *fifo;
+	unsigned int irq;
+	u8 bits_per_word;
+	u8 busy;
+	u32 mclk;
+	u8 eofbyte;
+
+	struct workqueue_struct *workqueue;
+	struct work_struct work;
+
+	struct list_head queue;
+	spinlock_t lock;	/* Message queue lock */
+
+	struct completion done;
+};
+
+/* controller state */
+struct mpc512x_psc_spi_cs {
+	int bits_per_word;
+	int speed_hz;
+};
+
+/* set clock freq, clock ramp, bits per word
+ * if t is NULL then reset the values to the default values
+ */
+static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi,
+					  struct spi_transfer *t)
+{
+	struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+
+	cs->speed_hz = (t && t->speed_hz)
+	    ? t->speed_hz : spi->max_speed_hz;
+	cs->bits_per_word = (t && t->bits_per_word) ?
t->bits_per_word : spi->bits_per_word; + cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8; + return 0; +} + +static void mpc512x_psc_spi_activate_cs(struct spi_device *spi) +{ + struct mpc512x_psc_spi_cs *cs = spi->controller_state; + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc52xx_psc __iomem *psc = mps->psc; + u32 sicr; + u32 ccr; + u16 bclkdiv; + + sicr = in_be32(&psc->sicr); + + /* Set clock phase and polarity */ + if (spi->mode & SPI_CPHA) + sicr |= 0x00001000; + else + sicr &= ~0x00001000; + + if (spi->mode & SPI_CPOL) + sicr |= 0x00002000; + else + sicr &= ~0x00002000; + + if (spi->mode & SPI_LSB_FIRST) + sicr |= 0x10000000; + else + sicr &= ~0x10000000; + out_be32(&psc->sicr, sicr); + + ccr = in_be32(&psc->ccr); + ccr &= 0xFF000000; + if (cs->speed_hz) + bclkdiv = (mps->mclk / cs->speed_hz) - 1; + else + bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ + + ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); + out_be32(&psc->ccr, ccr); + mps->bits_per_word = cs->bits_per_word; + + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); +} + +static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1); + +} + +/* extract and scale size field in txsz or rxsz */ +#define MPC512x_PSC_FIFO_SZ(sz) ((sz & 0x7ff) << 2); + +#define EOFBYTE 1 + +static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); + struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + size_t len = t->len; + u8 *tx_buf = (u8 *)t->tx_buf; + u8 *rx_buf = (u8 *)t->rx_buf; + + if (!tx_buf && !rx_buf && t->len) + return -EINVAL; + + /* Zero MR2 */ + in_8(&psc->mode); + out_8(&psc->mode, 0x0); + + while (len) { + int count; + int i; + u8 data; + size_t fifosz; + int rxcount; + + /* + * The number of bytes that can be sent at a time + * depends on the fifo size. + */ + fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz)); + count = min(fifosz, len); + + for (i = count; i > 0; i--) { + data = tx_buf ? 
*tx_buf++ : 0;
+			if (len == EOFBYTE)
+				setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);
+			out_8(&fifo->txdata_8, data);
+			len--;
+		}
+
+		INIT_COMPLETION(mps->done);
+
+		/* interrupt on tx fifo empty */
+		out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
+		out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
+
+		/* enable transmitter/receiver */
+		out_8(&psc->command,
+		      MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+
+		wait_for_completion(&mps->done);
+
+		mdelay(1);
+
+		/* rx fifo should have count bytes in it */
+		rxcount = in_be32(&fifo->rxcnt);
+		if (rxcount != count)
+			mdelay(1);
+
+		rxcount = in_be32(&fifo->rxcnt);
+		if (rxcount != count) {
+			dev_warn(&spi->dev, "expected %d bytes in rx fifo "
+				 "but got %d\n", count, rxcount);
+		}
+
+		rxcount = min(rxcount, count);
+		for (i = rxcount; i > 0; i--) {
+			data = in_8(&fifo->rxdata_8);
+			if (rx_buf)
+				*rx_buf++ = data;
+		}
+		while (in_be32(&fifo->rxcnt)) {
+			in_8(&fifo->rxdata_8);
+		}
+
+		out_8(&psc->command,
+		      MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+	}
+	/* disable transmitter/receiver and fifo interrupt */
+	out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+	out_be32(&fifo->tximr, 0);
+	return 0;
+}
+
+static void mpc512x_psc_spi_work(struct work_struct *work)
+{
+	struct mpc512x_psc_spi *mps = container_of(work,
+						   struct mpc512x_psc_spi,
+						   work);
+
+	spin_lock_irq(&mps->lock);
+	mps->busy = 1;
+	while (!list_empty(&mps->queue)) {
+		struct spi_message *m;
+		struct spi_device *spi;
+		struct spi_transfer *t = NULL;
+		unsigned cs_change;
+		int status;
+
+		m = container_of(mps->queue.next, struct spi_message, queue);
+		list_del_init(&m->queue);
+		spin_unlock_irq(&mps->lock);
+
+		spi = m->spi;
+		cs_change = 1;
+		status = 0;
+		list_for_each_entry(t, &m->transfers, transfer_list) {
+			if (t->bits_per_word || t->speed_hz) {
+				status = mpc512x_psc_spi_transfer_setup(spi, t);
+				if (status < 0)
+					break;
+			}
+
+			if (cs_change)
+				mpc512x_psc_spi_activate_cs(spi);
+			cs_change = t->cs_change;
+
+			status = mpc512x_psc_spi_transfer_rxtx(spi, t);
+			if (status)
+				break;
+			m->actual_length += t->len;
+
+			if (t->delay_usecs)
+				udelay(t->delay_usecs);
+
+			if (cs_change)
+				mpc512x_psc_spi_deactivate_cs(spi);
+		}
+
+		m->status = status;
+		m->complete(m->context);
+
+		if (status || !cs_change)
+			mpc512x_psc_spi_deactivate_cs(spi);
+
+		mpc512x_psc_spi_transfer_setup(spi, NULL);
+
+		spin_lock_irq(&mps->lock);
+	}
+	mps->busy = 0;
+	spin_unlock_irq(&mps->lock);
+}
+
+static int mpc512x_psc_spi_setup(struct spi_device *spi)
+{
+	struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+	struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+	unsigned long flags;
+
+	if (spi->bits_per_word % 8)
+		return -EINVAL;
+
+	if (!cs) {
+		cs = kzalloc(sizeof *cs, GFP_KERNEL);
+		if (!cs)
+			return -ENOMEM;
+		spi->controller_state = cs;
+	}
+
+	cs->bits_per_word = spi->bits_per_word;
+	cs->speed_hz = spi->max_speed_hz;
+
+	spin_lock_irqsave(&mps->lock, flags);
+	if (!mps->busy)
+		mpc512x_psc_spi_deactivate_cs(spi);
+	spin_unlock_irqrestore(&mps->lock, flags);
+
+	return 0;
+}
+
+static int mpc512x_psc_spi_transfer(struct spi_device *spi,
+				    struct spi_message *m)
+{
+	struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+	unsigned long flags;
+
+	m->actual_length = 0;
+	m->status = -EINPROGRESS;
+
+	spin_lock_irqsave(&mps->lock, flags);
+	list_add_tail(&m->queue, &mps->queue);
+	queue_work(mps->workqueue, &mps->work);
+	spin_unlock_irqrestore(&mps->lock, flags);
+
+	return 0;
+}
+
+static void mpc512x_psc_spi_cleanup(struct spi_device
*spi) +{ + kfree(spi->controller_state); +} + +static int mpc512x_psc_spi_port_config(struct spi_master *master, + struct mpc512x_psc_spi *mps) +{ + struct mpc52xx_psc __iomem *psc = mps->psc; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + struct clk *spiclk; + int ret = 0; + char name[32]; + u32 sicr; + u32 ccr; + u16 bclkdiv; + + sprintf(name, "psc%d_mclk", master->bus_num); + spiclk = clk_get(&master->dev, name); + clk_enable(spiclk); + mps->mclk = clk_get_rate(spiclk); + clk_put(spiclk); + + /* Reset the PSC into a known state */ + out_8(&psc->command, MPC52xx_PSC_RST_RX); + out_8(&psc->command, MPC52xx_PSC_RST_TX); + out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); + + /* Disable psc interrupts all useful interrupts are in fifo */ + out_be16(&psc->isr_imr.imr, 0); + + /* Disable fifo interrupts, will be enabled later */ + out_be32(&fifo->tximr, 0); + out_be32(&fifo->rximr, 0); + + /* Setup fifo slice address and size */ + /*out_be32(&fifo->txsz, 0x0fe00004);*/ + /*out_be32(&fifo->rxsz, 0x0ff00004);*/ + + sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */ + 0x00800000 | /* GenClk = 1 -- internal clk */ + 0x00008000 | /* SPI = 1 */ + 0x00004000 | /* MSTR = 1 -- SPI master */ + 0x00000800; /* UseEOF = 1 -- SS low until EOF */ + + out_be32(&psc->sicr, sicr); + + ccr = in_be32(&psc->ccr); + ccr &= 0xFF000000; + bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ + ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); + out_be32(&psc->ccr, ccr); + + /* Set 2ms DTL delay */ + out_8(&psc->ctur, 0x00); + out_8(&psc->ctlr, 0x82); + + /* we don't use the alarms */ + out_be32(&fifo->rxalarm, 0xfff); + out_be32(&fifo->txalarm, 0); + + /* Enable FIFO slices for Rx/Tx */ + out_be32(&fifo->rxcmd, + MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); + out_be32(&fifo->txcmd, + MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA); + + mps->bits_per_word = 8; + + return ret; +} + +static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) +{ + struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id; + struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; + + /* clear interrupt and wake up the work queue */ + if (in_be32(&fifo->txisr) & + in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) { + out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); + out_be32(&fifo->tximr, 0); + complete(&mps->done); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +/* bus_num is used only for the case dev->platform_data == NULL */ +static int __init mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, + u32 size, unsigned int irq, + s16 bus_num) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct mpc512x_psc_spi *mps; + struct spi_master *master; + int ret; + void *tempp; + + master = spi_alloc_master(dev, sizeof *mps); + if (master == NULL) + return -ENOMEM; + + dev_set_drvdata(dev, master); + mps = spi_master_get_devdata(master); + mps->irq = irq; + + if (pdata == NULL) { + dev_err(dev, "probe called without platform data, no " + "cs_control function will be called\n"); + mps->cs_control = NULL; + mps->sysclk = 0; + master->bus_num = bus_num; + master->num_chipselect = 255; + } else { + mps->cs_control = pdata->cs_control; + mps->sysclk = pdata->sysclk; + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->max_chipselect; + } + + master->setup = mpc512x_psc_spi_setup; + master->transfer = mpc512x_psc_spi_transfer; + master->cleanup = mpc512x_psc_spi_cleanup; + + tempp = ioremap(regaddr, size); + if (!tempp) { + 
dev_err(dev, "could not ioremap I/O port range\n"); + ret = -EFAULT; + goto free_master; + } + mps->psc = tempp; + mps->fifo = + (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc)); + + ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED, + "mpc512x-psc-spi", mps); + if (ret) + goto free_master; + + ret = mpc512x_psc_spi_port_config(master, mps); + if (ret < 0) + goto free_irq; + + spin_lock_init(&mps->lock); + init_completion(&mps->done); + INIT_WORK(&mps->work, mpc512x_psc_spi_work); + INIT_LIST_HEAD(&mps->queue); + + mps->workqueue = + create_singlethread_workqueue(dev_name(master->dev.parent)); + if (mps->workqueue == NULL) { + ret = -EBUSY; + goto free_irq; + } + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + return ret; + +unreg_master: + destroy_workqueue(mps->workqueue); +free_irq: + free_irq(mps->irq, mps); +free_master: + if (mps->psc) + iounmap(mps->psc); + spi_master_put(master); + + return ret; +} + +static int __exit mpc512x_psc_spi_do_remove(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); + + flush_workqueue(mps->workqueue); + destroy_workqueue(mps->workqueue); + spi_unregister_master(master); + free_irq(mps->irq, mps); + if (mps->psc) + iounmap(mps->psc); + + return 0; +} + +static int __init mpc512x_psc_spi_of_probe(struct of_device *op, + const struct of_device_id *match) +{ + const u32 *regaddr_p; + u64 regaddr64, size64; + s16 id = -1; + + regaddr_p = of_get_address(op->node, 0, &size64, NULL); + if (!regaddr_p) { + dev_err(&op->dev, "Invalid PSC address\n"); + return -EINVAL; + } + regaddr64 = of_translate_address(op->node, regaddr_p); + + /* get PSC id (0..11, used by port_config) */ + if (op->dev.platform_data == NULL) { + const u32 *psc_nump; + + psc_nump = of_get_property(op->node, "cell-index", NULL); + if (!psc_nump || *psc_nump > 11) { + dev_err(&op->dev, "mpc512x_psc_spi: Device node %s " + "has invalid cell-index property\n", + op->node->full_name); + return -EINVAL; + } + id = *psc_nump; + } + + return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64, + irq_of_parse_and_map(op->node, 0), id); +} + +static int __exit mpc512x_psc_spi_of_remove(struct of_device *op) +{ + return mpc512x_psc_spi_do_remove(&op->dev); +} + +static struct of_device_id mpc512x_psc_spi_of_match[] = { + { .compatible = "fsl,mpc5121-psc-spi", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); + +static struct of_platform_driver mpc512x_psc_spi_of_driver = { + .match_table = mpc512x_psc_spi_of_match, + .probe = mpc512x_psc_spi_of_probe, + .remove = __exit_p(mpc512x_psc_spi_of_remove), + .driver = { + .name = "mpc512x-psc-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init mpc512x_psc_spi_init(void) +{ + return of_register_platform_driver(&mpc512x_psc_spi_of_driver); +} +module_init(mpc512x_psc_spi_init); + +static void __exit mpc512x_psc_spi_exit(void) +{ + of_unregister_platform_driver(&mpc512x_psc_spi_of_driver); +} +module_exit(mpc512x_psc_spi_exit); + +MODULE_AUTHOR("John Rigby"); +MODULE_DESCRIPTION("MPC512x PSC SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 77d4cc88edea..7104cb739da7 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c @@ -472,18 +472,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, s16 id = -1; int rc; - regaddr_p = of_get_address(op->node, 0, &size64, NULL); + 
regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); if (!regaddr_p) { dev_err(&op->dev, "Invalid PSC address\n"); return -EINVAL; } - regaddr64 = of_translate_address(op->node, regaddr_p); + regaddr64 = of_translate_address(op->dev.of_node, regaddr_p); /* get PSC id (1..6, used by port_config) */ if (op->dev.platform_data == NULL) { const u32 *psc_nump; - psc_nump = of_get_property(op->node, "cell-index", NULL); + psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL); if (!psc_nump || *psc_nump > 5) { dev_err(&op->dev, "Invalid cell-index property\n"); return -EINVAL; @@ -492,9 +492,10 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, } rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, - irq_of_parse_and_map(op->node, 0), id); + irq_of_parse_and_map(op->dev.of_node, 0), id); if (rc == 0) - of_register_spi_devices(dev_get_drvdata(&op->dev), op->node); + of_register_spi_devices(dev_get_drvdata(&op->dev), + op->dev.of_node); return rc; } @@ -513,14 +514,12 @@ static const struct of_device_id mpc52xx_psc_spi_of_match[] = { MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match); static struct of_platform_driver mpc52xx_psc_spi_of_driver = { - .owner = THIS_MODULE, - .name = "mpc52xx-psc-spi", - .match_table = mpc52xx_psc_spi_of_match, .probe = mpc52xx_psc_spi_of_probe, .remove = __exit_p(mpc52xx_psc_spi_of_remove), .driver = { .name = "mpc52xx-psc-spi", .owner = THIS_MODULE, + .of_match_table = mpc52xx_psc_spi_of_match, }, }; diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c index cd68f1ce5cc3..b1a76bff775f 100644 --- a/drivers/spi/mpc52xx_spi.c +++ b/drivers/spi/mpc52xx_spi.c @@ -403,7 +403,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, /* MMIO registers */ dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); - regs = of_iomap(op->node, 0); + regs = of_iomap(op->dev.of_node, 0); if (!regs) return -ENODEV; @@ -445,11 +445,11 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, ms = spi_master_get_devdata(master); ms->master = master; ms->regs = regs; - ms->irq0 = irq_of_parse_and_map(op->node, 0); - ms->irq1 = irq_of_parse_and_map(op->node, 1); + ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0); + ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1); ms->state = mpc52xx_spi_fsmstate_idle; - ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node); - ms->gpio_cs_count = of_gpio_count(op->node); + ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); + ms->gpio_cs_count = of_gpio_count(op->dev.of_node); if (ms->gpio_cs_count > 0) { master->num_chipselect = ms->gpio_cs_count; ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int), @@ -460,7 +460,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, } for (i = 0; i < ms->gpio_cs_count; i++) { - gpio_cs = of_get_gpio(op->node, i); + gpio_cs = of_get_gpio(op->dev.of_node, i); if (gpio_cs < 0) { dev_err(&op->dev, "could not parse the gpio field " @@ -512,7 +512,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, if (rc) goto err_register; - of_register_spi_devices(master, op->node); + of_register_spi_devices(master, op->dev.of_node); dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); return rc; @@ -558,9 +558,11 @@ static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); static struct of_platform_driver mpc52xx_spi_of_driver = { - .owner = THIS_MODULE, - .name = "mpc52xx-spi", - .match_table = mpc52xx_spi_match, + .driver = { + .name = "mpc52xx-spi", + 
.owner = THIS_MODULE,
+		.of_match_table = mpc52xx_spi_match,
+	},
 	.probe = mpc52xx_spi_probe,
 	.remove = __exit_p(mpc52xx_spi_remove),
 };
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index e0de0d0eedea..b3a94ca0a75a 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -38,7 +38,7 @@
 #include <plat/dma.h>
 #include <plat/clock.h>

-
+#include <plat/mcspi.h>

 #define OMAP2_MCSPI_MAX_FREQ		48000000
@@ -113,7 +113,7 @@ struct omap2_mcspi_dma {
 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
  * cache operations; better heuristics consider wordsize and bitrate.
  */
-#define DMA_MIN_BYTES			8
+#define DMA_MIN_BYTES			160

 struct omap2_mcspi {
@@ -229,6 +229,8 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
 	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
+	/* Flush posted writes */
+	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
 }

 static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
@@ -303,11 +305,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 	unsigned int		count, c;
 	unsigned long		base, tx_reg, rx_reg;
 	int			word_len, data_type, element_count;
+	int			elements;
+	u32			l;
 	u8			* rx;
 	const u8		* tx;

 	mcspi = spi_master_get_devdata(spi->master);
 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+	l = mcspi_cached_chconf0(spi);

 	count = xfer->len;
 	c = count;
@@ -346,8 +351,12 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 	}

 	if (rx != NULL) {
+		elements = element_count - 1;
+		if (l & OMAP2_MCSPI_CHCONF_TURBO)
+			elements--;
+
 		omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
-				data_type, element_count - 1, 1,
+				data_type, elements, 1,
 				OMAP_DMA_SYNC_ELEMENT,
 				mcspi_dma->dma_rx_sync_dev, 1);
@@ -379,17 +388,42 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 		wait_for_completion(&mcspi_dma->dma_rx_completion);
 		dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
 		omap2_mcspi_set_enable(spi, 0);
+
+		if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+
+			if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
+				   & OMAP2_MCSPI_CHSTAT_RXS)) {
+				u32 w;
+
+				w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+				if (word_len <= 8)
+					((u8 *)xfer->rx_buf)[elements++] = w;
+				else if (word_len <= 16)
+					((u16 *)xfer->rx_buf)[elements++] = w;
+				else /* word_len <= 32 */
+					((u32 *)xfer->rx_buf)[elements++] = w;
+			} else {
+				dev_err(&spi->dev,
+					"DMA RX penultimate word empty");
+				count -= (word_len <= 8) ? 2 :
+					(word_len <= 16) ? 4 :
+					/* word_len <= 32 */ 8;
+				omap2_mcspi_set_enable(spi, 1);
+				return count;
+			}
+		}
+
 		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
 				& OMAP2_MCSPI_CHSTAT_RXS)) {
 			u32 w;

 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
 			if (word_len <= 8)
-				((u8 *)xfer->rx_buf)[element_count - 1] = w;
+				((u8 *)xfer->rx_buf)[elements] = w;
 			else if (word_len <= 16)
-				((u16 *)xfer->rx_buf)[element_count - 1] = w;
+				((u16 *)xfer->rx_buf)[elements] = w;
 			else /* word_len <= 32 */
-				((u32 *)xfer->rx_buf)[element_count - 1] = w;
+				((u32 *)xfer->rx_buf)[elements] = w;
 		} else {
 			dev_err(&spi->dev, "DMA RX last word empty");
 			count -= (word_len <= 8) ? 1 :
@@ -433,7 +467,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
 	word_len = cs->word_len;

 	l = mcspi_cached_chconf0(spi);
-	l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;

 	/* We store the pre-calculated register addresses on stack to speed
 	 * up the transfer loop.
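	 * Note the RX-only TURBO handling further down: before the final word
	 * (or final two words, in turbo mode) is drained, the channel is
	 * disabled so that reading the data register cannot clock yet another
	 * word out of the slave.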
*/ @@ -468,11 +501,26 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "RXS timed out\n"); goto out; } - /* prevent last RX_ONLY read from triggering - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) - mcspi_write_chconf0(spi, l); + + if (c == 1 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); +#ifdef VERBOSE + dev_dbg(&spi->dev, "read-%d %02x\n", + word_len, *(rx - 1)); +#endif + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + *rx++ = __raw_readl(rx_reg); #ifdef VERBOSE dev_dbg(&spi->dev, "read-%d %02x\n", @@ -506,11 +554,26 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "RXS timed out\n"); goto out; } - /* prevent last RX_ONLY read from triggering - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) - mcspi_write_chconf0(spi, l); + + if (c == 2 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); +#ifdef VERBOSE + dev_dbg(&spi->dev, "read-%d %04x\n", + word_len, *(rx - 1)); +#endif + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + *rx++ = __raw_readl(rx_reg); #ifdef VERBOSE dev_dbg(&spi->dev, "read-%d %04x\n", @@ -544,11 +607,26 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "RXS timed out\n"); goto out; } - /* prevent last RX_ONLY read from triggering - * more word i/o: switch to rx+tx - */ - if (c == 0 && tx == NULL) - mcspi_write_chconf0(spi, l); + + if (c == 4 && tx == NULL && + (l & OMAP2_MCSPI_CHCONF_TURBO)) { + omap2_mcspi_set_enable(spi, 0); + *rx++ = __raw_readl(rx_reg); +#ifdef VERBOSE + dev_dbg(&spi->dev, "read-%d %08x\n", + word_len, *(rx - 1)); +#endif + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS) < 0) { + dev_err(&spi->dev, + "RXS timed out\n"); + goto out; + } + c = 0; + } else if (c == 0 && tx == NULL) { + omap2_mcspi_set_enable(spi, 0); + } + *rx++ = __raw_readl(rx_reg); #ifdef VERBOSE dev_dbg(&spi->dev, "read-%d %08x\n", @@ -568,6 +646,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "EOT timed out\n"); } out: + omap2_mcspi_set_enable(spi, 1); return count - c; } @@ -755,7 +834,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) struct omap2_mcspi_cs *cs; mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &mcspi->dma_channels[spi->chip_select]; if (spi->controller_state) { /* Unlink controller state from context save list */ @@ -765,13 +843,17 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) kfree(spi->controller_state); } - if (mcspi_dma->dma_rx_channel != -1) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; - } - if (mcspi_dma->dma_tx_channel != -1) { - omap_free_dma(mcspi_dma->dma_tx_channel); - mcspi_dma->dma_tx_channel = -1; + if (spi->chip_select < spi->master->num_chipselect) { + mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + if (mcspi_dma->dma_rx_channel != -1) { + omap_free_dma(mcspi_dma->dma_rx_channel); + mcspi_dma->dma_rx_channel = -1; + } + if (mcspi_dma->dma_tx_channel != -1) { + 
omap_free_dma(mcspi_dma->dma_tx_channel); + mcspi_dma->dma_tx_channel = -1; + } } } @@ -797,6 +879,7 @@ static void omap2_mcspi_work(struct work_struct *work) struct spi_transfer *t = NULL; int cs_active = 0; struct omap2_mcspi_cs *cs; + struct omap2_mcspi_device_config *cd; int par_override = 0; int status = 0; u32 chconf; @@ -809,6 +892,7 @@ static void omap2_mcspi_work(struct work_struct *work) spi = m->spi; cs = spi->controller_state; + cd = spi->controller_data; omap2_mcspi_set_enable(spi, 1); list_for_each_entry(t, &m->transfers, transfer_list) { @@ -832,10 +916,19 @@ static void omap2_mcspi_work(struct work_struct *work) chconf = mcspi_cached_chconf0(spi); chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; + chconf &= ~OMAP2_MCSPI_CHCONF_TURBO; + if (t->tx_buf == NULL) chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY; else if (t->rx_buf == NULL) chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY; + + if (cd && cd->turbo_mode && t->tx_buf == NULL) { + /* Turbo mode is for more than one word */ + if (t->len > ((cs->word_len + 7) >> 3)) + chconf |= OMAP2_MCSPI_CHCONF_TURBO; + } + mcspi_write_chconf0(spi, chconf); if (t->len) { diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi_bitbang_txrx.h new file mode 100644 index 000000000000..fc033bbf9180 --- /dev/null +++ b/drivers/spi/spi_bitbang_txrx.h @@ -0,0 +1,93 @@ +/* + * Mix this utility code with some glue code to get one of several types of + * simple SPI master driver. Two do polled word-at-a-time I/O: + * + * - GPIO/parport bitbangers. Provide chipselect() and txrx_word[](), + * expanding the per-word routines from the inline templates below. + * + * - Drivers for controllers resembling bare shift registers. Provide + * chipselect() and txrx_word[](), with custom setup()/cleanup() methods + * that use your controller's clock and chipselect registers. + * + * Some hardware works well with requests at spi_transfer scope: + * + * - Drivers leveraging smarter hardware, with fifos or DMA; or for half + * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(), + * and custom setup()/cleanup() methods. + */ + +/* + * The code that knows what GPIO pins do what should have declared four + * functions, ideally as inlines, before including this header: + * + * void setsck(struct spi_device *, int is_on); + * void setmosi(struct spi_device *, int is_on); + * int getmiso(struct spi_device *); + * void spidelay(unsigned); + * + * setsck()'s is_on parameter is a zero/nonzero boolean. + * + * setmosi()'s is_on parameter is a zero/nonzero boolean. + * + * getmiso() is required to return 0 or 1 only. Any other value is invalid + * and will result in improper operation. + * + * A non-inlined routine would call bitbang_txrx_*() routines. The + * main loop could easily compile down to a handful of instructions, + * especially if the delay is a NOP (to run at peak speed). + * + * Since this is software, the timings may not be exactly what your board's + * chips need ... there may be several reasons you'd need to tweak timings + * in these routines, not just to make it faster or slower to match a + * particular CPU clock rate.
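 *
 * A minimal sketch of such glue for a GPIO bitbanger, kept here as a
 * comment (the MYSPI_* GPIO numbers and myspi_* names are hypothetical,
 * in the style of spi_gpio.c; getmiso() must return strictly 0 or 1):
 *
 *	#include <linux/delay.h>
 *	#include <linux/gpio.h>
 *
 *	static inline void setsck(struct spi_device *spi, int is_on)
 *	{
 *		gpio_set_value(MYSPI_SCK, is_on);
 *	}
 *
 *	static inline void setmosi(struct spi_device *spi, int is_on)
 *	{
 *		gpio_set_value(MYSPI_MOSI, is_on);
 *	}
 *
 *	static inline int getmiso(struct spi_device *spi)
 *	{
 *		return !!gpio_get_value(MYSPI_MISO);
 *	}
 *
 *	#define spidelay(nsecs)	ndelay(nsecs)
 *
 *	#include "spi_bitbang_txrx.h"
 *
 *	static u32 myspi_txrx_word_mode0(struct spi_device *spi,
 *					 unsigned nsecs, u32 word, u8 bits)
 *	{
 *		return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
 *	}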
+ */ + +static inline u32 +bitbang_txrx_be_cpha0(struct spi_device *spi, + unsigned nsecs, unsigned cpol, + u32 word, u8 bits) +{ + /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ + + /* clock starts at inactive polarity */ + for (word <<= (32 - bits); likely(bits); bits--) { + + /* setup MSB (to slave) on trailing edge */ + setmosi(spi, word & (1 << 31)); + spidelay(nsecs); /* T(setup) */ + + setsck(spi, !cpol); + spidelay(nsecs); + + /* sample MSB (from slave) on leading edge */ + word <<= 1; + word |= getmiso(spi); + setsck(spi, cpol); + } + return word; +} + +static inline u32 +bitbang_txrx_be_cpha1(struct spi_device *spi, + unsigned nsecs, unsigned cpol, + u32 word, u8 bits) +{ + /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ + + /* clock starts at inactive polarity */ + for (word <<= (32 - bits); likely(bits); bits--) { + + /* setup MSB (to slave) on leading edge */ + setsck(spi, !cpol); + setmosi(spi, word & (1 << 31)); + spidelay(nsecs); /* T(setup) */ + + setsck(spi, cpol); + spidelay(nsecs); + + /* sample MSB (from slave) on trailing edge */ + word <<= 1; + word |= getmiso(spi); + } + return word; +} diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c index c2184866fa9c..8b5281281111 100644 --- a/drivers/spi/spi_butterfly.c +++ b/drivers/spi/spi_butterfly.c @@ -149,8 +149,7 @@ static void butterfly_chipselect(struct spi_device *spi, int value) #define spidelay(X) do{}while(0) //#define spidelay ndelay -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static u32 butterfly_txrx_word_mode0(struct spi_device *spi, diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c index 26bd03e61855..7edbd5807e0e 100644 --- a/drivers/spi/spi_gpio.c +++ b/drivers/spi/spi_gpio.c @@ -127,8 +127,7 @@ static inline int getmiso(const struct spi_device *spi) */ #define spidelay(nsecs) do {} while (0) -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" /* * These functions can leverage inline expansion of GPIO calls to shrink diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c index 568c781ad91c..86fb7b5993db 100644 --- a/drivers/spi/spi_lm70llp.c +++ b/drivers/spi/spi_lm70llp.c @@ -174,8 +174,7 @@ static inline int getmiso(struct spi_device *s) } /*--------------------------------------------------------------------*/ -#define EXPAND_BITBANG_TXRX 1 -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static void lm70_chipselect(struct spi_device *spi, int value) { diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c index e324627d97a2..ffa111a7e9d4 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_mpc8xxx.c @@ -241,7 +241,6 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi) /* Turn off SPI unit prior changing mode */ mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE); - mpc8xxx_spi_write_reg(mode, cs->hw_mode); /* When in CPM mode, we need to reinit tx and rx. 
*/ if (mspi->flags & SPI_CPM_MODE) { @@ -258,7 +257,7 @@ } } } - + mpc8xxx_spi_write_reg(mode, cs->hw_mode); local_irq_restore(flags); } @@ -287,36 +286,12 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) } } -static -int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) +static int +mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + struct mpc8xxx_spi *mpc8xxx_spi, + int bits_per_word) { - struct mpc8xxx_spi *mpc8xxx_spi; - u8 bits_per_word, pm; - u32 hz; - struct spi_mpc8xxx_cs *cs = spi->controller_state; - - mpc8xxx_spi = spi_master_get_devdata(spi->master); - - if (t) { - bits_per_word = t->bits_per_word; - hz = t->speed_hz; - } else { - bits_per_word = 0; - hz = 0; - } - - /* spi_transfer level calls that work per-word */ - if (!bits_per_word) - bits_per_word = spi->bits_per_word; - - /* Make sure its a bit width we support [4..16, 32] */ - if ((bits_per_word < 4) - || ((bits_per_word > 16) && (bits_per_word != 32))) - return -EINVAL; - - if (!hz) - hz = spi->max_speed_hz; - cs->rx_shift = 0; cs->tx_shift = 0; if (bits_per_word <= 8) { @@ -340,19 +315,82 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) return -EINVAL; if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE && - spi->mode & SPI_LSB_FIRST) { + spi->mode & SPI_LSB_FIRST) { cs->tx_shift = 0; if (bits_per_word <= 8) cs->rx_shift = 8; else cs->rx_shift = 0; } - mpc8xxx_spi->rx_shift = cs->rx_shift; mpc8xxx_spi->tx_shift = cs->tx_shift; mpc8xxx_spi->get_rx = cs->get_rx; mpc8xxx_spi->get_tx = cs->get_tx; + return bits_per_word; +} + +static int +mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + int bits_per_word) +{ + /* QE uses Little Endian for words > 8 + * so transform all words > 8 into 8 bits + * Unfortunately that doesn't work for LSB, so + * reject these for now */ + /* Note: for 32-bit words, LSB works iff + * tfcr/rfcr is set to CPMFCR_GBL */ + if (spi->mode & SPI_LSB_FIRST && + bits_per_word > 8) + return -EINVAL; + if (bits_per_word > 8) + return 8; /* pretend it's 8 bits */ + return bits_per_word; +} + +static +int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + int bits_per_word; + u8 pm; + u32 hz; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + mpc8xxx_spi = spi_master_get_devdata(spi->master); + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } else { + bits_per_word = 0; + hz = 0; + } + + /* spi_transfer level calls that work per-word */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* Make sure it's a bit width we support [4..16, 32] */ + if ((bits_per_word < 4) + || ((bits_per_word > 16) && (bits_per_word != 32))) + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + if (!(mpc8xxx_spi->flags & SPI_CPM_MODE)) + bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi, + mpc8xxx_spi, + bits_per_word); + else if (mpc8xxx_spi->flags & SPI_QE) + bits_per_word = mspi_apply_qe_mode_quirks(cs, spi, + bits_per_word); + + if (bits_per_word < 0) + return bits_per_word; + if (bits_per_word == 32) bits_per_word = 0; else @@ -438,7 +476,7 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, dev_err(dev, "unable to map tx dma\n"); return -ENOMEM; } - } else { + } else if (t->tx_buf) { mspi->tx_dma = t->tx_dma; } @@ -449,7 +487,7 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, dev_err(dev, "unable to map rx
dma\n"); goto err_rx_dma; } - } else { + } else if (t->rx_buf) { mspi->rx_dma = t->rx_dma; } @@ -477,7 +515,7 @@ static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) if (mspi->map_tx_dma) dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); - if (mspi->map_tx_dma) + if (mspi->map_rx_dma) dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); mspi->xfer_in_progress = NULL; } @@ -797,7 +835,7 @@ static void mpc8xxx_spi_free_dummy_rx(void) static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; - struct device_node *np = dev_archdata_get_node(&dev->archdata); + struct device_node *np = dev->of_node; const u32 *iprop; int size; unsigned long spi_base_ofs; @@ -851,7 +889,7 @@ static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; - struct device_node *np = dev_archdata_get_node(&dev->archdata); + struct device_node *np = dev->of_node; const u32 *iprop; int size; unsigned long pram_ofs; @@ -1123,7 +1161,7 @@ static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on) static int of_mpc8xxx_spi_get_chipselects(struct device *dev) { - struct device_node *np = dev_archdata_get_node(&dev->archdata); + struct device_node *np = dev->of_node; struct fsl_spi_platform_data *pdata = dev->platform_data; struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); unsigned int ngpios; @@ -1224,7 +1262,7 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev, const struct of_device_id *ofid) { struct device *dev = &ofdev->dev; - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct mpc8xxx_spi_probe_info *pinfo; struct fsl_spi_platform_data *pdata; struct spi_master *master; @@ -1312,8 +1350,11 @@ static const struct of_device_id of_mpc8xxx_spi_match[] = { MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match); static struct of_platform_driver of_mpc8xxx_spi_driver = { - .name = "mpc8xxx_spi", - .match_table = of_mpc8xxx_spi_match, + .driver = { + .name = "mpc8xxx_spi", + .owner = THIS_MODULE, + .of_match_table = of_mpc8xxx_spi_match, + }, .probe = of_mpc8xxx_spi_probe, .remove = __devexit_p(of_mpc8xxx_spi_remove), }; diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c index 7cb5ff37f6e2..19c0b3b34fce 100644 --- a/drivers/spi/spi_ppc4xx.c +++ b/drivers/spi/spi_ppc4xx.c @@ -587,12 +587,12 @@ static const struct of_device_id spi_ppc4xx_of_match[] = { MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match); static struct of_platform_driver spi_ppc4xx_of_driver = { - .match_table = spi_ppc4xx_of_match, .probe = spi_ppc4xx_of_probe, .remove = __exit_p(spi_ppc4xx_of_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, + .of_match_table = spi_ppc4xx_of_match, }, }; diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c index bbf9371cd284..8979a75dbd7b 100644 --- a/drivers/spi/spi_s3c24xx_gpio.c +++ b/drivers/spi/spi_s3c24xx_gpio.c @@ -58,8 +58,7 @@ static inline u32 getmiso(struct spi_device *dev) #define spidelay(x) ndelay(x) -#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi, diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c index a65c12ffa733..a511be7961a0 100644 --- a/drivers/spi/spi_sh_sci.c +++ b/drivers/spi/spi_sh_sci.c @@ -78,8 +78,7 @@ static inline u32 getmiso(struct spi_device *dev) #define spidelay(x) ndelay(x) 
-#define EXPAND_BITBANG_TXRX -#include <linux/spi/spi_bitbang.h> +#include "spi_bitbang_txrx.h" static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits) diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c index 748d33a76d29..4654805b08d8 100644 --- a/drivers/spi/xilinx_spi_of.c +++ b/drivers/spi/xilinx_spi_of.c @@ -48,13 +48,13 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev, const u32 *prop; int len; - rc = of_address_to_resource(ofdev->node, 0, &r_mem); + rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem); if (rc) { dev_warn(&ofdev->dev, "invalid address\n"); return rc; } - rc = of_irq_to_resource(ofdev->node, 0, &r_irq); + rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq); if (rc == NO_IRQ) { dev_warn(&ofdev->dev, "no IRQ found\n"); return -ENODEV; @@ -67,7 +67,7 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev, return -ENOMEM; /* number of slave select bits is required */ - prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len); + prop = of_get_property(ofdev->dev.of_node, "xlnx,num-ss-bits", &len); if (!prop || len < sizeof(*prop)) { dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n"); return -EINVAL; @@ -81,7 +81,7 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev, dev_set_drvdata(&ofdev->dev, master); /* Add any subnodes on the SPI bus */ - of_register_spi_devices(master, ofdev->node); + of_register_spi_devices(master, ofdev->dev.of_node); return 0; } @@ -109,12 +109,12 @@ static const struct of_device_id xilinx_spi_of_match[] = { MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); static struct of_platform_driver xilinx_spi_of_driver = { - .match_table = xilinx_spi_of_match, .probe = xilinx_spi_of_probe, .remove = __exit_p(xilinx_spi_of_remove), .driver = { .name = "xilinx-xps-spi", .owner = THIS_MODULE, + .of_match_table = xilinx_spi_of_match, }, }; diff --git a/drivers/staging/go7007/saa7134-go7007.c b/drivers/staging/go7007/saa7134-go7007.c index 49f0d31c118a..cf7c34a99459 100644 --- a/drivers/staging/go7007/saa7134-go7007.c +++ b/drivers/staging/go7007/saa7134-go7007.c @@ -242,13 +242,13 @@ static void saa7134_go7007_irq_ts_done(struct saa7134_dev *dev, printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n", (status >> 16) & 0x0f); if (status & 0x100000) { - dma_sync_single(&dev->pci->dev, - saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(&dev->pci->dev, + saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE); saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma)); } else { - dma_sync_single(&dev->pci->dev, - saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(&dev->pci->dev, + saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); go7007_parse_video_stream(go, saa->top, PAGE_SIZE); saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma)); } diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c index 9286e863b0e7..643b413d9f0f 100644 --- a/drivers/staging/pohmelfs/inode.c +++ b/drivers/staging/pohmelfs/inode.c @@ -29,7 +29,6 @@ #include <linux/slab.h> #include <linux/statfs.h> #include <linux/writeback.h> -#include <linux/quotaops.h> #include "netfs.h" @@ -880,7 +879,7 @@ static struct inode *pohmelfs_alloc_inode(struct super_block *sb) /* * We want fsync() to work on POHMELFS. 
*/ -static int pohmelfs_fsync(struct file *file, struct dentry *dentry, int datasync) +static int pohmelfs_fsync(struct file *file, int datasync) { struct inode *inode = file->f_mapping->host; struct writeback_control wbc = { @@ -969,13 +968,6 @@ int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr) goto err_out_exit; } - if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || - (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { - err = dquot_transfer(inode, attr); - if (err) - goto err_out_exit; - } - err = inode_setattr(inode, attr); if (err) { dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino); diff --git a/drivers/staging/rt2860/common/rtmp_init.c b/drivers/staging/rt2860/common/rtmp_init.c index 21a95ffdfb86..a09038542f26 100644 --- a/drivers/staging/rt2860/common/rtmp_init.c +++ b/drivers/staging/rt2860/common/rtmp_init.c @@ -2810,17 +2810,6 @@ void UserCfgInit(struct rt_rtmp_adapter *pAd) } /* IRQL = PASSIVE_LEVEL */ -u8 BtoH(char ch) -{ - if (ch >= '0' && ch <= '9') - return (ch - '0'); /* Handle numerals */ - if (ch >= 'A' && ch <= 'F') - return (ch - 'A' + 0xA); /* Handle capitol hex digits */ - if (ch >= 'a' && ch <= 'f') - return (ch - 'a' + 0xA); /* Handle small hex digits */ - return (255); -} - /* */ /* FUNCTION: AtoH(char *, u8 *, int) */ /* */ @@ -2847,8 +2836,8 @@ void AtoH(char *src, u8 *dest, int destlen) destTemp = (u8 *)dest; while (destlen--) { - *destTemp = BtoH(*srcptr++) << 4; /* Put 1st ascii byte in upper nibble. */ - *destTemp += BtoH(*srcptr++); /* Add 2nd ascii byte to above. */ + *destTemp = hex_to_bin(*srcptr++) << 4; /* Put 1st ascii byte in upper nibble. */ + *destTemp += hex_to_bin(*srcptr++); /* Add 2nd ascii byte to above. */ destTemp++; } } diff --git a/drivers/staging/rt2860/rtmp.h b/drivers/staging/rt2860/rtmp.h index ab525ee15042..82b6e783b33f 100644 --- a/drivers/staging/rt2860/rtmp.h +++ b/drivers/staging/rt2860/rtmp.h @@ -2356,8 +2356,6 @@ void RTMPMoveMemory(void *pDest, void *pSrc, unsigned long Length); void AtoH(char *src, u8 *dest, int destlen); -u8 BtoH(char ch); - void RTMPPatchMacBbpBug(struct rt_rtmp_adapter *pAd); void RTMPInitTimer(struct rt_rtmp_adapter *pAd, diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index e89304c72568..b53deee25d74 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c @@ -5879,20 +5879,13 @@ out: static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp) { IXJ_FILTER_CADENCE *lcp; - lcp = kmalloc(sizeof(IXJ_FILTER_CADENCE), GFP_KERNEL); - if (lcp == NULL) { + lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE)); + if (IS_ERR(lcp)) { if(ixjdebug & 0x0001) { - printk(KERN_INFO "Could not allocate memory for cadence\n"); + printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n"); } - return -ENOMEM; + return PTR_ERR(lcp); } - if (copy_from_user(lcp, cp, sizeof(IXJ_FILTER_CADENCE))) { - if(ixjdebug & 0x0001) { - printk(KERN_INFO "Could not copy cadence to kernel\n"); - } - kfree(lcp); - return -EFAULT; - } if (lcp->filter > 5) { if(ixjdebug & 0x0001) { printk(KERN_INFO "Cadence out of range\n"); diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c index 1e9ba4bdffef..1335456b4f93 100644 --- a/drivers/usb/atm/speedtch.c +++ b/drivers/usb/atm/speedtch.c @@ -127,8 +127,6 @@ MODULE_PARM_DESC(ModemOption, "default: 0x10,0x00,0x00,0x00,0x20"); #define ENDPOINT_ISOC_DATA 0x07 #define ENDPOINT_FIRMWARE 0x05 -#define hex2int(c) ( (c >= '0') && (c <= 
'9') ? (c - '0') : ((c & 0xf) + 9) ) - struct speedtch_params { unsigned int altsetting; unsigned int BMaxDSL; @@ -669,7 +667,8 @@ static int speedtch_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_de memset(atm_dev->esi, 0, sizeof(atm_dev->esi)); if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) { for (i = 0; i < 6; i++) - atm_dev->esi[i] = (hex2int(mac_str[i * 2]) * 16) + (hex2int(mac_str[i * 2 + 1])); + atm_dev->esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) + + hex_to_bin(mac_str[i * 2 + 1]); } /* Start modem synchronisation */ diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 3537d51073b2..2928523268b5 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c @@ -2768,8 +2768,11 @@ static const struct of_device_id qe_udc_match[] __devinitconst = { MODULE_DEVICE_TABLE(of, qe_udc_match); static struct of_platform_driver udc_driver = { - .name = (char *)driver_name, - .match_table = qe_udc_match, + .driver = { + .name = (char *)driver_name, + .owner = THIS_MODULE, + .of_match_table = qe_udc_match, + }, .probe = qe_udc_probe, .remove = __devexit_p(qe_udc_remove), #ifdef CONFIG_PM diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 6b8bf8c781c4..43abf55d8c60 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -794,7 +794,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) } static int -printer_fsync(struct file *fd, struct dentry *dentry, int datasync) +printer_fsync(struct file *fd, int datasync) { struct printer_dev *dev = fd->private_data; unsigned long flags; diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index ead59f42e69b..544ccfd7056e 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -199,8 +199,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) writel(pdata->portsc, hcd->regs + PORTSC_OFFSET); mdelay(10); - /* setup USBCONTROL. 
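The rt2860 and speedtch hunks above replace driver-private ASCII-hex helpers (BtoH(), hex2int()) with the shared hex_to_bin() from lib/hexdump.c. A minimal sketch of the idiom; parse_hex_byte() and its error handling are illustrative, not taken from either driver:

	#include <linux/errno.h>
	#include <linux/kernel.h>	/* hex_to_bin() */
	#include <linux/types.h>

	/* Pack an ASCII pair such as "4f" into one byte. hex_to_bin()
	 * returns 0..15 for a valid hex digit, negative otherwise. */
	static int parse_hex_byte(const char *s, u8 *out)
	{
		int hi = hex_to_bin(s[0]);
		int lo = hex_to_bin(s[1]);

		if (hi < 0 || lo < 0)
			return -EINVAL;

		*out = (hi << 4) | lo;
		return 0;
	}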
*/ - ret = mxc_set_usbcontrol(pdev->id, pdata->flags); + /* setup specific usb hw */ + ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); if (ret < 0) goto err_init; diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 8df33b8a634c..5aec92866ab3 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -108,7 +108,7 @@ ppc44x_enable_bmt(struct device_node *dn) static int __devinit ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dn = op->node; + struct device_node *dn = op->dev.of_node; struct usb_hcd *hcd; struct ehci_hcd *ehci = NULL; struct resource res; @@ -274,13 +274,12 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match); static struct of_platform_driver ehci_hcd_ppc_of_driver = { - .name = "ppc-of-ehci", - .match_table = ehci_hcd_ppc_of_match, .probe = ehci_hcd_ppc_of_probe, .remove = ehci_hcd_ppc_of_remove, .shutdown = ehci_hcd_ppc_of_shutdown, - .driver = { - .name = "ppc-of-ehci", - .owner = THIS_MODULE, + .driver = { + .name = "ppc-of-ehci", + .owner = THIS_MODULE, + .of_match_table = ehci_hcd_ppc_of_match, }, }; diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index f603bb2c0a8e..013972bbde57 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c @@ -288,13 +288,12 @@ static const struct of_device_id ehci_hcd_xilinx_of_match[] = { MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match); static struct of_platform_driver ehci_hcd_xilinx_of_driver = { - .name = "xilinx-of-ehci", - .match_table = ehci_hcd_xilinx_of_match, .probe = ehci_hcd_xilinx_of_probe, .remove = ehci_hcd_xilinx_of_remove, .shutdown = ehci_hcd_xilinx_of_shutdown, - .driver = { - .name = "xilinx-of-ehci", - .owner = THIS_MODULE, + .driver = { + .name = "xilinx-of-ehci", + .owner = THIS_MODULE, + .of_match_table = ehci_hcd_xilinx_of_match, }, }; diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c index 90453379a434..c7c8392a88b9 100644 --- a/drivers/usb/host/fhci-hcd.c +++ b/drivers/usb/host/fhci-hcd.c @@ -565,7 +565,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev, const struct of_device_id *ofid) { struct device *dev = &ofdev->dev; - struct device_node *node = ofdev->node; + struct device_node *node = dev->of_node; struct usb_hcd *hcd; struct fhci_hcd *fhci; struct resource usb_regs; @@ -670,7 +670,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev, } for (j = 0; j < NUM_PINS; j++) { - fhci->pins[j] = qe_pin_request(ofdev->node, j); + fhci->pins[j] = qe_pin_request(node, j); if (IS_ERR(fhci->pins[j])) { ret = PTR_ERR(fhci->pins[j]); dev_err(dev, "can't get pin %d: %d\n", j, ret); @@ -813,8 +813,11 @@ static const struct of_device_id of_fhci_match[] = { MODULE_DEVICE_TABLE(of, of_fhci_match); static struct of_platform_driver of_fhci_driver = { - .name = "fsl,usb-fhci", - .match_table = of_fhci_match, + .driver = { + .name = "fsl,usb-fhci", + .owner = THIS_MODULE, + .of_match_table = of_fhci_match, + }, .probe = of_fhci_probe, .remove = __devexit_p(of_fhci_remove), }; diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c index 8f0259eaa2c7..ec85d0c3cc3e 100644 --- a/drivers/usb/host/isp1760-if.c +++ b/drivers/usb/host/isp1760-if.c @@ -31,7 +31,7 @@ static int of_isp1760_probe(struct of_device *dev, const struct of_device_id *match) { struct usb_hcd *hcd; - struct device_node *dp = dev->node; + struct device_node *dp = dev->dev.of_node; struct resource *res; struct resource memory; 
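Stepping back: the ehci-ppc-of, ehci-xilinx-of, fhci, and isp1760 hunks here repeat the conversion already seen in the SPI drivers, where struct of_platform_driver loses its top-level .name and .match_table fields in favour of the embedded struct device_driver. A minimal sketch of the resulting shape, with hypothetical example_* names:

	static const struct of_device_id example_of_match[] = {
		{ .compatible = "vendor,example-device", },
		{},
	};
	MODULE_DEVICE_TABLE(of, example_of_match);

	static struct of_platform_driver example_of_driver = {
		.probe	= example_of_probe,
		.remove	= example_of_remove,
		.driver	= {
			/* formerly the top-level .name / .match_table */
			.name		= "example-device",
			.owner		= THIS_MODULE,
			.of_match_table	= example_of_match,
		},
	};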
struct of_irq oirq; @@ -120,8 +120,11 @@ static const struct of_device_id of_isp1760_match[] = { MODULE_DEVICE_TABLE(of, of_isp1760_match); static struct of_platform_driver isp1760_of_driver = { - .name = "nxp-isp1760", - .match_table = of_isp1760_match, + .driver = { + .name = "nxp-isp1760", + .owner = THIS_MODULE, + .of_match_table = of_isp1760_match, + }, .probe = of_isp1760_probe, .remove = of_isp1760_remove, }; diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c index 103263c230cf..df165917412a 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c @@ -83,7 +83,7 @@ static const struct hc_driver ohci_ppc_of_hc_driver = { static int __devinit ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dn = op->node; + struct device_node *dn = op->dev.of_node; struct usb_hcd *hcd; struct ohci_hcd *ohci; struct resource res; @@ -244,18 +244,13 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match); static struct of_platform_driver ohci_hcd_ppc_of_driver = { - .name = "ppc-of-ohci", - .match_table = ohci_hcd_ppc_of_match, .probe = ohci_hcd_ppc_of_probe, .remove = ohci_hcd_ppc_of_remove, .shutdown = ohci_hcd_ppc_of_shutdown, -#ifdef CONFIG_PM - /*.suspend = ohci_hcd_ppc_soc_drv_suspend,*/ - /*.resume = ohci_hcd_ppc_soc_drv_resume,*/ -#endif - .driver = { - .name = "ppc-of-ohci", - .owner = THIS_MODULE, + .driver = { + .name = "ppc-of-ohci", + .owner = THIS_MODULE, + .of_match_table = ohci_hcd_ppc_of_match, }, }; diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index e7fa3644ba6a..61c76b13f0f1 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -954,8 +954,7 @@ static int mon_bin_queued(struct mon_reader_bin *rp) /* */ -static int mon_bin_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static int mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mon_reader_bin *rp = file->private_data; // struct mon_bus* mbus = rp->r.m_bus; @@ -1095,6 +1094,19 @@ static int mon_bin_ioctl(struct inode *inode, struct file *file, return ret; } +static long mon_bin_unlocked_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = mon_bin_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + + #ifdef CONFIG_COMPAT static long mon_bin_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -1148,14 +1160,13 @@ static long mon_bin_compat_ioctl(struct file *file, return 0; case MON_IOCG_STATS: - return mon_bin_ioctl(NULL, file, cmd, - (unsigned long) compat_ptr(arg)); + return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); case MON_IOCQ_URB_LEN: case MON_IOCQ_RING_SIZE: case MON_IOCT_RING_SIZE: case MON_IOCH_MFLUSH: - return mon_bin_ioctl(NULL, file, cmd, arg); + return mon_bin_ioctl(file, cmd, arg); default: ; @@ -1239,7 +1250,7 @@ static const struct file_operations mon_fops_binary = { .read = mon_bin_read, /* .write = mon_text_write, */ .poll = mon_bin_poll, - .ioctl = mon_bin_ioctl, + .unlocked_ioctl = mon_bin_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mon_bin_compat_ioctl, #endif diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c index 1becdc3837e6..8ec94f15a738 100644 --- a/drivers/usb/mon/mon_stat.c +++ b/drivers/usb/mon/mon_stat.c @@ -11,6 +11,7 @@ #include <linux/slab.h> #include <linux/usb.h> #include <linux/fs.h> +#include <linux/smp_lock.h> #include <asm/uaccess.h> #include "usb_mon.h" @@ -63,6 +64,6 
@@ const struct file_operations mon_fops_stat = { .read = mon_stat_read, /* .write = mon_stat_write, */ /* .poll = mon_stat_poll, */ - /* .ioctl = mon_stat_ioctl, */ + /* .unlocked_ioctl = mon_stat_ioctl, */ .release = mon_stat_release, }; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index aa88911c9504..0f41c9195e9b 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -593,17 +593,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl, int r; switch (ioctl) { case VHOST_NET_SET_BACKEND: - r = copy_from_user(&backend, argp, sizeof backend); - if (r < 0) - return r; + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; return vhost_net_set_backend(n, backend.index, backend.fd); case VHOST_GET_FEATURES: features = VHOST_FEATURES; - return copy_to_user(featurep, &features, sizeof features); + if (copy_to_user(featurep, &features, sizeof features)) + return -EFAULT; + return 0; case VHOST_SET_FEATURES: - r = copy_from_user(&features, featurep, sizeof features); - if (r < 0) - return r; + if (copy_from_user(&features, featurep, sizeof features)) + return -EFAULT; if (features & ~VHOST_FEATURES) return -EOPNOTSUPP; return vhost_net_set_features(n, features); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 750effe0f98b..3b83382e06eb 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -320,10 +320,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem, *oldmem; unsigned long size = offsetof(struct vhost_memory, regions); - long r; - r = copy_from_user(&mem, m, size); - if (r) - return r; + if (copy_from_user(&mem, m, size)) + return -EFAULT; if (mem.padding) return -EOPNOTSUPP; if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) @@ -333,15 +331,16 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) return -ENOMEM; memcpy(newmem, &mem, size); - r = copy_from_user(newmem->regions, m->regions, - mem.nregions * sizeof *m->regions); - if (r) { + if (copy_from_user(newmem->regions, m->regions, + mem.nregions * sizeof *m->regions)) { kfree(newmem); - return r; + return -EFAULT; } - if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) + if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) { + kfree(newmem); return -EFAULT; + } oldmem = d->memory; rcu_assign_pointer(d->memory, newmem); synchronize_rcu(); @@ -374,7 +373,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) r = get_user(idx, idxp); if (r < 0) return r; - if (idx > d->nvqs) + if (idx >= d->nvqs) return -ENOBUFS; vq = d->vqs + idx; @@ -389,9 +388,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) r = -EBUSY; break; } - r = copy_from_user(&s, argp, sizeof s); - if (r < 0) + if (copy_from_user(&s, argp, sizeof s)) { + r = -EFAULT; break; + } if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { r = -EINVAL; break; @@ -405,9 +405,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) r = -EBUSY; break; } - r = copy_from_user(&s, argp, sizeof s); - if (r < 0) + if (copy_from_user(&s, argp, sizeof s)) { + r = -EFAULT; break; + } if (s.num > 0xffff) { r = -EINVAL; break; @@ -419,12 +420,14 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) case VHOST_GET_VRING_BASE: s.index = idx; s.num = vq->last_avail_idx; - r = copy_to_user(argp, &s, sizeof s); + if (copy_to_user(argp, &s, sizeof s)) + r = -EFAULT; break; case 
VHOST_SET_VRING_ADDR: - r = copy_from_user(&a, argp, sizeof a); - if (r < 0) + if (copy_from_user(&a, argp, sizeof a)) { + r = -EFAULT; break; + } if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { r = -EOPNOTSUPP; break; @@ -477,9 +480,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) vq->used = (void __user *)(unsigned long)a.used_user_addr; break; case VHOST_SET_VRING_KICK: - r = copy_from_user(&f, argp, sizeof f); - if (r < 0) + if (copy_from_user(&f, argp, sizeof f)) { + r = -EFAULT; break; + } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); @@ -492,9 +496,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) filep = eventfp; break; case VHOST_SET_VRING_CALL: - r = copy_from_user(&f, argp, sizeof f); - if (r < 0) + if (copy_from_user(&f, argp, sizeof f)) { + r = -EFAULT; break; + } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); @@ -510,9 +515,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) filep = eventfp; break; case VHOST_SET_VRING_ERR: - r = copy_from_user(&f, argp, sizeof f); - if (r < 0) + if (copy_from_user(&f, argp, sizeof f)) { + r = -EFAULT; break; + } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); @@ -575,9 +581,10 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg) r = vhost_set_memory(d, argp); break; case VHOST_SET_LOG_BASE: - r = copy_from_user(&p, argp, sizeof p); - if (r < 0) + if (copy_from_user(&p, argp, sizeof p)) { + r = -EFAULT; break; + } if ((u64)(unsigned long)p != p) { r = -EFAULT; break; @@ -806,7 +813,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq, count = indirect->len / sizeof desc; /* Buffers are chained via a 16 bit next field, so * we can have at most 2^16 of these. 
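The vhost conversions above all follow from one property of the user-copy routines: copy_from_user() and copy_to_user() return the number of bytes left uncopied, an unsigned count that is never a negative errno, so the old `if (r < 0)` tests could never observe a fault. A minimal sketch of the corrected pattern; example_get_features() is a hypothetical helper, not a vhost function:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static long example_get_features(u64 __user *featurep)
	{
		u64 features = 0;	/* hypothetical feature mask */

		/* nonzero return means some bytes were not copied */
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	}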
*/ - if (count > USHORT_MAX + 1) { + if (count > USHRT_MAX + 1) { vq_err(vq, "Indirect buffer length too big: %d\n", indirect->len); return -E2BIG; diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c index 8d406fb689c1..f3d7440f0072 100644 --- a/drivers/video/arcfb.c +++ b/drivers/video/arcfb.c @@ -80,7 +80,7 @@ struct arcfb_par { spinlock_t lock; }; -static struct fb_fix_screeninfo arcfb_fix __initdata = { +static struct fb_fix_screeninfo arcfb_fix __devinitdata = { .id = "arcfb", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO01, @@ -90,7 +90,7 @@ static struct fb_fix_screeninfo arcfb_fix __initdata = { .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo arcfb_var __initdata = { +static struct fb_var_screeninfo arcfb_var __devinitdata = { .xres = 128, .yres = 64, .xres_virtual = 128, @@ -588,7 +588,7 @@ err: return retval; } -static int arcfb_remove(struct platform_device *dev) +static int __devexit arcfb_remove(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); @@ -602,7 +602,7 @@ static int arcfb_remove(struct platform_device *dev) static struct platform_driver arcfb_driver = { .probe = arcfb_probe, - .remove = arcfb_remove, + .remove = __devexit_p(arcfb_remove), .driver = { .name = "arcfb", }, diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 29d72851f85b..f8d69ad36830 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -1820,10 +1820,6 @@ struct atyclk { #define ATYIO_FEATW 0x41545903 /* ATY\03 */ #endif -#ifndef FBIO_WAITFORVSYNC -#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) -#endif - static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) { struct atyfb_par *par = (struct atyfb_par *) info->par; diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index 68d2518fadaa..38ffc3fbcbe4 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c @@ -222,6 +222,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev) data->port = __check_device(pdata, name); if (data->port < 0) { dev_err(&pdev->dev, "wrong platform data is assigned"); + kfree(data); return -EINVAL; } @@ -266,6 +267,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev) backlight_update_status(bl); return 0; out: + backlight_device_unregister(bl); kfree(data); return ret; } diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index c025c84601b0..e54a337227ea 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -8,12 +8,13 @@ menuconfig BACKLIGHT_LCD_SUPPORT Enable this to be able to choose the drivers for controlling the backlight and the LCD panel on some platforms, for example on PDAs. +if BACKLIGHT_LCD_SUPPORT + # # LCD # config LCD_CLASS_DEVICE tristate "Lowlevel LCD controls" - depends on BACKLIGHT_LCD_SUPPORT default m help This framework adds support for low-level control of LCD. @@ -24,31 +25,32 @@ config LCD_CLASS_DEVICE To have support for your specific LCD panel you will have to select the proper drivers which depend on this option. +if LCD_CLASS_DEVICE + config LCD_CORGI tristate "LCD Panel support for SHARP corgi/spitz model" - depends on LCD_CLASS_DEVICE && SPI_MASTER && PXA_SHARPSL + depends on SPI_MASTER && PXA_SHARPSL help Say y here to support the LCD panels usually found on SHARP corgi (C7x0) and spitz (Cxx00) models. 
config LCD_L4F00242T03 tristate "Epson L4F00242T03 LCD" - depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO + depends on SPI_MASTER && GENERIC_GPIO help SPI driver for Epson L4F00242T03. This provides basic support for init and powering the LCD up/down through a sysfs interface. config LCD_LMS283GF05 tristate "Samsung LMS283GF05 LCD" - depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO + depends on SPI_MASTER && GENERIC_GPIO help SPI driver for Samsung LMS283GF05. This provides basic support for powering the LCD up/down through a sysfs interface. config LCD_LTV350QV tristate "Samsung LTV350QV LCD Panel" - depends on LCD_CLASS_DEVICE && SPI_MASTER - default n + depends on SPI_MASTER help If you have a Samsung LTV350QV LCD panel, say y to include a power control driver for it. The panel starts up in power @@ -59,60 +61,61 @@ config LCD_LTV350QV config LCD_ILI9320 tristate - depends on LCD_CLASS_DEVICE && BACKLIGHT_LCD_SUPPORT - default n help If you have a panel based on the ILI9320 controller chip then say y to include a power driver for it. config LCD_TDO24M tristate "Toppoly TDO24M and TDO35S LCD Panels support" - depends on LCD_CLASS_DEVICE && SPI_MASTER - default n + depends on SPI_MASTER help If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to include the support for it. config LCD_VGG2432A4 tristate "VGG2432A4 LCM device support" - depends on BACKLIGHT_LCD_SUPPORT && LCD_CLASS_DEVICE && SPI_MASTER + depends on SPI_MASTER select LCD_ILI9320 - default n help If you have a VGG2432A4 panel based on the ILI9320 controller chip then say y to include a power driver for it. config LCD_PLATFORM tristate "Platform LCD controls" - depends on LCD_CLASS_DEVICE help This driver provides a platform-device registered LCD power control interface. config LCD_TOSA tristate "Sharp SL-6000 LCD Driver" - depends on LCD_CLASS_DEVICE && SPI - depends on MACH_TOSA - default n + depends on SPI && MACH_TOSA help If you have an Sharp SL-6000 Zaurus say Y to enable a driver for its LCD. config LCD_HP700 tristate "HP Jornada 700 series LCD Driver" - depends on LCD_CLASS_DEVICE depends on SA1100_JORNADA720_SSP && !PREEMPT default y help If you have an HP Jornada 700 series handheld (710/720/728) say Y to enable LCD control driver. +config LCD_S6E63M0 + tristate "S6E63M0 AMOLED LCD Driver" + depends on SPI && BACKLIGHT_CLASS_DEVICE + default n + help + If you have an S6E63M0 LCD Panel, say Y to enable its + LCD control driver. + +endif # LCD_CLASS_DEVICE + # # Backlight # config BACKLIGHT_CLASS_DEVICE tristate "Lowlevel Backlight controls" - depends on BACKLIGHT_LCD_SUPPORT default m help This framework adds support for low-level control of the LCD @@ -121,9 +124,11 @@ config BACKLIGHT_CLASS_DEVICE To have support for your specific LCD panel you will have to select the proper drivers which depend on this option. +if BACKLIGHT_CLASS_DEVICE + config BACKLIGHT_ATMEL_LCDC bool "Atmel LCDC Contrast-as-Backlight control" - depends on BACKLIGHT_CLASS_DEVICE && FB_ATMEL + depends on FB_ATMEL default y if MACH_SAM9261EK || MACH_SAM9G10EK || MACH_SAM9263EK help This provides a backlight control internal to the Atmel LCDC @@ -136,8 +141,7 @@ config BACKLIGHT_ATMEL_LCDC config BACKLIGHT_ATMEL_PWM tristate "Atmel PWM backlight control" - depends on BACKLIGHT_CLASS_DEVICE && ATMEL_PWM - default n + depends on ATMEL_PWM help Say Y here if you want to use the PWM peripheral in Atmel AT91 and AVR32 devices. 
This driver will need additional platform data to know @@ -146,9 +150,18 @@ config BACKLIGHT_ATMEL_PWM To compile this driver as a module, choose M here: the module will be called atmel-pwm-bl. +config BACKLIGHT_EP93XX + tristate "Cirrus EP93xx Backlight Driver" + depends on FB_EP93XX + help + If you have a LCD backlight connected to the BRIGHT output of + the EP93xx, say Y here to enable this driver. + + To compile this driver as a module, choose M here: the module will + be called ep93xx_bl. + config BACKLIGHT_GENERIC tristate "Generic (aka Sharp Corgi) Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE default y help Say y to enable the generic platform backlight driver previously @@ -157,7 +170,7 @@ config BACKLIGHT_GENERIC config BACKLIGHT_LOCOMO tristate "Sharp LOCOMO LCD/Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && SHARP_LOCOMO + depends on SHARP_LOCOMO default y help If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to @@ -165,7 +178,7 @@ config BACKLIGHT_LOCOMO config BACKLIGHT_OMAP1 tristate "OMAP1 PWL-based LCD Backlight" - depends on BACKLIGHT_CLASS_DEVICE && ARCH_OMAP1 + depends on ARCH_OMAP1 default y help This driver controls the LCD backlight level and power for @@ -174,7 +187,7 @@ config BACKLIGHT_OMAP1 config BACKLIGHT_HP680 tristate "HP Jornada 680 Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && SH_HP6XX + depends on SH_HP6XX default y help If you have a HP Jornada 680, say y to enable the @@ -182,7 +195,6 @@ config BACKLIGHT_HP680 config BACKLIGHT_HP700 tristate "HP Jornada 700 series Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE depends on SA1100_JORNADA720_SSP && !PREEMPT default y help @@ -191,76 +203,70 @@ config BACKLIGHT_HP700 config BACKLIGHT_PROGEAR tristate "Frontpath ProGear Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && PCI && X86 - default n + depends on PCI && X86 help If you have a Frontpath ProGear say Y to enable the backlight driver. config BACKLIGHT_CARILLO_RANCH tristate "Intel Carillo Ranch Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578 - default n + depends on LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578 help If you have a Intel LE80578 (Carillo Ranch) say Y to enable the backlight driver. config BACKLIGHT_PWM tristate "Generic PWM based Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && HAVE_PWM + depends on HAVE_PWM help If you have a LCD backlight adjustable by PWM, say Y to enable this driver. config BACKLIGHT_DA903X tristate "Backlight Driver for DA9030/DA9034 using WLED" - depends on BACKLIGHT_CLASS_DEVICE && PMIC_DA903X + depends on PMIC_DA903X help If you have a LCD backlight connected to the WLED output of DA9030 or DA9034 WLED output, say Y here to enable this driver. config BACKLIGHT_MAX8925 tristate "Backlight driver for MAX8925" - depends on BACKLIGHT_CLASS_DEVICE && MFD_MAX8925 + depends on MFD_MAX8925 help If you have a LCD backlight connected to the WLED output of MAX8925 WLED output, say Y here to enable this driver. 
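The backlight Kconfig rework above trades a `depends on BACKLIGHT_CLASS_DEVICE` (or LCD_CLASS_DEVICE) line in every entry for one surrounding if/endif block, which applies that dependency to everything inside it. A minimal sketch of the pattern, with a hypothetical BACKLIGHT_EXAMPLE entry:

	if BACKLIGHT_CLASS_DEVICE

	config BACKLIGHT_EXAMPLE
		tristate "Example backlight driver"
		depends on I2C
		help
		  Hypothetical entry: BACKLIGHT_CLASS_DEVICE is implied by
		  the surrounding if-block, so only the entry-specific
		  dependency (here I2C) still needs a depends-on line.

	endif # BACKLIGHT_CLASS_DEVICE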
config BACKLIGHT_MBP_NVIDIA tristate "MacBook Pro Nvidia Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && X86 - default n + depends on X86 help If you have an Apple Macbook Pro with Nvidia graphics hardware say Y to enable a driver for its backlight config BACKLIGHT_TOSA tristate "Sharp SL-6000 Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && I2C - depends on MACH_TOSA && LCD_TOSA - default n + depends on I2C && MACH_TOSA && LCD_TOSA help If you have an Sharp SL-6000 Zaurus say Y to enable a driver for its backlight config BACKLIGHT_SAHARA tristate "Tabletkiosk Sahara Touch-iT Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && X86 - default n + depends on X86 help If you have a Tabletkiosk Sahara Touch-iT, say y to enable the backlight driver. config BACKLIGHT_WM831X tristate "WM831x PMIC Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X + depends on MFD_WM831X help If you have a backlight driven by the ISINK and DCDC of a WM831x PMIC say y to enable the backlight driver for it. config BACKLIGHT_ADX tristate "Avionic Design Xanthos Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX + depends on ARCH_PXA_ADX default y help Say Y to enable the backlight driver on Avionic Design Xanthos-based @@ -268,7 +274,7 @@ config BACKLIGHT_ADX config BACKLIGHT_ADP5520 tristate "Backlight Driver for ADP5520/ADP5501 using WLED" - depends on BACKLIGHT_CLASS_DEVICE && PMIC_ADP5520 + depends on PMIC_ADP5520 help If you have a LCD backlight connected to the BST/BL_SNK output of ADP5520 or ADP5501, say Y here to enable this driver. @@ -276,9 +282,31 @@ config BACKLIGHT_ADP5520 To compile this driver as a module, choose M here: the module will be called adp5520_bl. +config BACKLIGHT_ADP8860 + tristate "Backlight Driver for ADP8860/ADP8861/ADP8863 using WLED" + depends on BACKLIGHT_CLASS_DEVICE && I2C + select NEW_LEDS + select LEDS_CLASS + help + If you have a LCD backlight connected to the ADP8860, ADP8861 or + ADP8863 say Y here to enable this driver. + + To compile this driver as a module, choose M here: the module will + be called adp8860_bl. + config BACKLIGHT_88PM860X tristate "Backlight Driver for 88PM8606 using WLED" - depends on BACKLIGHT_CLASS_DEVICE && MFD_88PM860X + depends on MFD_88PM860X help Say Y to enable the backlight driver for Marvell 88PM8606. +config BACKLIGHT_PCF50633 + tristate "Backlight driver for NXP PCF50633 MFD" + depends on BACKLIGHT_CLASS_DEVICE && MFD_PCF50633 + help + If you have a backlight driven by a NXP PCF50633 MFD, say Y here to + enable its driver. 
+ +endif # BACKLIGHT_CLASS_DEVICE + +endif # BACKLIGHT_LCD_SUPPORT diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 09d1f14d6257..44c0f81ad85d 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -11,9 +11,11 @@ obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o obj-$(CONFIG_LCD_TDO24M) += tdo24m.o obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o +obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o +obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o @@ -30,5 +32,7 @@ obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o +obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o +obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c new file mode 100644 index 000000000000..921ca37398f3 --- /dev/null +++ b/drivers/video/backlight/adp8860_bl.c @@ -0,0 +1,838 @@ +/* + * Backlight driver for Analog Devices ADP8860 Backlight Devices + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#include <linux/module.h> +#include <linux/version.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/pm.h> +#include <linux/platform_device.h> +#include <linux/i2c.h> +#include <linux/fb.h> +#include <linux/backlight.h> +#include <linux/leds.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +#include <linux/i2c/adp8860.h> +#define ADP8860_EXT_FEATURES +#define ADP8860_USE_LEDS + +#define ADP8860_MFDVID 0x00 /* Manufacturer and device ID */ +#define ADP8860_MDCR 0x01 /* Device mode and status */ +#define ADP8860_MDCR2 0x02 /* Device mode and Status Register 2 */ +#define ADP8860_INTR_EN 0x03 /* Interrupts enable */ +#define ADP8860_CFGR 0x04 /* Configuration register */ +#define ADP8860_BLSEN 0x05 /* Sink enable backlight or independent */ +#define ADP8860_BLOFF 0x06 /* Backlight off timeout */ +#define ADP8860_BLDIM 0x07 /* Backlight dim timeout */ +#define ADP8860_BLFR 0x08 /* Backlight fade in and out rates */ +#define ADP8860_BLMX1 0x09 /* Backlight (Brightness Level 1-daylight) maximum current */ +#define ADP8860_BLDM1 0x0A /* Backlight (Brightness Level 1-daylight) dim current */ +#define ADP8860_BLMX2 0x0B /* Backlight (Brightness Level 2-office) maximum current */ +#define ADP8860_BLDM2 0x0C /* Backlight (Brightness Level 2-office) dim current */ +#define ADP8860_BLMX3 0x0D /* Backlight (Brightness Level 3-dark) maximum current */ +#define ADP8860_BLDM3 0x0E /* Backlight (Brightness Level 3-dark) dim current */ +#define ADP8860_ISCFR 0x0F /* Independent sink current fade control register */ +#define ADP8860_ISCC 0x10 /* Independent sink current control register */ +#define ADP8860_ISCT1 0x11 /* Independent Sink Current Timer Register LED[7:5] */ +#define ADP8860_ISCT2 0x12 /* Independent Sink Current Timer Register LED[4:1] */ +#define ADP8860_ISCF 0x13 /* Independent sink current fade register */ +#define ADP8860_ISC7 0x14 /* Independent Sink Current LED7 */ +#define ADP8860_ISC6 0x15 /* Independent Sink Current LED6 */ +#define ADP8860_ISC5 0x16 /* 
Independent Sink Current LED5 */ +#define ADP8860_ISC4 0x17 /* Independent Sink Current LED4 */ +#define ADP8860_ISC3 0x18 /* Independent Sink Current LED3 */ +#define ADP8860_ISC2 0x19 /* Independent Sink Current LED2 */ +#define ADP8860_ISC1 0x1A /* Independent Sink Current LED1 */ +#define ADP8860_CCFG 0x1B /* Comparator configuration */ +#define ADP8860_CCFG2 0x1C /* Second comparator configuration */ +#define ADP8860_L2_TRP 0x1D /* L2 comparator reference */ +#define ADP8860_L2_HYS 0x1E /* L2 hysteresis */ +#define ADP8860_L3_TRP 0x1F /* L3 comparator reference */ +#define ADP8860_L3_HYS 0x20 /* L3 hysteresis */ +#define ADP8860_PH1LEVL 0x21 /* First phototransistor ambient light level-low byte register */ +#define ADP8860_PH1LEVH 0x22 /* First phototransistor ambient light level-high byte register */ +#define ADP8860_PH2LEVL 0x23 /* Second phototransistor ambient light level-low byte register */ +#define ADP8860_PH2LEVH 0x24 /* Second phototransistor ambient light level-high byte register */ + +#define ADP8860_MANUFID 0x0 /* Analog Devices ADP8860 Manufacturer ID */ +#define ADP8861_MANUFID 0x4 /* Analog Devices ADP8861 Manufacturer ID */ +#define ADP8863_MANUFID 0x2 /* Analog Devices ADP8863 Manufacturer ID */ + +#define ADP8860_DEVID(x) ((x) & 0xF) +#define ADP8860_MANID(x) ((x) >> 4) + +/* MDCR Device mode and status */ +#define INT_CFG (1 << 6) +#define NSTBY (1 << 5) +#define DIM_EN (1 << 4) +#define GDWN_DIS (1 << 3) +#define SIS_EN (1 << 2) +#define CMP_AUTOEN (1 << 1) +#define BLEN (1 << 0) + +/* ADP8860_CCFG Main ALS comparator level enable */ +#define L3_EN (1 << 1) +#define L2_EN (1 << 0) + +#define CFGR_BLV_SHIFT 3 +#define CFGR_BLV_MASK 0x3 +#define ADP8860_FLAG_LED_MASK 0xFF + +#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4)) +#define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1)) +#define ALS_CCFG_VAL(filt) ((0x7 & filt) << 5) + +enum { + adp8860, + adp8861, + adp8863 +}; + +struct adp8860_led { + struct led_classdev cdev; + struct work_struct work; + struct i2c_client *client; + enum led_brightness new_brightness; + int id; + int flags; +}; + +struct adp8860_bl { + struct i2c_client *client; + struct backlight_device *bl; + struct adp8860_led *led; + struct adp8860_backlight_platform_data *pdata; + struct mutex lock; + unsigned long cached_daylight_max; + int id; + int revid; + int current_brightness; + unsigned en_ambl_sens:1; + unsigned gdwn_dis:1; +}; + +static int adp8860_read(struct i2c_client *client, int reg, uint8_t *val) +{ + int ret; + + ret = i2c_smbus_read_byte_data(client, reg); + if (ret < 0) { + dev_err(&client->dev, "failed reading at 0x%02x\n", reg); + return ret; + } + + *val = (uint8_t)ret; + return 0; +} + +static int adp8860_write(struct i2c_client *client, u8 reg, u8 val) +{ + return i2c_smbus_write_byte_data(client, reg, val); +} + +static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask) +{ + struct adp8860_bl *data = i2c_get_clientdata(client); + uint8_t reg_val; + int ret; + + mutex_lock(&data->lock); + + ret = adp8860_read(client, reg, &reg_val); + + if (!ret && ((reg_val & bit_mask) == 0)) { + reg_val |= bit_mask; + ret = adp8860_write(client, reg, reg_val); + } + + mutex_unlock(&data->lock); + return ret; +} + +static int adp8860_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask) +{ + struct adp8860_bl *data = i2c_get_clientdata(client); + uint8_t reg_val; + int ret; + + mutex_lock(&data->lock); + + ret = adp8860_read(client, reg, &reg_val); + + if
(!ret && (reg_val & bit_mask)) { + reg_val &= ~bit_mask; + ret = adp8860_write(client, reg, reg_val); + } + + mutex_unlock(&data->lock); + return ret; +} + +/* + * Independent sink / LED + */ +#if defined(ADP8860_USE_LEDS) +static void adp8860_led_work(struct work_struct *work) +{ + struct adp8860_led *led = container_of(work, struct adp8860_led, work); + adp8860_write(led->client, ADP8860_ISC1 - led->id + 1, + led->new_brightness >> 1); +} + +static void adp8860_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct adp8860_led *led; + + led = container_of(led_cdev, struct adp8860_led, cdev); + led->new_brightness = value; + schedule_work(&led->work); +} + +static int adp8860_led_setup(struct adp8860_led *led) +{ + struct i2c_client *client = led->client; + int ret = 0; + + ret = adp8860_write(client, ADP8860_ISC1 - led->id + 1, 0); + ret |= adp8860_set_bits(client, ADP8860_ISCC, 1 << (led->id - 1)); + + if (led->id > 4) + ret |= adp8860_set_bits(client, ADP8860_ISCT1, + (led->flags & 0x3) << ((led->id - 5) * 2)); + else + ret |= adp8860_set_bits(client, ADP8860_ISCT2, + (led->flags & 0x3) << ((led->id - 1) * 2)); + + return ret; +} + +static int __devinit adp8860_led_probe(struct i2c_client *client) +{ + struct adp8860_backlight_platform_data *pdata = + client->dev.platform_data; + struct adp8860_bl *data = i2c_get_clientdata(client); + struct adp8860_led *led, *led_dat; + struct led_info *cur_led; + int ret, i; + + led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL); + if (led == NULL) { + dev_err(&client->dev, "failed to alloc memory\n"); + return -ENOMEM; + } + + ret = adp8860_write(client, ADP8860_ISCFR, pdata->led_fade_law); + ret = adp8860_write(client, ADP8860_ISCT1, + (pdata->led_on_time & 0x3) << 6); + ret |= adp8860_write(client, ADP8860_ISCF, + FADE_VAL(pdata->led_fade_in, pdata->led_fade_out)); + + if (ret) { + dev_err(&client->dev, "failed to write\n"); + goto err_free; + } + + for (i = 0; i < pdata->num_leds; ++i) { + cur_led = &pdata->leds[i]; + led_dat = &led[i]; + + led_dat->id = cur_led->flags & ADP8860_FLAG_LED_MASK; + + if (led_dat->id > 7 || led_dat->id < 1) { + dev_err(&client->dev, "Invalid LED ID %d\n", + led_dat->id); + goto err; + } + + if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) { + dev_err(&client->dev, "LED %d used by Backlight\n", + led_dat->id); + goto err; + } + + led_dat->cdev.name = cur_led->name; + led_dat->cdev.default_trigger = cur_led->default_trigger; + led_dat->cdev.brightness_set = adp8860_led_set; + led_dat->cdev.brightness = LED_OFF; + led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT; + led_dat->client = client; + led_dat->new_brightness = LED_OFF; + INIT_WORK(&led_dat->work, adp8860_led_work); + + ret = led_classdev_register(&client->dev, &led_dat->cdev); + if (ret) { + dev_err(&client->dev, "failed to register LED %d\n", + led_dat->id); + goto err; + } + + ret = adp8860_led_setup(led_dat); + if (ret) { + dev_err(&client->dev, "failed to write\n"); + i++; + goto err; + } + } + + data->led = led; + + return 0; + + err: + for (i = i - 1; i >= 0; --i) { + led_classdev_unregister(&led[i].cdev); + cancel_work_sync(&led[i].work); + } + + err_free: + kfree(led); + + return ret; +} + +static int __devexit adp8860_led_remove(struct i2c_client *client) +{ + struct adp8860_backlight_platform_data *pdata = + client->dev.platform_data; + struct adp8860_bl *data = i2c_get_clientdata(client); + int i; + + for (i = 0; i < pdata->num_leds; i++) { + led_classdev_unregister(&data->led[i].cdev); + 
cancel_work_sync(&data->led[i].work); + } + + kfree(data->led); + return 0; +} +#else +static int __devinit adp8860_led_probe(struct i2c_client *client) +{ + return 0; +} + +static int __devexit adp8860_led_remove(struct i2c_client *client) +{ + return 0; +} +#endif + +static int adp8860_bl_set(struct backlight_device *bl, int brightness) +{ + struct adp8860_bl *data = bl_get_data(bl); + struct i2c_client *client = data->client; + int ret = 0; + + if (data->en_ambl_sens) { + if ((brightness > 0) && (brightness < ADP8860_MAX_BRIGHTNESS)) { + /* Disable Ambient Light auto adjust */ + ret |= adp8860_clr_bits(client, ADP8860_MDCR, + CMP_AUTOEN); + ret |= adp8860_write(client, ADP8860_BLMX1, brightness); + } else { + /* + * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust + * restore daylight l1 sysfs brightness + */ + ret |= adp8860_write(client, ADP8860_BLMX1, + data->cached_daylight_max); + ret |= adp8860_set_bits(client, ADP8860_MDCR, + CMP_AUTOEN); + } + } else + ret |= adp8860_write(client, ADP8860_BLMX1, brightness); + + if (data->current_brightness && brightness == 0) + ret |= adp8860_set_bits(client, + ADP8860_MDCR, DIM_EN); + else if (data->current_brightness == 0 && brightness) + ret |= adp8860_clr_bits(client, + ADP8860_MDCR, DIM_EN); + + if (!ret) + data->current_brightness = brightness; + + return ret; +} + +static int adp8860_bl_update_status(struct backlight_device *bl) +{ + int brightness = bl->props.brightness; + if (bl->props.power != FB_BLANK_UNBLANK) + brightness = 0; + + if (bl->props.fb_blank != FB_BLANK_UNBLANK) + brightness = 0; + + return adp8860_bl_set(bl, brightness); +} + +static int adp8860_bl_get_brightness(struct backlight_device *bl) +{ + struct adp8860_bl *data = bl_get_data(bl); + + return data->current_brightness; +} + +static const struct backlight_ops adp8860_bl_ops = { + .update_status = adp8860_bl_update_status, + .get_brightness = adp8860_bl_get_brightness, +}; + +static int adp8860_bl_setup(struct backlight_device *bl) +{ + struct adp8860_bl *data = bl_get_data(bl); + struct i2c_client *client = data->client; + struct adp8860_backlight_platform_data *pdata = data->pdata; + int ret = 0; + + ret |= adp8860_write(client, ADP8860_BLSEN, ~pdata->bl_led_assign); + ret |= adp8860_write(client, ADP8860_BLMX1, pdata->l1_daylight_max); + ret |= adp8860_write(client, ADP8860_BLDM1, pdata->l1_daylight_dim); + + if (data->en_ambl_sens) { + data->cached_daylight_max = pdata->l1_daylight_max; + ret |= adp8860_write(client, ADP8860_BLMX2, + pdata->l2_office_max); + ret |= adp8860_write(client, ADP8860_BLDM2, + pdata->l2_office_dim); + ret |= adp8860_write(client, ADP8860_BLMX3, + pdata->l3_dark_max); + ret |= adp8860_write(client, ADP8860_BLDM3, + pdata->l3_dark_dim); + + ret |= adp8860_write(client, ADP8860_L2_TRP, pdata->l2_trip); + ret |= adp8860_write(client, ADP8860_L2_HYS, pdata->l2_hyst); + ret |= adp8860_write(client, ADP8860_L3_TRP, pdata->l3_trip); + ret |= adp8860_write(client, ADP8860_L3_HYS, pdata->l3_hyst); + ret |= adp8860_write(client, ADP8860_CCFG, L2_EN | L3_EN | + ALS_CCFG_VAL(pdata->abml_filt)); + } + + ret |= adp8860_write(client, ADP8860_CFGR, + BL_CFGR_VAL(pdata->bl_fade_law, 0)); + + ret |= adp8860_write(client, ADP8860_BLFR, FADE_VAL(pdata->bl_fade_in, + pdata->bl_fade_out)); + + ret |= adp8860_set_bits(client, ADP8860_MDCR, BLEN | DIM_EN | NSTBY | + (data->gdwn_dis ? 
GDWN_DIS : 0)); + + return ret; +} + +static ssize_t adp8860_show(struct device *dev, char *buf, int reg) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + int error; + uint8_t reg_val; + + mutex_lock(&data->lock); + error = adp8860_read(data->client, reg, &reg_val); + mutex_unlock(&data->lock); + + if (error < 0) + return error; + + return sprintf(buf, "%u\n", reg_val); +} + +static ssize_t adp8860_store(struct device *dev, const char *buf, + size_t count, int reg) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + return ret; + + mutex_lock(&data->lock); + adp8860_write(data->client, reg, val); + mutex_unlock(&data->lock); + + return count; +} + +static ssize_t adp8860_bl_l3_dark_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLMX3); +} + +static ssize_t adp8860_bl_l3_dark_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLMX3); +} + +static DEVICE_ATTR(l3_dark_max, 0664, adp8860_bl_l3_dark_max_show, + adp8860_bl_l3_dark_max_store); + +static ssize_t adp8860_bl_l2_office_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLMX2); +} + +static ssize_t adp8860_bl_l2_office_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLMX2); +} +static DEVICE_ATTR(l2_office_max, 0664, adp8860_bl_l2_office_max_show, + adp8860_bl_l2_office_max_store); + +static ssize_t adp8860_bl_l1_daylight_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLMX1); +} + +static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + + strict_strtoul(buf, 10, &data->cached_daylight_max); + return adp8860_store(dev, buf, count, ADP8860_BLMX1); +} +static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show, + adp8860_bl_l1_daylight_max_store); + +static ssize_t adp8860_bl_l3_dark_dim_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLDM3); +} + +static ssize_t adp8860_bl_l3_dark_dim_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLDM3); +} +static DEVICE_ATTR(l3_dark_dim, 0664, adp8860_bl_l3_dark_dim_show, + adp8860_bl_l3_dark_dim_store); + +static ssize_t adp8860_bl_l2_office_dim_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLDM2); +} + +static ssize_t adp8860_bl_l2_office_dim_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLDM2); +} +static DEVICE_ATTR(l2_office_dim, 0664, adp8860_bl_l2_office_dim_show, + adp8860_bl_l2_office_dim_store); + +static ssize_t adp8860_bl_l1_daylight_dim_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLDM1); +} + +static ssize_t adp8860_bl_l1_daylight_dim_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLDM1); +} +static DEVICE_ATTR(l1_daylight_dim, 0664, adp8860_bl_l1_daylight_dim_show, + adp8860_bl_l1_daylight_dim_store); +
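/*
 * [Editor's sketch -- illustrative only, not part of the patch above.] Most
 * of the per-level show/store pairs in this file differ only in the register
 * they pass to adp8860_show()/adp8860_store() (l1_daylight_max_store
 * additionally caches the written value), so the boilerplate could plausibly
 * be stamped out by a generator macro. The name ADP8860_SYSFS_ATTR below is
 * hypothetical:
 */
#define ADP8860_SYSFS_ATTR(_name, _reg)					\
static ssize_t adp8860_bl_##_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)		\
{									\
	return adp8860_show(dev, buf, _reg);				\
}									\
static ssize_t adp8860_bl_##_name##_store(struct device *dev,		\
		struct device_attribute *attr, const char *buf,		\
		size_t count)						\
{									\
	return adp8860_store(dev, buf, count, _reg);			\
}									\
static DEVICE_ATTR(_name, 0664, adp8860_bl_##_name##_show,		\
		adp8860_bl_##_name##_store)
/* e.g. ADP8860_SYSFS_ATTR(l3_dark_max, ADP8860_BLMX3); would generate the
 * hand-written l3_dark_max pair above. */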
+#ifdef ADP8860_EXT_FEATURES +static ssize_t adp8860_bl_ambient_light_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + int error; + uint8_t reg_val; + uint16_t ret_val; + + mutex_lock(&data->lock); + error = adp8860_read(data->client, ADP8860_PH1LEVL, &reg_val); + ret_val = reg_val; + error |= adp8860_read(data->client, ADP8860_PH1LEVH, &reg_val); + mutex_unlock(&data->lock); + + if (error < 0) + return error; + + /* Return 13-bit conversion value for the first light sensor */ + ret_val += (reg_val & 0x1F) << 8; + + return sprintf(buf, "%u\n", ret_val); +} +static DEVICE_ATTR(ambient_light_level, 0444, + adp8860_bl_ambient_light_level_show, NULL); + +static ssize_t adp8860_bl_ambient_light_zone_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + int error; + uint8_t reg_val; + + mutex_lock(&data->lock); + error = adp8860_read(data->client, ADP8860_CFGR, &reg_val); + mutex_unlock(&data->lock); + + if (error < 0) + return error; + + return sprintf(buf, "%u\n", + ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1); +} + +static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + unsigned long val; + uint8_t reg_val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + return ret; + + if (val == 0) { + /* Enable automatic ambient light sensing */ + adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); + } else if ((val > 0) && (val < 6)) { + /* Disable automatic ambient light sensing */ + adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); + + /* Set user supplied ambient light zone */ + mutex_lock(&data->lock); + adp8860_read(data->client, ADP8860_CFGR, &reg_val); + reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); + reg_val |= val << CFGR_BLV_SHIFT; + adp8860_write(data->client, ADP8860_CFGR, reg_val); + mutex_unlock(&data->lock); + } + + return count; +} +static DEVICE_ATTR(ambient_light_zone, 0664, + adp8860_bl_ambient_light_zone_show, + adp8860_bl_ambient_light_zone_store); +#endif + +static struct attribute *adp8860_bl_attributes[] = { + &dev_attr_l3_dark_max.attr, + &dev_attr_l3_dark_dim.attr, + &dev_attr_l2_office_max.attr, + &dev_attr_l2_office_dim.attr, + &dev_attr_l1_daylight_max.attr, + &dev_attr_l1_daylight_dim.attr, +#ifdef ADP8860_EXT_FEATURES + &dev_attr_ambient_light_level.attr, + &dev_attr_ambient_light_zone.attr, +#endif + NULL +}; + +static const struct attribute_group adp8860_bl_attr_group = { + .attrs = adp8860_bl_attributes, +}; + +static int __devinit adp8860_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct backlight_device *bl; + struct adp8860_bl *data; + struct adp8860_backlight_platform_data *pdata = + client->dev.platform_data; + struct backlight_properties props; + uint8_t reg_val; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_err(&client->dev, "SMBUS Byte Data not Supported\n"); + return -EIO; + } + + if (!pdata) { + dev_err(&client->dev, "no platform data?\n"); + return -EINVAL; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + ret = adp8860_read(client, ADP8860_MFDVID, &reg_val); + if (ret < 0) + goto out2; + + switch (ADP8860_MANID(reg_val))
{ + case ADP8863_MANUFID: + data->gdwn_dis = !!pdata->gdwn_dis; + case ADP8860_MANUFID: + data->en_ambl_sens = !!pdata->en_ambl_sens; + break; + case ADP8861_MANUFID: + data->gdwn_dis = !!pdata->gdwn_dis; + break; + default: + dev_err(&client->dev, "failed to probe\n"); + ret = -ENODEV; + goto out2; + } + + /* It's confirmed that the DEVID field is actually a REVID */ + + data->revid = ADP8860_DEVID(reg_val); + data->client = client; + data->pdata = pdata; + data->id = id->driver_data; + data->current_brightness = 0; + i2c_set_clientdata(client, data); + + memset(&props, 0, sizeof(props)); + props.max_brightness = ADP8860_MAX_BRIGHTNESS; + + mutex_init(&data->lock); + + bl = backlight_device_register(dev_driver_string(&client->dev), + &client->dev, data, &adp8860_bl_ops, &props); + if (IS_ERR(bl)) { + dev_err(&client->dev, "failed to register backlight\n"); + ret = PTR_ERR(bl); + goto out2; + } + + bl->props.max_brightness = + bl->props.brightness = ADP8860_MAX_BRIGHTNESS; + + data->bl = bl; + + if (data->en_ambl_sens) + ret = sysfs_create_group(&bl->dev.kobj, + &adp8860_bl_attr_group); + + if (ret) { + dev_err(&client->dev, "failed to register sysfs\n"); + goto out1; + } + + ret = adp8860_bl_setup(bl); + if (ret) { + ret = -EIO; + goto out; + } + + backlight_update_status(bl); + + dev_info(&client->dev, "%s Rev.%d Backlight\n", + client->name, data->revid); + + if (pdata->num_leds) + adp8860_led_probe(client); + + return 0; + +out: + if (data->en_ambl_sens) + sysfs_remove_group(&data->bl->dev.kobj, + &adp8860_bl_attr_group); +out1: + backlight_device_unregister(bl); +out2: + i2c_set_clientdata(client, NULL); + kfree(data); + + return ret; +} + +static int __devexit adp8860_remove(struct i2c_client *client) +{ + struct adp8860_bl *data = i2c_get_clientdata(client); + + adp8860_clr_bits(client, ADP8860_MDCR, NSTBY); + + if (data->led) + adp8860_led_remove(client); + + if (data->en_ambl_sens) + sysfs_remove_group(&data->bl->dev.kobj, + &adp8860_bl_attr_group); + + backlight_device_unregister(data->bl); + i2c_set_clientdata(client, NULL); + kfree(data); + + return 0; +} + +#ifdef CONFIG_PM +static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message) +{ + adp8860_clr_bits(client, ADP8860_MDCR, NSTBY); + + return 0; +} + +static int adp8860_i2c_resume(struct i2c_client *client) +{ + adp8860_set_bits(client, ADP8860_MDCR, NSTBY); + + return 0; +} +#else +#define adp8860_i2c_suspend NULL +#define adp8860_i2c_resume NULL +#endif + +static const struct i2c_device_id adp8860_id[] = { + { "adp8860", adp8860 }, + { "adp8861", adp8861 }, + { "adp8863", adp8863 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adp8860_id); + +static struct i2c_driver adp8860_driver = { + .driver = { + .name = KBUILD_MODNAME, + }, + .probe = adp8860_probe, + .remove = __devexit_p(adp8860_remove), + .suspend = adp8860_i2c_suspend, + .resume = adp8860_i2c_resume, + .id_table = adp8860_id, +}; + +static int __init adp8860_init(void) +{ + return i2c_add_driver(&adp8860_driver); +} +module_init(adp8860_init); + +static void __exit adp8860_exit(void) +{ + i2c_del_driver(&adp8860_driver); +} +module_exit(adp8860_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); +MODULE_DESCRIPTION("ADP8860 Backlight driver"); +MODULE_ALIAS("i2c:adp8860-backlight"); diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c index 7f4a7c30a98b..fe9af129c5dd 100644 --- a/drivers/video/backlight/adx_bl.c +++ b/drivers/video/backlight/adx_bl.c @@ -107,8 +107,8 @@ 
static int __devinit adx_backlight_probe(struct platform_device *pdev) props.max_brightness = 0xff; bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl, &adx_backlight_ops, &props); - if (!bldev) { - ret = -ENOMEM; + if (IS_ERR(bldev)) { + ret = PTR_ERR(bldev); goto out; } diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c new file mode 100644 index 000000000000..b0cc49184803 --- /dev/null +++ b/drivers/video/backlight/ep93xx_bl.c @@ -0,0 +1,160 @@ +/* + * Driver for the Cirrus EP93xx lcd backlight + * + * Copyright (c) 2010 H Hartley Sweeten <hsweeten@visionengravers.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This driver controls the pulse width modulated brightness control output, + * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. + */ + + +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/fb.h> +#include <linux/backlight.h> + +#include <mach/hardware.h> + +#define EP93XX_RASTER_REG(x) (EP93XX_RASTER_BASE + (x)) +#define EP93XX_RASTER_BRIGHTNESS EP93XX_RASTER_REG(0x20) + +#define EP93XX_MAX_COUNT 255 +#define EP93XX_MAX_BRIGHT 255 +#define EP93XX_DEF_BRIGHT 128 + +struct ep93xxbl { + void __iomem *mmio; + int brightness; +}; + +static int ep93xxbl_set(struct backlight_device *bl, int brightness) +{ + struct ep93xxbl *ep93xxbl = bl_get_data(bl); + + __raw_writel((brightness << 8) | EP93XX_MAX_COUNT, ep93xxbl->mmio); + + ep93xxbl->brightness = brightness; + + return 0; +} + +static int ep93xxbl_update_status(struct backlight_device *bl) +{ + int brightness = bl->props.brightness; + + if (bl->props.power != FB_BLANK_UNBLANK || + bl->props.fb_blank != FB_BLANK_UNBLANK) + brightness = 0; + + return ep93xxbl_set(bl, brightness); +} + +static int ep93xxbl_get_brightness(struct backlight_device *bl) +{ + struct ep93xxbl *ep93xxbl = bl_get_data(bl); + + return ep93xxbl->brightness; +} + +static const struct backlight_ops ep93xxbl_ops = { + .update_status = ep93xxbl_update_status, + .get_brightness = ep93xxbl_get_brightness, +}; + +static int __init ep93xxbl_probe(struct platform_device *dev) +{ + struct ep93xxbl *ep93xxbl; + struct backlight_device *bl; + struct backlight_properties props; + + ep93xxbl = devm_kzalloc(&dev->dev, sizeof(*ep93xxbl), GFP_KERNEL); + if (!ep93xxbl) + return -ENOMEM; + + /* + * This register is located in the range already ioremap'ed by + * the framebuffer driver. A MFD driver seems a bit of overkill + * to handle this so use the static I/O mapping; this address + * is already virtual. + * + * NOTE: No locking is required; the framebuffer does not touch + * this register. 
+ */ + ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS; + + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = EP93XX_MAX_BRIGHT; + bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl, + &ep93xxbl_ops, &props); + if (IS_ERR(bl)) + return PTR_ERR(bl); + + bl->props.brightness = EP93XX_DEF_BRIGHT; + + platform_set_drvdata(dev, bl); + + ep93xxbl_update_status(bl); + + return 0; +} + +static int ep93xxbl_remove(struct platform_device *dev) +{ + struct backlight_device *bl = platform_get_drvdata(dev); + + backlight_device_unregister(bl); + platform_set_drvdata(dev, NULL); + return 0; +} + +#ifdef CONFIG_PM +static int ep93xxbl_suspend(struct platform_device *dev, pm_message_t state) +{ + struct backlight_device *bl = platform_get_drvdata(dev); + + return ep93xxbl_set(bl, 0); +} + +static int ep93xxbl_resume(struct platform_device *dev) +{ + struct backlight_device *bl = platform_get_drvdata(dev); + + backlight_update_status(bl); + return 0; +} +#else +#define ep93xxbl_suspend NULL +#define ep93xxbl_resume NULL +#endif + +static struct platform_driver ep93xxbl_driver = { + .driver = { + .name = "ep93xx-bl", + .owner = THIS_MODULE, + }, + .probe = ep93xxbl_probe, + .remove = __devexit_p(ep93xxbl_remove), + .suspend = ep93xxbl_suspend, + .resume = ep93xxbl_resume, +}; + +static int __init ep93xxbl_init(void) +{ + return platform_driver_register(&ep93xxbl_driver); +} +module_init(ep93xxbl_init); + +static void __exit ep93xxbl_exit(void) +{ + platform_driver_unregister(&ep93xxbl_driver); +} +module_exit(ep93xxbl_exit); + +MODULE_DESCRIPTION("EP93xx Backlight Driver"); +MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ep93xx-bl"); diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index bcdb12c93efd..9093ef0fa869 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c @@ -125,8 +125,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi) if (priv == NULL) { dev_err(&spi->dev, "No memory for this device.\n"); - ret = -ENOMEM; - goto err; + return -ENOMEM; } dev_set_drvdata(&spi->dev, priv); @@ -139,7 +138,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi) if (ret) { dev_err(&spi->dev, "Unable to get the lcd l4f00242t03 reset gpio.\n"); - return ret; + goto err; } ret = gpio_direction_output(pdata->reset_gpio, 1); @@ -151,7 +150,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi) if (ret) { dev_err(&spi->dev, "Unable to get the lcd l4f00242t03 data en gpio.\n"); - return ret; + goto err2; } ret = gpio_direction_output(pdata->data_enable_gpio, 0); @@ -222,9 +221,9 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi) gpio_free(pdata->reset_gpio); if (priv->io_reg) - regulator_put(priv->core_reg); - if (priv->core_reg) regulator_put(priv->io_reg); + if (priv->core_reg) + regulator_put(priv->core_reg); kfree(priv); diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c index b5accc957ad3..b2b2c7ba1f63 100644 --- a/drivers/video/backlight/max8925_bl.c +++ b/drivers/video/backlight/max8925_bl.c @@ -162,6 +162,7 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev) backlight_update_status(bl); return 0; out: + backlight_device_unregister(bl); kfree(data); return ret; } diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 1b5d3fe6bbbc..9fb533f6373e 100644 --- 
a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c @@ -141,7 +141,7 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { .callback = mbp_dmi_match, .ident = "MacBook 1,1", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"), }, .driver_data = (void *)&intel_chipset_data, @@ -184,6 +184,42 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { }, { .callback = mbp_dmi_match, + .ident = "MacBookPro 1,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBookPro 1,2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,2"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBookPro 2,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,1"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBookPro 2,2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, .ident = "MacBookPro 3,1", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), @@ -238,6 +274,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { }, { .callback = mbp_dmi_match, + .ident = "MacBook 6,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), + }, + .driver_data = (void *)&nvidia_chipset_data, + }, + { + .callback = mbp_dmi_match, .ident = "MacBookAir 2,1", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c new file mode 100644 index 000000000000..3c424f7efdcc --- /dev/null +++ b/drivers/video/backlight/pcf50633-backlight.c @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> + * PCF50633 backlight device driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/platform_device.h> + +#include <linux/backlight.h> +#include <linux/fb.h> + +#include <linux/mfd/pcf50633/core.h> +#include <linux/mfd/pcf50633/backlight.h> + +struct pcf50633_bl { + struct pcf50633 *pcf; + struct backlight_device *bl; + + unsigned int brightness; + unsigned int brightness_limit; +}; + +/* + * pcf50633_bl_set_brightness_limit + * + * Update the brightness limit for the pc50633 backlight. The actual brightness + * will not go above the limit. This is useful to limit power drain for example + * on low battery. + * + * @dev: Pointer to a pcf50633 device + * @limit: The brightness limit. 
Valid values are 0-63 + */ +int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit) +{ + struct pcf50633_bl *pcf_bl = platform_get_drvdata(pcf->bl_pdev); + + if (!pcf_bl) + return -ENODEV; + + pcf_bl->brightness_limit = limit & 0x3f; + backlight_update_status(pcf_bl->bl); + + return 0; +} + +static int pcf50633_bl_update_status(struct backlight_device *bl) +{ + struct pcf50633_bl *pcf_bl = bl_get_data(bl); + unsigned int new_brightness; + + + if (bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK) || + bl->props.power != FB_BLANK_UNBLANK) + new_brightness = 0; + else if (bl->props.brightness < pcf_bl->brightness_limit) + new_brightness = bl->props.brightness; + else + new_brightness = pcf_bl->brightness_limit; + + + if (pcf_bl->brightness == new_brightness) + return 0; + + if (new_brightness) { + pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDOUT, + new_brightness); + if (!pcf_bl->brightness) + pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 1); + } else { + pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 0); + } + + pcf_bl->brightness = new_brightness; + + return 0; +} + +static int pcf50633_bl_get_brightness(struct backlight_device *bl) +{ + struct pcf50633_bl *pcf_bl = bl_get_data(bl); + return pcf_bl->brightness; +} + +static const struct backlight_ops pcf50633_bl_ops = { + .get_brightness = pcf50633_bl_get_brightness, + .update_status = pcf50633_bl_update_status, + .options = BL_CORE_SUSPENDRESUME, +}; + +static int __devinit pcf50633_bl_probe(struct platform_device *pdev) +{ + int ret; + struct pcf50633_bl *pcf_bl; + struct device *parent = pdev->dev.parent; + struct pcf50633_platform_data *pcf50633_data = parent->platform_data; + struct pcf50633_bl_platform_data *pdata = pcf50633_data->backlight_data; + struct backlight_properties bl_props; + + pcf_bl = kzalloc(sizeof(*pcf_bl), GFP_KERNEL); + if (!pcf_bl) + return -ENOMEM; + + bl_props.max_brightness = 0x3f; + bl_props.power = FB_BLANK_UNBLANK; + + if (pdata) { + bl_props.brightness = pdata->default_brightness; + pcf_bl->brightness_limit = pdata->default_brightness_limit; + } else { + bl_props.brightness = 0x3f; + pcf_bl->brightness_limit = 0x3f; + } + + pcf_bl->pcf = dev_to_pcf50633(pdev->dev.parent); + + pcf_bl->bl = backlight_device_register(pdev->name, &pdev->dev, pcf_bl, + &pcf50633_bl_ops, &bl_props); + + if (IS_ERR(pcf_bl->bl)) { + ret = PTR_ERR(pcf_bl->bl); + goto err_free; + } + + platform_set_drvdata(pdev, pcf_bl); + + pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDDIM, pdata->ramp_time); + + /* Should be different from bl_props.brightness, so we do not exit + * update_status early the first time it's called */ + pcf_bl->brightness = pcf_bl->bl->props.brightness + 1; + + backlight_update_status(pcf_bl->bl); + + return 0; + +err_free: + kfree(pcf_bl); + + return ret; +} + +static int __devexit pcf50633_bl_remove(struct platform_device *pdev) +{ + struct pcf50633_bl *pcf_bl = platform_get_drvdata(pdev); + + backlight_device_unregister(pcf_bl->bl); + + platform_set_drvdata(pdev, NULL); + + kfree(pcf_bl); + + return 0; +} + +static struct platform_driver pcf50633_bl_driver = { + .probe = pcf50633_bl_probe, + .remove = __devexit_p(pcf50633_bl_remove), + .driver = { + .name = "pcf50633-backlight", + }, +}; + +static int __init pcf50633_bl_init(void) +{ + return platform_driver_register(&pcf50633_bl_driver); +} +module_init(pcf50633_bl_init); + +static void __exit pcf50633_bl_exit(void) +{ + platform_driver_unregister(&pcf50633_bl_driver); +} +module_exit(pcf50633_bl_exit); + 
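/*
 * [Editor's sketch -- illustrative only, not part of the patch above.] The
 * kerneldoc for pcf50633_bl_set_brightness_limit() positions it as a power
 * management hook, e.g. capping backlight drain when the battery runs low.
 * A hypothetical caller; only pcf50633_bl_set_brightness_limit() itself is
 * from the patch, the helper name and the 10% threshold are assumptions:
 */
static void example_battery_capacity_changed(struct pcf50633 *pcf,
					     int capacity_percent)
{
	/* LEDOUT is a 6-bit field, so 0x3f is full scale */
	if (capacity_percent < 10)
		pcf50633_bl_set_brightness_limit(pcf, 0x1f);
	else
		pcf50633_bl_set_brightness_limit(pcf, 0x3f);
}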
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); +MODULE_DESCRIPTION("PCF50633 backlight driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pcf50633-backlight"); diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c new file mode 100644 index 000000000000..a3128c9cb7ad --- /dev/null +++ b/drivers/video/backlight/s6e63m0.c @@ -0,0 +1,920 @@ +/* + * S6E63M0 AMOLED LCD panel driver. + * + * Author: InKi Dae <inki.dae@samsung.com> + * + * Derived from drivers/video/omap/lcd-apollon.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/wait.h> +#include <linux/fb.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/spi/spi.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/lcd.h> +#include <linux/backlight.h> + +#include "s6e63m0_gamma.h" + +#define SLEEPMSEC 0x1000 +#define ENDDEF 0x2000 +#define DEFMASK 0xFF00 +#define COMMAND_ONLY 0xFE +#define DATA_ONLY 0xFF + +#define MIN_BRIGHTNESS 0 +#define MAX_BRIGHTNESS 10 + +#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) + +struct s6e63m0 { + struct device *dev; + struct spi_device *spi; + unsigned int power; + unsigned int current_brightness; + unsigned int gamma_mode; + unsigned int gamma_table_count; + struct lcd_device *ld; + struct backlight_device *bd; + struct lcd_platform_data *lcd_pd; +}; + +static const unsigned short SEQ_PANEL_CONDITION_SET[] = { + 0xF8, 0x01, + DATA_ONLY, 0x27, + DATA_ONLY, 0x27, + DATA_ONLY, 0x07, + DATA_ONLY, 0x07, + DATA_ONLY, 0x54, + DATA_ONLY, 0x9f, + DATA_ONLY, 0x63, + DATA_ONLY, 0x86, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x33, + DATA_ONLY, 0x0d, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_DISPLAY_CONDITION_SET[] = { + 0xf2, 0x02, + DATA_ONLY, 0x03, + DATA_ONLY, 0x1c, + DATA_ONLY, 0x10, + DATA_ONLY, 0x10, + + 0xf7, 0x03, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_GAMMA_SETTING[] = { + 0xfa, 0x00, + DATA_ONLY, 0x18, + DATA_ONLY, 0x08, + DATA_ONLY, 0x24, + DATA_ONLY, 0x64, + DATA_ONLY, 0x56, + DATA_ONLY, 0x33, + DATA_ONLY, 0xb6, + DATA_ONLY, 0xba, + DATA_ONLY, 0xa8, + DATA_ONLY, 0xac, + DATA_ONLY, 0xb1, + DATA_ONLY, 0x9d, + DATA_ONLY, 0xc1, + DATA_ONLY, 0xc1, + DATA_ONLY, 0xb7, + DATA_ONLY, 0x00, + DATA_ONLY, 0x9c, + DATA_ONLY, 0x00, + DATA_ONLY, 0x9f, + DATA_ONLY, 0x00, + DATA_ONLY, 0xd6, + + 0xfa, 0x01, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ETC_CONDITION_SET[] = { + 0xf6, 0x00, + DATA_ONLY, 0x8c, + DATA_ONLY, 0x07, + + 0xb3, 0xc, + + 0xb5, 0x2c, + DATA_ONLY, 0x12, + DATA_ONLY, 0x0c, + DATA_ONLY, 0x0a, + DATA_ONLY, 0x10, + DATA_ONLY, 0x0e, + DATA_ONLY, 0x17, + DATA_ONLY, 0x13, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x2a, + DATA_ONLY, 0x24, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1b, + 
DATA_ONLY, 0x1a, + DATA_ONLY, 0x17, + + DATA_ONLY, 0x2b, + DATA_ONLY, 0x26, + DATA_ONLY, 0x22, + DATA_ONLY, 0x20, + DATA_ONLY, 0x3a, + DATA_ONLY, 0x34, + DATA_ONLY, 0x30, + DATA_ONLY, 0x2c, + DATA_ONLY, 0x29, + DATA_ONLY, 0x26, + DATA_ONLY, 0x25, + DATA_ONLY, 0x23, + DATA_ONLY, 0x21, + DATA_ONLY, 0x20, + DATA_ONLY, 0x1e, + DATA_ONLY, 0x1e, + + 0xb6, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x11, + DATA_ONLY, 0x22, + DATA_ONLY, 0x33, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + + DATA_ONLY, 0x55, + DATA_ONLY, 0x55, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + + 0xb7, 0x2c, + DATA_ONLY, 0x12, + DATA_ONLY, 0x0c, + DATA_ONLY, 0x0a, + DATA_ONLY, 0x10, + DATA_ONLY, 0x0e, + DATA_ONLY, 0x17, + DATA_ONLY, 0x13, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x2a, + DATA_ONLY, 0x24, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1b, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x17, + + DATA_ONLY, 0x2b, + DATA_ONLY, 0x26, + DATA_ONLY, 0x22, + DATA_ONLY, 0x20, + DATA_ONLY, 0x3a, + DATA_ONLY, 0x34, + DATA_ONLY, 0x30, + DATA_ONLY, 0x2c, + DATA_ONLY, 0x29, + DATA_ONLY, 0x26, + DATA_ONLY, 0x25, + DATA_ONLY, 0x23, + DATA_ONLY, 0x21, + DATA_ONLY, 0x20, + DATA_ONLY, 0x1e, + DATA_ONLY, 0x1e, + + 0xb8, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x11, + DATA_ONLY, 0x22, + DATA_ONLY, 0x33, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + + DATA_ONLY, 0x55, + DATA_ONLY, 0x55, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + + 0xb9, 0x2c, + DATA_ONLY, 0x12, + DATA_ONLY, 0x0c, + DATA_ONLY, 0x0a, + DATA_ONLY, 0x10, + DATA_ONLY, 0x0e, + DATA_ONLY, 0x17, + DATA_ONLY, 0x13, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x2a, + DATA_ONLY, 0x24, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1b, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x17, + + DATA_ONLY, 0x2b, + DATA_ONLY, 0x26, + DATA_ONLY, 0x22, + DATA_ONLY, 0x20, + DATA_ONLY, 0x3a, + DATA_ONLY, 0x34, + DATA_ONLY, 0x30, + DATA_ONLY, 0x2c, + DATA_ONLY, 0x29, + DATA_ONLY, 0x26, + DATA_ONLY, 0x25, + DATA_ONLY, 0x23, + DATA_ONLY, 0x21, + DATA_ONLY, 0x20, + DATA_ONLY, 0x1e, + DATA_ONLY, 0x1e, + + 0xba, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x11, + DATA_ONLY, 0x22, + DATA_ONLY, 0x33, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + + DATA_ONLY, 0x55, + DATA_ONLY, 0x55, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + + 0xc1, 0x4d, + DATA_ONLY, 0x96, + DATA_ONLY, 0x1d, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x01, + DATA_ONLY, 0xdf, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x03, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x03, + DATA_ONLY, 0x06, + DATA_ONLY, 0x09, + DATA_ONLY, 0x0d, + DATA_ONLY, 0x0f, + DATA_ONLY, 0x12, + DATA_ONLY, 0x15, + DATA_ONLY, 0x18, + + 0xb2, 0x10, + DATA_ONLY, 0x10, + DATA_ONLY, 0x0b, + DATA_ONLY, 0x05, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ACL_ON[] = { + /* ACL on */ + 0xc0, 0x01, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ACL_OFF[] = { + /* ACL off */ + 0xc0, 0x00, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ELVSS_ON[] = { + /* ELVSS on */ + 0xb1, 0x0b, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ELVSS_OFF[] = { + /* ELVSS off */ + 0xb1, 0x0a, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_STAND_BY_OFF[] = { + 0x11, 
COMMAND_ONLY, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_STAND_BY_ON[] = { + 0x10, COMMAND_ONLY, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_DISPLAY_ON[] = { + 0x29, COMMAND_ONLY, + + ENDDEF, 0x0000 +}; + + +static int s6e63m0_spi_write_byte(struct s6e63m0 *lcd, int addr, int data) +{ + u16 buf[1]; + struct spi_message msg; + + struct spi_transfer xfer = { + .len = 2, + .tx_buf = buf, + }; + + buf[0] = (addr << 8) | data; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + return spi_sync(lcd->spi, &msg); +} + +static int s6e63m0_spi_write(struct s6e63m0 *lcd, unsigned char address, + unsigned char command) +{ + int ret = 0; + + if (address != DATA_ONLY) + ret = s6e63m0_spi_write_byte(lcd, 0x0, address); + if (command != COMMAND_ONLY) + ret = s6e63m0_spi_write_byte(lcd, 0x1, command); + + return ret; +} + +static int s6e63m0_panel_send_sequence(struct s6e63m0 *lcd, + const unsigned short *wbuf) +{ + int ret = 0, i = 0; + + while ((wbuf[i] & DEFMASK) != ENDDEF) { + if ((wbuf[i] & DEFMASK) != SLEEPMSEC) { + ret = s6e63m0_spi_write(lcd, wbuf[i], wbuf[i+1]); + if (ret) + break; + } else + udelay(wbuf[i+1]*1000); + i += 2; + } + + return ret; +} + +static int _s6e63m0_gamma_ctl(struct s6e63m0 *lcd, const unsigned int *gamma) +{ + unsigned int i = 0; + int ret = 0; + + /* disable gamma table updating. */ + ret = s6e63m0_spi_write(lcd, 0xfa, 0x00); + if (ret) { + dev_err(lcd->dev, "failed to disable gamma table updating.\n"); + goto gamma_err; + } + + for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) { + ret = s6e63m0_spi_write(lcd, DATA_ONLY, gamma[i]); + if (ret) { + dev_err(lcd->dev, "failed to set gamma table.\n"); + goto gamma_err; + } + } + + /* update gamma table. */ + ret = s6e63m0_spi_write(lcd, 0xfa, 0x01); + if (ret) + dev_err(lcd->dev, "failed to update gamma table.\n"); + +gamma_err: + return ret; +} + +static int s6e63m0_gamma_ctl(struct s6e63m0 *lcd, int gamma) +{ + int ret = 0; + + ret = _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]); + + return ret; +} + + +static int s6e63m0_ldi_init(struct s6e63m0 *lcd) +{ + int ret, i; + const unsigned short *init_seq[] = { + SEQ_PANEL_CONDITION_SET, + SEQ_DISPLAY_CONDITION_SET, + SEQ_GAMMA_SETTING, + SEQ_ETC_CONDITION_SET, + SEQ_ACL_ON, + SEQ_ELVSS_ON, + }; + + for (i = 0; i < ARRAY_SIZE(init_seq); i++) { + ret = s6e63m0_panel_send_sequence(lcd, init_seq[i]); + if (ret) + break; + } + + return ret; +} + +static int s6e63m0_ldi_enable(struct s6e63m0 *lcd) +{ + int ret = 0, i; + const unsigned short *enable_seq[] = { + SEQ_STAND_BY_OFF, + SEQ_DISPLAY_ON, + }; + + for (i = 0; i < ARRAY_SIZE(enable_seq); i++) { + ret = s6e63m0_panel_send_sequence(lcd, enable_seq[i]); + if (ret) + break; + } + + return ret; +} + +static int s6e63m0_ldi_disable(struct s6e63m0 *lcd) +{ + int ret; + + ret = s6e63m0_panel_send_sequence(lcd, SEQ_STAND_BY_ON); + + return ret; +} + +static int s6e63m0_power_on(struct s6e63m0 *lcd) +{ + int ret = 0; + struct lcd_platform_data *pd = NULL; + struct backlight_device *bd = NULL; + + pd = lcd->lcd_pd; + if (!pd) { + dev_err(lcd->dev, "platform data is NULL.\n"); + return -EFAULT; + } + + bd = lcd->bd; + if (!bd) { + dev_err(lcd->dev, "backlight device is NULL.\n"); + return -EFAULT; + } + + if (!pd->power_on) { + dev_err(lcd->dev, "power_on is NULL.\n"); + return -EFAULT; + } else { + pd->power_on(lcd->ld, 1); + mdelay(pd->power_on_delay); + } + + if (!pd->reset) { + dev_err(lcd->dev, "reset is NULL.\n"); + return -EFAULT; + } else { + pd->reset(lcd->ld); + 
mdelay(pd->reset_delay); + } + + ret = s6e63m0_ldi_init(lcd); + if (ret) { + dev_err(lcd->dev, "failed to initialize ldi.\n"); + return ret; + } + + ret = s6e63m0_ldi_enable(lcd); + if (ret) { + dev_err(lcd->dev, "failed to enable ldi.\n"); + return ret; + } + + /* set brightness to current value after power on or resume. */ + ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness); + if (ret) { + dev_err(lcd->dev, "lcd gamma setting failed.\n"); + return ret; + } + + return 0; +} + +static int s6e63m0_power_off(struct s6e63m0 *lcd) +{ + int ret = 0; + struct lcd_platform_data *pd = NULL; + + pd = lcd->lcd_pd; + if (!pd) { + dev_err(lcd->dev, "platform data is NULL.\n"); + return -EFAULT; + } + + ret = s6e63m0_ldi_disable(lcd); + if (ret) { + dev_err(lcd->dev, "lcd setting failed.\n"); + return -EIO; + } + + mdelay(pd->power_off_delay); + + if (!pd->power_on) { + dev_err(lcd->dev, "power_on is NULL.\n"); + return -EFAULT; + } else + pd->power_on(lcd->ld, 0); + + return 0; +} + +static int s6e63m0_power(struct s6e63m0 *lcd, int power) +{ + int ret = 0; + + if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power)) + ret = s6e63m0_power_on(lcd); + else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power)) + ret = s6e63m0_power_off(lcd); + + if (!ret) + lcd->power = power; + + return ret; +} + +static int s6e63m0_set_power(struct lcd_device *ld, int power) +{ + struct s6e63m0 *lcd = lcd_get_data(ld); + + if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN && + power != FB_BLANK_NORMAL) { + dev_err(lcd->dev, "power value should be 0, 1 or 4.\n"); + return -EINVAL; + } + + return s6e63m0_power(lcd, power); +} + +static int s6e63m0_get_power(struct lcd_device *ld) +{ + struct s6e63m0 *lcd = lcd_get_data(ld); + + return lcd->power; +} + +static int s6e63m0_get_brightness(struct backlight_device *bd) +{ + return bd->props.brightness; +} + +static int s6e63m0_set_brightness(struct backlight_device *bd) +{ + int ret = 0, brightness = bd->props.brightness; + struct s6e63m0 *lcd = bl_get_data(bd); + + if (brightness < MIN_BRIGHTNESS || + brightness > bd->props.max_brightness) { + dev_err(&bd->dev, "lcd brightness should be %d to %d.\n", + MIN_BRIGHTNESS, MAX_BRIGHTNESS); + return -EINVAL; + } + + ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness); + if (ret) { + dev_err(&bd->dev, "lcd brightness setting failed.\n"); + return -EIO; + } + + return ret; +} + +static struct lcd_ops s6e63m0_lcd_ops = { + .set_power = s6e63m0_set_power, + .get_power = s6e63m0_get_power, +}; + +static const struct backlight_ops s6e63m0_backlight_ops = { + .get_brightness = s6e63m0_get_brightness, + .update_status = s6e63m0_set_brightness, +}; + +static ssize_t s6e63m0_sysfs_show_gamma_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct s6e63m0 *lcd = dev_get_drvdata(dev); + char temp[10]; + + switch (lcd->gamma_mode) { + case 0: + sprintf(temp, "2.2 mode\n"); + strcat(buf, temp); + break; + case 1: + sprintf(temp, "1.9 mode\n"); + strcat(buf, temp); + break; + case 2: + sprintf(temp, "1.7 mode\n"); + strcat(buf, temp); + break; + default: + dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n"); + break; + } + + return strlen(buf); +} + +static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct s6e63m0 *lcd = dev_get_drvdata(dev); + struct backlight_device *bd = NULL; + int brightness, rc; + + rc = strict_strtoul(buf, 0, (unsigned long *)&lcd->gamma_mode); + if (rc < 0) + return rc; + + bd = lcd->bd; + +
brightness = bd->props.brightness; + + switch (lcd->gamma_mode) { + case 0: + _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]); + break; + case 1: + _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_19_table[brightness]); + break; + case 2: + _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_17_table[brightness]); + break; + default: + dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n"); + _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]); + break; + } + return len; +} + +static DEVICE_ATTR(gamma_mode, 0644, + s6e63m0_sysfs_show_gamma_mode, s6e63m0_sysfs_store_gamma_mode); + +static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct s6e63m0 *lcd = dev_get_drvdata(dev); + char temp[3]; + + sprintf(temp, "%d\n", lcd->gamma_table_count); + strcpy(buf, temp); + + return strlen(buf); +} +static DEVICE_ATTR(gamma_table, 0644, + s6e63m0_sysfs_show_gamma_table, NULL); + +static int __init s6e63m0_probe(struct spi_device *spi) +{ + int ret = 0; + struct s6e63m0 *lcd = NULL; + struct lcd_device *ld = NULL; + struct backlight_device *bd = NULL; + + lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL); + if (!lcd) + return -ENOMEM; + + /* s6e63m0 lcd panel uses 3-wire 9bits SPI Mode. */ + spi->bits_per_word = 9; + + ret = spi_setup(spi); + if (ret < 0) { + dev_err(&spi->dev, "spi setup failed.\n"); + goto out_free_lcd; + } + + lcd->spi = spi; + lcd->dev = &spi->dev; + + lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data; + if (!lcd->lcd_pd) { + dev_err(&spi->dev, "platform data is NULL.\n"); + goto out_free_lcd; + } + + ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops); + if (IS_ERR(ld)) { + ret = PTR_ERR(ld); + goto out_free_lcd; + } + + lcd->ld = ld; + + bd = backlight_device_register("s6e63m0bl-bl", &spi->dev, lcd, + &s6e63m0_backlight_ops, NULL); + if (IS_ERR(bd)) { + ret = PTR_ERR(bd); + goto out_lcd_unregister; + } + + bd->props.max_brightness = MAX_BRIGHTNESS; + bd->props.brightness = MAX_BRIGHTNESS; + lcd->bd = bd; + + /* + * it gets gamma table count available so it gets user + * know that. + */ + lcd->gamma_table_count = + sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int)); + + ret = device_create_file(&(spi->dev), &dev_attr_gamma_mode); + if (ret < 0) + dev_err(&(spi->dev), "failed to add sysfs entries\n"); + + ret = device_create_file(&(spi->dev), &dev_attr_gamma_table); + if (ret < 0) + dev_err(&(spi->dev), "failed to add sysfs entries\n"); + + /* + * if lcd panel was on from bootloader like u-boot then + * do not lcd on. + */ + if (!lcd->lcd_pd->lcd_enabled) { + /* + * if lcd panel was off from bootloader then + * current lcd status is powerdown and then + * it enables lcd panel. 
+ */ + lcd->power = FB_BLANK_POWERDOWN; + + s6e63m0_power(lcd, FB_BLANK_UNBLANK); + } else + lcd->power = FB_BLANK_UNBLANK; + + dev_set_drvdata(&spi->dev, lcd); + + dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n"); + + return 0; + +out_lcd_unregister: + lcd_device_unregister(ld); +out_free_lcd: + kfree(lcd); + return ret; +} + +static int __devexit s6e63m0_remove(struct spi_device *spi) +{ + struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); + + s6e63m0_power(lcd, FB_BLANK_POWERDOWN); + lcd_device_unregister(lcd->ld); + kfree(lcd); + + return 0; +} + +#if defined(CONFIG_PM) +unsigned int before_power; + +static int s6e63m0_suspend(struct spi_device *spi, pm_message_t mesg) +{ + int ret = 0; + struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); + + dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power); + + before_power = lcd->power; + + /* + * when lcd panel is suspend, lcd panel becomes off + * regardless of status. + */ + ret = s6e63m0_power(lcd, FB_BLANK_POWERDOWN); + + return ret; +} + +static int s6e63m0_resume(struct spi_device *spi) +{ + int ret = 0; + struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); + + /* + * after suspended, if lcd panel status is FB_BLANK_UNBLANK + * (at that time, before_power is FB_BLANK_UNBLANK) then + * it changes that status to FB_BLANK_POWERDOWN to get lcd on. + */ + if (before_power == FB_BLANK_UNBLANK) + lcd->power = FB_BLANK_POWERDOWN; + + dev_dbg(&spi->dev, "before_power = %d\n", before_power); + + ret = s6e63m0_power(lcd, before_power); + + return ret; +} +#else +#define s6e63m0_suspend NULL +#define s6e63m0_resume NULL +#endif + +/* Power down all displays on reboot, poweroff or halt. */ +static void s6e63m0_shutdown(struct spi_device *spi) +{ + struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); + + s6e63m0_power(lcd, FB_BLANK_POWERDOWN); +} + +static struct spi_driver s6e63m0_driver = { + .driver = { + .name = "s6e63m0", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = s6e63m0_probe, + .remove = __devexit_p(s6e63m0_remove), + .shutdown = s6e63m0_shutdown, + .suspend = s6e63m0_suspend, + .resume = s6e63m0_resume, +}; + +static int __init s6e63m0_init(void) +{ + return spi_register_driver(&s6e63m0_driver); +} + +static void __exit s6e63m0_exit(void) +{ + spi_unregister_driver(&s6e63m0_driver); +} + +module_init(s6e63m0_init); +module_exit(s6e63m0_exit); + +MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>"); +MODULE_DESCRIPTION("S6E63M0 LCD Driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/video/backlight/s6e63m0_gamma.h b/drivers/video/backlight/s6e63m0_gamma.h new file mode 100644 index 000000000000..2c44bdb0696b --- /dev/null +++ b/drivers/video/backlight/s6e63m0_gamma.h @@ -0,0 +1,266 @@ +/* linux/drivers/video/samsung/s6e63m0_brightness.h + * + * Gamma level definitions. + * + * Copyright (c) 2009 Samsung Electronics + * InKi Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#ifndef _S6E63M0_BRIGHTNESS_H +#define _S6E63M0_BRIGHTNESS_H + +#define MAX_GAMMA_LEVEL 11 +#define GAMMA_TABLE_COUNT 21 + +/* gamma value: 2.2 */ +static const unsigned int s6e63m0_22_300[] = { + 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6, + 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0, + 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb +}; + +static const unsigned int s6e63m0_22_280[] = { + 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6, + 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1, + 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6 +}; + +static const unsigned int s6e63m0_22_260[] = { + 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6, + 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2, + 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1 + +}; + +static const unsigned int s6e63m0_22_240[] = { + 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9, + 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3, + 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA + +}; +static const unsigned int s6e63m0_22_220[] = { + 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8, + 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4, + 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2 +}; + +static const unsigned int s6e63m0_22_200[] = { + 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA, + 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6, + 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA +}; + +static const unsigned int s6e63m0_22_170[] = { + 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB, + 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8, + 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB +}; + +static const unsigned int s6e63m0_22_140[] = { + 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC, + 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9, + 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E +}; + +static const unsigned int s6e63m0_22_110[] = { + 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF, + 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC, + 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D +}; + +static const unsigned int s6e63m0_22_90[] = { + 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0, + 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF, + 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82 +}; + +static const unsigned int s6e63m0_22_30[] = { + 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8, + 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7, + 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51 +}; + +/* gamma value: 1.9 */ +static const unsigned int s6e63m0_19_300[] = { + 0x18, 0x08, 0x24, 0x61, 0x5F, 0x39, 0xBA, + 0xBD, 0xAD, 0xB1, 0xB6, 0xA5, 0xC4, 0xC5, + 0xBC, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB +}; + +static const unsigned int s6e63m0_19_280[] = { + 0x18, 0x08, 0x24, 0x61, 0x60, 0x39, 0xBB, + 0xBE, 0xAD, 0xB2, 0xB6, 0xA6, 0xC5, 0xC7, + 0xBD, 0x00, 0x9B, 0x00, 0x9E, 0x00, 0xD5 +}; + +static const unsigned int s6e63m0_19_260[] = { + 0x18, 0x08, 0x24, 0x63, 0x61, 0x3B, 0xBA, + 0xBE, 0xAC, 0xB3, 0xB8, 0xA7, 0xC6, 0xC8, + 0xBD, 0x00, 0x96, 0x00, 0x98, 0x00, 0xCF +}; + +static const unsigned int s6e63m0_19_240[] = { + 0x18, 0x08, 0x24, 0x67, 0x64, 0x3F, 0xBB, + 0xBE, 0xAD, 0xB3, 0xB9, 0xA7, 0xC8, 0xC9, + 0xBE, 0x00, 0x90, 0x00, 0x92, 0x00, 0xC8 +}; + +static const unsigned int s6e63m0_19_220[] = { + 0x18, 0x08, 0x24, 0x68, 0x64, 0x40, 0xBC, + 0xBF, 0xAF, 0xB4, 0xBA, 0xA9, 0xC8, 0xCA, + 0xBE, 0x00, 0x8B, 0x00, 0x8C, 0x00, 0xC0 +}; + +static const unsigned int s6e63m0_19_200[] = { + 0x18, 0x08, 0x24, 0x68, 0x64, 0x3F, 0xBE, + 0xC0, 0xB0, 0xB6, 0xBB, 0xAB, 0xC8, 0xCB, + 0xBF, 0x00, 0x85, 0x00, 0x86, 0x00, 0xB8 +}; + +static const unsigned int s6e63m0_19_170[] = { + 0x18, 0x08, 0x24, 0x69, 0x64, 0x40, 0xBF, + 0xC1, 0xB0, 0xB9, 0xBE, 0xAD, 0xCB, 0xCD, + 0xC2, 0x00, 0x7A, 0x00, 0x7B, 0x00, 0xAA +}; + +static const unsigned int s6e63m0_19_140[] = 
{ + 0x18, 0x08, 0x24, 0x6E, 0x65, 0x45, 0xC0, + 0xC3, 0xB2, 0xBA, 0xBE, 0xAE, 0xCD, 0xD0, + 0xC4, 0x00, 0x70, 0x00, 0x70, 0x00, 0x9C +}; + +static const unsigned int s6e63m0_19_110[] = { + 0x18, 0x08, 0x24, 0x6F, 0x65, 0x46, 0xC2, + 0xC4, 0xB3, 0xBF, 0xC2, 0xB2, 0xCF, 0xD1, + 0xC6, 0x00, 0x64, 0x00, 0x64, 0x00, 0x8D +}; + +static const unsigned int s6e63m0_19_90[] = { + 0x18, 0x08, 0x24, 0x74, 0x60, 0x4A, 0xC3, + 0xC6, 0xB5, 0xBF, 0xC3, 0xB2, 0xD2, 0xD3, + 0xC8, 0x00, 0x5B, 0x00, 0x5B, 0x00, 0x81 +}; + +static const unsigned int s6e63m0_19_30[] = { + 0x18, 0x08, 0x24, 0x84, 0x45, 0x4F, 0xCA, + 0xCB, 0xBC, 0xC9, 0xCB, 0xBC, 0xDA, 0xDA, + 0xD0, 0x00, 0x35, 0x00, 0x34, 0x00, 0x4E +}; + +/* gamma value: 1.7 */ +static const unsigned int s6e63m0_17_300[] = { + 0x18, 0x08, 0x24, 0x70, 0x70, 0x4F, 0xBF, + 0xC2, 0xB2, 0xB8, 0xBC, 0xAC, 0xCB, 0xCD, + 0xC3, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB +}; + +static const unsigned int s6e63m0_17_280[] = { + 0x18, 0x08, 0x24, 0x71, 0x71, 0x50, 0xBF, + 0xC2, 0xB2, 0xBA, 0xBE, 0xAE, 0xCB, 0xCD, + 0xC3, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6 +}; + +static const unsigned int s6e63m0_17_260[] = { + 0x18, 0x08, 0x24, 0x72, 0x72, 0x50, 0xC0, + 0xC3, 0xB4, 0xB9, 0xBE, 0xAE, 0xCC, 0xCF, + 0xC4, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1 +}; + +static const unsigned int s6e63m0_17_240[] = { + 0x18, 0x08, 0x24, 0x71, 0x72, 0x4F, 0xC2, + 0xC4, 0xB5, 0xBB, 0xBF, 0xB0, 0xCC, 0xCF, + 0xC3, 0x00, 0x91, 0x00, 0x95, 0x00, 0xCA +}; + +static const unsigned int s6e63m0_17_220[] = { + 0x18, 0x08, 0x24, 0x71, 0x73, 0x4F, 0xC2, + 0xC5, 0xB5, 0xBD, 0xC0, 0xB2, 0xCD, 0xD1, + 0xC5, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2 +}; + +static const unsigned int s6e63m0_17_200[] = { + 0x18, 0x08, 0x24, 0x72, 0x75, 0x51, 0xC2, + 0xC6, 0xB5, 0xBF, 0xC1, 0xB3, 0xCE, 0xD1, + 0xC6, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA +}; + +static const unsigned int s6e63m0_17_170[] = { + 0x18, 0x08, 0x24, 0x75, 0x77, 0x54, 0xC3, + 0xC7, 0xB7, 0xC0, 0xC3, 0xB4, 0xD1, 0xD3, + 0xC9, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB +}; + +static const unsigned int s6e63m0_17_140[] = { + 0x18, 0x08, 0x24, 0x7B, 0x77, 0x58, 0xC3, + 0xC8, 0xB8, 0xC2, 0xC6, 0xB6, 0xD3, 0xD4, + 0xCA, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E +}; + +static const unsigned int s6e63m0_17_110[] = { + 0x18, 0x08, 0x24, 0x81, 0x7B, 0x5D, 0xC6, + 0xCA, 0xBB, 0xC3, 0xC7, 0xB8, 0xD6, 0xD8, + 0xCD, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D +}; + +static const unsigned int s6e63m0_17_90[] = { + 0x18, 0x08, 0x24, 0x82, 0x7A, 0x5B, 0xC8, + 0xCB, 0xBD, 0xC5, 0xCA, 0xBA, 0xD6, 0xD8, + 0xCE, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82 +}; + +static const unsigned int s6e63m0_17_30[] = { + 0x18, 0x08, 0x24, 0x8F, 0x73, 0x63, 0xD1, + 0xD0, 0xC5, 0xCC, 0xD1, 0xC2, 0xDE, 0xE0, + 0xD6, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51 +}; + +struct s6e63m0_gamma { + unsigned int *gamma_22_table[MAX_GAMMA_LEVEL]; + unsigned int *gamma_19_table[MAX_GAMMA_LEVEL]; + unsigned int *gamma_17_table[MAX_GAMMA_LEVEL]; +}; + +static struct s6e63m0_gamma gamma_table = { + .gamma_22_table[0] = (unsigned int *)&s6e63m0_22_30, + .gamma_22_table[1] = (unsigned int *)&s6e63m0_22_90, + .gamma_22_table[2] = (unsigned int *)&s6e63m0_22_110, + .gamma_22_table[3] = (unsigned int *)&s6e63m0_22_140, + .gamma_22_table[4] = (unsigned int *)&s6e63m0_22_170, + .gamma_22_table[5] = (unsigned int *)&s6e63m0_22_200, + .gamma_22_table[6] = (unsigned int *)&s6e63m0_22_220, + .gamma_22_table[7] = (unsigned int *)&s6e63m0_22_240, + .gamma_22_table[8] = (unsigned int *)&s6e63m0_22_260, + .gamma_22_table[9] = (unsigned int *)&s6e63m0_22_280, + .gamma_22_table[10] = 
(unsigned int *)&s6e63m0_22_300, + + .gamma_19_table[0] = (unsigned int *)&s6e63m0_19_30, + .gamma_19_table[1] = (unsigned int *)&s6e63m0_19_90, + .gamma_19_table[2] = (unsigned int *)&s6e63m0_19_110, + .gamma_19_table[3] = (unsigned int *)&s6e63m0_19_140, + .gamma_19_table[4] = (unsigned int *)&s6e63m0_19_170, + .gamma_19_table[5] = (unsigned int *)&s6e63m0_19_200, + .gamma_19_table[6] = (unsigned int *)&s6e63m0_19_220, + .gamma_19_table[7] = (unsigned int *)&s6e63m0_19_240, + .gamma_19_table[8] = (unsigned int *)&s6e63m0_19_260, + .gamma_19_table[9] = (unsigned int *)&s6e63m0_19_280, + .gamma_19_table[10] = (unsigned int *)&s6e63m0_19_300, + + .gamma_17_table[0] = (unsigned int *)&s6e63m0_17_30, + .gamma_17_table[1] = (unsigned int *)&s6e63m0_17_90, + .gamma_17_table[2] = (unsigned int *)&s6e63m0_17_110, + .gamma_17_table[3] = (unsigned int *)&s6e63m0_17_140, + .gamma_17_table[4] = (unsigned int *)&s6e63m0_17_170, + .gamma_17_table[5] = (unsigned int *)&s6e63m0_17_200, + .gamma_17_table[6] = (unsigned int *)&s6e63m0_17_220, + .gamma_17_table[7] = (unsigned int *)&s6e63m0_17_240, + .gamma_17_table[8] = (unsigned int *)&s6e63m0_17_260, + .gamma_17_table[9] = (unsigned int *)&s6e63m0_17_280, + .gamma_17_table[10] = (unsigned int *)&s6e63m0_17_300, +}; + +#endif + diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c index 23b2a8c0dbfc..b020ba7f1cf2 100644 --- a/drivers/video/bf54x-lq043fb.c +++ b/drivers/video/bf54x-lq043fb.c @@ -501,7 +501,9 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id) static int __devinit bfin_bf54x_probe(struct platform_device *pdev) { +#ifndef NO_BL_SUPPORT struct backlight_properties props; +#endif struct bfin_bf54xfb_info *info; struct fb_info *fbinfo; int ret; @@ -654,7 +656,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev) printk(KERN_ERR DRIVER_NAME ": unable to register backlight.\n"); ret = -EINVAL; - goto out9; + unregister_framebuffer(fbinfo); + goto out8; } lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); @@ -663,8 +666,6 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev) return 0; -out9: - unregister_framebuffer(fbinfo); out8: free_irq(info->irq, info); out7: diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c index 2baac7cc1425..c8e1f04941bd 100644 --- a/drivers/video/bfin-lq035q1-fb.c +++ b/drivers/video/bfin-lq035q1-fb.c @@ -61,47 +61,13 @@ #define LCD_X_RES 320 /* Horizontal Resolution */ #define LCD_Y_RES 240 /* Vertical Resolution */ #define DMA_BUS_SIZE 16 +#define U_LINE 4 /* Blanking Lines */ -#define USE_RGB565_16_BIT_PPI - -#ifdef USE_RGB565_16_BIT_PPI -#define LCD_BPP 16 /* Bit Per Pixel */ -#define CLOCKS_PER_PIX 1 -#define CPLD_PIPELINE_DELAY_COR 0 /* NO CPLB */ -#endif /* Interface 16/18-bit TFT over an 8-bit wide PPI using a small Programmable Logic Device (CPLD) * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165 */ -#ifdef USE_RGB565_8_BIT_PPI -#define LCD_BPP 16 /* Bit Per Pixel */ -#define CLOCKS_PER_PIX 2 -#define CPLD_PIPELINE_DELAY_COR 3 /* RGB565 */ -#endif - -#ifdef USE_RGB888_8_BIT_PPI -#define LCD_BPP 24 /* Bit Per Pixel */ -#define CLOCKS_PER_PIX 3 -#define CPLD_PIPELINE_DELAY_COR 5 /* RGB888 */ -#endif - - /* - * HS and VS timing parameters (all in number of PPI clk ticks) - */ - -#define U_LINE 4 /* Blanking Lines */ - -#define H_ACTPIX (LCD_X_RES * CLOCKS_PER_PIX) /* active horizontal pixel */ -#define H_PERIOD (336 * CLOCKS_PER_PIX) /* HS period */ 
-#define H_PULSE (2 * CLOCKS_PER_PIX) /* HS pulse width */ -#define H_START (7 * CLOCKS_PER_PIX + CPLD_PIPELINE_DELAY_COR) /* first valid pixel */ - -#define V_LINES (LCD_Y_RES + U_LINE) /* total vertical lines */ -#define V_PULSE (2 * CLOCKS_PER_PIX) /* VS pulse width (1-5 H_PERIODs) */ -#define V_PERIOD (H_PERIOD * V_LINES) /* VS period */ - -#define ACTIVE_VIDEO_MEM_OFFSET ((U_LINE / 2) * LCD_X_RES * (LCD_BPP / 8)) #define BFIN_LCD_NBR_PALETTE_ENTRIES 256 @@ -110,12 +76,6 @@ #define PPI_PORT_CFG_01 0x10 #define PPI_POLS_1 0x8000 -#if (CLOCKS_PER_PIX > 1) -#define PPI_PMODE (DLEN_8 | PACK_EN) -#else -#define PPI_PMODE (DLEN_16) -#endif - #define LQ035_INDEX 0x74 #define LQ035_DATA 0x76 @@ -139,6 +99,15 @@ struct bfin_lq035q1fb_info { int irq; spinlock_t lock; /* lock */ u32 pseudo_pal[16]; + + u32 lcd_bpp; + u32 h_actpix; + u32 h_period; + u32 h_pulse; + u32 h_start; + u32 v_lines; + u32 v_pulse; + u32 v_period; }; static int nocursor; @@ -234,16 +203,69 @@ static int lq035q1_backlight(struct bfin_lq035q1fb_info *info, unsigned arg) return 0; } +static int bfin_lq035q1_calc_timing(struct bfin_lq035q1fb_info *fbi) +{ + unsigned long clocks_per_pix, cpld_pipeline_delay_cor; + + /* + * Interface 16/18-bit TFT over an 8-bit wide PPI using a small + * Programmable Logic Device (CPLD) + * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165 + */ + + switch (fbi->disp_info->ppi_mode) { + case USE_RGB565_16_BIT_PPI: + fbi->lcd_bpp = 16; + clocks_per_pix = 1; + cpld_pipeline_delay_cor = 0; + break; + case USE_RGB565_8_BIT_PPI: + fbi->lcd_bpp = 16; + clocks_per_pix = 2; + cpld_pipeline_delay_cor = 3; + break; + case USE_RGB888_8_BIT_PPI: + fbi->lcd_bpp = 24; + clocks_per_pix = 3; + cpld_pipeline_delay_cor = 5; + break; + default: + return -EINVAL; + } + + /* + * HS and VS timing parameters (all in number of PPI clk ticks) + */ + + fbi->h_actpix = (LCD_X_RES * clocks_per_pix); /* active horizontal pixel */ + fbi->h_period = (336 * clocks_per_pix); /* HS period */ + fbi->h_pulse = (2 * clocks_per_pix); /* HS pulse width */ + fbi->h_start = (7 * clocks_per_pix + cpld_pipeline_delay_cor); /* first valid pixel */ + + fbi->v_lines = (LCD_Y_RES + U_LINE); /* total vertical lines */ + fbi->v_pulse = (2 * clocks_per_pix); /* VS pulse width (1-5 H_PERIODs) */ + fbi->v_period = (fbi->h_period * fbi->v_lines); /* VS period */ + + return 0; +} + static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi) { - bfin_write_PPI_DELAY(H_START); - bfin_write_PPI_COUNT(H_ACTPIX - 1); - bfin_write_PPI_FRAME(V_LINES); + unsigned ppi_pmode; + + if (fbi->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI) + ppi_pmode = DLEN_16; + else + ppi_pmode = (DLEN_8 | PACK_EN); + + bfin_write_PPI_DELAY(fbi->h_start); + bfin_write_PPI_COUNT(fbi->h_actpix - 1); + bfin_write_PPI_FRAME(fbi->v_lines); bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */ PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */ PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */ - PPI_PMODE | /* 8/16 bit data length / PACK_EN? */ + ppi_pmode | /* 8/16 bit data length / PACK_EN? 
*/ PPI_POLS_1); /* falling edge syncs POLS */ } @@ -272,19 +294,19 @@ static void bfin_lq035q1_stop_timers(void) } -static void bfin_lq035q1_init_timers(void) +static void bfin_lq035q1_init_timers(struct bfin_lq035q1fb_info *fbi) { bfin_lq035q1_stop_timers(); - set_gptimer_period(TIMER_HSYNC_id, H_PERIOD); - set_gptimer_pwidth(TIMER_HSYNC_id, H_PULSE); + set_gptimer_period(TIMER_HSYNC_id, fbi->h_period); + set_gptimer_pwidth(TIMER_HSYNC_id, fbi->h_pulse); set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT | TIMER_TIN_SEL | TIMER_CLK_SEL| TIMER_EMU_RUN); - set_gptimer_period(TIMER_VSYNC_id, V_PERIOD); - set_gptimer_pwidth(TIMER_VSYNC_id, V_PULSE); + set_gptimer_period(TIMER_VSYNC_id, fbi->v_period); + set_gptimer_pwidth(TIMER_VSYNC_id, fbi->v_pulse); set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT | TIMER_TIN_SEL | TIMER_CLK_SEL | TIMER_EMU_RUN); @@ -294,21 +316,21 @@ static void bfin_lq035q1_init_timers(void) static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi) { + set_dma_config(CH_PPI, set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO, INTR_DISABLE, DIMENSION_2D, DATA_SIZE_16, DMA_NOSYNC_KEEP_DMA_BUF)); - set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE); + set_dma_x_count(CH_PPI, (LCD_X_RES * fbi->lcd_bpp) / DMA_BUS_SIZE); set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8); - set_dma_y_count(CH_PPI, V_LINES); + set_dma_y_count(CH_PPI, fbi->v_lines); set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8); set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer); } -#if (CLOCKS_PER_PIX == 1) static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3, P_PPI0_D4, P_PPI0_D5, @@ -316,22 +338,27 @@ static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11, P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15, 0}; -#else -static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, + +static const u16 ppi0_req_8[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3, P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7, 0}; -#endif -static inline void bfin_lq035q1_free_ports(void) +static inline void bfin_lq035q1_free_ports(unsigned ppi16) { - peripheral_free_list(ppi0_req_16); + if (ppi16) + peripheral_free_list(ppi0_req_16); + else + peripheral_free_list(ppi0_req_8); + if (ANOMALY_05000400) gpio_free(P_IDENT(P_PPI0_FS3)); } -static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev) +static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev, + unsigned ppi16) { + int ret; /* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode: * Drive PPI_FS3 Low */ @@ -342,7 +369,12 @@ static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev) gpio_direction_output(P_IDENT(P_PPI0_FS3), 0); } - if (peripheral_request_list(ppi0_req_16, DRIVER_NAME)) { + if (ppi16) + ret = peripheral_request_list(ppi0_req_16, DRIVER_NAME); + else + ret = peripheral_request_list(ppi0_req_8, DRIVER_NAME); + + if (ret) { dev_err(&pdev->dev, "requesting peripherals failed\n"); return -EFAULT; } @@ -364,7 +396,7 @@ static int bfin_lq035q1_fb_open(struct fb_info *info, int user) bfin_lq035q1_config_dma(fbi); bfin_lq035q1_config_ppi(fbi); - bfin_lq035q1_init_timers(); + bfin_lq035q1_init_timers(fbi); /* start dma */ enable_dma(CH_PPI); @@ -402,12 +434,9 @@ static int bfin_lq035q1_fb_release(struct fb_info *info, int user) static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var, struct fb_info
*info) { - switch (var->bits_per_pixel) { -#if (LCD_BPP == 24) - case 24:/* TRUECOLOUR, 16m */ -#else - case 16:/* DIRECTCOLOUR, 64k */ -#endif + struct bfin_lq035q1fb_info *fbi = info->par; + + if (var->bits_per_pixel == fbi->lcd_bpp) { var->red.offset = info->var.red.offset; var->green.offset = info->var.green.offset; var->blue.offset = info->var.blue.offset; @@ -420,8 +449,7 @@ static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var, var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; - break; - default: + } else { pr_debug("%s: depth not supported: %u BPP\n", __func__, var->bits_per_pixel); return -EINVAL; @@ -528,6 +556,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev) { struct bfin_lq035q1fb_info *info; struct fb_info *fbinfo; + u32 active_video_mem_offset; int ret; ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI"); @@ -550,6 +579,12 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev) platform_set_drvdata(pdev, fbinfo); + ret = bfin_lq035q1_calc_timing(info); + if (ret < 0) { + dev_err(&pdev->dev, "Failed PPI Mode\n"); + goto out3; + } + strcpy(fbinfo->fix.id, DRIVER_NAME); fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; @@ -571,46 +606,48 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev) fbinfo->var.xres_virtual = LCD_X_RES; fbinfo->var.yres = LCD_Y_RES; fbinfo->var.yres_virtual = LCD_Y_RES; - fbinfo->var.bits_per_pixel = LCD_BPP; + fbinfo->var.bits_per_pixel = info->lcd_bpp; if (info->disp_info->mode & LQ035_BGR) { -#if (LCD_BPP == 24) - fbinfo->var.red.offset = 0; - fbinfo->var.green.offset = 8; - fbinfo->var.blue.offset = 16; -#else - fbinfo->var.red.offset = 0; - fbinfo->var.green.offset = 5; - fbinfo->var.blue.offset = 11; -#endif + if (info->lcd_bpp == 24) { + fbinfo->var.red.offset = 0; + fbinfo->var.green.offset = 8; + fbinfo->var.blue.offset = 16; + } else { + fbinfo->var.red.offset = 0; + fbinfo->var.green.offset = 5; + fbinfo->var.blue.offset = 11; + } } else { -#if (LCD_BPP == 24) - fbinfo->var.red.offset = 16; - fbinfo->var.green.offset = 8; - fbinfo->var.blue.offset = 0; -#else - fbinfo->var.red.offset = 11; - fbinfo->var.green.offset = 5; - fbinfo->var.blue.offset = 0; -#endif + if (info->lcd_bpp == 24) { + fbinfo->var.red.offset = 16; + fbinfo->var.green.offset = 8; + fbinfo->var.blue.offset = 0; + } else { + fbinfo->var.red.offset = 11; + fbinfo->var.green.offset = 5; + fbinfo->var.blue.offset = 0; + } } fbinfo->var.transp.offset = 0; -#if (LCD_BPP == 24) - fbinfo->var.red.length = 8; - fbinfo->var.green.length = 8; - fbinfo->var.blue.length = 8; -#else - fbinfo->var.red.length = 5; - fbinfo->var.green.length = 6; - fbinfo->var.blue.length = 5; -#endif + if (info->lcd_bpp == 24) { + fbinfo->var.red.length = 8; + fbinfo->var.green.length = 8; + fbinfo->var.blue.length = 8; + } else { + fbinfo->var.red.length = 5; + fbinfo->var.green.length = 6; + fbinfo->var.blue.length = 5; + } fbinfo->var.transp.length = 0; - fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8 - + ACTIVE_VIDEO_MEM_OFFSET; + active_video_mem_offset = ((U_LINE / 2) * LCD_X_RES * (info->lcd_bpp / 8)); + + fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * info->lcd_bpp / 8 + + active_video_mem_offset; fbinfo->fix.line_length = fbinfo->var.xres_virtual * fbinfo->var.bits_per_pixel / 8; @@ -629,8 +666,8 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev) goto out3; } - fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; - fbinfo->fix.smem_start = 
(int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; + fbinfo->screen_base = (void *)info->fb_buffer + active_video_mem_offset; + fbinfo->fix.smem_start = (int)info->fb_buffer + active_video_mem_offset; fbinfo->fbops = &bfin_lq035q1_fb_ops; @@ -643,7 +680,8 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev) goto out4; } - ret = bfin_lq035q1_request_ports(pdev); + ret = bfin_lq035q1_request_ports(pdev, + info->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI); if (ret) { dev_err(&pdev->dev, "couldn't request gpio port\n"); goto out6; @@ -693,7 +731,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev) } dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n", - LCD_X_RES, LCD_Y_RES, LCD_BPP); + LCD_X_RES, LCD_Y_RES, info->lcd_bpp); return 0; @@ -705,7 +743,8 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev) out8: free_irq(info->irq, info); out7: - bfin_lq035q1_free_ports(); + bfin_lq035q1_free_ports(info->disp_info->ppi_mode == + USE_RGB565_16_BIT_PPI); out6: fb_dealloc_cmap(&fbinfo->cmap); out4: @@ -742,7 +781,8 @@ static int __devexit bfin_lq035q1_remove(struct platform_device *pdev) fb_dealloc_cmap(&fbinfo->cmap); - bfin_lq035q1_free_ports(); + bfin_lq035q1_free_ports(info->disp_info->ppi_mode == + USE_RGB565_16_BIT_PPI); platform_set_drvdata(pdev, NULL); framebuffer_release(fbinfo); @@ -781,7 +821,7 @@ static int bfin_lq035q1_resume(struct device *dev) bfin_lq035q1_config_dma(info); bfin_lq035q1_config_ppi(info); - bfin_lq035q1_init_timers(); + bfin_lq035q1_init_timers(info); /* start dma */ enable_dma(CH_PPI); diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c index c2ec3dcd4e91..7a50272eaab9 100644 --- a/drivers/video/bfin-t350mcqb-fb.c +++ b/drivers/video/bfin-t350mcqb-fb.c @@ -420,7 +420,9 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id) static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) { +#ifndef NO_BL_SUPPORT struct backlight_properties props; +#endif struct bfin_t350mcqbfb_info *info; struct fb_info *fbinfo; int ret; @@ -550,7 +552,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) printk(KERN_ERR DRIVER_NAME ": unable to register backlight.\n"); ret = -EINVAL; - goto out9; + unregister_framebuffer(fbinfo); + goto out8; } lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); @@ -559,8 +562,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) return 0; -out9: - unregister_framebuffer(fbinfo); out8: free_irq(info->irq, info); out7: diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c index 43320925c4ce..2c371c07f0da 100644 --- a/drivers/video/bw2.c +++ b/drivers/video/bw2.c @@ -376,8 +376,11 @@ static const struct of_device_id bw2_match[] = { MODULE_DEVICE_TABLE(of, bw2_match); static struct of_platform_driver bw2_driver = { - .name = "bw2", - .match_table = bw2_match, + .driver = { + .name = "bw2", + .owner = THIS_MODULE, + .of_match_table = bw2_match, + }, .probe = bw2_probe, .remove = __devexit_p(bw2_remove), }; diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c index 77a040af20a7..d12e05b6e63f 100644 --- a/drivers/video/cg14.c +++ b/drivers/video/cg14.c @@ -596,8 +596,11 @@ static const struct of_device_id cg14_match[] = { MODULE_DEVICE_TABLE(of, cg14_match); static struct of_platform_driver cg14_driver = { - .name = "cg14", - .match_table = cg14_match, + .driver = { + .name = "cg14", + .owner = THIS_MODULE, + .of_match_table = cg14_match, + }, .probe = cg14_probe, .remove = 
__devexit_p(cg14_remove), }; diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c index 30eedf79322c..b98f93f7f663 100644 --- a/drivers/video/cg3.c +++ b/drivers/video/cg3.c @@ -463,8 +463,11 @@ static const struct of_device_id cg3_match[] = { MODULE_DEVICE_TABLE(of, cg3_match); static struct of_platform_driver cg3_driver = { - .name = "cg3", - .match_table = cg3_match, + .driver = { + .name = "cg3", + .owner = THIS_MODULE, + .of_match_table = cg3_match, + }, .probe = cg3_probe, .remove = __devexit_p(cg3_remove), }; diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c index 6d0fcb43696e..480d761a27a8 100644 --- a/drivers/video/cg6.c +++ b/drivers/video/cg6.c @@ -740,7 +740,7 @@ static void cg6_unmap_regs(struct of_device *op, struct fb_info *info, static int __devinit cg6_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; struct fb_info *info; struct cg6_par *par; int linebytes, err; @@ -856,8 +856,11 @@ static const struct of_device_id cg6_match[] = { MODULE_DEVICE_TABLE(of, cg6_match); static struct of_platform_driver cg6_driver = { - .name = "cg6", - .match_table = cg6_match, + .driver = { + .name = "cg6", + .owner = THIS_MODULE, + .of_match_table = cg6_match, + }, .probe = cg6_probe, .remove = __devexit_p(cg6_remove), }; diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 8d244ba0f601..cad7d45c8bac 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -36,7 +36,9 @@ #define DRIVER_NAME "da8xx_lcdc" /* LCD Status Register */ +#define LCD_END_OF_FRAME1 BIT(9) #define LCD_END_OF_FRAME0 BIT(8) +#define LCD_PL_LOAD_DONE BIT(6) #define LCD_FIFO_UNDERFLOW BIT(5) #define LCD_SYNC_LOST BIT(2) @@ -58,11 +60,13 @@ #define LCD_PALETTE_LOAD_MODE(x) ((x) << 20) #define PALETTE_AND_DATA 0x00 #define PALETTE_ONLY 0x01 +#define DATA_ONLY 0x02 #define LCD_MONO_8BIT_MODE BIT(9) #define LCD_RASTER_ORDER BIT(8) #define LCD_TFT_MODE BIT(7) #define LCD_UNDERFLOW_INT_ENA BIT(6) +#define LCD_PL_ENABLE BIT(4) #define LCD_MONOCHROME_MODE BIT(1) #define LCD_RASTER_ENABLE BIT(0) #define LCD_TFT_ALT_ENABLE BIT(23) @@ -87,6 +91,10 @@ #define LCD_DMA_CTRL_REG 0x40 #define LCD_DMA_FRM_BUF_BASE_ADDR_0_REG 0x44 #define LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG 0x48 +#define LCD_DMA_FRM_BUF_BASE_ADDR_1_REG 0x4C +#define LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG 0x50 + +#define LCD_NUM_BUFFERS 2 #define WSI_TIMEOUT 50 #define PALETTE_SIZE 256 @@ -111,13 +119,20 @@ static inline void lcdc_write(unsigned int val, unsigned int addr) struct da8xx_fb_par { resource_size_t p_palette_base; unsigned char *v_palette_base; + dma_addr_t vram_phys; + unsigned long vram_size; + void *vram_virt; + unsigned int dma_start; + unsigned int dma_end; struct clk *lcdc_clk; int irq; unsigned short pseudo_palette[16]; - unsigned int databuf_sz; unsigned int palette_sz; unsigned int pxl_clk; int blank; + wait_queue_head_t vsync_wait; + int vsync_flag; + int vsync_timeout; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif @@ -148,9 +163,9 @@ static struct fb_fix_screeninfo da8xx_fb_fix __devinitdata = { .type = FB_TYPE_PACKED_PIXELS, .type_aux = 0, .visual = FB_VISUAL_PSEUDOCOLOR, - .xpanstep = 1, + .xpanstep = 0, .ypanstep = 1, - .ywrapstep = 1, + .ywrapstep = 0, .accel = FB_ACCEL_NONE }; @@ -221,22 +236,48 @@ static inline void lcd_disable_raster(void) static void lcd_blit(int load_mode, struct da8xx_fb_par *par) { - u32 tmp = par->p_palette_base + par->databuf_sz - 4; - u32 reg; + u32 start; + u32 end; + 
u32 reg_ras; + u32 reg_dma; + + /* init reg to clear PLM (loading mode) fields */ + reg_ras = lcdc_read(LCD_RASTER_CTRL_REG); + reg_ras &= ~(3 << 20); + + reg_dma = lcdc_read(LCD_DMA_CTRL_REG); + + if (load_mode == LOAD_DATA) { + start = par->dma_start; + end = par->dma_end; + + reg_ras |= LCD_PALETTE_LOAD_MODE(DATA_ONLY); + reg_dma |= LCD_END_OF_FRAME_INT_ENA; + reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE; + + lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); + lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); + lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); + lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG); + } else if (load_mode == LOAD_PALETTE) { + start = par->p_palette_base; + end = start + par->palette_sz - 1; + + reg_ras |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY); + reg_ras |= LCD_PL_ENABLE; + + lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); + lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); + } - /* Update the databuf in the hw. */ - lcdc_write(par->p_palette_base, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); - lcdc_write(tmp, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); + lcdc_write(reg_dma, LCD_DMA_CTRL_REG); + lcdc_write(reg_ras, LCD_RASTER_CTRL_REG); - /* Start the DMA. */ - reg = lcdc_read(LCD_RASTER_CTRL_REG); - reg &= ~(3 << 20); - if (load_mode == LOAD_DATA) - reg |= LCD_PALETTE_LOAD_MODE(PALETTE_AND_DATA); - else if (load_mode == LOAD_PALETTE) - reg |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY); - - lcdc_write(reg, LCD_RASTER_CTRL_REG); + /* + * The Raster enable bit must be set after all other control fields are + * set. + */ + lcd_enable_raster(); } /* Configure the Burst Size of DMA */ @@ -368,12 +409,8 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg) static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height, u32 bpp, u32 raster_order) { - u32 bpl, reg; + u32 reg; - /* Disable Dual Frame Buffer. */ - reg = lcdc_read(LCD_DMA_CTRL_REG); - lcdc_write(reg & ~LCD_DUAL_FRAME_BUFFER_ENABLE, - LCD_DMA_CTRL_REG); /* Set the Panel Width */ /* Pixels per line = (PPL + 1)*16 */ /*0x3F in bits 4..9 gives max horizontal resolution = 1024 pixels*/ @@ -410,9 +447,6 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height, return -EINVAL; } - bpl = width * bpp / 8; - par->databuf_sz = height * bpl + par->palette_sz; - return 0; } @@ -421,8 +455,9 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green, struct fb_info *info) { struct da8xx_fb_par *par = info->par; - unsigned short *palette = (unsigned short *)par->v_palette_base; + unsigned short *palette = (unsigned short *) par->v_palette_base; u_short pal; + int update_hw = 0; if (regno > 255) return 1; @@ -439,8 +474,10 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green, pal |= (green & 0x00f0); pal |= (blue & 0x000f); - palette[regno] = pal; - + if (palette[regno] != pal) { + update_hw = 1; + palette[regno] = pal; + } } else if ((info->var.bits_per_pixel == 16) && regno < 16) { red >>= (16 - info->var.red.length); red <<= info->var.red.offset; @@ -453,9 +490,16 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green, par->pseudo_palette[regno] = red | green | blue; - palette[0] = 0x4000; + if (palette[0] != 0x4000) { + update_hw = 1; + palette[0] = 0x4000; + } } + /* Update the palette in the h/w as needed.
*/ + if (update_hw) + lcd_blit(LOAD_PALETTE, par); + return 0; } @@ -541,15 +585,54 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg, static irqreturn_t lcdc_irq_handler(int irq, void *arg) { + struct da8xx_fb_par *par = arg; u32 stat = lcdc_read(LCD_STAT_REG); + u32 reg_ras; if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) { lcd_disable_raster(); lcdc_write(stat, LCD_STAT_REG); lcd_enable_raster(); - } else + } else if (stat & LCD_PL_LOAD_DONE) { + /* + * Must disable raster before changing state of any control bit. + * And also must be disabled before clearing the PL loading + * interrupt via the following write to the status register. If + * this is done afterwards, one gets multiple PL done interrupts. + */ + lcd_disable_raster(); + lcdc_write(stat, LCD_STAT_REG); + /* Disable PL completion interrupt */ + reg_ras = lcdc_read(LCD_RASTER_CTRL_REG); + reg_ras &= ~LCD_PL_ENABLE; + lcdc_write(reg_ras, LCD_RASTER_CTRL_REG); + + /* Setup and start data loading mode */ + lcd_blit(LOAD_DATA, par); + } else { + lcdc_write(stat, LCD_STAT_REG); + + if (stat & LCD_END_OF_FRAME0) { + lcdc_write(par->dma_start, + LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); + lcdc_write(par->dma_end, + LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); + par->vsync_flag = 1; + wake_up_interruptible(&par->vsync_wait); + } + + if (stat & LCD_END_OF_FRAME1) { + lcdc_write(par->dma_start, + LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); + lcdc_write(par->dma_end, + LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG); + par->vsync_flag = 1; + wake_up_interruptible(&par->vsync_wait); + } + } + return IRQ_HANDLED; } @@ -654,9 +737,10 @@ static int __devexit fb_remove(struct platform_device *dev) unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); - dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, - info->screen_base - PAGE_SIZE, - info->fix.smem_start); + dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base, + par->p_palette_base); + dma_free_coherent(NULL, par->vram_size, par->vram_virt, + par->vram_phys); free_irq(par->irq, par); clk_disable(par->lcdc_clk); clk_put(par->lcdc_clk); @@ -668,6 +752,39 @@ static int __devexit fb_remove(struct platform_device *dev) return 0; } +/* + * Function to wait for vertical sync which for this LCD peripheral + * translates into waiting for the current raster frame to complete. + */ +static int fb_wait_for_vsync(struct fb_info *info) +{ + struct da8xx_fb_par *par = info->par; + int ret; + + /* + * Set the flag to 0 and wait for the ISR to set it to 1. It would seem there is a + * race condition here where the ISR could have occurred just before or + * just after this set. But since we are just coarsely waiting for + * a frame to complete then that's OK. i.e. if the frame completed + * just before this code executed then we have to wait another full + * frame time but there is no way to avoid such a situation. On the + * other hand if the frame completed just after then we don't need + * to wait long at all. Either way we are guaranteed to return to the + * user immediately after a frame completion which is all that is + * required.
+ */ + par->vsync_flag = 0; + ret = wait_event_interruptible_timeout(par->vsync_wait, + par->vsync_flag != 0, + par->vsync_timeout); + if (ret < 0) + return ret; + if (ret == 0) + return -ETIMEDOUT; + + return 0; +} + static int fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { @@ -697,6 +814,8 @@ static int fb_ioctl(struct fb_info *info, unsigned int cmd, sync_arg.pulse_width, sync_arg.front_porch); break; + case FBIO_WAITFORVSYNC: + return fb_wait_for_vsync(info); default: return -EINVAL; } @@ -732,10 +851,47 @@ static int cfb_blank(int blank, struct fb_info *info) return ret; } +/* + * Set new x,y offsets in the virtual display for the visible area and switch + * to the new mode. + */ +static int da8xx_pan_display(struct fb_var_screeninfo *var, + struct fb_info *fbi) +{ + int ret = 0; + struct fb_var_screeninfo new_var; + struct da8xx_fb_par *par = fbi->par; + struct fb_fix_screeninfo *fix = &fbi->fix; + unsigned int end; + unsigned int start; + + if (var->xoffset != fbi->var.xoffset || + var->yoffset != fbi->var.yoffset) { + memcpy(&new_var, &fbi->var, sizeof(new_var)); + new_var.xoffset = var->xoffset; + new_var.yoffset = var->yoffset; + if (fb_check_var(&new_var, fbi)) + ret = -EINVAL; + else { + memcpy(&fbi->var, &new_var, sizeof(new_var)); + + start = fix->smem_start + + new_var.yoffset * fix->line_length + + new_var.xoffset * var->bits_per_pixel / 8; + end = start + var->yres * fix->line_length - 1; + par->dma_start = start; + par->dma_end = end; + } + } + + return ret; +} + static struct fb_ops da8xx_fb_ops = { .owner = THIS_MODULE, .fb_check_var = fb_check_var, .fb_setcolreg = fb_setcolreg, + .fb_pan_display = da8xx_pan_display, .fb_ioctl = fb_ioctl, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, @@ -829,40 +985,53 @@ static int __init fb_probe(struct platform_device *device) } /* allocate frame buffer */ - da8xx_fb_info->screen_base = dma_alloc_coherent(NULL, - par->databuf_sz + PAGE_SIZE, - (resource_size_t *) - &da8xx_fb_info->fix.smem_start, - GFP_KERNEL | GFP_DMA); - - if (!da8xx_fb_info->screen_base) { + par->vram_size = lcdc_info->width * lcdc_info->height * lcd_cfg->bpp; + par->vram_size = PAGE_ALIGN(par->vram_size/8); + par->vram_size = par->vram_size * LCD_NUM_BUFFERS; + + par->vram_virt = dma_alloc_coherent(NULL, + par->vram_size, + (resource_size_t *) &par->vram_phys, + GFP_KERNEL | GFP_DMA); + if (!par->vram_virt) { dev_err(&device->dev, "GLCD: kmalloc for frame buffer failed\n"); ret = -EINVAL; goto err_release_fb; } - /* move palette base pointer by (PAGE_SIZE - palette_sz) bytes */ - par->v_palette_base = da8xx_fb_info->screen_base + - (PAGE_SIZE - par->palette_sz); - par->p_palette_base = da8xx_fb_info->fix.smem_start + - (PAGE_SIZE - par->palette_sz); - - /* the rest of the frame buffer is pixel data */ - da8xx_fb_info->screen_base = par->v_palette_base + par->palette_sz; - da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz; - da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz; - da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8; + da8xx_fb_info->screen_base = (char __iomem *) par->vram_virt; + da8xx_fb_fix.smem_start = par->vram_phys; + da8xx_fb_fix.smem_len = par->vram_size; + da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8; + + par->dma_start = par->vram_phys; + par->dma_end = par->dma_start + lcdc_info->height * + da8xx_fb_fix.line_length - 1; + + /* allocate palette buffer */ + par->v_palette_base = dma_alloc_coherent(NULL, + PALETTE_SIZE, + (resource_size_t *) + 
&par->p_palette_base, + GFP_KERNEL | GFP_DMA); + if (!par->v_palette_base) { + dev_err(&device->dev, + "GLCD: kmalloc for palette buffer failed\n"); + ret = -EINVAL; + goto err_release_fb_mem; + } + memset(par->v_palette_base, 0, PALETTE_SIZE); par->irq = platform_get_irq(device, 0); if (par->irq < 0) { ret = -ENOENT; - goto err_release_fb_mem; + goto err_release_pl_mem; } ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par); if (ret) - goto err_release_fb_mem; + goto err_release_pl_mem; /* Initialize par */ da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp; @@ -870,8 +1039,8 @@ static int __init fb_probe(struct platform_device *device) da8xx_fb_var.xres = lcdc_info->width; da8xx_fb_var.xres_virtual = lcdc_info->width; - da8xx_fb_var.yres = lcdc_info->height; - da8xx_fb_var.yres_virtual = lcdc_info->height; + da8xx_fb_var.yres = lcdc_info->height; + da8xx_fb_var.yres_virtual = lcdc_info->height * LCD_NUM_BUFFERS; da8xx_fb_var.grayscale = lcd_cfg->p_disp_panel->panel_shade == MONOCHROME ? 1 : 0; @@ -892,18 +1061,18 @@ static int __init fb_probe(struct platform_device *device) ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); if (ret) goto err_free_irq; - - /* First palette_sz byte of the frame buffer is the palette */ da8xx_fb_info->cmap.len = par->palette_sz; - /* Flush the buffer to the screen. */ - lcd_blit(LOAD_DATA, par); - /* initialize var_screeninfo */ da8xx_fb_var.activate = FB_ACTIVATE_FORCE; fb_set_var(da8xx_fb_info, &da8xx_fb_var); dev_set_drvdata(&device->dev, da8xx_fb_info); + + /* initialize the vsync wait queue */ + init_waitqueue_head(&par->vsync_wait); + par->vsync_timeout = HZ / 5; + /* Register the Frame Buffer */ if (register_framebuffer(da8xx_fb_info) < 0) { dev_err(&device->dev, @@ -919,10 +1088,6 @@ static int __init fb_probe(struct platform_device *device) goto err_cpu_freq; } #endif - - /* enable raster engine */ - lcd_enable_raster(); - return 0; #ifdef CONFIG_CPU_FREQ @@ -936,10 +1101,12 @@ err_dealloc_cmap: err_free_irq: free_irq(par->irq, par); +err_release_pl_mem: + dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base, + par->p_palette_base); + err_release_fb_mem: - dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, - da8xx_fb_info->screen_base - PAGE_SIZE, - da8xx_fb_info->fix.smem_start); + dma_free_coherent(NULL, par->vram_size, par->vram_virt, par->vram_phys); err_release_fb: framebuffer_release(da8xx_fb_info); diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 6113c47e095a..073c9b408cf7 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c @@ -66,7 +66,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma, return 0; } -int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync) +int fb_deferred_io_fsync(struct file *file, int datasync) { struct fb_info *info = file->private_data; @@ -155,25 +155,41 @@ static void fb_deferred_io_work(struct work_struct *work) { struct fb_info *info = container_of(work, struct fb_info, deferred_work.work); - struct list_head *node, *next; - struct page *cur; struct fb_deferred_io *fbdefio = info->fbdefio; + struct page *page, *tmp_page; + struct list_head *node, *tmp_node; + struct list_head non_dirty; + + INIT_LIST_HEAD(&non_dirty); /* here we mkclean the pages, then do all deferred IO */ mutex_lock(&fbdefio->lock); - list_for_each_entry(cur, &fbdefio->pagelist, lru) { - lock_page(cur); - page_mkclean(cur); - unlock_page(cur); + list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) { + lock_page(page); + /* + * The 
workqueue callback can be triggered after a + * ->page_mkwrite() call but before the PTE has been marked + * dirty. In this case page_mkclean() won't "rearm" the page. + * + * To avoid this, remove those "non-dirty" pages from the + * pagelist before calling the driver's callback, then add + * them back to get processed on the next work iteration. + * At that time, their PTEs will hopefully be dirty for real. + */ + if (!page_mkclean(page)) + list_move_tail(&page->lru, &non_dirty); + unlock_page(page); } /* driver's callback with pagelist */ fbdefio->deferred_io(info, &fbdefio->pagelist); - /* clear the list */ - list_for_each_safe(node, next, &fbdefio->pagelist) { + /* clear the list... */ + list_for_each_safe(node, tmp_node, &fbdefio->pagelist) { list_del(node); } + /* ... and add back the "non-dirty" pages to the list */ + list_splice_tail(&non_dirty, &fbdefio->pagelist); mutex_unlock(&fbdefio->lock); } @@ -202,6 +218,7 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open); void fb_deferred_io_cleanup(struct fb_info *info) { struct fb_deferred_io *fbdefio = info->fbdefio; + struct list_head *node, *tmp_node; struct page *page; int i; @@ -209,6 +226,13 @@ void fb_deferred_io_cleanup(struct fb_info *info) cancel_delayed_work(&info->deferred_work); flush_scheduled_work(); + /* the list may still have some non-dirty pages at this point */ + mutex_lock(&fbdefio->lock); + list_for_each_safe(node, tmp_node, &fbdefio->pagelist) { + list_del(node); + } + mutex_unlock(&fbdefio->lock); + /* clear out the mapping that we setup */ for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { page = fb_deferred_io_page(info, i); diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c index a42fabab69df..95c0227f47fc 100644 --- a/drivers/video/ffb.c +++ b/drivers/video/ffb.c @@ -896,7 +896,7 @@ static void ffb_init_fix(struct fb_info *info) static int __devinit ffb_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; struct ffb_fbc __iomem *fbc; struct ffb_dac __iomem *dac; struct fb_info *info; @@ -1053,8 +1053,11 @@ static const struct of_device_id ffb_match[] = { MODULE_DEVICE_TABLE(of, ffb_match); static struct of_platform_driver ffb_driver = { - .name = "ffb", - .match_table = ffb_match, + .driver = { + .name = "ffb", + .owner = THIS_MODULE, + .of_match_table = ffb_match, + }, .probe = ffb_probe, .remove = __devexit_p(ffb_remove), }; diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index 994358a4f302..27455ce298b7 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c @@ -1421,7 +1421,7 @@ static ssize_t show_monitor(struct device *device, static int __devinit fsl_diu_probe(struct of_device *ofdev, const struct of_device_id *match) { - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct mfb_info *mfbi; phys_addr_t dummy_ad_addr; int ret, i, error = 0; @@ -1647,9 +1647,11 @@ static struct of_device_id fsl_diu_match[] = { MODULE_DEVICE_TABLE(of, fsl_diu_match); static struct of_platform_driver fsl_diu_driver = { - .owner = THIS_MODULE, - .name = "fsl_diu", - .match_table = fsl_diu_match, + .driver = { + .name = "fsl_diu", + .owner = THIS_MODULE, + .of_match_table = fsl_diu_match, + }, .probe = fsl_diu_probe, .remove = fsl_diu_remove, .suspend = fsl_diu_suspend, diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c index 8bbf251f83d9..af8f0f2cc782 100644 --- a/drivers/video/hgafb.c +++ b/drivers/video/hgafb.c @@ -106,7 +106,7 @@ static
DEFINE_SPINLOCK(hga_reg_lock); /* Framebuffer driver structures */ -static struct fb_var_screeninfo __initdata hga_default_var = { +static struct fb_var_screeninfo hga_default_var __devinitdata = { .xres = 720, .yres = 348, .xres_virtual = 720, @@ -120,7 +120,7 @@ static struct fb_var_screeninfo __initdata hga_default_var = { .width = -1, }; -static struct fb_fix_screeninfo __initdata hga_fix = { +static struct fb_fix_screeninfo hga_fix __devinitdata = { .id = "HGA", .type = FB_TYPE_PACKED_PIXELS, /* (not sure) */ .visual = FB_VISUAL_MONO10, @@ -276,7 +276,7 @@ static void hga_blank(int blank_mode) spin_unlock_irqrestore(&hga_reg_lock, flags); } -static int __init hga_card_detect(void) +static int __devinit hga_card_detect(void) { int count = 0; void __iomem *p, *q; @@ -596,7 +596,7 @@ static int __devinit hgafb_probe(struct platform_device *pdev) return 0; } -static int hgafb_remove(struct platform_device *pdev) +static int __devexit hgafb_remove(struct platform_device *pdev) { struct fb_info *info = platform_get_drvdata(pdev); @@ -621,7 +621,7 @@ static int hgafb_remove(struct platform_device *pdev) static struct platform_driver hgafb_driver = { .probe = hgafb_probe, - .remove = hgafb_remove, + .remove = __devexit_p(hgafb_remove), .driver = { .name = "hgafb", }, diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c index 393f3f3d3dfe..cfb8d6451014 100644 --- a/drivers/video/hitfb.c +++ b/drivers/video/hitfb.c @@ -30,14 +30,14 @@ #define WIDTH 640 -static struct fb_var_screeninfo hitfb_var __initdata = { +static struct fb_var_screeninfo hitfb_var __devinitdata = { .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo hitfb_fix __initdata = { +static struct fb_fix_screeninfo hitfb_fix __devinitdata = { .id = "Hitachi HD64461", .type = FB_TYPE_PACKED_PIXELS, .accel = FB_ACCEL_NONE, @@ -417,7 +417,7 @@ err_fb: return ret; } -static int __exit hitfb_remove(struct platform_device *dev) +static int __devexit hitfb_remove(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); @@ -462,7 +462,7 @@ static const struct dev_pm_ops hitfb_dev_pm_ops = { static struct platform_driver hitfb_driver = { .probe = hitfb_probe, - .remove = __exit_p(hitfb_remove), + .remove = __devexit_p(hitfb_remove), .driver = { .name = "hitfb", .owner = THIS_MODULE, diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h index 40984551c927..6b51175629c7 100644 --- a/drivers/video/intelfb/intelfb.h +++ b/drivers/video/intelfb/intelfb.h @@ -371,10 +371,6 @@ struct intelfb_info { ((dinfo)->chipset == INTEL_965G) || \ ((dinfo)->chipset == INTEL_965GM)) -#ifndef FBIO_WAITFORVSYNC -#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) -#endif - /*** function prototypes ***/ extern int intelfb_var_to_depth(const struct fb_var_screeninfo *var); diff --git a/drivers/video/leo.c b/drivers/video/leo.c index 1db55f128490..3d7895316eaf 100644 --- a/drivers/video/leo.c +++ b/drivers/video/leo.c @@ -663,8 +663,11 @@ static const struct of_device_id leo_match[] = { MODULE_DEVICE_TABLE(of, leo_match); static struct of_platform_driver leo_driver = { - .name = "leo", - .match_table = leo_match, + .driver = { + .name = "leo", + .owner = THIS_MODULE, + .of_match_table = leo_match, + }, .probe = leo_probe, .remove = __devexit_p(leo_remove), }; diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c index 8280a58a0e55..0540de4f5cb4 100644 --- a/drivers/video/mb862xx/mb862xxfb.c +++ 
b/drivers/video/mb862xx/mb862xxfb.c @@ -718,9 +718,11 @@ static struct of_device_id __devinitdata of_platform_mb862xx_tbl[] = { }; static struct of_platform_driver of_platform_mb862xxfb_driver = { - .owner = THIS_MODULE, - .name = DRV_NAME, - .match_table = of_platform_mb862xx_tbl, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = of_platform_mb862xx_tbl, + }, .probe = of_platform_mb862xx_probe, .remove = __devexit_p(of_platform_mb862xx_remove), }; diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c index 6bf0d460a738..d4cde79ea15e 100644 --- a/drivers/video/nuc900fb.c +++ b/drivers/video/nuc900fb.c @@ -667,7 +667,7 @@ release_irq: release_regs: iounmap(fbi->io); release_mem_region: - release_mem_region((unsigned long)fbi->mem, size); + release_mem_region(res->start, size); free_fb: framebuffer_release(fbinfo); return ret; diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c index 81440f2b9091..c85dd408a9b8 100644 --- a/drivers/video/p9100.c +++ b/drivers/video/p9100.c @@ -353,8 +353,11 @@ static const struct of_device_id p9100_match[] = { MODULE_DEVICE_TABLE(of, p9100_match); static struct of_platform_driver p9100_driver = { - .name = "p9100", - .match_table = p9100_match, + .driver = { + .name = "p9100", + .owner = THIS_MODULE, + .of_match_table = p9100_match, + }, .probe = p9100_probe, .remove = __devexit_p(p9100_remove), }; diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c index 8a204e7a5b5b..72a1f4c04732 100644 --- a/drivers/video/platinumfb.c +++ b/drivers/video/platinumfb.c @@ -536,7 +536,7 @@ static int __init platinumfb_setup(char *options) static int __devinit platinumfb_probe(struct of_device* odev, const struct of_device_id *match) { - struct device_node *dp = odev->node; + struct device_node *dp = odev->dev.of_node; struct fb_info *info; struct fb_info_platinum *pinfo; volatile __u8 *fbuffer; @@ -679,8 +679,11 @@ static struct of_device_id platinumfb_match[] = static struct of_platform_driver platinum_driver = { - .name = "platinumfb", - .match_table = platinumfb_match, + .driver = { + .name = "platinumfb", + .owner = THIS_MODULE, + .of_match_table = platinumfb_match, + }, .probe = platinumfb_probe, .remove = platinumfb_remove, }; diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c index 2b094dec4a56..46b430978bcc 100644 --- a/drivers/video/s3c2410fb.c +++ b/drivers/video/s3c2410fb.c @@ -631,7 +631,7 @@ static struct fb_ops s3c2410fb_ops = { * cache. Once this area is remapped, all virtual memory * access to the video memory should occur at the new region. 
*/ -static int __init s3c2410fb_map_video_memory(struct fb_info *info) +static int __devinit s3c2410fb_map_video_memory(struct fb_info *info) { struct s3c2410fb_info *fbi = info->par; dma_addr_t map_dma; @@ -814,7 +814,7 @@ static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info) static char driver_name[] = "s3c2410fb"; -static int __init s3c24xxfb_probe(struct platform_device *pdev, +static int __devinit s3c24xxfb_probe(struct platform_device *pdev, enum s3c_drv_type drv_type) { struct s3c2410fb_info *info; @@ -1018,7 +1018,7 @@ static int __devinit s3c2412fb_probe(struct platform_device *pdev) /* * Cleanup */ -static int s3c2410fb_remove(struct platform_device *pdev) +static int __devexit s3c2410fb_remove(struct platform_device *pdev) { struct fb_info *fbinfo = platform_get_drvdata(pdev); struct s3c2410fb_info *info = fbinfo->par; @@ -1096,7 +1096,7 @@ static int s3c2410fb_resume(struct platform_device *dev) static struct platform_driver s3c2410fb_driver = { .probe = s3c2410fb_probe, - .remove = s3c2410fb_remove, + .remove = __devexit_p(s3c2410fb_remove), .suspend = s3c2410fb_suspend, .resume = s3c2410fb_resume, .driver = { @@ -1107,7 +1107,7 @@ static struct platform_driver s3c2410fb_driver = { static struct platform_driver s3c2412fb_driver = { .probe = s3c2412fb_probe, - .remove = s3c2410fb_remove, + .remove = __devexit_p(s3c2410fb_remove), .suspend = s3c2410fb_suspend, .resume = s3c2410fb_resume, .driver = { diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index d4471b4c0374..dce8c97b4333 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -71,7 +71,8 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX", "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge", "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX", - "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P"}; + "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P", + "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X"}; #define CHIP_UNKNOWN 0x00 #define CHIP_732_TRIO32 0x01 @@ -89,10 +90,14 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", #define CHIP_356_VIRGE_GX2 0x0D #define CHIP_357_VIRGE_GX2P 0x0E #define CHIP_359_VIRGE_GX2P 0x0F +#define CHIP_360_TRIO3D_1X 0x10 +#define CHIP_362_TRIO3D_2X 0x11 +#define CHIP_368_TRIO3D_2X 0x12 #define CHIP_XXX_TRIO 0x80 #define CHIP_XXX_TRIO64V2_DXGX 0x81 #define CHIP_XXX_VIRGE_DXGX 0x82 +#define CHIP_36X_TRIO3D_1X_2X 0x83 #define CHIP_UNDECIDED_FLAG 0x80 #define CHIP_MASK 0xFF @@ -324,6 +329,7 @@ static void s3fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) static void s3_set_pixclock(struct fb_info *info, u32 pixclock) { + struct s3fb_info *par = info->par; u16 m, n, r; u8 regval; int rv; @@ -339,7 +345,13 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock) vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD); /* Set S3 clock registers */ - vga_wseq(NULL, 0x12, ((n - 2) | (r << 5))); + if (par->chip == CHIP_360_TRIO3D_1X || + par->chip == CHIP_362_TRIO3D_2X || + par->chip == CHIP_368_TRIO3D_2X) { + vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */ + vga_wseq(NULL, 0x29, r >> 2); /* remaining highest bit of r */ + } else + vga_wseq(NULL, 0x12, (n - 2) | (r << 5)); vga_wseq(NULL, 0x13, m - 2); udelay(1000); @@ -456,7 +468,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) static int s3fb_set_par(struct fb_info *info) { struct s3fb_info *par = info->par; - u32 value, mode, hmul, 
offset_value, screen_size, multiplex; + u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes; u32 bpp = info->var.bits_per_pixel; if (bpp != 0) { @@ -518,7 +530,7 @@ static int s3fb_set_par(struct fb_info *info) svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */ svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */ - svga_wcrt_mask(0x5D, 0x00, 0x28); // Clear strange HSlen bits + svga_wcrt_mask(0x5D, 0x00, 0x28); /* Clear strange HSlen bits */ /* svga_wcrt_mask(0x58, 0x03, 0x03); */ @@ -530,10 +542,14 @@ static int s3fb_set_par(struct fb_info *info) pr_debug("fb%d: offset register : %d\n", info->node, offset_value); svga_wcrt_multi(s3_offset_regs, offset_value); - vga_wcrt(NULL, 0x54, 0x18); /* M parameter */ - vga_wcrt(NULL, 0x60, 0xff); /* N parameter */ - vga_wcrt(NULL, 0x61, 0xff); /* L parameter */ - vga_wcrt(NULL, 0x62, 0xff); /* L parameter */ + if (par->chip != CHIP_360_TRIO3D_1X && + par->chip != CHIP_362_TRIO3D_2X && + par->chip != CHIP_368_TRIO3D_2X) { + vga_wcrt(NULL, 0x54, 0x18); /* M parameter */ + vga_wcrt(NULL, 0x60, 0xff); /* N parameter */ + vga_wcrt(NULL, 0x61, 0xff); /* L parameter */ + vga_wcrt(NULL, 0x62, 0xff); /* L parameter */ + } vga_wcrt(NULL, 0x3A, 0x35); svga_wattr(0x33, 0x00); @@ -570,6 +586,16 @@ static int s3fb_set_par(struct fb_info *info) vga_wcrt(NULL, 0x66, 0x90); } + if (par->chip == CHIP_360_TRIO3D_1X || + par->chip == CHIP_362_TRIO3D_2X || + par->chip == CHIP_368_TRIO3D_2X) { + dbytes = info->var.xres * ((bpp+7)/8); + vga_wcrt(NULL, 0x91, (dbytes + 7) / 8); + vga_wcrt(NULL, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80); + + vga_wcrt(NULL, 0x66, 0x81); + } + svga_wcrt_mask(0x31, 0x00, 0x40); multiplex = 0; hmul = 1; @@ -615,11 +641,13 @@ static int s3fb_set_par(struct fb_info *info) break; case 3: pr_debug("fb%d: 8 bit pseudocolor\n", info->node); - if (info->var.pixclock > 20000) { - svga_wcrt_mask(0x50, 0x00, 0x30); + svga_wcrt_mask(0x50, 0x00, 0x30); + if (info->var.pixclock > 20000 || + par->chip == CHIP_360_TRIO3D_1X || + par->chip == CHIP_362_TRIO3D_2X || + par->chip == CHIP_368_TRIO3D_2X) svga_wcrt_mask(0x67, 0x00, 0xF0); - } else { - svga_wcrt_mask(0x50, 0x00, 0x30); + else { svga_wcrt_mask(0x67, 0x10, 0xF0); multiplex = 1; } @@ -634,7 +662,10 @@ static int s3fb_set_par(struct fb_info *info) } else { svga_wcrt_mask(0x50, 0x10, 0x30); svga_wcrt_mask(0x67, 0x30, 0xF0); - hmul = 2; + if (par->chip != CHIP_360_TRIO3D_1X && + par->chip != CHIP_362_TRIO3D_2X && + par->chip != CHIP_368_TRIO3D_2X) + hmul = 2; } break; case 5: @@ -647,7 +678,10 @@ static int s3fb_set_par(struct fb_info *info) } else { svga_wcrt_mask(0x50, 0x10, 0x30); svga_wcrt_mask(0x67, 0x50, 0xF0); - hmul = 2; + if (par->chip != CHIP_360_TRIO3D_1X && + par->chip != CHIP_362_TRIO3D_2X && + par->chip != CHIP_368_TRIO3D_2X) + hmul = 2; } break; case 6: @@ -866,6 +900,17 @@ static int __devinit s3_identification(int chip) return CHIP_385_VIRGE_GX; } + if (chip == CHIP_36X_TRIO3D_1X_2X) { + switch (vga_rcrt(NULL, 0x2f)) { + case 0x00: + return CHIP_360_TRIO3D_1X; + case 0x01: + return CHIP_362_TRIO3D_2X; + case 0x02: + return CHIP_368_TRIO3D_2X; + } + } + return CHIP_UNKNOWN; } @@ -930,17 +975,32 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i vga_wcrt(NULL, 0x38, 0x48); vga_wcrt(NULL, 0x39, 0xA5); - /* Find how many physical memory there is on card */ - /* 0x36 register is accessible even if other registers are locked */ - regval = vga_rcrt(NULL, 0x36); - info->screen_size = s3_memsizes[regval >> 5] << 10; - info->fix.smem_len = info->screen_size; - 
+ /* Identify chip type */ par->chip = id->driver_data & CHIP_MASK; par->rev = vga_rcrt(NULL, 0x2f); if (par->chip & CHIP_UNDECIDED_FLAG) par->chip = s3_identification(par->chip); + /* Find how much physical memory there is on the card */ + /* 0x36 register is accessible even if other registers are locked */ + regval = vga_rcrt(NULL, 0x36); + if (par->chip == CHIP_360_TRIO3D_1X || + par->chip == CHIP_362_TRIO3D_2X || + par->chip == CHIP_368_TRIO3D_2X) { + switch ((regval & 0xE0) >> 5) { + case 0: /* 8MB -- only 4MB usable for display */ + case 1: /* 4MB with 32-bit bus */ + case 2: /* 4MB */ + info->screen_size = 4 << 20; + break; + case 6: /* 2MB */ + info->screen_size = 2 << 20; + break; + } + } else + info->screen_size = s3_memsizes[regval >> 5] << 10; + info->fix.smem_len = info->screen_size; + /* Find MCLK frequency */ regval = vga_rseq(NULL, 0x10); par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2); @@ -1131,6 +1191,7 @@ static struct pci_device_id s3_devices[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P}, + {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X}, {0, 0, 0, 0, 0, 0, 0} }; diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c index 7a3a5e28eca1..53455f295510 100644 --- a/drivers/video/sgivwfb.c +++ b/drivers/video/sgivwfb.c @@ -47,7 +47,7 @@ static int ywrap = 0; static int flatpanel_id = -1; -static struct fb_fix_screeninfo sgivwfb_fix __initdata = { +static struct fb_fix_screeninfo sgivwfb_fix __devinitdata = { .id = "SGI Vis WS FB", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, @@ -57,7 +57,7 @@ static struct fb_fix_screeninfo sgivwfb_fix __initdata = { .line_length = 640, }; -static struct fb_var_screeninfo sgivwfb_var __initdata = { +static struct fb_var_screeninfo sgivwfb_var __devinitdata = { /* 640x480, 8 bpp */ .xres = 640, .yres = 480, @@ -79,7 +79,7 @@ static struct fb_var_screeninfo sgivwfb_var __initdata = { .vmode = FB_VMODE_NONINTERLACED }; -static struct fb_var_screeninfo sgivwfb_var1600sw __initdata = { +static struct fb_var_screeninfo sgivwfb_var1600sw __devinitdata = { /* 1600x1024, 8 bpp */ .xres = 1600, .yres = 1024, @@ -825,7 +825,7 @@ fail_ioremap_regs: return -ENXIO; } -static int sgivwfb_remove(struct platform_device *dev) +static int __devexit sgivwfb_remove(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); @@ -845,7 +845,7 @@ static struct platform_driver sgivwfb_driver = { .probe = sgivwfb_probe, - .remove = sgivwfb_remove, + .remove = __devexit_p(sgivwfb_remove), .driver = { .name = "sgivwfb", }, diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index a531a0f7cdf2..559bf1727a2b 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c @@ -1845,7 +1845,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info) memset(fix, 0, sizeof(struct fb_fix_screeninfo)); - strcpy(fix->id, ivideo->myid); + strlcpy(fix->id, ivideo->myid, sizeof(fix->id)); mutex_lock(&info->mm_lock); fix->smem_start = ivideo->video_base + ivideo->video_offset; diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c index 23e69e834a18..489b44e8db81 100644 --- a/drivers/video/sunxvr1000.c +++ b/drivers/video/sunxvr1000.c @@ -114,7 +114,7 @@ static
int __devinit gfb_set_fbinfo(struct gfb_info *gp) static int __devinit gfb_probe(struct of_device *op, const struct of_device_id *match) { - struct device_node *dp = op->node; + struct device_node *dp = op->dev.of_node; struct fb_info *info; struct gfb_info *gp; int err; @@ -199,10 +199,13 @@ static const struct of_device_id gfb_match[] = { MODULE_DEVICE_TABLE(of, ffb_match); static struct of_platform_driver gfb_driver = { - .name = "gfb", - .match_table = gfb_match, .probe = gfb_probe, .remove = __devexit_p(gfb_remove), + .driver = { + .name = "gfb", + .owner = THIS_MODULE, + .of_match_table = gfb_match, + }, }; static int __init gfb_init(void) diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c index c0c2b18fcdcf..ef7a7bd8b503 100644 --- a/drivers/video/tcx.c +++ b/drivers/video/tcx.c @@ -512,8 +512,11 @@ static const struct of_device_id tcx_match[] = { MODULE_DEVICE_TABLE(of, tcx_match); static struct of_platform_driver tcx_driver = { - .name = "tcx", - .match_table = tcx_match, + .driver = { + .name = "tcx", + .owner = THIS_MODULE, + .of_match_table = tcx_match, + }, .probe = tcx_probe, .remove = __devexit_p(tcx_remove), }; diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c index 9b5532b4de35..bc67251f1a2f 100644 --- a/drivers/video/vfb.c +++ b/drivers/video/vfb.c @@ -78,7 +78,7 @@ static void rvfree(void *mem, unsigned long size) vfree(mem); } -static struct fb_var_screeninfo vfb_default __initdata = { +static struct fb_var_screeninfo vfb_default __devinitdata = { .xres = 640, .yres = 480, .xres_virtual = 640, @@ -100,7 +100,7 @@ static struct fb_var_screeninfo vfb_default __initdata = { .vmode = FB_VMODE_NONINTERLACED, }; -static struct fb_fix_screeninfo vfb_fix __initdata = { +static struct fb_fix_screeninfo vfb_fix __devinitdata = { .id = "Virtual FB", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c index 149c47ac7e93..28ccab44a391 100644 --- a/drivers/video/vga16fb.c +++ b/drivers/video/vga16fb.c @@ -65,7 +65,7 @@ struct vga16fb_par { /* --------------------------------------------------------------------- */ -static struct fb_var_screeninfo vga16fb_defined __initdata = { +static struct fb_var_screeninfo vga16fb_defined __devinitdata = { .xres = 640, .yres = 480, .xres_virtual = 640, @@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined __initdata = { }; /* name should not depend on EGA/VGA */ -static struct fb_fix_screeninfo vga16fb_fix __initdata = { +static struct fb_fix_screeninfo vga16fb_fix __devinitdata = { .id = "VGA16 VGA", .smem_start = VGA_FB_PHYS, .smem_len = VGA_FB_PHYS_LEN, @@ -1287,7 +1287,7 @@ static struct fb_ops vga16fb_ops = { }; #ifndef MODULE -static int vga16fb_setup(char *options) +static int __init vga16fb_setup(char *options) { char *this_opt; @@ -1393,7 +1393,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev) return ret; } -static int vga16fb_remove(struct platform_device *dev) +static int __devexit vga16fb_remove(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); @@ -1405,7 +1405,7 @@ static int vga16fb_remove(struct platform_device *dev) static struct platform_driver vga16fb_driver = { .probe = vga16fb_probe, - .remove = vga16fb_remove, + .remove = __devexit_p(vga16fb_remove), .driver = { .name = "vga16fb", }, diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c index 2bc40e682f95..1082541358f0 100644 --- a/drivers/video/via/viafbdev.c +++ b/drivers/video/via/viafbdev.c @@ -578,14 +578,9 @@ 
static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg) break; case VIAFB_SET_GAMMA_LUT: - viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL); - if (!viafb_gamma_table) - return -ENOMEM; - if (copy_from_user(viafb_gamma_table, argp, - 256 * sizeof(u32))) { - kfree(viafb_gamma_table); - return -EFAULT; - } + viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32)); + if (IS_ERR(viafb_gamma_table)) + return PTR_ERR(viafb_gamma_table); viafb_set_gamma_table(viafb_bpp, viafb_gamma_table); kfree(viafb_gamma_table); break; diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c index 31b0e17ed090..e66b8b19ce5d 100644 --- a/drivers/video/w100fb.c +++ b/drivers/video/w100fb.c @@ -53,7 +53,7 @@ static void w100_update_enable(void); static void w100_update_disable(void); static void calc_hsync(struct w100fb_par *par); static void w100_init_graphic_engine(struct w100fb_par *par); -struct w100_pll_info *w100_get_xtal_table(unsigned int freq); +struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit; /* Pseudo palette size */ #define MAX_PALETTES 16 @@ -782,7 +782,7 @@ out: } -static int w100fb_remove(struct platform_device *pdev) +static int __devexit w100fb_remove(struct platform_device *pdev) { struct fb_info *info = platform_get_drvdata(pdev); struct w100fb_par *par=info->par; @@ -1020,7 +1020,7 @@ static struct pll_entries { { 0 }, }; -struct w100_pll_info *w100_get_xtal_table(unsigned int freq) +struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq) { struct pll_entries *pll_entry = w100_pll_tables; @@ -1611,7 +1611,7 @@ static void w100_vsync(void) static struct platform_driver w100fb_driver = { .probe = w100fb_probe, - .remove = w100fb_remove, + .remove = __devexit_p(w100fb_remove), .suspend = w100fb_suspend, .resume = w100fb_resume, .driver = { @@ -1619,7 +1619,7 @@ static struct platform_driver w100fb_driver = { }, }; -int __devinit w100fb_init(void) +int __init w100fb_init(void) { return platform_driver_register(&w100fb_driver); } diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c index 3fcb83f03881..574dc54e12d4 100644 --- a/drivers/video/xilinxfb.c +++ b/drivers/video/xilinxfb.c @@ -423,7 +423,7 @@ xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match) * To check whether the core is connected directly to DCR or PLB * interface and initialize the tft_access accordingly. */ - p = (u32 *)of_get_property(op->node, "xlnx,dcr-splb-slave-if", NULL); + p = (u32 *)of_get_property(op->dev.of_node, "xlnx,dcr-splb-slave-if", NULL); tft_access = p ? 
*p : 0; /* @@ -432,41 +432,41 @@ xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match) */ if (tft_access) { drvdata->flags |= PLB_ACCESS_FLAG; - rc = of_address_to_resource(op->node, 0, &res); + rc = of_address_to_resource(op->dev.of_node, 0, &res); if (rc) { dev_err(&op->dev, "invalid address\n"); goto err; } } else { res.start = 0; - start = dcr_resource_start(op->node, 0); - drvdata->dcr_len = dcr_resource_len(op->node, 0); - drvdata->dcr_host = dcr_map(op->node, start, drvdata->dcr_len); + start = dcr_resource_start(op->dev.of_node, 0); + drvdata->dcr_len = dcr_resource_len(op->dev.of_node, 0); + drvdata->dcr_host = dcr_map(op->dev.of_node, start, drvdata->dcr_len); if (!DCR_MAP_OK(drvdata->dcr_host)) { dev_err(&op->dev, "invalid DCR address\n"); goto err; } } - prop = of_get_property(op->node, "phys-size", &size); + prop = of_get_property(op->dev.of_node, "phys-size", &size); if ((prop) && (size >= sizeof(u32)*2)) { pdata.screen_width_mm = prop[0]; pdata.screen_height_mm = prop[1]; } - prop = of_get_property(op->node, "resolution", &size); + prop = of_get_property(op->dev.of_node, "resolution", &size); if ((prop) && (size >= sizeof(u32)*2)) { pdata.xres = prop[0]; pdata.yres = prop[1]; } - prop = of_get_property(op->node, "virtual-resolution", &size); + prop = of_get_property(op->dev.of_node, "virtual-resolution", &size); if ((prop) && (size >= sizeof(u32)*2)) { pdata.xvirt = prop[0]; pdata.yvirt = prop[1]; } - if (of_find_property(op->node, "rotate-display", NULL)) + if (of_find_property(op->dev.of_node, "rotate-display", NULL)) pdata.rotate_screen = 1; dev_set_drvdata(&op->dev, drvdata); @@ -492,13 +492,12 @@ static struct of_device_id xilinxfb_of_match[] __devinitdata = { MODULE_DEVICE_TABLE(of, xilinxfb_of_match); static struct of_platform_driver xilinxfb_of_driver = { - .owner = THIS_MODULE, - .name = DRIVER_NAME, - .match_table = xilinxfb_of_match, .probe = xilinxfb_of_probe, .remove = __devexit_p(xilinxfb_of_remove), .driver = { .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = xilinxfb_of_match, }, }; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index b87ba23442d2..afcfacc9bbe2 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -145,13 +145,19 @@ config KS8695_WATCHDOG Watchdog timer embedded into KS8695 processor. This will reboot your system when the timeout is reached. +config HAVE_S3C2410_WATCHDOG + bool + help + This will include watchdog timer support for Samsung SoCs. If + you want to include watchdog support for any machine, kindly + select this in the respective mach-XXXX/Kconfig file. + config S3C2410_WATCHDOG tristate "S3C2410 Watchdog" - depends on ARCH_S3C2410 + depends on ARCH_S3C2410 || HAVE_S3C2410_WATCHDOG help - Watchdog timer block in the Samsung S3C2410 chips. This will - reboot the system when the timer expires with the watchdog - enabled. + Watchdog timer block in the Samsung SoCs. This will reboot + the system when the timer expires with the watchdog enabled. The driver is limited by the speed of the system's PCLK signal, so with reasonably fast systems (PCLK around 50-66MHz) @@ -306,6 +312,18 @@ config MAX63XX_WATCHDOG help Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. +config IMX2_WDT + tristate "IMX2+ Watchdog" + depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX5 + help + This is the driver for the hardware watchdog + on the Freescale IMX2 and later processors. 
+ If you have one of these processors and wish to have + watchdog support enabled, say Y, otherwise say N. + + To compile this driver as a module, choose M here: the + module will be called imx2_wdt. + # AVR32 Architecture config AT32AP700X_WDT diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 5e3cb95bb0e9..72f3e2073f8e 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -47,6 +47,7 @@ obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o +obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c index 9c7ccd1e9088..9042a95fc98c 100644 --- a/drivers/watchdog/bfin_wdt.c +++ b/drivers/watchdog/bfin_wdt.c @@ -23,6 +23,7 @@ #include <linux/interrupt.h> #include <linux/uaccess.h> #include <asm/blackfin.h> +#include <asm/bfin_watchdog.h> #define stamp(fmt, args...) \ pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) @@ -49,24 +50,6 @@ # define bfin_write_WDOG_STAT(x) bfin_write_WDOGA_STAT(x) #endif -/* Bit in SWRST that indicates boot caused by watchdog */ -#define SWRST_RESET_WDOG 0x4000 - -/* Bit in WDOG_CTL that indicates watchdog has expired (WDR0) */ -#define WDOG_EXPIRED 0x8000 - -/* Masks for WDEV field in WDOG_CTL register */ -#define ICTL_RESET 0x0 -#define ICTL_NMI 0x2 -#define ICTL_GPI 0x4 -#define ICTL_NONE 0x6 -#define ICTL_MASK 0x6 - -/* Masks for WDEN field in WDOG_CTL register */ -#define WDEN_MASK 0x0FF0 -#define WDEN_ENABLE 0x0000 -#define WDEN_DISABLE 0x0AD0 - /* some defaults */ #define WATCHDOG_TIMEOUT 20 diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 801ead191499..3d49671cdf5a 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c @@ -137,12 +137,12 @@ static long booke_wdt_ioctl(struct file *file, if (copy_to_user((void *)arg, &ident, sizeof(ident))) return -EFAULT; case WDIOC_GETSTATUS: - return put_user(ident.options, p); + return put_user(0, p); case WDIOC_GETBOOTSTATUS: /* XXX: something is clearing TSR */ tmp = mfspr(SPRN_TSR) & TSR_WRS(3); - /* returns 1 if last reset was caused by the WDT */ - return (tmp ? 1 : 0); + /* returns CARDRESET if last reset was caused by the WDT */ + return (tmp ? WDIOF_CARDRESET : 0); case WDIOC_SETOPTIONS: if (get_user(tmp, p)) return -EINVAL; diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index ba2efce4b40e..d62b9ce8f773 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -577,7 +577,7 @@ static int __devinit cpwd_probe(struct of_device *op, * interrupt_mask register cannot be written, so no timer * interrupts can be masked within the PLD. 
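The booke_wdt hunk above corrects two ioctl conventions: WDIOC_GETSTATUS reports current status flags (this driver has none, hence 0), and WDIOC_GETBOOTSTATUS reports WDIOF_CARDRESET when the watchdog caused the last reset, rather than a driver-private 1. Sketched for a hypothetical driver, where example_reset_was_wdt() stands in for the hardware query:

static bool example_reset_was_wdt(void);	/* hypothetical register read */

static long example_wdt_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case WDIOC_GETSTATUS:
		return put_user(0, p);	/* no live status bits */
	case WDIOC_GETBOOTSTATUS:
		/* userspace expects WDIOF_* flags here */
		return put_user(example_reset_was_wdt() ?
				WDIOF_CARDRESET : 0, p);
	default:
		return -ENOTTY;
	}
}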
*/ - str_prop = of_get_property(op->node, "model", NULL); + str_prop = of_get_property(op->dev.of_node, "model", NULL); p->broken = (str_prop && !strcmp(str_prop, WD_BADMODEL)); if (!p->enabled) @@ -677,8 +677,11 @@ static const struct of_device_id cpwd_match[] = { MODULE_DEVICE_TABLE(of, cpwd_match); static struct of_platform_driver cpwd_driver = { - .name = DRIVER_NAME, - .match_table = cpwd_match, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = cpwd_match, + }, .probe = cpwd_probe, .remove = __devexit_p(cpwd_remove), }; diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c index d1c4e55b1db0..3f3dc093ad68 100644 --- a/drivers/watchdog/eurotechwdt.c +++ b/drivers/watchdog/eurotechwdt.c @@ -68,7 +68,6 @@ static spinlock_t eurwdt_lock; /* * You must set these - there is no sane way to probe for this board. - * You can use eurwdt=x,y to set these now. */ static int io = 0x3f0; diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index abdbad034a6c..ca0f4c6cf5ab 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@ -303,9 +303,11 @@ static const struct of_device_id gef_wdt_ids[] = { }; static struct of_platform_driver gef_wdt_driver = { - .owner = THIS_MODULE, - .name = "gef_wdt", - .match_table = gef_wdt_ids, + .driver = { + .name = "gef_wdt", + .owner = THIS_MODULE, + .of_match_table = gef_wdt_ids, + }, .probe = gef_wdt_probe, }; diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c index 5133bca5ccbe..481d1ad43464 100644 --- a/drivers/watchdog/iTCO_vendor_support.c +++ b/drivers/watchdog/iTCO_vendor_support.c @@ -101,13 +101,6 @@ static void supermicro_old_pre_stop(unsigned long acpibase) outl(val32, SMI_EN); /* Needed to deactivate watchdog */ } -static void supermicro_old_pre_keepalive(unsigned long acpibase) -{ - /* Reload TCO Timer (done in iTCO_wdt_keepalive) + */ - /* Clear "Expire Flag" (Bit 3 of TC01_STS register) */ - outb(0x08, TCO1_STS); -} - /* * Vendor Support: 2 * Board: Super Micro Computer Inc. P4SBx, P4DPx @@ -337,9 +330,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_stop); void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat) { - if (vendorsupport == SUPERMICRO_OLD_BOARD) - supermicro_old_pre_keepalive(acpibase); - else if (vendorsupport == SUPERMICRO_NEW_BOARD) + if (vendorsupport == SUPERMICRO_NEW_BOARD) supermicro_new_pre_set_heartbeat(heartbeat); } EXPORT_SYMBOL(iTCO_vendor_pre_keepalive); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 8da886035374..69de8713b8e4 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -40,7 +40,7 @@ /* Module and version information */ #define DRV_NAME "iTCO_wdt" -#define DRV_VERSION "1.05" +#define DRV_VERSION "1.06" #define PFX DRV_NAME ": " /* Includes */ @@ -391,8 +391,8 @@ static struct platform_device *iTCO_wdt_platform_device; #define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */ static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ module_param(heartbeat, int, 0); -MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. " - "(2<heartbeat<39 (TCO v1) or 613 (TCO v2), default=" +MODULE_PARM_DESC(heartbeat, "Watchdog timeout in seconds. 
" + "5..76 (TCO v1) or 3..614 (TCO v2), default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); static int nowayout = WATCHDOG_NOWAYOUT; @@ -523,8 +523,13 @@ static int iTCO_wdt_keepalive(void) /* Reload the timer by writing to the TCO Timer Counter register */ if (iTCO_wdt_private.iTCO_version == 2) outw(0x01, TCO_RLD); - else if (iTCO_wdt_private.iTCO_version == 1) + else if (iTCO_wdt_private.iTCO_version == 1) { + /* Reset the timeout status bit so that the timer + * needs to count down twice again before rebooting */ + outw(0x0008, TCO1_STS); /* write 1 to clear bit */ + outb(0x01, TCO_RLD); + } spin_unlock(&iTCO_wdt_private.io_lock); return 0; @@ -537,6 +542,11 @@ static int iTCO_wdt_set_heartbeat(int t) unsigned int tmrval; tmrval = seconds_to_ticks(t); + + /* For TCO v1 the timer counts down twice before rebooting */ + if (iTCO_wdt_private.iTCO_version == 1) + tmrval /= 2; + /* from the specs: */ /* "Values of 0h-3h are ignored and should not be attempted" */ if (tmrval < 0x04) @@ -593,6 +603,8 @@ static int iTCO_wdt_get_timeleft(int *time_left) spin_lock(&iTCO_wdt_private.io_lock); val8 = inb(TCO_RLD); val8 &= 0x3f; + if (!(inw(TCO1_STS) & 0x0008)) + val8 += (inb(TCOv1_TMR) & 0x3f); spin_unlock(&iTCO_wdt_private.io_lock); *time_left = (val8 * 6) / 10; @@ -832,9 +844,9 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, TCOBASE); /* Clear out the (probably old) status */ - outb(8, TCO1_STS); /* Clear the Time Out Status bit */ - outb(2, TCO2_STS); /* Clear SECOND_TO_STS bit */ - outb(4, TCO2_STS); /* Clear BOOT_STS bit */ + outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */ + outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */ + outw(0x0004, TCO2_STS); /* Clear BOOT_STS bit */ /* Make sure the watchdog is not running */ iTCO_wdt_stop(); @@ -844,8 +856,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, if (iTCO_wdt_set_heartbeat(heartbeat)) { iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT); printk(KERN_INFO PFX - "heartbeat value must be 2 < heartbeat < 39 (TCO v1) " - "or 613 (TCO v2), using %d\n", heartbeat); + "timeout value out of range, using %d\n", heartbeat); } ret = misc_register(&iTCO_wdt_miscdev); diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c new file mode 100644 index 000000000000..ea25885781bb --- /dev/null +++ b/drivers/watchdog/imx2_wdt.c @@ -0,0 +1,358 @@ +/* + * Watchdog driver for IMX2 and later processors + * + * Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de> + * + * some parts adapted by similar drivers from Darius Augulis and Vladimir + * Zapolskiy, additional improvements by Wim Van Sebroeck. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * NOTE: MX1 has a slightly different Watchdog than MX2 and later: + * + * MX1: MX2+: + * ---- ----- + * Registers: 32-bit 16-bit + * Stopable timer: Yes No + * Need to enable clk: No Yes + * Halt on suspend: Manual Can be automatic + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> +#include <linux/clk.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/uaccess.h> +#include <linux/timer.h> +#include <linux/jiffies.h> +#include <mach/hardware.h> + +#define DRIVER_NAME "imx2-wdt" + +#define IMX2_WDT_WCR 0x00 /* Control Register */ +#define IMX2_WDT_WCR_WT (0xFF << 8) /* -> Watchdog Timeout Field */ +#define IMX2_WDT_WCR_WRE (1 << 3) /* -> WDOG Reset Enable */ +#define IMX2_WDT_WCR_WDE (1 << 2) /* -> Watchdog Enable */ + +#define IMX2_WDT_WSR 0x02 /* Service Register */ +#define IMX2_WDT_SEQ1 0x5555 /* -> service sequence 1 */ +#define IMX2_WDT_SEQ2 0xAAAA /* -> service sequence 2 */ + +#define IMX2_WDT_MAX_TIME 128 +#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */ + +#define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8) + +#define IMX2_WDT_STATUS_OPEN 0 +#define IMX2_WDT_STATUS_STARTED 1 +#define IMX2_WDT_EXPECT_CLOSE 2 + +static struct { + struct clk *clk; + void __iomem *base; + unsigned timeout; + unsigned long status; + struct timer_list timer; /* Pings the watchdog when closed */ +} imx2_wdt; + +static struct miscdevice imx2_wdt_miscdev; + +static int nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + + +static unsigned timeout = IMX2_WDT_DEFAULT_TIME; +module_param(timeout, uint, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default=" + __MODULE_STRING(IMX2_WDT_DEFAULT_TIME) ")"); + +static const struct watchdog_info imx2_wdt_info = { + .identity = "imx2+ watchdog", + .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, +}; + +static inline void imx2_wdt_setup(void) +{ + u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR); + + /* Strip the old watchdog Time-Out value */ + val &= ~IMX2_WDT_WCR_WT; + /* Generate reset if WDOG times out */ + val &= ~IMX2_WDT_WCR_WRE; + /* Keep Watchdog Disabled */ + val &= ~IMX2_WDT_WCR_WDE; + /* Set the watchdog's Time-Out value */ + val |= WDOG_SEC_TO_COUNT(imx2_wdt.timeout); + + __raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR); + + /* enable the watchdog */ + val |= IMX2_WDT_WCR_WDE; + __raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR); +} + +static inline void imx2_wdt_ping(void) +{ + __raw_writew(IMX2_WDT_SEQ1, imx2_wdt.base + IMX2_WDT_WSR); + __raw_writew(IMX2_WDT_SEQ2, imx2_wdt.base + IMX2_WDT_WSR); +} + +static void imx2_wdt_timer_ping(unsigned long arg) +{ + /* ping it every imx2_wdt.timeout / 2 seconds to prevent reboot */ + imx2_wdt_ping(); + mod_timer(&imx2_wdt.timer, jiffies + imx2_wdt.timeout * HZ / 2); +} + +static void imx2_wdt_start(void) +{ + if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) { + /* at our first start we enable clock and do initialisations */ + clk_enable(imx2_wdt.clk); + + imx2_wdt_setup(); + } else /* delete the timer that pings the watchdog after close */ + del_timer_sync(&imx2_wdt.timer); + + /* Watchdog is enabled - time to reload the timeout value */ + imx2_wdt_ping(); +} + +static void imx2_wdt_stop(void) +{ + /* we don't need a clk_disable, it cannot be disabled 
once started. + * We use a timer to ping the watchdog while /dev/watchdog is closed */ + imx2_wdt_timer_ping(0); +} + +static void imx2_wdt_set_timeout(int new_timeout) +{ + u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR); + + /* set the new timeout value in the WSR */ + val &= ~IMX2_WDT_WCR_WT; + val |= WDOG_SEC_TO_COUNT(new_timeout); + __raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR); +} + +static int imx2_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status)) + return -EBUSY; + + imx2_wdt_start(); + return nonseekable_open(inode, file); +} + +static int imx2_wdt_close(struct inode *inode, struct file *file) +{ + if (test_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status) && !nowayout) + imx2_wdt_stop(); + else { + dev_crit(imx2_wdt_miscdev.parent, + "Unexpected close: Expect reboot!\n"); + imx2_wdt_ping(); + } + + clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status); + clear_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status); + return 0; +} + +static long imx2_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int __user *p = argp; + int new_value; + + switch (cmd) { + case WDIOC_GETSUPPORT: + return copy_to_user(argp, &imx2_wdt_info, + sizeof(struct watchdog_info)) ? -EFAULT : 0; + + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0, p); + + case WDIOC_KEEPALIVE: + imx2_wdt_ping(); + return 0; + + case WDIOC_SETTIMEOUT: + if (get_user(new_value, p)) + return -EFAULT; + if ((new_value < 1) || (new_value > IMX2_WDT_MAX_TIME)) + return -EINVAL; + imx2_wdt_set_timeout(new_value); + imx2_wdt.timeout = new_value; + imx2_wdt_ping(); + + /* Fallthrough to return current value */ + case WDIOC_GETTIMEOUT: + return put_user(imx2_wdt.timeout, p); + + default: + return -ENOTTY; + } +} + +static ssize_t imx2_wdt_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + size_t i; + char c; + + if (len == 0) /* Can we see this even ? 
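Two design points in the imx2_wdt code above deserve a note: the i.MX2 watchdog cannot be stopped once enabled, so "stop" merely hands keepalives to a self-rearming kernel timer firing at half the timeout, and a keepalive is the fixed 0x5555/0xAAAA service sequence written to the WSR. Stripped down (static names abbreviated from the driver):

static void __iomem *wdt_base;		/* ioremapped in probe */
static struct timer_list wdt_timer;	/* initialised with setup_timer() */
static unsigned wdt_timeout;		/* seconds */

static void wdt_ping(void)
{
	/* the mandatory two-write service sequence */
	__raw_writew(0x5555, wdt_base + 0x02);	/* WSR */
	__raw_writew(0xAAAA, wdt_base + 0x02);
}

static void wdt_timer_ping(unsigned long unused)
{
	/* keep the unstoppable hardware fed while /dev/watchdog is
	 * closed; half the timeout leaves a safety margin */
	wdt_ping();
	mod_timer(&wdt_timer, jiffies + wdt_timeout * HZ / 2);
}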
*/ + return 0; + + clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status); + /* scan to see whether or not we got the magic character */ + for (i = 0; i != len; i++) { + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + set_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status); + } + + imx2_wdt_ping(); + return len; +} + +static const struct file_operations imx2_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = imx2_wdt_ioctl, + .open = imx2_wdt_open, + .release = imx2_wdt_close, + .write = imx2_wdt_write, +}; + +static struct miscdevice imx2_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &imx2_wdt_fops, +}; + +static int __init imx2_wdt_probe(struct platform_device *pdev) +{ + int ret; + int res_size; + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "can't get device resources\n"); + return -ENODEV; + } + + res_size = resource_size(res); + if (!devm_request_mem_region(&pdev->dev, res->start, res_size, + res->name)) { + dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n", + res_size, res->start); + return -ENOMEM; + } + + imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size); + if (!imx2_wdt.base) { + dev_err(&pdev->dev, "ioremap failed\n"); + return -ENOMEM; + } + + imx2_wdt.clk = clk_get_sys("imx-wdt.0", NULL); + if (IS_ERR(imx2_wdt.clk)) { + dev_err(&pdev->dev, "can't get Watchdog clock\n"); + return PTR_ERR(imx2_wdt.clk); + } + + imx2_wdt.timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME); + if (imx2_wdt.timeout != timeout) + dev_warn(&pdev->dev, "Initial timeout out of range! " + "Clamped from %u to %u\n", timeout, imx2_wdt.timeout); + + setup_timer(&imx2_wdt.timer, imx2_wdt_timer_ping, 0); + + imx2_wdt_miscdev.parent = &pdev->dev; + ret = misc_register(&imx2_wdt_miscdev); + if (ret) + goto fail; + + dev_info(&pdev->dev, + "IMX2+ Watchdog Timer enabled. 
timeout=%ds (nowayout=%d)\n", + imx2_wdt.timeout, nowayout); + return 0; + +fail: + imx2_wdt_miscdev.parent = NULL; + clk_put(imx2_wdt.clk); + return ret; +} + +static int __exit imx2_wdt_remove(struct platform_device *pdev) +{ + misc_deregister(&imx2_wdt_miscdev); + + if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) { + del_timer_sync(&imx2_wdt.timer); + + dev_crit(imx2_wdt_miscdev.parent, + "Device removed: Expect reboot!\n"); + } else + clk_put(imx2_wdt.clk); + + imx2_wdt_miscdev.parent = NULL; + return 0; +} + +static void imx2_wdt_shutdown(struct platform_device *pdev) +{ + if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) { + /* we are running, we need to delete the timer but will give + * max timeout before reboot will take place */ + del_timer_sync(&imx2_wdt.timer); + imx2_wdt_set_timeout(IMX2_WDT_MAX_TIME); + imx2_wdt_ping(); + + dev_crit(imx2_wdt_miscdev.parent, + "Device shutdown: Expect reboot!\n"); + } +} + +static struct platform_driver imx2_wdt_driver = { + .probe = imx2_wdt_probe, + .remove = __exit_p(imx2_wdt_remove), + .shutdown = imx2_wdt_shutdown, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init imx2_wdt_init(void) +{ + return platform_driver_probe(&imx2_wdt_driver, imx2_wdt_probe); +} +module_init(imx2_wdt_init); + +static void __exit imx2_wdt_exit(void) +{ + platform_driver_unregister(&imx2_wdt_driver); +} +module_exit(imx2_wdt_exit); + +MODULE_AUTHOR("Wolfram Sang"); +MODULE_DESCRIPTION("Watchdog driver for IMX2 and later"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index 4e3941c5e293..6622335773bb 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c @@ -53,7 +53,7 @@ static int mpc8xxx_wdt_init_late(void); static u16 timeout = 0xffff; module_param(timeout, ushort, 0); MODULE_PARM_DESC(timeout, - "Watchdog timeout in ticks. (0<timeout<65536, default=65535"); + "Watchdog timeout in ticks. (0<timeout<65536, default=65535)"); static int reset = 1; module_param(reset, bool, 0); @@ -273,12 +273,12 @@ static const struct of_device_id mpc8xxx_wdt_match[] = { MODULE_DEVICE_TABLE(of, mpc8xxx_wdt_match); static struct of_platform_driver mpc8xxx_wdt_driver = { - .match_table = mpc8xxx_wdt_match, .probe = mpc8xxx_wdt_probe, .remove = __devexit_p(mpc8xxx_wdt_remove), - .driver = { - .name = "mpc8xxx_wdt", - .owner = THIS_MODULE, + .driver = { + .name = "mpc8xxx_wdt", + .owner = THIS_MODULE, + .of_match_table = mpc8xxx_wdt_match, }, }; diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c index d3aa2f1fe61d..3a56bc360924 100644 --- a/drivers/watchdog/pc87413_wdt.c +++ b/drivers/watchdog/pc87413_wdt.c @@ -53,7 +53,9 @@ #define WDTO 0x11 /* Watchdog timeout register */ #define WDCFG 0x12 /* Watchdog config register */ -static int io = 0x2E; /* Address used on Portwell Boards */ +#define IO_DEFAULT 0x2E /* Address used on Portwell Boards */ + +static int io = IO_DEFAULT; static int timeout = DEFAULT_TIMEOUT; /* timeout value */ static unsigned long timer_enabled; /* is the timer enabled? 
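Note also how imx2_wdt registers itself: platform_driver_probe() binds only the devices already present at registration time and then discards the probe routine, which is what lets imx2_wdt_probe carry __init without risking a later hotplug probe into freed memory. The shape of that idiom, for a hypothetical foo_wdt:

static int __init foo_wdt_probe(struct platform_device *pdev)
{
	/* one-shot setup; never called after driver init completes */
	return 0;
}

static struct platform_driver foo_wdt_driver = {
	.driver = {
		.name	= "foo_wdt",
		.owner	= THIS_MODULE,
	},
};

static int __init foo_wdt_init(void)
{
	/* probe existing devices once, then forget the probe routine */
	return platform_driver_probe(&foo_wdt_driver, foo_wdt_probe);
}
module_init(foo_wdt_init);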
*/ @@ -583,12 +585,13 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(io, int, 0); -MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(io) ")."); +MODULE_PARM_DESC(io, MODNAME " I/O port (default: " + __MODULE_STRING(IO_DEFAULT) ")."); module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in minutes (default=" - __MODULE_STRING(timeout) ")."); + __MODULE_STRING(DEFAULT_TIMEOUT) ")."); module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c index 09102f09e681..a7b5ad2a98bd 100644 --- a/drivers/watchdog/pnx833x_wdt.c +++ b/drivers/watchdog/pnx833x_wdt.c @@ -33,6 +33,8 @@ #define PFX "pnx833x: " #define WATCHDOG_TIMEOUT 30 /* 30 sec Maximum timeout */ #define WATCHDOG_COUNT_FREQUENCY 68000000U /* Watchdog counts at 68MHZ. */ +#define PNX_WATCHDOG_TIMEOUT (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY) +#define PNX_TIMEOUT_VALUE 2040000000U /** CONFIG block */ #define PNX833X_CONFIG (0x07000U) @@ -47,20 +49,21 @@ static int pnx833x_wdt_alive; /* Set default timeout in MHZ.*/ -static int pnx833x_wdt_timeout = (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY); +static int pnx833x_wdt_timeout = PNX_WATCHDOG_TIMEOUT; module_param(pnx833x_wdt_timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in Mhz. (68Mhz clock), default=" - __MODULE_STRING(pnx833x_wdt_timeout) "(30 seconds)."); + __MODULE_STRING(PNX_TIMEOUT_VALUE) "(30 seconds)."); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -static int start_enabled = 1; +#define START_DEFAULT 1 +static int start_enabled = START_DEFAULT; module_param(start_enabled, int, 0); MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion " - "(default=" __MODULE_STRING(start_enabled) ")"); + "(default=" __MODULE_STRING(START_DEFAULT) ")"); static void pnx833x_wdt_start(void) { diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index 69c6adbd8205..428f8a1583e8 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c @@ -1,7 +1,7 @@ /* * RDC321x watchdog driver * - * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org> + * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org> * * This driver is highly inspired from the cpu5_wdt driver * @@ -36,8 +36,7 @@ #include <linux/watchdog.h> #include <linux/io.h> #include <linux/uaccess.h> - -#include <asm/rdc321x_defs.h> +#include <linux/mfd/rdc321x.h> #define RDC_WDT_MASK 0x80000000 /* Mask */ #define RDC_WDT_EN 0x00800000 /* Enable bit */ @@ -63,6 +62,8 @@ static struct { int default_ticks; unsigned long inuse; spinlock_t lock; + struct pci_dev *sb_pdev; + int base_reg; } rdc321x_wdt_device; /* generic helper functions */ @@ -70,14 +71,18 @@ static struct { static void rdc321x_wdt_trigger(unsigned long unused) { unsigned long flags; + u32 val; if (rdc321x_wdt_device.running) ticks--; /* keep watchdog alive */ spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); - outl(RDC_WDT_EN | inl(RDC3210_CFGREG_DATA), - RDC3210_CFGREG_DATA); + pci_read_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, &val); + val |= RDC_WDT_EN; + pci_write_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, val); spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); /* requeue?? 
*/ @@ -105,10 +110,13 @@ static void rdc321x_wdt_start(void) /* Clear the timer */ spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); - outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR); + pci_write_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, RDC_CLS_TMR); /* Enable watchdog and set the timeout to 81.92 us */ - outl(RDC_WDT_EN | RDC_WDT_CNT, RDC3210_CFGREG_DATA); + pci_write_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, + RDC_WDT_EN | RDC_WDT_CNT); spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); mod_timer(&rdc321x_wdt_device.timer, @@ -148,7 +156,7 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; - unsigned int value; + u32 value; static const struct watchdog_info ident = { .options = WDIOF_CARDRESET, .identity = "RDC321x WDT", @@ -162,9 +170,10 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd, case WDIOC_GETSTATUS: /* Read the value from the DATA register */ spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); - value = inl(RDC3210_CFGREG_DATA); + pci_read_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, &value); spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); - if (copy_to_user(argp, &value, sizeof(int))) + if (copy_to_user(argp, &value, sizeof(u32))) return -EFAULT; break; case WDIOC_GETSUPPORT: @@ -219,17 +228,35 @@ static struct miscdevice rdc321x_wdt_misc = { static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) { int err; + struct resource *r; + struct rdc321x_wdt_pdata *pdata; + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "no platform data supplied\n"); + return -ENODEV; + } + + r = platform_get_resource_byname(pdev, IORESOURCE_IO, "wdt-reg"); + if (!r) { + dev_err(&pdev->dev, "failed to get wdt-reg resource\n"); + return -ENODEV; + } + + rdc321x_wdt_device.sb_pdev = pdata->sb_pdev; + rdc321x_wdt_device.base_reg = r->start; err = misc_register(&rdc321x_wdt_misc); if (err < 0) { - printk(KERN_ERR PFX "watchdog misc_register failed\n"); + dev_err(&pdev->dev, "misc_register failed\n"); return err; } spin_lock_init(&rdc321x_wdt_device.lock); /* Reset the watchdog */ - outl(RDC_WDT_RST, RDC3210_CFGREG_DATA); + pci_write_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, RDC_WDT_RST); init_completion(&rdc321x_wdt_device.stop); rdc321x_wdt_device.queue = 0; @@ -240,7 +267,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) rdc321x_wdt_device.default_ticks = ticks; - printk(KERN_INFO PFX "watchdog init success\n"); + dev_info(&pdev->dev, "watchdog init success\n"); return 0; } diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c index ea7f803f6248..5dceeddc8859 100644 --- a/drivers/watchdog/riowd.c +++ b/drivers/watchdog/riowd.c @@ -239,8 +239,11 @@ static const struct of_device_id riowd_match[] = { MODULE_DEVICE_TABLE(of, riowd_match); static struct of_platform_driver riowd_driver = { - .name = DRIVER_NAME, - .match_table = riowd_match, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = riowd_match, + }, .probe = riowd_probe, .remove = __devexit_p(riowd_remove), }; diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index e4cebef55177..300932580ded 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -63,7 +63,7 @@ module_param(nowayout, int, 0); module_param(soft_noboot, int, 0); module_param(debug, int, 0); 
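The rdc321x rework above replaces raw inl()/outl() pokes at the 0xCF8/0xCFC configuration mechanism with proper PCI config-space accessors against the southbridge device supplied through platform data, so access is serialized with every other config-space user. The recurring read-modify-write, sketched (sb_pdev and base_reg arrive via platform data, as in the driver):

static void rdc_wdt_enable(struct pci_dev *sb_pdev, int base_reg)
{
	u32 val;

	pci_read_config_dword(sb_pdev, base_reg, &val);
	val |= RDC_WDT_EN;		/* enable bit from the driver */
	pci_write_config_dword(sb_pdev, base_reg, val);
}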
-MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. default=" +MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. (default=" __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME) ")"); MODULE_PARM_DESC(tmr_atboot, "Watchdog is started at boot time if set to 1, default=" @@ -71,8 +71,8 @@ MODULE_PARM_DESC(tmr_atboot, MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, " - "0 to reboot (default depends on ONLY_TESTING)"); -MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug, (default 0)"); + "0 to reboot (default 0)"); +MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)"); static unsigned long open_lock; static struct device *wdt_dev; /* platform device attached to */ @@ -426,8 +426,7 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) wdt_mem = request_mem_region(res->start, size, pdev->name); if (wdt_mem == NULL) { dev_err(dev, "failed to get memory region\n"); - ret = -ENOENT; - goto err_req; + return -EBUSY; } wdt_base = ioremap(res->start, size); diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c index a03f84e5ee1f..6fc74065abee 100644 --- a/drivers/watchdog/shwdt.c +++ b/drivers/watchdog/shwdt.c @@ -496,7 +496,7 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(clock_division_ratio, int, 0); MODULE_PARM_DESC(clock_division_ratio, "Clock division ratio. Valid ranges are from 0x5 (1.31ms) " - "to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")"); + "to 0x7 (5.25ms). (default=" __MODULE_STRING(WTCSR_CKS_4096) ")"); module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c index dcabe77ad141..b5045ca7e61c 100644 --- a/drivers/watchdog/twl4030_wdt.c +++ b/drivers/watchdog/twl4030_wdt.c @@ -190,6 +190,8 @@ static int __devinit twl4030_wdt_probe(struct platform_device *pdev) twl4030_wdt_dev = pdev; + twl4030_wdt_disable(wdt); + ret = misc_register(&wdt->miscdev); if (ret) { dev_err(wdt->miscdev.parent, diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c index bfda2e99dd89..552a4381e78f 100644 --- a/drivers/watchdog/wdt.c +++ b/drivers/watchdog/wdt.c @@ -91,7 +91,7 @@ MODULE_PARM_DESC(tachometer, static int type = 500; module_param(type, int, 0); MODULE_PARM_DESC(type, - "WDT501-P Card type (500 or 501 , default=500)"); + "WDT501-P Card type (500 or 501, default=500)"); /* * Programming support diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c index 90ef70eb47d7..5c2521fc836c 100644 --- a/drivers/watchdog/wdt977.c +++ b/drivers/watchdog/wdt977.c @@ -63,7 +63,7 @@ static char expect_close; static DEFINE_SPINLOCK(spinlock); module_param(timeout, int, 0); -MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300), default=" +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300, default=" __MODULE_STRING(DEFAULT_TIMEOUT) ")"); module_param(testmode, int, 0); MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0"); diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 8943b8ccee1a..07e857b0de13 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -185,6 +185,7 @@ static void shutdown_handler(struct xenbus_watch *watch, kfree(str); } +#ifdef CONFIG_MAGIC_SYSRQ static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { @@ -214,15 +215,16 @@ static void 
sysrq_handler(struct xenbus_watch *watch, const char **vec, handle_sysrq(sysrq_key, NULL); } -static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; - static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; +#endif + +static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static int setup_shutdown_watcher(void) { @@ -234,11 +236,13 @@ static int setup_shutdown_watcher(void) return err; } +#ifdef CONFIG_MAGIC_SYSRQ err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } +#endif return 0; }
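Finally, a pitfall fixed repeatedly in the watchdog hunks (pc87413, pnx833x, shwdt, s3c2410, wdt977): __MODULE_STRING() stringifies its argument during preprocessing, so passing the variable that holds a default yields the literal variable name in modinfo output rather than the value. The default must therefore be a macro. Illustrative names only:

#define FOO_TIMEOUT_DEFAULT 30
static int foo_timeout = FOO_TIMEOUT_DEFAULT;
module_param(foo_timeout, int, 0);

/*
 * Wrong: prints "(default=foo_timeout)" in modinfo, since the
 * preprocessor never sees the variable's value:
 *
 *	MODULE_PARM_DESC(foo_timeout, "Timeout in seconds (default="
 *		__MODULE_STRING(foo_timeout) ")");
 */
MODULE_PARM_DESC(foo_timeout, "Timeout in seconds (default="
		 __MODULE_STRING(FOO_TIMEOUT_DEFAULT) ")");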