From 4af57b787b4be09419a2bb48aa705fa87ef41cca Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Sat, 20 Feb 2010 01:03:34 +0100 Subject: Rename .data.cacheline_aligned to .data..cacheline_aligned. Signed-off-by: Tim Abbott Cc: Sam Ravnborg Signed-off-by: Denys Vlasenko Signed-off-by: Michal Marek --- include/asm-generic/vmlinux.lds.h | 2 +- include/linux/cache.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 67e652068e0e..78450aaab9ef 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -189,7 +189,7 @@ #define CACHELINE_ALIGNED_DATA(align) \ . = ALIGN(align); \ - *(.data.cacheline_aligned) + *(.data..cacheline_aligned) #define INIT_TASK_DATA(align) \ . = ALIGN(align); \ diff --git a/include/linux/cache.h b/include/linux/cache.h index 97e24881c4c6..4c570653ab84 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -31,7 +31,7 @@ #ifndef __cacheline_aligned #define __cacheline_aligned \ __attribute__((__aligned__(SMP_CACHE_BYTES), \ - __section__(".data.cacheline_aligned"))) + __section__(".data..cacheline_aligned"))) #endif /* __cacheline_aligned */ #ifndef __cacheline_aligned_in_smp -- cgit v1.2.3 From 2af7687f1ad2c4571b9835f9bb2e3db9a738d258 Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Sat, 20 Feb 2010 01:03:35 +0100 Subject: Rename .data.init_task to .data..init_task. Signed-off-by: Tim Abbott Cc: Sam Ravnborg Signed-off-by: Denys Vlasenko Signed-off-by: Michal Marek --- arch/ia64/kernel/init_task.c | 2 +- arch/powerpc/kernel/vmlinux.lds.S | 4 +--- include/asm-generic/vmlinux.lds.h | 4 ++-- include/linux/init_task.h | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index e253ab8fcbc8..f9efe9739d3f 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c @@ -23,7 +23,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); * Initial task structure. * * We need to make sure that this is properly aligned due to the way process stacks are - * handled. This is done by having a special ".data.init_task" section... + * handled. This is done by having a special ".data..init_task" section... */ #define init_thread_info init_task_mem.s.thread_info diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 3229c0622161..136dcf3ce7bd 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -223,9 +223,7 @@ SECTIONS #endif /* The initial task and kernel stack */ - .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { - INIT_TASK_DATA(THREAD_SIZE) - } + INIT_TASK_DATA_SECTION(THREAD_SIZE) .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { PAGE_ALIGNED_DATA(PAGE_SIZE) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 78450aaab9ef..9cb9a9021e6e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -193,7 +193,7 @@ #define INIT_TASK_DATA(align) \ . = ALIGN(align); \ - *(.data.init_task) + *(.data..init_task) /* * Read only Data @@ -435,7 +435,7 @@ */ #define INIT_TASK_DATA_SECTION(align) \ . 
= ALIGN(align); \ - .data.init_task : { \ + .data..init_task : { \ INIT_TASK_DATA(align) \ } diff --git a/include/linux/init_task.h b/include/linux/init_task.h index abec69b63d7e..f00253b3fc47 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -191,7 +191,7 @@ extern struct cred init_cred; } /* Attach to the init_task data structure for proper alignment */ -#define __init_task_data __attribute__((__section__(".data.init_task"))) +#define __init_task_data __attribute__((__section__(".data..init_task"))) #endif -- cgit v1.2.3 From 75b134837263eb919d91678f7fcf3d54cd088c8d Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Sat, 20 Feb 2010 01:03:37 +0100 Subject: Rename .data.page_aligned to .data..page_aligned. Signed-off-by: Tim Abbott Cc: Sam Ravnborg Signed-off-by: Denys Vlasenko Signed-off-by: Michal Marek --- arch/ia64/kernel/vmlinux.lds.S | 2 +- arch/powerpc/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 2 +- include/linux/linkage.h | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 1295ba327f6f..7fb1198611fe 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -175,7 +175,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; - .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) + .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) { PAGE_ALIGNED_DATA(PAGE_SIZE) . = ALIGN(PAGE_SIZE); diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 136dcf3ce7bd..951e6c5b2c8e 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -225,7 +225,7 @@ SECTIONS /* The initial task and kernel stack */ INIT_TASK_DATA_SECTION(THREAD_SIZE) - .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { + .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) { PAGE_ALIGNED_DATA(PAGE_SIZE) } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 9cb9a9021e6e..569c25a85558 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -181,7 +181,7 @@ #define PAGE_ALIGNED_DATA(page_align) \ . = ALIGN(page_align); \ - *(.data.page_aligned) + *(.data..page_aligned) #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 5126cceb6ae9..05f4406d5995 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -18,7 +18,7 @@ # define asmregparm #endif -#define __page_aligned_data __section(.data.page_aligned) __aligned(PAGE_SIZE) +#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) #define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE) /* @@ -27,7 +27,7 @@ * Note when using these that you must specify the appropriate * alignment directives yourself */ -#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw" +#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw" #define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw" /* -- cgit v1.2.3 From 7c74df07f90cabe61d700727bca04682b4e477f3 Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Sat, 20 Feb 2010 01:03:38 +0100 Subject: Rename .bss.page_aligned to .bss..page_aligned. 
Signed-off-by: Tim Abbott Cc: Sam Ravnborg Signed-off-by: Denys Vlasenko Signed-off-by: Michal Marek --- arch/x86/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 2 +- include/linux/linkage.h | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index f92a0da608cb..8b6bb4e7f5c5 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -305,7 +305,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; - *(.bss.page_aligned) + *(.bss..page_aligned) *(.bss) . = ALIGN(4); __bss_stop = .; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 569c25a85558..32cddc155940 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -499,7 +499,7 @@ #define BSS(bss_align) \ . = ALIGN(bss_align); \ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ - *(.bss.page_aligned) \ + *(.bss..page_aligned) \ *(.dynbss) \ *(.bss) \ *(COMMON) \ diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 05f4406d5995..7135ebc8428c 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -19,7 +19,7 @@ #endif #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) -#define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE) +#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) /* * For assembly routines. @@ -28,7 +28,7 @@ * alignment directives yourself */ #define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw" -#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw" +#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw" /* * This is used by architectures to keep arguments on the stack -- cgit v1.2.3 From 3d9a854c2dac3e888393b23ba7adafcce4d6d4b9 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Sat, 20 Feb 2010 01:03:43 +0100 Subject: Rename .data[.percpu][.XXX] to .data[..percpu][..XXX]. Signed-off-by: Denys Vlasenko Signed-off-by: Michal Marek --- arch/ia64/include/asm/percpu.h | 2 +- include/asm-generic/percpu.h | 10 +++++----- include/asm-generic/vmlinux.lds.h | 24 ++++++++++++------------ include/linux/percpu-defs.h | 4 ++-- kernel/module.c | 2 +- 5 files changed, 21 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index 30cf46534dd2..35d9aeb6d85f 100644 --- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h @@ -31,7 +31,7 @@ extern void *per_cpu_init(void); #endif /* SMP */ -#define PER_CPU_BASE_SECTION ".data.percpu" +#define PER_CPU_BASE_SECTION ".data..percpu" /* * Be extremely careful when taking the address of this variable! 
Due to virtual diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 8087b90d4673..1202a1550e91 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -76,7 +76,7 @@ extern void setup_per_cpu_areas(void); #ifndef PER_CPU_BASE_SECTION #ifdef CONFIG_SMP -#define PER_CPU_BASE_SECTION ".data.percpu" +#define PER_CPU_BASE_SECTION ".data..percpu" #else #define PER_CPU_BASE_SECTION ".data" #endif @@ -88,15 +88,15 @@ extern void setup_per_cpu_areas(void); #define PER_CPU_SHARED_ALIGNED_SECTION "" #define PER_CPU_ALIGNED_SECTION "" #else -#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" -#define PER_CPU_ALIGNED_SECTION ".shared_aligned" +#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" #endif -#define PER_CPU_FIRST_SECTION ".first" +#define PER_CPU_FIRST_SECTION "..first" #else #define PER_CPU_SHARED_ALIGNED_SECTION "" -#define PER_CPU_ALIGNED_SECTION ".shared_aligned" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" #define PER_CPU_FIRST_SECTION "" #endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 32cddc155940..e304fcd10bc7 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -666,16 +666,16 @@ */ #define PERCPU_VADDR(vaddr, phdr) \ VMLINUX_SYMBOL(__per_cpu_load) = .; \ - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ + .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__per_cpu_start) = .; \ - *(.data.percpu.first) \ - *(.data.percpu.page_aligned) \ - *(.data.percpu) \ - *(.data.percpu.shared_aligned) \ + *(.data..percpu..first) \ + *(.data..percpu..page_aligned) \ + *(.data..percpu) \ + *(.data..percpu..shared_aligned) \ VMLINUX_SYMBOL(__per_cpu_end) = .; \ } phdr \ - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu); + . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); /** * PERCPU - define output section for percpu area, simple version @@ -687,18 +687,18 @@ * * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except * that __per_cpu_load is defined as a relative symbol against - * .data.percpu which is required for relocatable x86_32 + * .data..percpu which is required for relocatable x86_32 * configuration. */ #define PERCPU(align) \ . = ALIGN(align); \ - .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ + .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__per_cpu_load) = .; \ VMLINUX_SYMBOL(__per_cpu_start) = .; \ - *(.data.percpu.first) \ - *(.data.percpu.page_aligned) \ - *(.data.percpu) \ - *(.data.percpu.shared_aligned) \ + *(.data..percpu..first) \ + *(.data..percpu..page_aligned) \ + *(.data..percpu) \ + *(.data..percpu..shared_aligned) \ VMLINUX_SYMBOL(__per_cpu_end) = .; \ } diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 5a5d6ce4bd55..2351191f8c82 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -127,11 +127,11 @@ * Declaration/definition used for per-CPU variables that must be page aligned. 
*/ #define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \ - DECLARE_PER_CPU_SECTION(type, name, ".page_aligned") \ + DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \ __aligned(PAGE_SIZE) #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ - DEFINE_PER_CPU_SECTION(type, name, ".page_aligned") \ + DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ __aligned(PAGE_SIZE) /* diff --git a/kernel/module.c b/kernel/module.c index f82386bd9ee9..5daf0abd63c1 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -397,7 +397,7 @@ static unsigned int find_pcpusec(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, const char *secstrings) { - return find_sec(hdr, sechdrs, secstrings, ".data.percpu"); + return find_sec(hdr, sechdrs, secstrings, ".data..percpu"); } static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size) -- cgit v1.2.3 From 54cb27a71f51d304342c79e62fd7667f2171062b Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Sat, 20 Feb 2010 01:03:44 +0100 Subject: Rename .data.read_mostly to .data..read_mostly. Signed-off-by: Denys Vlasenko Signed-off-by: Michal Marek --- arch/ia64/include/asm/cache.h | 2 +- arch/ia64/kernel/paravirtentry.S | 2 +- arch/ia64/xen/xensetup.S | 2 +- arch/parisc/include/asm/cache.h | 2 +- arch/parisc/kernel/head.S | 2 +- arch/powerpc/include/asm/cache.h | 2 +- arch/powerpc/kernel/vmlinux.lds.S | 2 +- arch/s390/include/asm/cache.h | 2 +- arch/sh/include/asm/cache.h | 2 +- arch/sparc/include/asm/cache.h | 2 +- arch/x86/include/asm/cache.h | 2 +- include/asm-generic/vmlinux.lds.h | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h index e7482bd628ff..988254a7d349 100644 --- a/arch/ia64/include/asm/cache.h +++ b/arch/ia64/include/asm/cache.h @@ -24,6 +24,6 @@ # define SMP_CACHE_BYTES (1 << 3) #endif -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #endif /* _ASM_IA64_CACHE_H */ diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S index 6158560d7f17..92d880c4d3d1 100644 --- a/arch/ia64/kernel/paravirtentry.S +++ b/arch/ia64/kernel/paravirtentry.S @@ -28,7 +28,7 @@ #include "entry.h" #define DATA8(sym, init_value) \ - .pushsection .data.read_mostly ; \ + .pushsection .data..read_mostly ; \ .align 8 ; \ .global sym ; \ sym: ; \ diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S index aff8346ea193..b820ed02ab9f 100644 --- a/arch/ia64/xen/xensetup.S +++ b/arch/ia64/xen/xensetup.S @@ -14,7 +14,7 @@ #include #include - .section .data.read_mostly + .section .data..read_mostly .align 8 .global xen_domain_type xen_domain_type: diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index 32c2cca74345..45effe6978fa 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -28,7 +28,7 @@ #define SMP_CACHE_BYTES L1_CACHE_BYTES -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) void parisc_cache_init(void); /* initializes cache-flushing */ void disable_sr_hashing_asm(int); /* low level support for above */ diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index 0e3d9f9b9e33..4dbdf0ed6fa0 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -345,7 +345,7 @@ smp_slave_stext: ENDPROC(stext) #ifndef CONFIG_64BIT - .section .data.read_mostly + .section 
.data..read_mostly .align 4 .export $global$,data diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 81de6eb3455d..3f41ab9c1e4e 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -38,7 +38,7 @@ extern struct ppc64_caches ppc64_caches; #endif /* __powerpc64__ && ! __ASSEMBLY__ */ #if !defined(__ASSEMBLY__) -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 951e6c5b2c8e..8a0deefac08d 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -233,7 +233,7 @@ SECTIONS CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) } - .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { + .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) { READ_MOSTLY_DATA(L1_CACHE_BYTES) } diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h index 9b866816863c..24aafa68b643 100644 --- a/arch/s390/include/asm/cache.h +++ b/arch/s390/include/asm/cache.h @@ -14,6 +14,6 @@ #define L1_CACHE_BYTES 256 #define L1_CACHE_SHIFT 8 -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #endif diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h index 02df18ea9608..455a9a9bd7dd 100644 --- a/arch/sh/include/asm/cache.h +++ b/arch/sh/include/asm/cache.h @@ -14,7 +14,7 @@ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #ifndef __ASSEMBLY__ struct cache_info { diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h index 41f85ae4bd4a..2909f0ab6f9e 100644 --- a/arch/sparc/include/asm/cache.h +++ b/arch/sparc/include/asm/cache.h @@ -19,7 +19,7 @@ #define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #ifdef CONFIG_SPARC32 #include diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h index 2f9047cfaaca..48f99f15452e 100644 --- a/arch/x86/include/asm/cache.h +++ b/arch/x86/include/asm/cache.h @@ -7,7 +7,7 @@ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index e304fcd10bc7..6f6da4f080f2 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -185,7 +185,7 @@ #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ - *(.data.read_mostly) + *(.data..read_mostly) #define CACHELINE_ALIGNED_DATA(align) \ . = ALIGN(align); \ -- cgit v1.2.3 From 75ddb0e87d0d31ad44d574e7fe2e962e4efecadb Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Sat, 20 Feb 2010 01:03:48 +0100 Subject: Rename .text.lock to .text..lock. 
Signed-off-by: Denys Vlasenko Signed-off-by: Michal Marek --- Documentation/mutex-design.txt | 4 ++-- arch/ia64/kernel/vmlinux.lds.S | 4 ++-- arch/m68knommu/kernel/vmlinux.lds.S | 2 +- include/linux/spinlock.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt index aa60d1f627e5..c91ccc0720fa 100644 --- a/Documentation/mutex-design.txt +++ b/Documentation/mutex-design.txt @@ -66,14 +66,14 @@ of advantages of mutexes: c0377ccb : c0377ccb: f0 ff 08 lock decl (%eax) - c0377cce: 78 0e js c0377cde <.text.lock.mutex> + c0377cce: 78 0e js c0377cde <.text..lock.mutex> c0377cd0: c3 ret the unlocking fastpath is equally tight: c0377cd1 : c0377cd1: f0 ff 00 lock incl (%eax) - c0377cd4: 7e 0f jle c0377ce5 <.text.lock.mutex+0x7> + c0377cd4: 7e 0f jle c0377ce5 <.text..lock.mutex+0x7> c0377cd6: c3 ret - 'struct mutex' semantics are well-defined and are enforced if diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 5c01d3c7020d..e07218a2577f 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -54,8 +54,8 @@ SECTIONS .text2 : AT(ADDR(.text2) - LOAD_OFFSET) { *(.text2) } #ifdef CONFIG_SMP - .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) - { *(.text.lock) } + .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) + { *(.text..lock) } #endif _etext = .; diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index 9f1784f586b9..fd3df56fc458 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S @@ -68,7 +68,7 @@ SECTIONS { TEXT_TEXT SCHED_TEXT LOCK_TEXT - *(.text.lock) + *(.text..lock) . = ALIGN(16); /* Exception table */ __start___ex_table = .; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 86088213334a..dd57af413266 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -60,7 +60,7 @@ /* * Must define these before including other files, inline functions need them */ -#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME +#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME #define LOCK_SECTION_START(extra) \ ".subsection 1\n\t" \ -- cgit v1.2.3 From 07b3bb1ef211fdf20eddcae902d1098788ea2f6e Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Sat, 20 Feb 2010 01:03:52 +0100 Subject: Rename .data.nosave to .data..nosave. Signed-off-by: Denys Vlasenko Signed-off-by: Michal Marek --- arch/s390/kernel/swsusp_asm64.S | 2 +- include/asm-generic/vmlinux.lds.h | 2 +- include/linux/init.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 0c26cc1898ec..e5cd623cb025 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -261,7 +261,7 @@ restore_registers: lghi %r2,0 br %r14 - .section .data.nosave,"aw",@progbits + .section .data..nosave,"aw",@progbits .align 8 .Ldisabled_wait_31: .long 0x000a0000,0x00000000 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 6f6da4f080f2..ea3660526e91 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -175,7 +175,7 @@ #define NOSAVE_DATA \ . = ALIGN(PAGE_SIZE); \ VMLINUX_SYMBOL(__nosave_begin) = .; \ - *(.data.nosave) \ + *(.data..nosave) \ . 
= ALIGN(PAGE_SIZE); \ VMLINUX_SYMBOL(__nosave_end) = .; diff --git a/include/linux/init.h b/include/linux/init.h index ab1d31f9352b..de994304e0bb 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -301,7 +301,7 @@ void __init parse_early_options(char *cmdline); #endif /* Data marked not to be saved by software suspend */ -#define __nosavedata __section(.data.nosave) +#define __nosavedata __section(.data..nosave) /* This means "can be init if no module support, otherwise module load may call it." */ -- cgit v1.2.3 From a0c36a1f0fbab42590dab3c13c10fa7d20e6c2cd Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 22 Jun 2009 22:41:15 -0300 Subject: i7core_edac: Add an EDAC memory controller driver for Nehalem chipsets This driver is meant to support i7 core/i7core extreme desktop processors and Xeon 35xx/55xx series with integrated memory controller. It is likely that it can be expanded in the future to work with other processor series based at the same Memory Controller design. For now, it has just a few MCH status reads. Signed-off-by: Mauro Carvalho Chehab --- drivers/edac/Kconfig | 7 + drivers/edac/Makefile | 1 + drivers/edac/i7core_edac.c | 462 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/pci_ids.h | 16 ++ 4 files changed, 486 insertions(+) create mode 100644 drivers/edac/i7core_edac.c (limited to 'include') diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 55c9c59b3f71..391ddbfb2a34 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -166,6 +166,13 @@ config EDAC_I5400 Support for error detection and correction the Intel i5400 MCH chipset (Seaburg). +config EDAC_I7CORE + tristate "Intel i7 Core (Nehalem) processors" + depends on EDAC_MM_EDAC && PCI && X86 + help + Support for error detection and correction the Intel + i7 Core (Nehalem) Integrated Memory Controller + config EDAC_I82860 tristate "Intel 82860" depends on EDAC_MM_EDAC && PCI && X86_32 diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index bc5dc232a0fb..b9996195b233 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o obj-$(CONFIG_EDAC_I5000) += i5000_edac.o obj-$(CONFIG_EDAC_I5100) += i5100_edac.o obj-$(CONFIG_EDAC_I5400) += i5400_edac.o +obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o obj-$(CONFIG_EDAC_E752X) += e752x_edac.o obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c new file mode 100644 index 000000000000..7ecf15e66a3f --- /dev/null +++ b/drivers/edac/i7core_edac.c @@ -0,0 +1,462 @@ +/* Intel 7 core Memory Controller kernel module (Nehalem) + * + * This file may be distributed under the terms of the + * GNU General Public License version 2 only. + * + * Copyright (c) 2009 by: + * Mauro Carvalho Chehab + * + * Red Hat Inc. 
http://www.redhat.com + * + * Forked and adapted from the i5400_edac driver + * + * Based on the following public Intel datasheets: + * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor + * Datasheet, Volume 2: + * http://download.intel.com/design/processor/datashts/320835.pdf + * Intel Xeon Processor 5500 Series Datasheet Volume 2 + * http://www.intel.com/Assets/PDF/datasheet/321322.pdf + * also available at: + * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf + */ + + +#include +#include +#include +#include +#include +#include +#include + +#include "edac_core.h" + + +/* + * Alter this version for the module when modifications are made + */ +#define I7CORE_REVISION " Ver: 1.0.0 " __DATE__ +#define EDAC_MOD_STR "i7core_edac" + +/* HACK: temporary, just to enable all logs, for now */ +#undef debugf0 +#define debugf0(fmt, arg...) edac_printk(KERN_INFO, "i7core", fmt, ##arg) + +/* + * Debug macros + */ +#define i7core_printk(level, fmt, arg...) \ + edac_printk(level, "i7core", fmt, ##arg) + +#define i7core_mc_printk(mci, level, fmt, arg...) \ + edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg) + +/* + * i7core Memory Controller Registers + */ + + /* OFFSETS for Device 3 Function 0 */ + +#define MC_CONTROL 0x48 +#define MC_STATUS 0x4c +#define MC_MAX_DOD 0x64 + + /* OFFSETS for Devices 4,5 and 6 Function 0 */ + +#define MC_CHANNEL_ADDR_MATCH 0xf0 + +#define MC_MASK_DIMM (1 << 41) +#define MC_MASK_RANK (1 << 40) +#define MC_MASK_BANK (1 << 39) +#define MC_MASK_PAGE (1 << 38) +#define MC_MASK_COL (1 << 37) + +/* + * i7core structs + */ + +#define NUM_CHANS 3 +#define NUM_FUNCS 1 + +struct i7core_info { + u32 mc_control; + u32 mc_status; + u32 max_dod; +}; + +struct i7core_pvt { + struct pci_dev *pci_mcr; /* Dev 3:0 */ + struct pci_dev *pci_ch[NUM_CHANS][NUM_FUNCS]; + struct i7core_info info; +}; + +/* Device name and register DID (Device ID) */ +struct i7core_dev_info { + const char *ctl_name; /* name for this device */ + u16 fsb_mapping_errors; /* DID for the branchmap,control */ +}; + +static int chan_pci_ids[NUM_CHANS] = { + PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL, /* Dev 4 */ + PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL, /* Dev 5 */ + PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL, /* Dev 6 */ +}; + +/* Table of devices attributes supported by this driver */ +static const struct i7core_dev_info i7core_devs[] = { + { + .ctl_name = "i7 Core", + .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7_MCR, + }, +}; + +static struct edac_pci_ctl_info *i7core_pci; + +/**************************************************************************** + Anciliary status routines + ****************************************************************************/ + + /* MC_CONTROL bits */ +#define CH2_ACTIVE(pvt) ((pvt)->info.mc_control & 1 << 10) +#define CH1_ACTIVE(pvt) ((pvt)->info.mc_control & 1 << 9) +#define CH0_ACTIVE(pvt) ((pvt)->info.mc_control & 1 << 8) +#define ECCx8(pvt) ((pvt)->info.mc_control & 1 << 1) + + /* MC_STATUS bits */ +#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & 1 << 3) +#define CH2_DISABLED(pvt) ((pvt)->info.mc_status & 1 << 2) +#define CH1_DISABLED(pvt) ((pvt)->info.mc_status & 1 << 1) +#define CH0_DISABLED(pvt) ((pvt)->info.mc_status & 1 << 0) + + /* MC_MAX_DOD read functions */ +static inline int maxnumdimms(struct i7core_pvt *pvt) +{ + return (pvt->info.max_dod & 0x3) + 1; +} + +static inline int maxnumrank(struct i7core_pvt *pvt) +{ + static int ranks[4] = { 1, 2, 4, -EINVAL }; + + return ranks[(pvt->info.max_dod >> 2) & 0x3]; +} + +static inline int 
maxnumbank(struct i7core_pvt *pvt) +{ + static int banks[4] = { 4, 8, 16, -EINVAL }; + + return banks[(pvt->info.max_dod >> 4) & 0x3]; +} + +static inline int maxnumrow(struct i7core_pvt *pvt) +{ + static int rows[8] = { + 1 << 12, 1 << 13, 1 << 14, 1 << 15, + 1 << 16, -EINVAL, -EINVAL, -EINVAL, + }; + + return rows[((pvt->info.max_dod >> 6) & 0x7)]; +} + +static inline int maxnumcol(struct i7core_pvt *pvt) +{ + static int cols[8] = { + 1 << 10, 1 << 11, 1 << 12, -EINVAL, + }; + return cols[((pvt->info.max_dod >> 9) & 0x3) << 12]; +} + +/**************************************************************************** + Memory check routines + ****************************************************************************/ +static int get_dimm_config(struct mem_ctl_info *mci) +{ + struct i7core_pvt *pvt = mci->pvt_info; + + pci_read_config_dword(pvt->pci_mcr, MC_CONTROL, &pvt->info.mc_control); + pci_read_config_dword(pvt->pci_mcr, MC_STATUS, &pvt->info.mc_status); + pci_read_config_dword(pvt->pci_mcr, MC_MAX_DOD, &pvt->info.max_dod); + + debugf0("Channels active [%c][%c][%c] - enabled [%c][%c][%c]\n", + CH0_ACTIVE(pvt)?'0':'-', + CH1_ACTIVE(pvt)?'1':'-', + CH2_ACTIVE(pvt)?'2':'-', + CH0_DISABLED(pvt)?'-':'0', + CH1_DISABLED(pvt)?'-':'1', + CH2_DISABLED(pvt)?'-':'2'); + + if (ECC_ENABLED(pvt)) + debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt)?8:4); + else + debugf0("ECC disabled\n"); + + /* FIXME: need to handle the error codes */ + debugf0("DOD Maximum limits: DIMMS: %d, %d-ranked, %d-banked\n", + maxnumdimms(pvt), maxnumrank(pvt), maxnumbank(pvt)); + debugf0("DOD Maximum rows x colums = 0x%x x 0x%x\n", + maxnumrow(pvt), maxnumcol(pvt)); + + return 0; +} + +/**************************************************************************** + Device initialization routines: put/get, init/exit + ****************************************************************************/ + +/* + * i7core_put_devices 'put' all the devices that we have + * reserved via 'get' + */ +static void i7core_put_devices(struct mem_ctl_info *mci) +{ + struct i7core_pvt *pvt = mci->pvt_info; + int i, n; + + pci_dev_put(pvt->pci_mcr); + + /* Release all PCI device functions at MTR channel controllers */ + for (i = 0; i < NUM_CHANS; i++) + for (n = 0; n < NUM_FUNCS; n++) + pci_dev_put(pvt->pci_ch[i][n]); +} + +/* + * i7core_get_devices Find and perform 'get' operation on the MCH's + * device/functions we want to reference for this driver + * + * Need to 'get' device 16 func 1 and func 2 + */ +static int i7core_get_devices(struct mem_ctl_info *mci, int dev_idx) +{ + struct i7core_pvt *pvt; + struct pci_dev *pdev; + int i, n, func; + + pvt = mci->pvt_info; + memset(pvt, 0, sizeof(*pvt)); + + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7_MCR, + NULL); + if (!pdev) { + i7core_printk(KERN_ERR, + "Couldn't get PCI ID %04x:%04x function 0\n", + PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7_MCR); + return -ENODEV; + } + pvt->pci_mcr=pdev; + + /* Get dimm basic config */ + get_dimm_config(mci); + + /* Retrieve all needed functions at MTR channel controllers */ + for (i = 0; i < NUM_CHANS; i++) { + pdev = NULL; + for (n = 0; n < NUM_FUNCS; n++) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + chan_pci_ids[i], pdev); + if (!pdev) { + /* End of list, leave */ + i7core_printk(KERN_ERR, + "Device not found: PCI ID %04x:%04x " + "found only %d functions " + "(broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, + chan_pci_ids[i], n); + i7core_put_devices(mci); + return -ENODEV; + } + func = PCI_FUNC(pdev->devfn); + pvt->pci_ch[i][func] = 
pdev; + } + } + i7core_printk(KERN_INFO, "Driver loaded.\n"); + + return 0; +} + +/* + * i7core_probe Probe for ONE instance of device to see if it is + * present. + * return: + * 0 for FOUND a device + * < 0 for error code + */ +static int __devinit i7core_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct mem_ctl_info *mci; + struct i7core_pvt *pvt; + int rc; + int num_channels; + int num_csrows; + int num_dimms_per_channel; + int dev_idx = id->driver_data; + + if (dev_idx >= ARRAY_SIZE(i7core_devs)) + return -EINVAL; + + /* wake up device */ + rc = pci_enable_device(pdev); + if (rc == -EIO) + return rc; + + debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", + __func__, + pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + + /* We only are looking for func 0 of the set */ + if (PCI_FUNC(pdev->devfn) != 0) + return -ENODEV; + + num_channels = NUM_CHANS; + + /* FIXME: FAKE data, since we currently don't now how to get this */ + num_dimms_per_channel = 4; + num_csrows = num_dimms_per_channel; + + /* allocate a new MC control structure */ + mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); + if (mci == NULL) + return -ENOMEM; + + debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); + + mci->dev = &pdev->dev; /* record ptr to the generic device */ + dev_set_drvdata(mci->dev, mci); + + pvt = mci->pvt_info; +// pvt->system_address = pdev; /* Record this device in our private */ +// pvt->maxch = num_channels; +// pvt->maxdimmperch = num_dimms_per_channel; + + /* 'get' the pci devices we want to reserve for our use */ + if (i7core_get_devices(mci, dev_idx)) + goto fail0; + + mci->mc_idx = 0; + mci->mtype_cap = MEM_FLAG_FB_DDR2; /* FIXME: it uses DDR3 */ + mci->edac_ctl_cap = EDAC_FLAG_NONE; + mci->edac_cap = EDAC_FLAG_NONE; + mci->mod_name = "i7core_edac.c"; + mci->mod_ver = I7CORE_REVISION; + mci->ctl_name = i7core_devs[dev_idx].ctl_name; + mci->dev_name = pci_name(pdev); + mci->ctl_page_to_phys = NULL; + + /* add this new MC control structure to EDAC's list of MCs */ + if (edac_mc_add_mc(mci)) { + debugf0("MC: " __FILE__ + ": %s(): failed edac_mc_add_mc()\n", __func__); + /* FIXME: perhaps some code should go here that disables error + * reporting if we just enabled it + */ + goto fail1; + } + + /* allocating generic PCI control info */ + i7core_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!i7core_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + + return 0; + +fail1: + i7core_put_devices(mci); + +fail0: + edac_mc_free(mci); + return -ENODEV; +} + +/* + * i7core_remove destructor for one instance of device + * + */ +static void __devexit i7core_remove(struct pci_dev *pdev) +{ + struct mem_ctl_info *mci; + + debugf0(__FILE__ ": %s()\n", __func__); + + if (i7core_pci) + edac_pci_release_generic_ctl(i7core_pci); + + mci = edac_mc_del_mc(&pdev->dev); + if (!mci) + return; + + /* retrieve references to resources, and free those resources */ + i7core_put_devices(mci); + + edac_mc_free(mci); +} + +/* + * pci_device_id table for which devices we are looking for + * + * The "E500P" device is the first device supported. + */ +static const struct pci_device_id i7core_pci_tbl[] __devinitdata = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7_MCR)}, + {0,} /* 0 terminated list. 
*/ +}; + +MODULE_DEVICE_TABLE(pci, i7core_pci_tbl); + +/* + * i7core_driver pci_driver structure for this module + * + */ +static struct pci_driver i7core_driver = { + .name = "i7core_edac", + .probe = i7core_probe, + .remove = __devexit_p(i7core_remove), + .id_table = i7core_pci_tbl, +}; + +/* + * i7core_init Module entry function + * Try to initialize this module for its devices + */ +static int __init i7core_init(void) +{ + int pci_rc; + + debugf2("MC: " __FILE__ ": %s()\n", __func__); + + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ + opstate_init(); + + pci_rc = pci_register_driver(&i7core_driver); + + return (pci_rc < 0) ? pci_rc : 0; +} + +/* + * i7core_exit() Module exit function + * Unregister the driver + */ +static void __exit i7core_exit(void) +{ + debugf2("MC: " __FILE__ ": %s()\n", __func__); + pci_unregister_driver(&i7core_driver); +} + +module_init(i7core_init); +module_exit(i7core_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mauro Carvalho Chehab "); +MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); +MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - " + I7CORE_REVISION); + +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 9f688d243b86..c5dd0994bd7c 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2532,6 +2532,22 @@ #define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930 #define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 #define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 +#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18 +#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19 +#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a +#define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a -- cgit v1.2.3 From 696e409dbd1ce325129c5030267365619364dfa0 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Thu, 23 Jul 2009 06:57:45 -0300 Subject: edac_mce: Add an interface driver to report mce errors via edac edac_mce module is an interface module that gets mcelog data and forwards to any registered edac module that expects to receive data via mce. 
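For illustration only, a minimal sketch of how an EDAC driver would hook into this interface (the example_* names and the trivial callback body are hypothetical; struct edac_mce, check_error(), edac_mce_register() and edac_mce_unregister() are the interface added below):

#include <linux/module.h>
#include <linux/edac_mce.h>

static int example_check_error(void *priv, struct mce *mce)
{
	/* Return non-zero if this MCE was recognized and handled here,
	 * so it is not sent on through the mcelog interface. */
	return 0;
}

static struct edac_mce example_edac_mce = {
	.priv		= NULL,		/* driver context, handed back to check_error() */
	.check_error	= example_check_error,
};

static int __init example_init(void)
{
	return edac_mce_register(&example_edac_mce);
}

static void __exit example_exit(void)
{
	edac_mce_unregister(&example_edac_mce);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The .priv pointer is passed back to check_error(), so a driver can carry its own context through the callback.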
Signed-off-by: Mauro Carvalho Chehab --- arch/x86/kernel/cpu/mcheck/mce.c | 10 +++++++ drivers/edac/Kconfig | 8 +++++- drivers/edac/Makefile | 1 + drivers/edac/edac_mce.c | 58 ++++++++++++++++++++++++++++++++++++++++ include/linux/edac_mce.h | 31 +++++++++++++++++++++ 5 files changed, 107 insertions(+), 1 deletion(-) create mode 100644 drivers/edac/edac_mce.c create mode 100644 include/linux/edac_mce.h (limited to 'include') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8a6f0afa767e..6585ff07ddf5 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -168,6 +169,15 @@ void mce_log(struct mce *mce) for (;;) { entry = rcu_dereference_check_mce(mcelog.next); for (;;) { + /* + * If edac_mce is enabled, it will check the error type + * and will process it, if it is a known error. + * Otherwise, the error will be sent through mcelog + * interface + */ + if (edac_mce_parse(mce)) + return; + /* * When the buffer fills up discard new entries. * Assume that the earlier errors are the more diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 391ddbfb2a34..5b7fbc5aec87 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -69,6 +69,9 @@ config EDAC_MM_EDAC occurred so that a particular failing memory module can be replaced. If unsure, select 'Y'. +config EDAC_MCE + tristate + config EDAC_AMD64 tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h" depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE @@ -169,9 +172,12 @@ config EDAC_I5400 config EDAC_I7CORE tristate "Intel i7 Core (Nehalem) processors" depends on EDAC_MM_EDAC && PCI && X86 + select EDAC_MCE help Support for error detection and correction the Intel - i7 Core (Nehalem) Integrated Memory Controller + i7 Core (Nehalem) Integrated Memory Controller that exists on + newer processors like i7 Core, i7 Core Extreme, Xeon 35xx + and Xeon 55xx processors. config EDAC_I82860 tristate "Intel 82860" diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index b9996195b233..ca6b1bb24ccc 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_EDAC) := edac_stub.o obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o +obj-$(CONFIG_EDAC_MCE) += edac_mce.o edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o edac_core-objs += edac_module.o edac_device_sysfs.o diff --git a/drivers/edac/edac_mce.c b/drivers/edac/edac_mce.c new file mode 100644 index 000000000000..b1efa8e51921 --- /dev/null +++ b/drivers/edac/edac_mce.c @@ -0,0 +1,58 @@ +/* Provides edac interface to mcelog events + * + * This file may be distributed under the terms of the + * GNU General Public License version 2. + * + * Copyright (c) 2009 by: + * Mauro Carvalho Chehab + * + * Red Hat Inc. 
http://www.redhat.com + */ + +#include +#include +#include + +int edac_mce_enabled; +EXPORT_SYMBOL_GPL(edac_mce_enabled); + + +/* + * Extension interface + */ + +static LIST_HEAD(edac_mce_list); +static DEFINE_MUTEX(edac_mce_lock); + +int edac_mce_register(struct edac_mce *edac_mce) +{ + mutex_lock(&edac_mce_lock); + list_add_tail(&edac_mce->list, &edac_mce_list); + mutex_unlock(&edac_mce_lock); + return 0; +} +EXPORT_SYMBOL(edac_mce_register); + +void edac_mce_unregister(struct edac_mce *edac_mce) +{ + mutex_lock(&edac_mce_lock); + list_del(&edac_mce->list); + mutex_unlock(&edac_mce_lock); +} +EXPORT_SYMBOL(edac_mce_unregister); + + + +int edac_mce_queue(struct mce *mce) +{ + struct edac_mce *edac_mce; + + list_for_each_entry(edac_mce, &edac_mce_list, list) { + if (edac_mce->check_error(edac_mce->priv, mce)) + return 1; + } + + /* Nobody queued the error */ + return 0; +} +EXPORT_SYMBOL_GPL(edac_mce_queue); diff --git a/include/linux/edac_mce.h b/include/linux/edac_mce.h new file mode 100644 index 000000000000..f974fc035363 --- /dev/null +++ b/include/linux/edac_mce.h @@ -0,0 +1,31 @@ +/* Provides edac interface to mcelog events + * + * This file may be distributed under the terms of the + * GNU General Public License version 2. + * + * Copyright (c) 2009 by: + * Mauro Carvalho Chehab + * + * Red Hat Inc. http://www.redhat.com + */ + +#if defined(CONFIG_EDAC_MCE) || \ + (defined(CONFIG_EDAC_MCE_MODULE) && defined(MODULE)) + +#include +#include + +struct edac_mce { + struct list_head list; + + void *priv; + int (*check_error)(void *priv, struct mce *mce); +}; + +int edac_mce_register(struct edac_mce *edac_mce); +void edac_mce_unregister(struct edac_mce *edac_mce); +int edac_mce_parse(struct mce *mce); + +#else +#define edac_mce_parse(mce) (0) +#endif -- cgit v1.2.3 From e9bd2e73793bf0f7fcd8f94b532bb8f5c5b44171 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Thu, 9 Jul 2009 22:14:35 -0300 Subject: i7core_edac: Adds write unlock to MC registers The public Intel Xeon 5500 volume 2 datasheet describes, on page 53, session 2.6.7 a register that can lock/unlock Memory Controller the configuration register, called MC_CFG_CONTROL. Adds support for it in the hope that software error injection would work. With my tests with Xeon 35xx, there's still something missing. With a program that does sequencial bit writes at dev 0.0, sometimes, it produces error injection, after unblocking the MC_CFG_CONTROL (and, sometimes, it just locks my testing machine). I'll try later to discover by trial and error what's the register that solves this issue on Xeon 35xx. 
Signed-off-by: Mauro Carvalho Chehab --- drivers/edac/i7core_edac.c | 30 +++++++++++++++++++++++++++--- include/linux/pci_ids.h | 1 + 2 files changed, 28 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 3c7bb5f405f6..26cd5c924d56 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -33,7 +33,7 @@ #include "edac_core.h" /* To use the new pci_[read/write]_config_qword instead of two dword */ -#define USE_QWORD 1 +#define USE_QWORD 0 /* * Alter this version for the module when modifications are made @@ -58,6 +58,10 @@ * i7core Memory Controller Registers */ + /* OFFSETS for Device 0 Function 0 */ + +#define MC_CFG_CONTROL 0x90 + /* OFFSETS for Device 3 Function 0 */ #define MC_CONTROL 0x48 @@ -186,6 +190,7 @@ struct pci_id_descr { }; struct i7core_pvt { + struct pci_dev *pci_noncore; struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1]; struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1]; struct i7core_info info; @@ -222,6 +227,9 @@ struct pci_id_descr pci_devs[] = { { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS) }, /* if RDIMM is supported */ { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) }, + /* Generic Non-core registers */ + { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NOCORE) }, + /* Channel 0 */ { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) }, { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) }, @@ -882,6 +890,16 @@ static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci, else mask |= (pvt->inject.col & 0x3fffL); + /* Unlock writes to registers */ + pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, 0x2); + msleep(100); + + /* Zeroes error count registers */ + pci_write_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, 0); + pci_write_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, 0); + pvt->ce_count_available = 0; + + #if USE_QWORD pci_write_config_qword(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ADDR_MATCH, mask); @@ -929,12 +947,15 @@ static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci, pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_MASK, injectmask); +#if 0 + /* lock writes to registers */ + pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, 0); +#endif debugf0("Error inject addr match 0x%016llx, ecc 0x%08x," " inject 0x%08x\n", mask, pvt->inject.eccmask, injectmask); - return count; } @@ -1124,12 +1145,15 @@ static int mci_bind_devs(struct mem_ctl_info *mci) if (unlikely(func > MAX_CHAN_FUNC)) goto error; pvt->pci_ch[slot - 4][func] = pdev; - } else + } else if (!slot && !func) + pvt->pci_noncore = pdev; + else goto error; debugf0("Associated fn %d.%d, dev = %p\n", PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev); } + return 0; error: diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c5dd0994bd7c..9d5bfe86ba73 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2548,6 +2548,7 @@ #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 +#define PCI_DEVICE_ID_INTEL_I7_NOCORE 0x2c41 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a -- cgit v1.2.3 From d1fd4fb69eeeb7db0693df58b9116db498d5bfe1 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 10 Jul 2009 18:39:53 -0300 Subject: i7core_edac: Add a code to probe Xeon 55xx bus This code changes the detection procedure of i7core_edac. 
Instead of directly probing for MC registers, it probes for another register found on Nehalem. If found, it tries to pick the first MC PCI BUS. This should work fine with Xeon 35xx, but, on Xeon 55xx, this is at bus 254 and 255 that are not properly detected by the non-legacy PCI methods. The new detection code scans specifically at buses 254 and 255 for the Xeon 55xx devices. This code has not tested yet. After working, a change at the code will be needed, since the i7core is not yet ready for working with 2 sets of MC. Signed-off-by: Mauro Carvalho Chehab --- arch/x86/pci/legacy.c | 1 + drivers/edac/i7core_edac.c | 17 +++++++++++++---- include/linux/pci.h | 1 + include/linux/pci_ids.h | 1 + 4 files changed, 16 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c index c734c277b116..d6cc2eddf339 100644 --- a/arch/x86/pci/legacy.c +++ b/arch/x86/pci/legacy.c @@ -57,6 +57,7 @@ void pcibios_scan_specific_bus(int busn) } } } +EXPORT_SYMBOL_GPL(pcibios_scan_specific_bus); int __init pci_subsys_init(void) { diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 26cd5c924d56..eec0c13c0205 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -221,15 +221,15 @@ struct i7core_dev_info { .dev_id = (device_id) struct pci_id_descr pci_devs[] = { + /* Generic Non-core registers */ + { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NOCORE) }, + /* Memory controller */ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) }, { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) }, { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS) }, /* if RDIMM is supported */ { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) }, - /* Generic Non-core registers */ - { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NOCORE) }, - /* Channel 0 */ { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) }, { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) }, @@ -255,7 +255,7 @@ struct pci_id_descr pci_devs[] = { * This should match the first device at pci_devs table */ static const struct pci_device_id i7core_pci_tbl[] __devinitdata = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7_MCR)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, {0,} /* 0 terminated list. 
*/ }; @@ -1069,6 +1069,15 @@ static int i7core_get_devices(void) for (i = 0; i < N_DEVS; i++) { pdev = pci_get_device(PCI_VENDOR_ID_INTEL, pci_devs[i].dev_id, NULL); + + if (!pdev && !i) { + pcibios_scan_specific_bus(254); + pcibios_scan_specific_bus(255); + + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + pci_devs[i].dev_id, NULL); + } + if (likely(pdev)) pci_devs[i].pdev = pdev; else { diff --git a/include/linux/pci.h b/include/linux/pci.h index a788fa12ff31..5e2c7e15187d 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -621,6 +621,7 @@ void pci_fixup_cardbus(struct pci_bus *); /* Generic PCI functions used internally */ +void pcibios_scan_specific_bus(int busn); extern struct pci_bus *pci_find_bus(int domain, int busnr); void pci_bus_add_devices(const struct pci_bus *bus); struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 9d5bfe86ba73..12c3da6ef14d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2554,6 +2554,7 @@ #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a #define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b #define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c +#define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e #define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 #define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 #define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 -- cgit v1.2.3 From 310cbb7284fab9fc9cbb6bb893e51c414e15bba3 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 17 Jul 2009 00:09:10 -0300 Subject: i7core: fix probing on Xeon55xx Xeon55xx fails to probe with this error message: EDAC DEBUG: in drivers/edac/i7core_edac.c, line at 1660: MC: drivers/edac/i7core_edac.c: i7core_init() EDAC i7core: Device not found: dev 00:00.0 PCI ID 8086:2c41 i7core_edac: probe of 0000:00:14.0 failed with error -22 This is due to the fact that, on Xeon35xx (and i7core), device 00.0 has PCI ID 8086:2c40. Signed-off-by: Mauro Carvalho Chehab --- drivers/edac/i7core_edac.c | 23 ++++++++++++++++++++--- include/linux/pci_ids.h | 1 + 2 files changed, 21 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 67822976992e..e2f6dfdca841 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -227,9 +227,6 @@ struct i7core_dev_info { .dev_id = (device_id) struct pci_id_descr pci_devs[] = { - /* Generic Non-core registers */ - { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NOCORE) }, - /* Memory controller */ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) }, { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) }, @@ -253,6 +250,16 @@ struct pci_id_descr pci_devs[] = { { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) }, { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) }, { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) }, + + /* Generic Non-core registers */ + /* + * This is the PCI device on i7core and on Xeon 35xx (8086:2c41) + * On Xeon 55xx, however, it has a different id (8086:2c40). So, + * the probing code needs to test for the other address in case of + * failure of this one + */ + { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NOCORE) }, + }; #define N_DEVS ARRAY_SIZE(pci_devs) @@ -1138,6 +1145,16 @@ static int i7core_get_devices(void) pci_devs[i].dev_id, NULL); } + /* + * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs + * is at addr 8086:2c40, instead of 8086:2c41. 
So, we need + * to probe for the alternate address in case of failure + */ + if (pci_devs[i].dev_id == PCI_DEVICE_ID_INTEL_I7_NOCORE + && !pdev) + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_I7_NOCORE_ALT, NULL); + if (likely(pdev)) { bus = pdev->bus->number; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 12c3da6ef14d..bf6db4814c27 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2549,6 +2549,7 @@ #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 #define PCI_DEVICE_ID_INTEL_I7_NOCORE 0x2c41 +#define PCI_DEVICE_ID_INTEL_I7_NOCORE_ALT 0x2c40 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a -- cgit v1.2.3 From fd3826549db7f73d22b9c9abb80e01effb95c2ba Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Wed, 14 Oct 2009 06:07:07 -0300 Subject: i7core_edac: PCI device is called NONCORE, instead of NOCORE Signed-off-by: Mauro Carvalho Chehab --- drivers/edac/i7core_edac.c | 6 +++--- include/linux/pci_ids.h | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index c2857f60ae6a..bb538dfbdc6c 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -291,7 +291,7 @@ struct pci_id_descr pci_dev_descr[] = { * the probing code needs to test for the other address in case of * failure of this one */ - { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NOCORE) }, + { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) }, }; #define N_DEVS ARRAY_SIZE(pci_dev_descr) @@ -1177,9 +1177,9 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno) * is at addr 8086:2c40, instead of 8086:2c41. 
So, we need * to probe for the alternate address in case of failure */ - if (pci_dev_descr[devno].dev_id == PCI_DEVICE_ID_INTEL_I7_NOCORE && !pdev) + if (pci_dev_descr[devno].dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_I7_NOCORE_ALT, *prev); + PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev); if (!pdev) { if (*prev) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index bf6db4814c27..382476a8a339 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2548,8 +2548,8 @@ #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32 #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 -#define PCI_DEVICE_ID_INTEL_I7_NOCORE 0x2c41 -#define PCI_DEVICE_ID_INTEL_I7_NOCORE_ALT 0x2c40 +#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41 +#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a -- cgit v1.2.3 From 52a2e4fc3712d12888decd386d78ad526078a1fa Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Wed, 14 Oct 2009 11:21:58 -0300 Subject: i7core_edac: Add initial support for Lynnfield Signed-off-by: Mauro Carvalho Chehab --- drivers/edac/i7core_edac.c | 39 +++++++++++++++++++++++++++++++++++++-- include/linux/pci_ids.h | 15 +++++++++++++++ 2 files changed, 52 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index e944b63d9f06..e525d571cb25 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -299,11 +299,30 @@ struct pci_id_descr pci_dev_descr_i7core[] = { }; +struct pci_id_descr pci_dev_descr_lynnfield[] = { + { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) }, + { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) }, + { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) }, + + { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) }, + { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) }, + { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) }, + { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) }, + + { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) }, + { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) }, + { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) }, + { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) }, + + { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) }, +}; + /* * pci_device_id table for which devices we are looking for */ static const struct pci_device_id i7core_pci_tbl[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)}, {0,} /* 0 terminated list. 
*/ }; @@ -522,6 +541,9 @@ static int get_dimm_config(struct mem_ctl_info *mci, int *csrow) for (i = 0; i < NUM_CHANS; i++) { u32 data, dimm_dod[3], value[8]; + if (!pvt->pci_ch[i][0]) + continue; + if (!CH_ACTIVE(pvt, i)) { debugf0("Channel %i is not active\n", i); continue; @@ -1001,6 +1023,9 @@ static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci, struct i7core_pvt *pvt = mci->pvt_info; u32 injectmask; + if (!pvt->pci_ch[pvt->inject.channel][0]) + return 0; + pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_INJECT, &injectmask); @@ -1841,8 +1866,18 @@ static int __devinit i7core_probe(struct pci_dev *pdev, /* get the pci devices we want to reserve for our use */ mutex_lock(&i7core_edac_lock); - rc = i7core_get_devices(pci_dev_descr_i7core, - ARRAY_SIZE(pci_dev_descr_i7core)); + if (pdev->device == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) { + printk(KERN_INFO "i7core_edac: detected a " + "Lynnfield processor\n"); + rc = i7core_get_devices(pci_dev_descr_lynnfield, + ARRAY_SIZE(pci_dev_descr_lynnfield)); + } else { + printk(KERN_INFO "i7core_edac: detected a " + "Nehalem/Nehalem-EP processor\n"); + rc = i7core_get_devices(pci_dev_descr_i7core, + ARRAY_SIZE(pci_dev_descr_i7core)); + } + if (unlikely(rc < 0)) goto fail0; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 382476a8a339..ebc0fa4c7a66 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2550,6 +2550,21 @@ #define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 #define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41 #define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC 0x2ca3 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL 0x2ca8 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR 0x2ca9 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK 0x2caa +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC 0x2cab #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a -- cgit v1.2.3 From f05da2f7855b3b88a831ca79e037245872549ec0 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Wed, 14 Oct 2009 13:31:06 -0300 Subject: i7core: add support for Lynnfield alternate address Signed-off-by: Mauro Carvalho Chehab --- drivers/edac/i7core_edac.c | 13 +++++++++++-- include/linux/pci_ids.h | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index e525d571cb25..d3f5c016c5eb 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -314,6 +314,10 @@ struct pci_id_descr pci_dev_descr_lynnfield[] = { { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) }, { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) }, + /* + * This is the PCI device has an alternate address on some + * processors like Core i7 860 + */ { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) }, }; @@ -322,7 +326,7 @@ struct 
pci_id_descr pci_dev_descr_lynnfield[] = { */ static const struct pci_device_id i7core_pci_tbl[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, {0,} /* 0 terminated list. */ }; @@ -1209,6 +1213,11 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev); + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT, + *prev); + if (!pdev) { if (*prev) { *prev = pdev; @@ -1866,7 +1875,7 @@ static int __devinit i7core_probe(struct pci_dev *pdev, /* get the pci devices we want to reserve for our use */ mutex_lock(&i7core_edac_lock); - if (pdev->device == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) { + if (pdev->device == PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0) { printk(KERN_INFO "i7core_edac: detected a " "Lynnfield processor\n"); rc = i7core_get_devices(pci_dev_descr_lynnfield, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ebc0fa4c7a66..e67cb20b8401 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2551,6 +2551,7 @@ #define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41 #define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91 -- cgit v1.2.3 From 4e639fdf0d0d745648aa62228ab8a0d9c03a563f Mon Sep 17 00:00:00 2001 From: Peter Jones Date: Thu, 25 Feb 2010 15:37:17 -0500 Subject: ibft: Update iBFT handling for v1.03 of the spec. - Use struct acpi_table_ibft instead of struct ibft_table_header - Don't do reserve_ibft_region() on UEFI machines (section 1.4.3.1) - If ibft_addr isn't initialized when ibft_init() is called, check for ACPI-based tables. - Fix compiler error when CONFIG_ACPI is not defined. Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Peter Jones Signed-off-by: Mike Christie --- drivers/firmware/iscsi_ibft.c | 30 ++++++++++++++++++------------ drivers/firmware/iscsi_ibft_find.c | 35 ++++++++++++++++++++++++++++++----- include/linux/iscsi_ibft.h | 12 ++---------- 3 files changed, 50 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index ed2801c378de..b3ab24f9d78f 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -1,5 +1,5 @@ /* - * Copyright 2007 Red Hat, Inc. + * Copyright 2007-2010 Red Hat, Inc. * by Peter Jones * Copyright 2008 IBM, Inc. * by Konrad Rzeszutek @@ -19,6 +19,9 @@ * * Changelog: * + * 06 Jan 2010 - Peter Jones + * New changelog entries are in the git log from now on. Not here. + * * 14 Mar 2008 - Konrad Rzeszutek * Updated comments and copyrights. 
(v0.4.9) * @@ -78,9 +81,10 @@ #include #include #include +#include -#define IBFT_ISCSI_VERSION "0.4.9" -#define IBFT_ISCSI_DATE "2008-Mar-14" +#define IBFT_ISCSI_VERSION "0.5.0" +#define IBFT_ISCSI_DATE "2010-Feb-25" MODULE_AUTHOR("Peter Jones and \ Konrad Rzeszutek "); @@ -238,7 +242,7 @@ static const char *ibft_initiator_properties[] = */ struct ibft_kobject { - struct ibft_table_header *header; + struct acpi_table_ibft *header; union { struct ibft_initiator *initiator; struct ibft_nic *nic; @@ -536,12 +540,13 @@ static int __init ibft_check_device(void) u8 *pos; u8 csum = 0; - len = ibft_addr->length; + len = ibft_addr->header.length; /* Sanity checking of iBFT. */ - if (ibft_addr->revision != 1) { + if (ibft_addr->header.revision != 1) { printk(KERN_ERR "iBFT module supports only revision 1, " \ - "while this is %d.\n", ibft_addr->revision); + "while this is %d.\n", + ibft_addr->header.revision); return -ENOENT; } for (pos = (u8 *)ibft_addr; pos < (u8 *)ibft_addr + len; pos++) @@ -558,7 +563,7 @@ static int __init ibft_check_device(void) /* * Helper function for ibft_register_kobjects. */ -static int __init ibft_create_kobject(struct ibft_table_header *header, +static int __init ibft_create_kobject(struct acpi_table_ibft *header, struct ibft_hdr *hdr, struct list_head *list) { @@ -596,7 +601,7 @@ static int __init ibft_create_kobject(struct ibft_table_header *header, default: printk(KERN_ERR "iBFT has unknown structure type (%d). " \ "Report this bug to %.6s!\n", hdr->id, - header->oem_id); + header->header.oem_id); rc = 1; break; } @@ -649,7 +654,7 @@ out_invalid_struct: * found add them on the passed-in list. We do not support the other * fields at this point, so they are skipped. */ -static int __init ibft_register_kobjects(struct ibft_table_header *header, +static int __init ibft_register_kobjects(struct acpi_table_ibft *header, struct list_head *list) { struct ibft_control *control = NULL; @@ -660,7 +665,7 @@ static int __init ibft_register_kobjects(struct ibft_table_header *header, control = (void *)header + sizeof(*header); end = (void *)control + control->hdr.length; - eot_offset = (void *)header + header->length - (void *)control; + eot_offset = (void *)header + header->header.length - (void *)control; rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, sizeof(*control)); @@ -672,7 +677,8 @@ static int __init ibft_register_kobjects(struct ibft_table_header *header, } for (ptr = &control->initiator_off; ptr < end; ptr += sizeof(u16)) { offset = *(u16 *)ptr; - if (offset && offset < header->length && offset < eot_offset) { + if (offset && offset < header->header.length && + offset < eot_offset) { rc = ibft_create_kobject(header, (void *)header + offset, list); diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index d6470ef36e4a..dd85555d3296 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -1,5 +1,5 @@ /* - * Copyright 2007 Red Hat, Inc. + * Copyright 2007-2010 Red Hat, Inc. * by Peter Jones * Copyright 2007 IBM, Inc. * by Konrad Rzeszutek @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -30,13 +31,15 @@ #include #include #include +#include +#include #include /* * Physical location of iSCSI Boot Format Table. 
*/ -struct ibft_table_header *ibft_addr; +struct acpi_table_ibft *ibft_addr; EXPORT_SYMBOL_GPL(ibft_addr); #define IBFT_SIGN "iBFT" @@ -46,6 +49,13 @@ EXPORT_SYMBOL_GPL(ibft_addr); #define VGA_MEM 0xA0000 /* VGA buffer */ #define VGA_SIZE 0x20000 /* 128kB */ +#ifdef CONFIG_ACPI +static int __init acpi_find_ibft(struct acpi_table_header *header) +{ + ibft_addr = (struct acpi_table_ibft *)header; + return 0; +} +#endif /* CONFIG_ACPI */ /* * Routine used to find the iSCSI Boot Format Table. The logical @@ -59,6 +69,11 @@ unsigned long __init find_ibft_region(unsigned long *sizep) ibft_addr = NULL; + /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will + * only use ACPI for this */ + if (efi_enabled) + return 0; + for (pos = IBFT_START; pos < IBFT_END; pos += 16) { /* The table can't be inside the VGA BIOS reserved space, * so skip that area */ @@ -72,14 +87,24 @@ unsigned long __init find_ibft_region(unsigned long *sizep) /* if the length of the table extends past 1M, * the table cannot be valid. */ if (pos + len <= (IBFT_END-1)) { - ibft_addr = (struct ibft_table_header *)virt; + ibft_addr = (struct acpi_table_ibft *)virt; break; } } } +#ifdef CONFIG_ACPI + /* + * One spec says "IBFT", the other says "iBFT". We have to check + * for both. + */ + if (!ibft_addr) + acpi_table_parse(ACPI_SIG_IBFT, acpi_find_ibft); + if (!ibft_addr) + acpi_table_parse("iBFT", acpi_find_ibft); +#endif /* CONFIG_ACPI */ if (ibft_addr) { - *sizep = PAGE_ALIGN(len); - return pos; + *sizep = PAGE_ALIGN(ibft_addr->header.length); + return (u64)isa_virt_to_bus(ibft_addr); } *sizep = 0; diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h index d2e4042f8f5e..8ba7e5b9d62c 100644 --- a/include/linux/iscsi_ibft.h +++ b/include/linux/iscsi_ibft.h @@ -21,21 +21,13 @@ #ifndef ISCSI_IBFT_H #define ISCSI_IBFT_H -struct ibft_table_header { - char signature[4]; - u32 length; - u8 revision; - u8 checksum; - char oem_id[6]; - char oem_table_id[8]; - char reserved[24]; -} __attribute__((__packed__)); +#include /* * Logical location of iSCSI Boot Format Table. * If the value is NULL there is no iBFT on the machine. */ -extern struct ibft_table_header *ibft_addr; +extern struct acpi_table_ibft *ibft_addr; /* * Routine used to find and reserve the iSCSI Boot Format Table. The -- cgit v1.2.3 From ba4ee30c6c797de148dcc7254cf6d531aba71d9b Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Mon, 12 Apr 2010 18:06:17 +0000 Subject: ibft: separate ibft parsing from sysfs interface Not all iscsi drivers support ibft. For drivers like be2iscsi that do not but are bootable through a vendor firmware specific format/process this patch moves the sysfs interface from the ibft code to a lib module. This then allows userspace tools to search for iscsi boot info in a common place and in a common format. ibft iscsi boot info is exported in the same place as it was before: /sys/firmware/ibft. vendor/fw boot info gets export in /sys/firmware/iscsi_bootX, where X is the scsi host number of the HBA. Underneath these parent dirs, the target, ethernet, and initiator dirs are the same as they were before. 
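As a rough usage sketch (not part of this patch): a driver that boots from vendor firmware could publish its info through the new library roughly as below. The my_* names, the fixed target name string and the 32-byte data blob are made up for illustration; only the iscsi_boot_* calls, ISCSI_BOOT_TGT_NAME and the callback signatures come from the interface added here.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/iscsi_boot_sysfs.h>

/* illustrative callbacks, not in the tree */
static ssize_t my_tgt_show(void *data, int type, char *buf)
{
	if (type == ISCSI_BOOT_TGT_NAME)
		return sprintf(buf, "iqn.2010-05.com.example:boot-target\n");
	return -ENOSYS;
}

static mode_t my_tgt_visible(void *data, int type)
{
	/* expose only the target name attribute in this sketch */
	return type == ISCSI_BOOT_TGT_NAME ? 0444 : 0;
}

static int my_export_boot_info(unsigned int hostno)
{
	struct iscsi_boot_kset *kset;
	void *data;

	/* creates /sys/firmware/iscsi_boot<hostno>/ */
	kset = iscsi_boot_create_host_kset(hostno);
	if (!kset)
		return -ENOMEM;

	/* on success this buffer is freed by the kobject release function */
	data = kzalloc(32, GFP_KERNEL);
	if (!data)
		goto free_kset;

	/* creates the target0/ directory holding the visible attributes */
	if (!iscsi_boot_create_target(kset, 0, data, my_tgt_show,
				      my_tgt_visible))
		goto free_data;
	return 0;

free_data:
	kfree(data);
free_kset:
	iscsi_boot_destroy_kset(kset);
	return -ENOMEM;
}

A single iscsi_boot_destroy_kset() call at teardown then removes the kset and every kobject registered under it.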
Signed-off-by: Mike Christie Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Peter Jones --- drivers/firmware/Kconfig | 8 + drivers/firmware/Makefile | 1 + drivers/firmware/iscsi_boot_sysfs.c | 481 ++++++++++++++++++++++++++++++++++++ include/linux/iscsi_boot_sysfs.h | 123 +++++++++ 4 files changed, 613 insertions(+) create mode 100644 drivers/firmware/iscsi_boot_sysfs.c create mode 100644 include/linux/iscsi_boot_sysfs.h (limited to 'include') diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 1b03ba1d0834..571d2182613d 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -122,6 +122,14 @@ config ISCSI_IBFT_FIND is necessary for iSCSI Boot Firmware Table Attributes module to work properly. +config ISCSI_BOOT_SYSFS + tristate "iSCSI Boot Sysfs Interface" + default n + help + This option enables support for exposing iSCSI boot information + via sysfs to userspace. If you wish to export this information, + say Y. Otherwise, say N. + config ISCSI_IBFT tristate "iSCSI Boot Firmware Table Attributes module" depends on ISCSI_IBFT_FIND diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 1c3c17343dbe..5fe7e1662922 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -10,4 +10,5 @@ obj-$(CONFIG_DCDBAS) += dcdbas.o obj-$(CONFIG_DMIID) += dmi-id.o obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o +obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o diff --git a/drivers/firmware/iscsi_boot_sysfs.c b/drivers/firmware/iscsi_boot_sysfs.c new file mode 100644 index 000000000000..df6bff7366cf --- /dev/null +++ b/drivers/firmware/iscsi_boot_sysfs.c @@ -0,0 +1,481 @@ +/* + * Export the iSCSI boot info to userland via sysfs. + * + * Copyright (C) 2010 Red Hat, Inc. All rights reserved. + * Copyright (C) 2010 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + + +MODULE_AUTHOR("Mike Christie "); +MODULE_DESCRIPTION("sysfs interface and helpers to export iSCSI boot information"); +MODULE_LICENSE("GPL"); +/* + * The kobject and attribute structures. + */ +struct iscsi_boot_attr { + struct attribute attr; + int type; + ssize_t (*show) (void *data, int type, char *buf); +}; + +/* + * The routine called for all sysfs attributes. 
+ */ +static ssize_t iscsi_boot_show_attribute(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + struct iscsi_boot_attr *boot_attr = + container_of(attr, struct iscsi_boot_attr, attr); + ssize_t ret = -EIO; + char *str = buf; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (boot_kobj->show) + ret = boot_kobj->show(boot_kobj->data, boot_attr->type, str); + return ret; +} + +static const struct sysfs_ops iscsi_boot_attr_ops = { + .show = iscsi_boot_show_attribute, +}; + +static void iscsi_boot_kobj_release(struct kobject *kobj) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + + kfree(boot_kobj->data); + kfree(boot_kobj); +} + +static struct kobj_type iscsi_boot_ktype = { + .release = iscsi_boot_kobj_release, + .sysfs_ops = &iscsi_boot_attr_ops, +}; + +#define iscsi_boot_rd_attr(fnname, sysfs_name, attr_type) \ +static struct iscsi_boot_attr iscsi_boot_attr_##fnname = { \ + .attr = { .name = __stringify(sysfs_name), .mode = 0444 }, \ + .type = attr_type, \ +} + +/* Target attrs */ +iscsi_boot_rd_attr(tgt_index, index, ISCSI_BOOT_TGT_INDEX); +iscsi_boot_rd_attr(tgt_flags, flags, ISCSI_BOOT_TGT_FLAGS); +iscsi_boot_rd_attr(tgt_ip, ip-addr, ISCSI_BOOT_TGT_IP_ADDR); +iscsi_boot_rd_attr(tgt_port, port, ISCSI_BOOT_TGT_PORT); +iscsi_boot_rd_attr(tgt_lun, lun, ISCSI_BOOT_TGT_LUN); +iscsi_boot_rd_attr(tgt_chap, chap-type, ISCSI_BOOT_TGT_CHAP_TYPE); +iscsi_boot_rd_attr(tgt_nic, nic-assoc, ISCSI_BOOT_TGT_NIC_ASSOC); +iscsi_boot_rd_attr(tgt_name, target-name, ISCSI_BOOT_TGT_NAME); +iscsi_boot_rd_attr(tgt_chap_name, chap-name, ISCSI_BOOT_TGT_CHAP_NAME); +iscsi_boot_rd_attr(tgt_chap_secret, chap-secret, ISCSI_BOOT_TGT_CHAP_SECRET); +iscsi_boot_rd_attr(tgt_chap_rev_name, rev-chap-name, + ISCSI_BOOT_TGT_REV_CHAP_NAME); +iscsi_boot_rd_attr(tgt_chap_rev_secret, rev-chap-name-secret, + ISCSI_BOOT_TGT_REV_CHAP_SECRET); + +static struct attribute *target_attrs[] = { + &iscsi_boot_attr_tgt_index.attr, + &iscsi_boot_attr_tgt_flags.attr, + &iscsi_boot_attr_tgt_ip.attr, + &iscsi_boot_attr_tgt_port.attr, + &iscsi_boot_attr_tgt_lun.attr, + &iscsi_boot_attr_tgt_chap.attr, + &iscsi_boot_attr_tgt_nic.attr, + &iscsi_boot_attr_tgt_name.attr, + &iscsi_boot_attr_tgt_chap_name.attr, + &iscsi_boot_attr_tgt_chap_secret.attr, + &iscsi_boot_attr_tgt_chap_rev_name.attr, + &iscsi_boot_attr_tgt_chap_rev_secret.attr, + NULL +}; + +static mode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + + if (attr == &iscsi_boot_attr_tgt_index.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_INDEX); + else if (attr == &iscsi_boot_attr_tgt_flags.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_FLAGS); + else if (attr == &iscsi_boot_attr_tgt_ip.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_IP_ADDR); + else if (attr == &iscsi_boot_attr_tgt_port.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_PORT); + else if (attr == &iscsi_boot_attr_tgt_lun.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_LUN); + else if (attr == &iscsi_boot_attr_tgt_chap.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_CHAP_TYPE); + else if (attr == &iscsi_boot_attr_tgt_nic.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_NIC_ASSOC); + 
else if (attr == &iscsi_boot_attr_tgt_name.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_NAME); + else if (attr == &iscsi_boot_attr_tgt_chap_name.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_CHAP_NAME); + else if (attr == &iscsi_boot_attr_tgt_chap_secret.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_CHAP_SECRET); + else if (attr == &iscsi_boot_attr_tgt_chap_rev_name.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_REV_CHAP_NAME); + else if (attr == &iscsi_boot_attr_tgt_chap_rev_secret.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_TGT_REV_CHAP_SECRET); + return 0; +} + +static struct attribute_group iscsi_boot_target_attr_group = { + .attrs = target_attrs, + .is_visible = iscsi_boot_tgt_attr_is_visible, +}; + +/* Ethernet attrs */ +iscsi_boot_rd_attr(eth_index, index, ISCSI_BOOT_ETH_INDEX); +iscsi_boot_rd_attr(eth_flags, flags, ISCSI_BOOT_ETH_FLAGS); +iscsi_boot_rd_attr(eth_ip, ip-addr, ISCSI_BOOT_ETH_IP_ADDR); +iscsi_boot_rd_attr(eth_subnet, subnet-mask, ISCSI_BOOT_ETH_SUBNET_MASK); +iscsi_boot_rd_attr(eth_origin, origin, ISCSI_BOOT_ETH_ORIGIN); +iscsi_boot_rd_attr(eth_gateway, gateway, ISCSI_BOOT_ETH_GATEWAY); +iscsi_boot_rd_attr(eth_primary_dns, primary-dns, ISCSI_BOOT_ETH_PRIMARY_DNS); +iscsi_boot_rd_attr(eth_secondary_dns, secondary-dns, + ISCSI_BOOT_ETH_SECONDARY_DNS); +iscsi_boot_rd_attr(eth_dhcp, dhcp, ISCSI_BOOT_ETH_DHCP); +iscsi_boot_rd_attr(eth_vlan, vlan, ISCSI_BOOT_ETH_VLAN); +iscsi_boot_rd_attr(eth_mac, mac, ISCSI_BOOT_ETH_MAC); +iscsi_boot_rd_attr(eth_hostname, hostname, ISCSI_BOOT_ETH_HOSTNAME); + +static struct attribute *ethernet_attrs[] = { + &iscsi_boot_attr_eth_index.attr, + &iscsi_boot_attr_eth_flags.attr, + &iscsi_boot_attr_eth_ip.attr, + &iscsi_boot_attr_eth_subnet.attr, + &iscsi_boot_attr_eth_origin.attr, + &iscsi_boot_attr_eth_gateway.attr, + &iscsi_boot_attr_eth_primary_dns.attr, + &iscsi_boot_attr_eth_secondary_dns.attr, + &iscsi_boot_attr_eth_dhcp.attr, + &iscsi_boot_attr_eth_vlan.attr, + &iscsi_boot_attr_eth_mac.attr, + &iscsi_boot_attr_eth_hostname.attr, + NULL +}; + +static mode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + + if (attr == &iscsi_boot_attr_eth_index.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_INDEX); + else if (attr == &iscsi_boot_attr_eth_flags.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_FLAGS); + else if (attr == &iscsi_boot_attr_eth_ip.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_IP_ADDR); + else if (attr == &iscsi_boot_attr_eth_subnet.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_SUBNET_MASK); + else if (attr == &iscsi_boot_attr_eth_origin.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_ORIGIN); + else if (attr == &iscsi_boot_attr_eth_gateway.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_GATEWAY); + else if (attr == &iscsi_boot_attr_eth_primary_dns.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_PRIMARY_DNS); + else if (attr == &iscsi_boot_attr_eth_secondary_dns.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_SECONDARY_DNS); + else if (attr == &iscsi_boot_attr_eth_dhcp.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_DHCP); + else if (attr == 
&iscsi_boot_attr_eth_vlan.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_VLAN); + else if (attr == &iscsi_boot_attr_eth_mac.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_MAC); + else if (attr == &iscsi_boot_attr_eth_hostname.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_ETH_HOSTNAME); + return 0; +} + +static struct attribute_group iscsi_boot_ethernet_attr_group = { + .attrs = ethernet_attrs, + .is_visible = iscsi_boot_eth_attr_is_visible, +}; + +/* Initiator attrs */ +iscsi_boot_rd_attr(ini_index, index, ISCSI_BOOT_INI_INDEX); +iscsi_boot_rd_attr(ini_flags, flags, ISCSI_BOOT_INI_FLAGS); +iscsi_boot_rd_attr(ini_isns, isns-server, ISCSI_BOOT_INI_ISNS_SERVER); +iscsi_boot_rd_attr(ini_slp, slp-server, ISCSI_BOOT_INI_SLP_SERVER); +iscsi_boot_rd_attr(ini_primary_radius, pri-radius-server, + ISCSI_BOOT_INI_PRI_RADIUS_SERVER); +iscsi_boot_rd_attr(ini_secondary_radius, sec-radius-server, + ISCSI_BOOT_INI_SEC_RADIUS_SERVER); +iscsi_boot_rd_attr(ini_name, initiator-name, ISCSI_BOOT_INI_INITIATOR_NAME); + +static struct attribute *initiator_attrs[] = { + &iscsi_boot_attr_ini_index.attr, + &iscsi_boot_attr_ini_flags.attr, + &iscsi_boot_attr_ini_isns.attr, + &iscsi_boot_attr_ini_slp.attr, + &iscsi_boot_attr_ini_primary_radius.attr, + &iscsi_boot_attr_ini_secondary_radius.attr, + &iscsi_boot_attr_ini_name.attr, + NULL +}; + +static mode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct iscsi_boot_kobj *boot_kobj = + container_of(kobj, struct iscsi_boot_kobj, kobj); + + if (attr == &iscsi_boot_attr_ini_index.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_INDEX); + if (attr == &iscsi_boot_attr_ini_flags.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_FLAGS); + if (attr == &iscsi_boot_attr_ini_isns.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_ISNS_SERVER); + if (attr == &iscsi_boot_attr_ini_slp.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_SLP_SERVER); + if (attr == &iscsi_boot_attr_ini_primary_radius.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_PRI_RADIUS_SERVER); + if (attr == &iscsi_boot_attr_ini_secondary_radius.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_SEC_RADIUS_SERVER); + if (attr == &iscsi_boot_attr_ini_name.attr) + return boot_kobj->is_visible(boot_kobj->data, + ISCSI_BOOT_INI_INITIATOR_NAME); + + return 0; +} + +static struct attribute_group iscsi_boot_initiator_attr_group = { + .attrs = initiator_attrs, + .is_visible = iscsi_boot_ini_attr_is_visible, +}; + +static struct iscsi_boot_kobj * +iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, + struct attribute_group *attr_group, + const char *name, int index, void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)) +{ + struct iscsi_boot_kobj *boot_kobj; + + boot_kobj = kzalloc(sizeof(*boot_kobj), GFP_KERNEL); + if (!boot_kobj) + return NULL; + INIT_LIST_HEAD(&boot_kobj->list); + + boot_kobj->kobj.kset = boot_kset->kset; + if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype, + NULL, name, index)) { + kfree(boot_kobj); + return NULL; + } + boot_kobj->data = data; + boot_kobj->show = show; + boot_kobj->is_visible = is_visible; + + if (sysfs_create_group(&boot_kobj->kobj, attr_group)) { + /* + * We do not want to free this because the caller + * will assume that since the creation call failed + * the boot kobj 
was not setup and the normal release + * path is not being run. + */ + boot_kobj->data = NULL; + kobject_put(&boot_kobj->kobj); + return NULL; + } + boot_kobj->attr_group = attr_group; + + kobject_uevent(&boot_kobj->kobj, KOBJ_ADD); + /* Nothing broke so lets add it to the list. */ + list_add_tail(&boot_kobj->list, &boot_kset->kobj_list); + return boot_kobj; +} + +static void iscsi_boot_remove_kobj(struct iscsi_boot_kobj *boot_kobj) +{ + list_del(&boot_kobj->list); + sysfs_remove_group(&boot_kobj->kobj, boot_kobj->attr_group); + kobject_put(&boot_kobj->kobj); +} + +/** + * iscsi_boot_create_target() - create boot target sysfs dir + * @boot_kset: boot kset + * @index: the target id + * @data: driver specific data for target + * @show: attr show function + * @is_visible: attr visibility function + * + * Note: The boot sysfs lib will free the data passed in for the caller + * when all refs to the target kobject have been released. + */ +struct iscsi_boot_kobj * +iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)) +{ + return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group, + "target%d", index, data, show, is_visible); +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_target); + +/** + * iscsi_boot_create_initiator() - create boot initiator sysfs dir + * @boot_kset: boot kset + * @index: the initiator id + * @data: driver specific data + * @show: attr show function + * @is_visible: attr visibility function + * + * Note: The boot sysfs lib will free the data passed in for the caller + * when all refs to the initiator kobject have been released. + */ +struct iscsi_boot_kobj * +iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)) +{ + return iscsi_boot_create_kobj(boot_kset, + &iscsi_boot_initiator_attr_group, + "initiator", index, data, show, + is_visible); +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator); + +/** + * iscsi_boot_create_ethernet() - create boot ethernet sysfs dir + * @boot_kset: boot kset + * @index: the ethernet device id + * @data: driver specific data + * @show: attr show function + * @is_visible: attr visibility function + * + * Note: The boot sysfs lib will free the data passed in for the caller + * when all refs to the ethernet kobject have been released. 
+ */ +struct iscsi_boot_kobj * +iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)) +{ + return iscsi_boot_create_kobj(boot_kset, + &iscsi_boot_ethernet_attr_group, + "ethernet%d", index, data, show, + is_visible); +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet); + +/** + * iscsi_boot_create_kset() - creates root sysfs tree + * @set_name: name of root dir + */ +struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name) +{ + struct iscsi_boot_kset *boot_kset; + + boot_kset = kzalloc(sizeof(*boot_kset), GFP_KERNEL); + if (!boot_kset) + return NULL; + + boot_kset->kset = kset_create_and_add(set_name, NULL, firmware_kobj); + if (!boot_kset->kset) { + kfree(boot_kset); + return NULL; + } + + INIT_LIST_HEAD(&boot_kset->kobj_list); + return boot_kset; +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_kset); + +/** + * iscsi_boot_create_host_kset() - creates root sysfs tree for a scsi host + * @hostno: host number of scsi host + */ +struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno) +{ + struct iscsi_boot_kset *boot_kset; + char *set_name; + + set_name = kasprintf(GFP_KERNEL, "iscsi_boot%u", hostno); + if (!set_name) + return NULL; + + boot_kset = iscsi_boot_create_kset(set_name); + kfree(set_name); + return boot_kset; +} +EXPORT_SYMBOL_GPL(iscsi_boot_create_host_kset); + +/** + * iscsi_boot_destroy_kset() - destroy kset and kobjects under it + * @boot_kset: boot kset + * + * This will remove the kset and kobjects and attrs under it. + */ +void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset) +{ + struct iscsi_boot_kobj *boot_kobj, *tmp_kobj; + + list_for_each_entry_safe(boot_kobj, tmp_kobj, + &boot_kset->kobj_list, list) + iscsi_boot_remove_kobj(boot_kobj); + + kset_unregister(boot_kset->kset); +} +EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset); diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h new file mode 100644 index 000000000000..f1e6c184f14f --- /dev/null +++ b/include/linux/iscsi_boot_sysfs.h @@ -0,0 +1,123 @@ +/* + * Export the iSCSI boot info to userland via sysfs. + * + * Copyright (C) 2010 Red Hat, Inc. All rights reserved. + * Copyright (C) 2010 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _ISCSI_BOOT_SYSFS_ +#define _ISCSI_BOOT_SYSFS_ + +/* + * The text attributes names for each of the kobjects. +*/ +enum iscsi_boot_eth_properties_enum { + ISCSI_BOOT_ETH_INDEX, + ISCSI_BOOT_ETH_FLAGS, + ISCSI_BOOT_ETH_IP_ADDR, + ISCSI_BOOT_ETH_SUBNET_MASK, + ISCSI_BOOT_ETH_ORIGIN, + ISCSI_BOOT_ETH_GATEWAY, + ISCSI_BOOT_ETH_PRIMARY_DNS, + ISCSI_BOOT_ETH_SECONDARY_DNS, + ISCSI_BOOT_ETH_DHCP, + ISCSI_BOOT_ETH_VLAN, + ISCSI_BOOT_ETH_MAC, + /* eth_pci_bdf - this is replaced by link to the device itself. 
*/ + ISCSI_BOOT_ETH_HOSTNAME, + ISCSI_BOOT_ETH_END_MARKER, +}; + +enum iscsi_boot_tgt_properties_enum { + ISCSI_BOOT_TGT_INDEX, + ISCSI_BOOT_TGT_FLAGS, + ISCSI_BOOT_TGT_IP_ADDR, + ISCSI_BOOT_TGT_PORT, + ISCSI_BOOT_TGT_LUN, + ISCSI_BOOT_TGT_CHAP_TYPE, + ISCSI_BOOT_TGT_NIC_ASSOC, + ISCSI_BOOT_TGT_NAME, + ISCSI_BOOT_TGT_CHAP_NAME, + ISCSI_BOOT_TGT_CHAP_SECRET, + ISCSI_BOOT_TGT_REV_CHAP_NAME, + ISCSI_BOOT_TGT_REV_CHAP_SECRET, + ISCSI_BOOT_TGT_END_MARKER, +}; + +enum iscsi_boot_initiator_properties_enum { + ISCSI_BOOT_INI_INDEX, + ISCSI_BOOT_INI_FLAGS, + ISCSI_BOOT_INI_ISNS_SERVER, + ISCSI_BOOT_INI_SLP_SERVER, + ISCSI_BOOT_INI_PRI_RADIUS_SERVER, + ISCSI_BOOT_INI_SEC_RADIUS_SERVER, + ISCSI_BOOT_INI_INITIATOR_NAME, + ISCSI_BOOT_INI_END_MARKER, +}; + +struct attribute_group; + +struct iscsi_boot_kobj { + struct kobject kobj; + struct attribute_group *attr_group; + struct list_head list; + + /* + * Pointer to store driver specific info. If set this will + * be freed for the LLD when the kobj release function is called. + */ + void *data; + /* + * Driver specific show function. + * + * The enum of the type. This can be any value of the above + * properties. + */ + ssize_t (*show) (void *data, int type, char *buf); + + /* + * Drivers specific visibility function. + * The function should return if they the attr should be readable + * writable or should not be shown. + * + * The enum of the type. This can be any value of the above + * properties. + */ + mode_t (*is_visible) (void *data, int type); +}; + +struct iscsi_boot_kset { + struct list_head kobj_list; + struct kset *kset; +}; + +struct iscsi_boot_kobj * +iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)); + +struct iscsi_boot_kobj * +iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)); +struct iscsi_boot_kobj * +iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, + void *data, + ssize_t (*show) (void *data, int type, char *buf), + mode_t (*is_visible) (void *data, int type)); + +struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); +struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); +void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset); + +#endif -- cgit v1.2.3 From ac1ececea995fd77c8da6a1299674f22991cecaa Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 18 May 2010 13:00:31 -0300 Subject: i7core_edac: Add support for X5670 As reported by Vernon Mauery , X5670 (Westmere-EP) uses a different register for one of the uncore PCI devices. Add support for it. 
Those are the PCI ID's on this new chipset: fe:00.0 0600: 8086:2c70 (rev 02) fe:00.1 0600: 8086:2d81 (rev 02) fe:02.0 0600: 8086:2d90 (rev 02) fe:02.1 0600: 8086:2d91 (rev 02) fe:02.2 0600: 8086:2d92 (rev 02) fe:02.3 0600: 8086:2d93 (rev 02) fe:02.4 0600: 8086:2d94 (rev 02) fe:02.5 0600: 8086:2d95 (rev 02) fe:03.0 0600: 8086:2d98 (rev 02) fe:03.1 0600: 8086:2d99 (rev 02) fe:03.2 0600: 8086:2d9a (rev 02) fe:03.4 0600: 8086:2d9c (rev 02) fe:04.0 0600: 8086:2da0 (rev 02) fe:04.1 0600: 8086:2da1 (rev 02) fe:04.2 0600: 8086:2da2 (rev 02) fe:04.3 0600: 8086:2da3 (rev 02) fe:05.0 0600: 8086:2da8 (rev 02) fe:05.1 0600: 8086:2da9 (rev 02) fe:05.2 0600: 8086:2daa (rev 02) fe:05.3 0600: 8086:2dab (rev 02) fe:06.0 0600: 8086:2db0 (rev 02) fe:06.1 0600: 8086:2db1 (rev 02) fe:06.2 0600: 8086:2db2 (rev 02) fe:06.3 0600: 8086:2db3 (rev 02) (as usual, the same PCI devices repeat at ff: bus) The PCI device 8086:2c70 is shown as: fe:00.0 Host bridge: Intel Corporation QuickPath Architecture Generic Non-core Registers (rev 02) So, for this device to be recognized, it is only a matter of adding this new PCI ID to the driver. Signed-off-by: Mauro Carvalho Chehab --- drivers/edac/i7core_edac.c | 7 ++++++- include/linux/pci_ids.h | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index cd51709c4d89..82acfbd01779 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1213,10 +1213,15 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev); - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) { pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT, *prev); + if (!pdev) + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2, + *prev); + } if (!pdev) { if (*prev) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index e67cb20b8401..46d76e985bac 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2552,6 +2552,7 @@ #define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91 -- cgit v1.2.3 From bd9e19ca46b54fa85141c4d20afd668379d94c81 Mon Sep 17 00:00:00 2001 From: Vernon Mauery Date: Tue, 18 May 2010 19:02:50 -0300 Subject: Add support for Westmere to i7core_edac driver This adds new PCI IDs for the Westmere's memory controller devices and modifies the i7core_edac driver to be able to probe both Nehalem and Westmere processors. 
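In rough outline the new probe flow looks like the sketch below; probe_tables() and find_one_device() are illustrative stand-ins and the sentinel entry is an assumption of this sketch, while struct pci_id_table, its descr/n_devs fields and the PCI_ID_TABLE_ENTRY() macro come from the patch itself.

/*
 * Walk one descriptor table per CPU family (per-socket iteration
 * omitted for brevity).  A missing first device means "this family
 * is not fitted, try the next table"; a missing mandatory device
 * later in a table is a hard error.
 */
static int probe_tables(const struct pci_id_table *table)
{
	int i, rc;

	for (; table->descr; table++) {	/* assumes a { NULL, 0 } sentinel */
		for (i = 0; i < table->n_devs; i++) {
			rc = find_one_device(&table->descr[i]);	/* stand-in for i7core_get_onedevice() */
			if (rc < 0) {
				if (i == 0)
					break;	/* family absent: try next table */
				return rc;	/* partial topology: give up */
			}
		}
	}
	return 0;
}

With this in place i7core_probe() no longer has to look at pdev->device to pick between the Nehalem and Lynnfield descriptor arrays: i7core_get_devices() simply tries every entry of pci_dev_table[] and keeps whichever families are actually present.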
Signed-off-by: Vernon Mauery Signed-off-by: Mauro Carvalho Chehab --- drivers/edac/i7core_edac.c | 117 +++++++++++++++++++++++++++++++-------------- include/linux/pci_ids.h | 16 +++++++ 2 files changed, 96 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 3e2b5379bc05..8d63b0046480 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -206,6 +206,11 @@ struct pci_id_descr { int optional; }; +struct pci_id_table { + struct pci_id_descr *descr; + int n_devs; +}; + struct i7core_dev { struct list_head list; u8 socket; @@ -262,7 +267,7 @@ static DEFINE_MUTEX(i7core_edac_lock); .func = (function), \ .dev_id = (device_id) -struct pci_id_descr pci_dev_descr_i7core[] = { +struct pci_id_descr pci_dev_descr_i7core_nehalem[] = { /* Memory controller */ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) }, { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) }, @@ -321,6 +326,44 @@ struct pci_id_descr pci_dev_descr_lynnfield[] = { { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) }, }; +struct pci_id_descr pci_dev_descr_i7core_westmere[] = { + /* Memory controller */ + { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) }, + { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) }, + /* Exists only for RDIMM */ + { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 }, + { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) }, + + /* Channel 0 */ + { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) }, + { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) }, + { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) }, + { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) }, + + /* Channel 1 */ + { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) }, + { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) }, + { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) }, + { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) }, + + /* Channel 2 */ + { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) }, + { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) }, + { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) }, + { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) }, + + /* Generic Non-core registers */ + { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) }, + +}; + +#define PCI_ID_TABLE_ENTRY(A) { A, ARRAY_SIZE(A) } +struct pci_id_table pci_dev_table[] = { + PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem), + PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield), + PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere), +}; + /* * pci_device_id table for which devices we are looking for */ @@ -1170,7 +1213,7 @@ static void i7core_put_all_devices(void) i7core_put_devices(i7core_dev); } -static void __init i7core_xeon_pci_fixup(int dev_id) +static void __init i7core_xeon_pci_fixup(struct pci_id_table *table) { struct pci_dev *pdev = NULL; int i; @@ -1179,10 +1222,13 @@ static void __init i7core_xeon_pci_fixup(int dev_id) * aren't announced by acpi. 
So, we need to use a legacy scan probing * to detect them */ - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); - if (unlikely(!pdev)) { - for (i = 0; i < MAX_SOCKET_BUSES; i++) - pcibios_scan_specific_bus(255-i); + while (table && table->descr) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL); + if (unlikely(!pdev)) { + for (i = 0; i < MAX_SOCKET_BUSES; i++) + pcibios_scan_specific_bus(255-i); + } + table++; } } @@ -1213,15 +1259,10 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev); - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) { + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT, *prev); - if (!pdev) - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2, - *prev); - } if (!pdev) { if (*prev) { @@ -1232,6 +1273,9 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, if (dev_descr->optional) return 0; + if (devno == 0) + return -ENODEV; + i7core_printk(KERN_ERR, "Device not found: dev %02x.%d PCI ID %04x:%04x\n", dev_descr->dev, dev_descr->func, @@ -1307,24 +1351,34 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno, return 0; } -static int i7core_get_devices(struct pci_id_descr dev_descr[], unsigned n_devs) +static int i7core_get_devices(struct pci_id_table *table) { int i, rc; struct pci_dev *pdev = NULL; - - for (i = 0; i < n_devs; i++) { - pdev = NULL; - do { - rc = i7core_get_onedevice(&pdev, i, &dev_descr[i], - n_devs); - if (rc < 0) { - i7core_put_all_devices(); - return -ENODEV; - } - } while (pdev); + struct pci_id_descr *dev_descr; + + while (table && table->descr) { + dev_descr = table->descr; + for (i = 0; i < table->n_devs; i++) { + pdev = NULL; + do { + rc = i7core_get_onedevice(&pdev, i, &dev_descr[i], + table->n_devs); + if (rc < 0) { + if (i == 0) { + i = table->n_devs; + break; + } + i7core_put_all_devices(); + return -ENODEV; + } + } while (pdev); + } + table++; } return 0; + return 0; } static int mci_bind_devs(struct mem_ctl_info *mci, @@ -1884,18 +1938,7 @@ static int __devinit i7core_probe(struct pci_dev *pdev, /* get the pci devices we want to reserve for our use */ mutex_lock(&i7core_edac_lock); - if (pdev->device == PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0) { - printk(KERN_INFO "i7core_edac: detected a " - "Lynnfield processor\n"); - rc = i7core_get_devices(pci_dev_descr_lynnfield, - ARRAY_SIZE(pci_dev_descr_lynnfield)); - } else { - printk(KERN_INFO "i7core_edac: detected a " - "Nehalem/Nehalem-EP processor\n"); - rc = i7core_get_devices(pci_dev_descr_i7core, - ARRAY_SIZE(pci_dev_descr_i7core)); - } - + rc = i7core_get_devices(pci_dev_table); if (unlikely(rc < 0)) goto fail0; @@ -1994,7 +2037,7 @@ static int __init i7core_init(void) /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); - i7core_xeon_pci_fixup(pci_dev_descr_i7core[0].dev_id); + i7core_xeon_pci_fixup(pci_dev_table); pci_rc = pci_register_driver(&i7core_driver); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 46d76e985bac..413fab765a5f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2567,6 +2567,22 @@ #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR 0x2ca9 #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK 0x2caa #define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC 0x2cab +#define 
PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2 0x2d98 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2 0x2d99 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2 0x2d9a +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2 0x2d9c +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2 0x2da0 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2 0x2da1 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2 0x2da2 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2 0x2da3 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2 0x2da8 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2 0x2da9 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2 0x2daa +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2 0x2dab +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2 0x2db0 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2 +#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 #define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 #define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a -- cgit v1.2.3 From bca4b914b5da3d8e7b9b647f620b71dc85c0c394 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 20 May 2010 23:21:34 +0400 Subject: cfq-iosched: remove dead_key from cfq_io_context Remove ->dead_key field from cfq_io_context to shrink its size to 128 bytes. (64 bytes for 32-bit hosts) Use lower bit in ->key as dead-mark, instead of moving key to separate field. After this for dead cfq_io_context we got cic->key != cfqd automatically. Thus, io_context's last-hit cache should work without changing. Now to check ->key for non-dead state compare it with cfqd, instead of checking ->key for non-null value as it was before. Plus remove obsolete race protection in cfq_cic_lookup. This race gone after v2.6.24-1728-g4ac845a Signed-off-by: Konstantin Khlebnikov Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 41 ++++++++++++++++++++++++++++------------- include/linux/iocontext.h | 1 - 2 files changed, 28 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index ed897b5ef315..407602350350 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -430,6 +430,23 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic, cic->cfqq[is_sync] = cfqq; } +#define CIC_DEAD_KEY 1ul + +static inline void *cfqd_dead_key(struct cfq_data *cfqd) +{ + return (void *)((unsigned long) cfqd | CIC_DEAD_KEY); +} + +static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic) +{ + struct cfq_data *cfqd = cic->key; + + if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY)) + return NULL; + + return cfqd; +} + /* * We regard a request as SYNC, if it's either a read or has the SYNC bit * set (in which case it could also be direct WRITE). 
@@ -2510,11 +2527,12 @@ static void cfq_cic_free(struct cfq_io_context *cic) static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic) { unsigned long flags; + unsigned long dead_key = (unsigned long) cic->key; - BUG_ON(!cic->dead_key); + BUG_ON(!(dead_key & CIC_DEAD_KEY)); spin_lock_irqsave(&ioc->lock, flags); - radix_tree_delete(&ioc->radix_root, cic->dead_key); + radix_tree_delete(&ioc->radix_root, dead_key & ~CIC_DEAD_KEY); hlist_del_rcu(&cic->cic_list); spin_unlock_irqrestore(&ioc->lock, flags); @@ -2573,11 +2591,10 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd, list_del_init(&cic->queue_list); /* - * Make sure key == NULL is seen for dead queues + * Make sure dead mark is seen for dead queues */ smp_wmb(); - cic->dead_key = (unsigned long) cic->key; - cic->key = NULL; + cic->key = cfqd_dead_key(cfqd); if (ioc->ioc_data == cic) rcu_assign_pointer(ioc->ioc_data, NULL); @@ -2596,7 +2613,7 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd, static void cfq_exit_single_io_context(struct io_context *ioc, struct cfq_io_context *cic) { - struct cfq_data *cfqd = cic->key; + struct cfq_data *cfqd = cic_to_cfqd(cic); if (cfqd) { struct request_queue *q = cfqd->queue; @@ -2609,7 +2626,7 @@ static void cfq_exit_single_io_context(struct io_context *ioc, * race between exiting task and queue */ smp_read_barrier_depends(); - if (cic->key) + if (cic->key == cfqd) __cfq_exit_single_io_context(cfqd, cic); spin_unlock_irqrestore(q->queue_lock, flags); @@ -2689,7 +2706,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) { - struct cfq_data *cfqd = cic->key; + struct cfq_data *cfqd = cic_to_cfqd(cic); struct cfq_queue *cfqq; unsigned long flags; @@ -2746,7 +2763,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic) { struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1); - struct cfq_data *cfqd = cic->key; + struct cfq_data *cfqd = cic_to_cfqd(cic); unsigned long flags; struct request_queue *q; @@ -2883,6 +2900,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc, unsigned long flags; WARN_ON(!list_empty(&cic->queue_list)); + BUG_ON(cic->key != cfqd_dead_key(cfqd)); spin_lock_irqsave(&ioc->lock, flags); @@ -2900,7 +2918,6 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) { struct cfq_io_context *cic; unsigned long flags; - void *k; if (unlikely(!ioc)) return NULL; @@ -2921,9 +2938,7 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) rcu_read_unlock(); if (!cic) break; - /* ->key must be copied to avoid race with cfq_exit_queue() */ - k = cic->key; - if (unlikely(!k)) { + if (unlikely(cic->key != cfqd)) { cfq_drop_dead_cic(cfqd, ioc, cic); rcu_read_lock(); continue; diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index a0bb301afac0..64d529133031 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -7,7 +7,6 @@ struct cfq_queue; struct cfq_io_context { void *key; - unsigned long dead_key; struct cfq_queue *cfqq[2]; -- cgit v1.2.3 From 14baf9d7f275f0bbf42c1216ff1eef1109ca42ba Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 24 May 2010 16:31:08 +0900 Subject: serial: sh-sci: fix up serial DMA build. asm/dmaengine.h no longer exists, update for the shared linux/sh_dma.h header. 
Signed-off-by: Paul Mundt --- include/linux/serial_sci.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index f5364a1de68b..baed2122c5a6 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -2,9 +2,7 @@ #define __LINUX_SERIAL_SCI_H #include -#ifdef CONFIG_SERIAL_SH_SCI_DMA -#include -#endif +#include /* * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) -- cgit v1.2.3 From 8187a2b70e34c727a06617441f74f202b6fefaf9 Mon Sep 17 00:00:00 2001 From: Zou Nan hai Date: Fri, 21 May 2010 09:08:55 +0800 Subject: drm/i915: introduce intel_ring_buffer structure (V2) Introduces a more complete intel_ring_buffer structure with callbacks for setup and management of a particular ringbuffer, and converts the render ring buffer consumers to use it. Signed-off-by: Zou Nan hai Signed-off-by: Xiang Hai hao [anholt: Fixed up whitespace fail and rebased against prep patches] Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_debugfs.c | 6 +- drivers/gpu/drm/i915/i915_dma.c | 58 ++-- drivers/gpu/drm/i915/i915_drv.c | 29 +- drivers/gpu/drm/i915/i915_drv.h | 80 ++--- drivers/gpu/drm/i915/i915_gem.c | 76 ++++- drivers/gpu/drm/i915/i915_irq.c | 15 +- drivers/gpu/drm/i915/intel_display.c | 1 - drivers/gpu/drm/i915/intel_overlay.c | 8 - drivers/gpu/drm/i915/intel_ringbuffer.c | 582 +++++++++++++++++++------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 124 +++++++ include/drm/i915_drm.h | 4 +- 11 files changed, 606 insertions(+), 377 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_ringbuffer.h (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 488175c70c7d..4fddf094deb2 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -317,14 +317,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) u8 *virt; uint32_t *ptr, off; - if (!dev_priv->render_ring.ring_obj) { + if (!dev_priv->render_ring.gem_object) { seq_printf(m, "No ringbuffer setup\n"); return 0; } virt = dev_priv->render_ring.virtual_start; - for (off = 0; off < dev_priv->render_ring.Size; off += 4) { + for (off = 0; off < dev_priv->render_ring.size; off += 4) { ptr = (uint32_t *)(virt + off); seq_printf(m, "%08x : %08x\n", off, *ptr); } @@ -344,7 +344,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) seq_printf(m, "RingHead : %08x\n", head); seq_printf(m, "RingTail : %08x\n", tail); - seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.Size); + seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); return 0; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6de7eace4319..2541428b2fe5 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -40,7 +40,6 @@ #include #include - /** * Sets up the hardware status page for devices that need a physical address * in the register. 
@@ -56,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev) DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; + dev_priv->render_ring.status_page.page_addr + = dev_priv->status_page_dmah->vaddr; dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); if (IS_I965G(dev)) dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & @@ -95,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; - drm_i915_ring_buffer_t *ring = &(dev_priv->render_ring); + struct intel_ring_buffer *ring = &dev_priv->render_ring; /* * We should never lose context on the ring with modesetting @@ -108,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev) ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) - ring->space += ring->Size; + ring->space += ring->size; if (!dev->primary->master) return; @@ -128,12 +128,7 @@ static int i915_dma_cleanup(struct drm_device * dev) if (dev->irq_enabled) drm_irq_uninstall(dev); - if (dev_priv->render_ring.virtual_start) { - drm_core_ioremapfree(&dev_priv->render_ring.map, dev); - dev_priv->render_ring.virtual_start = NULL; - dev_priv->render_ring.map.handle = NULL; - dev_priv->render_ring.map.size = 0; - } + intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); /* Clear the HWS virtual address at teardown */ if (I915_NEED_GFX_HWS(dev)) @@ -156,14 +151,14 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } if (init->ring_size != 0) { - if (dev_priv->render_ring.ring_obj != NULL) { + if (dev_priv->render_ring.gem_object != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); return -EINVAL; } - dev_priv->render_ring.Size = init->ring_size; + dev_priv->render_ring.size = init->ring_size; dev_priv->render_ring.map.offset = init->ring_start; dev_priv->render_ring.map.size = init->ring_size; @@ -201,26 +196,29 @@ static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct intel_ring_buffer *ring; DRM_DEBUG_DRIVER("%s\n", __func__); - if (dev_priv->render_ring.map.handle == NULL) { + ring = &dev_priv->render_ring; + + if (ring->map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); return -ENOMEM; } /* Program Hardware Status Page */ - if (!dev_priv->hw_status_page) { + if (!ring->status_page.page_addr) { DRM_ERROR("Can not find hardware status page\n"); return -EINVAL; } DRM_DEBUG_DRIVER("hw status page @ %p\n", - dev_priv->hw_status_page); - - if (dev_priv->status_gfx_addr != 0) - I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); + ring->status_page.page_addr); + if (ring->status_page.gfx_addr != 0) + ring->setup_status_page(dev, ring); else I915_WRITE(HWS_PGA, dev_priv->dma_status_page); + DRM_DEBUG_DRIVER("Enabled hardware status page\n"); return 0; @@ -330,9 +328,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; - RING_LOCALS; - if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.Size - 8) + if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) return -EINVAL; BEGIN_LP_RING((dwords+1)&~1); @@ 
-365,9 +362,7 @@ i915_emit_box(struct drm_device *dev, struct drm_clip_rect *boxes, int i, int DR1, int DR4) { - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_clip_rect box = boxes[i]; - RING_LOCALS; if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", @@ -404,7 +399,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - RING_LOCALS; dev_priv->counter++; if (dev_priv->counter > 0x7FFFFFFFUL) @@ -458,10 +452,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects) { - drm_i915_private_t *dev_priv = dev->dev_private; int nbox = batch->num_cliprects; int i = 0, count; - RING_LOCALS; if ((batch->start | batch->used) & 0x7) { DRM_ERROR("alignment"); @@ -510,7 +502,6 @@ static int i915_dispatch_flip(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - RING_LOCALS; if (!master_priv->sarea_priv) return -EINVAL; @@ -563,7 +554,8 @@ static int i915_quiescent(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; i915_kernel_lost_context(dev); - return i915_wait_ring(dev, dev_priv->render_ring.Size - 8, __func__); + return intel_wait_ring_buffer(dev, &dev_priv->render_ring, + dev_priv->render_ring.size - 8); } static int i915_flush_ioctl(struct drm_device *dev, void *data, @@ -805,6 +797,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_hws_addr_t *hws = data; + struct intel_ring_buffer *ring = &dev_priv->render_ring; if (!I915_NEED_GFX_HWS(dev)) return -EINVAL; @@ -821,7 +814,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); - dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); + ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); dev_priv->hws_map.offset = dev->agp->base + hws->addr; dev_priv->hws_map.size = 4*1024; @@ -837,10 +830,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data, " G33 hw status page\n"); return -ENOMEM; } - dev_priv->hw_status_page = dev_priv->hws_map.handle; + ring->status_page.page_addr = dev_priv->hws_map.handle; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); + I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", dev_priv->status_gfx_addr); DRM_DEBUG_DRIVER("load hws at %p\n", @@ -1639,7 +1632,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->user_irq_lock); spin_lock_init(&dev_priv->error_lock); - dev_priv->user_irq_refcount = 0; dev_priv->trace_irq_seqno = 0; ret = drm_vblank_init(dev, I915_NUM_PIPE); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a1814f65fdb4..c57c54f403da 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -388,33 +388,10 @@ int i965_reset(struct drm_device *dev, u8 flags) * switched away). 
*/ if (drm_core_check_feature(dev, DRIVER_MODESET) || - !dev_priv->mm.suspended) { - drm_i915_ring_buffer_t *ring = &dev_priv->render_ring; - struct drm_gem_object *obj = ring->ring_obj; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + !dev_priv->mm.suspended) { + struct intel_ring_buffer *ring = &dev_priv->render_ring; dev_priv->mm.suspended = 0; - - /* Stop the ring if it's running. */ - I915_WRITE(PRB0_CTL, 0); - I915_WRITE(PRB0_TAIL, 0); - I915_WRITE(PRB0_HEAD, 0); - - /* Initialize the ring. */ - I915_WRITE(PRB0_START, obj_priv->gtt_offset); - I915_WRITE(PRB0_CTL, - ((obj->size - 4096) & RING_NR_PAGES) | - RING_NO_REPORT | - RING_VALID); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_kernel_lost_context(dev); - else { - ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) - ring->space += ring->Size; - } - + ring->init(dev, ring); mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); drm_irq_install(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a39440cf1dee..6bb7933d49dc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -31,8 +31,8 @@ #define _I915_DRV_H_ #include "i915_reg.h" -#include "i915_drm.h" #include "intel_bios.h" +#include "intel_ringbuffer.h" #include /* General customization: @@ -92,16 +92,6 @@ struct drm_i915_gem_phys_object { struct drm_gem_object *cur_obj; }; -typedef struct _drm_i915_ring_buffer { - unsigned long Size; - u8 *virtual_start; - int head; - int tail; - int space; - drm_local_map_t map; - struct drm_gem_object *ring_obj; -} drm_i915_ring_buffer_t; - struct mem_block { struct mem_block *next; struct mem_block *prev; @@ -244,7 +234,7 @@ typedef struct drm_i915_private { void __iomem *regs; struct pci_dev *bridge_dev; - drm_i915_ring_buffer_t render_ring; + struct intel_ring_buffer render_ring; drm_dma_handle_t *status_page_dmah; void *hw_status_page; @@ -270,8 +260,6 @@ typedef struct drm_i915_private { atomic_t irq_received; /** Protects user_irq_refcount and irq_mask_reg */ spinlock_t user_irq_lock; - /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). 
*/ - int user_irq_refcount; u32 trace_irq_seqno; /** Cached value of IMR to avoid reads in updating the bitfield */ u32 irq_mask_reg; @@ -832,9 +820,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); -void i915_user_irq_get(struct drm_device *dev); void i915_trace_irq_get(struct drm_device *dev, u32 seqno); -void i915_user_irq_put(struct drm_device *dev); extern void i915_enable_interrupt (struct drm_device *dev); extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); @@ -853,8 +839,10 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); -void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask); -void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask); +extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, + u32 mask); +extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, + u32 mask); void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); @@ -962,8 +950,6 @@ void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); void i915_gem_shrinker_init(void); void i915_gem_shrinker_exit(void); -int i915_gem_init_pipe_control(struct drm_device *dev); -void i915_gem_cleanup_pipe_control(struct drm_device *dev); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); @@ -1014,16 +1000,6 @@ static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; static inline void opregion_enable_asle(struct drm_device *dev) { return; } #endif -/* intel_ringbuffer.c */ -extern void i915_gem_flush(struct drm_device *dev, - uint32_t invalidate_domains, - uint32_t flush_domains); -extern int i915_dispatch_gem_execbuffer(struct drm_device *dev, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset); -extern uint32_t i915_ring_add_request(struct drm_device *dev); - /* modesetting */ extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); @@ -1044,7 +1020,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); * has access to the ring. 
*/ #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ - if (((drm_i915_private_t *)dev->dev_private)->render_ring.ring_obj == NULL) \ + if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ + == NULL) \ LOCK_TEST_WITH_RETURN(dev, file_priv); \ } while (0) @@ -1060,32 +1037,27 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); #define I915_VERBOSE 0 -#define RING_LOCALS volatile unsigned int *ring_virt__; - -#define BEGIN_LP_RING(n) do { \ - int bytes__ = 4*(n); \ - if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ - /* a wrap must occur between instructions so pad beforehand */ \ - if (unlikely (dev_priv->render_ring.tail + bytes__ > dev_priv->render_ring.Size)) \ - i915_wrap_ring(dev); \ - if (unlikely (dev_priv->render_ring.space < bytes__)) \ - i915_wait_ring(dev, bytes__, __func__); \ - ring_virt__ = (unsigned int *) \ - (dev_priv->render_ring.virtual_start + dev_priv->render_ring.tail); \ - dev_priv->render_ring.tail += bytes__; \ - dev_priv->render_ring.tail &= dev_priv->render_ring.Size - 1; \ - dev_priv->render_ring.space -= bytes__; \ +#define BEGIN_LP_RING(n) do { \ + drm_i915_private_t *dev_priv = dev->dev_private; \ + if (I915_VERBOSE) \ + DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ + intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \ } while (0) -#define OUT_RING(n) do { \ - if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *ring_virt__++ = (n); \ + +#define OUT_RING(x) do { \ + drm_i915_private_t *dev_priv = dev->dev_private; \ + if (I915_VERBOSE) \ + DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ + intel_ring_emit(dev, &dev_priv->render_ring, x); \ } while (0) #define ADVANCE_LP_RING() do { \ + drm_i915_private_t *dev_priv = dev->dev_private; \ if (I915_VERBOSE) \ - DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->render_ring.tail); \ - I915_WRITE(PRB0_TAIL, dev_priv->render_ring.tail); \ + DRM_DEBUG("ADVANCE_LP_RING %x\n", \ + dev_priv->render_ring.tail); \ + intel_ring_advance(dev, &dev_priv->render_ring); \ } while(0) /** @@ -1103,14 +1075,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); * * The area from dword 0x20 to 0x3ff is available for driver usage. 
*/ -#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) +#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ + (dev_priv->render_ring.status_page.page_addr))[reg]) #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) #define I915_GEM_HWS_INDEX 0x20 #define I915_BREADCRUMB_INDEX 0x21 -extern int i915_wrap_ring(struct drm_device * dev); -extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); - #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) #define IS_I830(dev) ((dev)->pci_device == 0x3577) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 95dbe5628a25..58b6e814fae1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1590,6 +1590,7 @@ i915_gem_process_flushing_list(struct drm_device *dev, } } } + uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, uint32_t flush_domains) @@ -1607,7 +1608,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, if (request == NULL) return 0; - seqno = i915_ring_add_request(dev); + seqno = dev_priv->render_ring.add_request(dev, &dev_priv->render_ring, + file_priv, flush_domains); DRM_DEBUG_DRIVER("%d\n", seqno); @@ -1645,10 +1647,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, static uint32_t i915_retire_commands(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; uint32_t flush_domains = 0; - RING_LOCALS; /* The sampler always gets flushed on i965 (sigh) */ if (IS_I965G(dev)) @@ -1746,7 +1746,9 @@ i915_gem_retire_requests(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; - if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) + struct intel_ring_buffer *ring = &(dev_priv->render_ring); + if (!ring->status_page.page_addr + || list_empty(&dev_priv->mm.request_list)) return; seqno = i915_get_gem_seqno(dev); @@ -1773,7 +1775,8 @@ i915_gem_retire_requests(struct drm_device *dev) if (unlikely (dev_priv->trace_irq_seqno && i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { - i915_user_irq_put(dev); + + ring->user_irq_put(dev, ring); dev_priv->trace_irq_seqno = 0; } } @@ -1803,6 +1806,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) u32 ier; int ret = 0; + struct intel_ring_buffer *ring = &dev_priv->render_ring; BUG_ON(seqno == 0); if (atomic_read(&dev_priv->mm.wedged)) @@ -1823,7 +1827,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) trace_i915_gem_request_wait_begin(dev, seqno); dev_priv->mm.waiting_gem_seqno = seqno; - i915_user_irq_get(dev); + ring->user_irq_get(dev, ring); if (interruptible) ret = wait_event_interruptible(dev_priv->irq_queue, i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || @@ -1833,7 +1837,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || atomic_read(&dev_priv->mm.wedged)); - i915_user_irq_put(dev); + ring->user_irq_put(dev, ring); dev_priv->mm.waiting_gem_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); @@ -1867,6 +1871,19 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) } +static void +i915_gem_flush(struct drm_device *dev, + uint32_t invalidate_domains, + uint32_t flush_domains) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + if (flush_domains & I915_GEM_DOMAIN_CPU) + 
drm_agp_chipset_flush(dev); + dev_priv->render_ring.flush(dev, &dev_priv->render_ring, + invalidate_domains, + flush_domains); +} + /** * Ensures that all rendering to the object has completed and the object is * safe to unbind from the GTT or access from the CPU. @@ -3820,7 +3837,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, #endif /* Exec the batchbuffer */ - ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); + ret = dev_priv->render_ring.dispatch_gem_execbuffer(dev, + &dev_priv->render_ring, + args, + cliprects, + exec_offset); if (ret) { DRM_ERROR("dispatch failed %d\n", ret); goto err; @@ -4378,7 +4399,8 @@ i915_gem_idle(struct drm_device *dev) mutex_lock(&dev->struct_mutex); - if (dev_priv->mm.suspended || dev_priv->render_ring.ring_obj == NULL) { + if (dev_priv->mm.suspended || + dev_priv->render_ring.gem_object == NULL) { mutex_unlock(&dev->struct_mutex); return 0; } @@ -4420,7 +4442,7 @@ i915_gem_idle(struct drm_device *dev) * 965+ support PIPE_CONTROL commands, which provide finer grained control * over cache flushing. */ -int +static int i915_gem_init_pipe_control(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -4459,7 +4481,8 @@ err: return ret; } -void + +static void i915_gem_cleanup_pipe_control(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -4476,6 +4499,37 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev) dev_priv->seqno_page = NULL; } +int +i915_gem_init_ringbuffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + dev_priv->render_ring = render_ring; + if (!I915_NEED_GFX_HWS(dev)) { + dev_priv->render_ring.status_page.page_addr + = dev_priv->status_page_dmah->vaddr; + memset(dev_priv->render_ring.status_page.page_addr, + 0, PAGE_SIZE); + } + if (HAS_PIPE_CONTROL(dev)) { + ret = i915_gem_init_pipe_control(dev); + if (ret) + return ret; + } + ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); + return ret; +} + +void +i915_gem_cleanup_ringbuffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); + if (HAS_PIPE_CONTROL(dev)) + i915_gem_cleanup_pipe_control(dev); +} + int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index dd91c97de968..e07c643c8365 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -545,7 +545,8 @@ i915_ringbuffer_last_batch(struct drm_device *dev) } if (bbaddr == 0) { - ring = (u32 *)(dev_priv->render_ring.virtual_start + dev_priv->render_ring.Size); + ring = (u32 *)(dev_priv->render_ring.virtual_start + + dev_priv->render_ring.size); while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { bbaddr = i915_get_bbaddr(dev, ring); if (bbaddr) @@ -639,7 +640,8 @@ static void i915_capture_error_state(struct drm_device *dev) error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); /* Record the ringbuffer */ - error->ringbuffer = i915_error_object_create(dev, dev_priv->render_ring.ring_obj); + error->ringbuffer = i915_error_object_create(dev, + dev_priv->render_ring.gem_object); /* Record buffers on the active list. 
*/ error->active_bo = NULL; @@ -984,7 +986,6 @@ static int i915_emit_irq(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; - RING_LOCALS; i915_kernel_lost_context(dev); @@ -1009,9 +1010,10 @@ static int i915_emit_irq(struct drm_device * dev) void i915_trace_irq_get(struct drm_device *dev, u32 seqno) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; if (dev_priv->trace_irq_seqno == 0) - i915_user_irq_get(dev); + render_ring->user_irq_get(dev, render_ring); dev_priv->trace_irq_seqno = seqno; } @@ -1021,6 +1023,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret = 0; + struct intel_ring_buffer *render_ring = &dev_priv->render_ring; DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, READ_BREADCRUMB(dev_priv)); @@ -1034,10 +1037,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - i915_user_irq_get(dev); + render_ring->user_irq_get(dev, render_ring); DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); - i915_user_irq_put(dev); + render_ring->user_irq_put(dev, render_ring); if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f469a84cacfd..b867f3c78408 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4629,7 +4629,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, unsigned long flags; int pipesrc_reg = (intel_crtc->pipe == 0) ? 
PIPEASRC : PIPEBSRC; int ret, pipesrc; - RING_LOCALS; work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index b0e17b06eb6e..93da83782e5e 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -211,9 +211,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; int ret; - RING_LOCALS; BUG_ON(overlay->active); @@ -248,7 +246,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay, drm_i915_private_t *dev_priv = dev->dev_private; u32 flip_addr = overlay->flip_addr; u32 tmp; - RING_LOCALS; BUG_ON(!overlay->active); @@ -274,7 +271,6 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay) drm_i915_private_t *dev_priv = dev->dev_private; int ret; u32 tmp; - RING_LOCALS; if (overlay->last_flip_req != 0) { ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); @@ -314,9 +310,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) { u32 flip_addr = overlay->flip_addr; struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; int ret; - RING_LOCALS; BUG_ON(!overlay->active); @@ -390,11 +384,9 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, int interruptible) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; u32 flip_addr; int ret; - RING_LOCALS; if (overlay->hw_wedged == HW_WEDGED) return -EIO; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 06058ddb4eed..5715c4d8cce9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -29,30 +29,24 @@ #include "drmP.h" #include "drm.h" -#include "i915_drm.h" #include "i915_drv.h" +#include "i915_drm.h" #include "i915_trace.h" -#include "intel_drv.h" -void -i915_gem_flush(struct drm_device *dev, - uint32_t invalidate_domains, - uint32_t flush_domains) +static void +render_ring_flush(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { - drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t cmd; - RING_LOCALS; - #if WATCH_EXEC DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, invalidate_domains, flush_domains); #endif - trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno, + u32 cmd; + trace_i915_gem_request_flush(dev, ring->next_seqno, invalidate_domains, flush_domains); - if (flush_domains & I915_GEM_DOMAIN_CPU) - drm_agp_chipset_flush(dev); - if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { /* * read/write caches: @@ -100,19 +94,130 @@ i915_gem_flush(struct drm_device *dev, #if WATCH_EXEC DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); #endif - BEGIN_LP_RING(2); - OUT_RING(cmd); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); + intel_ring_begin(dev, ring, 8); + intel_ring_emit(dev, ring, cmd); + intel_ring_emit(dev, ring, MI_NOOP); + intel_ring_advance(dev, ring); } +} + +static unsigned int render_ring_get_head(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return I915_READ(PRB0_HEAD) & HEAD_ADDR; +} +static unsigned int render_ring_get_tail(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t 
*dev_priv = dev->dev_private; + return I915_READ(PRB0_TAIL) & TAIL_ADDR; } + +static unsigned int render_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; + + return I915_READ(acthd_reg); +} + +static void render_ring_advance_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + I915_WRITE(PRB0_TAIL, ring->tail); +} + +static int init_ring_common(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + u32 head; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + obj_priv = to_intel_bo(ring->gem_object); + + /* Stop the ring if it's running. */ + I915_WRITE(ring->regs.ctl, 0); + I915_WRITE(ring->regs.head, 0); + I915_WRITE(ring->regs.tail, 0); + + /* Initialize the ring. */ + I915_WRITE(ring->regs.start, obj_priv->gtt_offset); + head = ring->get_head(dev, ring); + + /* G45 ring initialization fails to reset head to zero */ + if (head != 0) { + DRM_ERROR("%s head not reset to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ(ring->regs.ctl), + I915_READ(ring->regs.head), + I915_READ(ring->regs.tail), + I915_READ(ring->regs.start)); + + I915_WRITE(ring->regs.head, 0); + + DRM_ERROR("%s head forced to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ(ring->regs.ctl), + I915_READ(ring->regs.head), + I915_READ(ring->regs.tail), + I915_READ(ring->regs.start)); + } + + I915_WRITE(ring->regs.ctl, + ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) + | RING_NO_REPORT | RING_VALID); + + head = I915_READ(ring->regs.head) & HEAD_ADDR; + /* If the head is still not zero, the ring is dead */ + if (head != 0) { + DRM_ERROR("%s initialization failed " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ(ring->regs.ctl), + I915_READ(ring->regs.head), + I915_READ(ring->regs.tail), + I915_READ(ring->regs.start)); + return -EIO; + } + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_kernel_lost_context(dev); + else { + ring->head = ring->get_head(dev, ring); + ring->tail = ring->get_tail(dev, ring); + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->size; + } + return 0; +} + +static int init_render_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret = init_ring_common(dev, ring); + if (IS_I9XX(dev) && !IS_GEN3(dev)) { + I915_WRITE(MI_MODE, + (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); + } + return ret; +} + #define PIPE_CONTROL_FLUSH(addr) \ +do { \ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ PIPE_CONTROL_DEPTH_STALL); \ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ OUT_RING(0); \ OUT_RING(0); \ +} while (0) /** * Creates a new sequence number, emitting a write of it to the status page @@ -122,21 +227,15 @@ i915_gem_flush(struct drm_device *dev, * * Returned sequence numbers are nonzero on success. */ -uint32_t -i915_ring_add_request(struct drm_device *dev) +static u32 +render_ring_add_request(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_file *file_priv, + u32 flush_domains) { + u32 seqno; drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t seqno; - RING_LOCALS; - - /* Grab the seqno we're going to make this request be, and bump the - * next (skipping 0 so it can be the reserved no-seqno value). 
- */ - seqno = dev_priv->mm.next_gem_seqno; - dev_priv->mm.next_gem_seqno++; - if (dev_priv->mm.next_gem_seqno == 0) - dev_priv->mm.next_gem_seqno++; - + seqno = intel_ring_get_seqno(dev, ring); if (HAS_PIPE_CONTROL(dev)) { u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; @@ -181,13 +280,26 @@ i915_ring_add_request(struct drm_device *dev) return seqno; } -void i915_user_irq_get(struct drm_device *dev) +static u32 +render_ring_get_gem_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + if (HAS_PIPE_CONTROL(dev)) + return ((volatile u32 *)(dev_priv->seqno_page))[0]; + else + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); +} + +static void +render_ring_get_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { + if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { if (HAS_PCH_SPLIT(dev)) ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); else @@ -196,14 +308,16 @@ void i915_user_irq_get(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } -void i915_user_irq_put(struct drm_device *dev) +static void +render_ring_put_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); - if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { + BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); + if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { if (HAS_PCH_SPLIT(dev)) ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); else @@ -212,20 +326,31 @@ void i915_user_irq_put(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } -/** Dispatch a batchbuffer to the ring - */ -int -i915_dispatch_gem_execbuffer(struct drm_device *dev, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +static void render_setup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + if (IS_GEN6(dev)) { + I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); + I915_READ(HWS_PGA_GEN6); /* posting read */ + } else { + I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); + I915_READ(HWS_PGA); /* posting read */ + } + +} + +static int +render_ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { drm_i915_private_t *dev_priv = dev->dev_private; int nbox = exec->num_cliprects; int i = 0, count; uint32_t exec_start, exec_len; - RING_LOCALS; - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; exec_len = (uint32_t) exec->batch_len; @@ -242,74 +367,61 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, } if (IS_I830(dev) || IS_845G(dev)) { - BEGIN_LP_RING(4); - OUT_RING(MI_BATCH_BUFFER); - OUT_RING(exec_start | MI_BATCH_NON_SECURE); - OUT_RING(exec_start + exec_len - 4); - OUT_RING(0); - ADVANCE_LP_RING(); + intel_ring_begin(dev, ring, 4); + intel_ring_emit(dev, ring, MI_BATCH_BUFFER); + intel_ring_emit(dev, ring, + exec_start | 
MI_BATCH_NON_SECURE); + intel_ring_emit(dev, ring, exec_start + exec_len - 4); + intel_ring_emit(dev, ring, 0); } else { - BEGIN_LP_RING(2); + intel_ring_begin(dev, ring, 4); if (IS_I965G(dev)) { - OUT_RING(MI_BATCH_BUFFER_START | - (2 << 6) | - MI_BATCH_NON_SECURE_I965); - OUT_RING(exec_start); + intel_ring_emit(dev, ring, + MI_BATCH_BUFFER_START | (2 << 6) + | MI_BATCH_NON_SECURE_I965); + intel_ring_emit(dev, ring, exec_start); } else { - OUT_RING(MI_BATCH_BUFFER_START | - (2 << 6)); - OUT_RING(exec_start | MI_BATCH_NON_SECURE); + intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START + | (2 << 6)); + intel_ring_emit(dev, ring, exec_start | + MI_BATCH_NON_SECURE); } - ADVANCE_LP_RING(); } + intel_ring_advance(dev, ring); } /* XXX breadcrumb */ return 0; } -static void -i915_gem_cleanup_hws(struct drm_device *dev) +static void cleanup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - if (dev_priv->hws_obj == NULL) + obj = ring->status_page.obj; + if (obj == NULL) return; - - obj = dev_priv->hws_obj; obj_priv = to_intel_bo(obj); kunmap(obj_priv->pages[0]); i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); - dev_priv->hws_obj = NULL; + ring->status_page.obj = NULL; memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); - dev_priv->hw_status_page = NULL; - - if (HAS_PIPE_CONTROL(dev)) - i915_gem_cleanup_pipe_control(dev); - - /* Write high address into HWS_PGA when disabling. */ - I915_WRITE(HWS_PGA, 0x1ffff000); } -static int -i915_gem_init_hws(struct drm_device *dev) +static int init_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; int ret; - /* If we need a physical address for the status page, it's already - * initialized at driver load time. 
- */ - if (!I915_NEED_GFX_HWS(dev)) - return 0; - obj = i915_gem_alloc_object(dev, 4096); if (obj == NULL) { DRM_ERROR("Failed to allocate status page\n"); @@ -321,36 +433,21 @@ i915_gem_init_hws(struct drm_device *dev) ret = i915_gem_object_pin(obj, 4096); if (ret != 0) { - drm_gem_object_unreference(obj); goto err_unref; } - dev_priv->status_gfx_addr = obj_priv->gtt_offset; - - dev_priv->hw_status_page = kmap(obj_priv->pages[0]); - if (dev_priv->hw_status_page == NULL) { - DRM_ERROR("Failed to map status page.\n"); + ring->status_page.gfx_addr = obj_priv->gtt_offset; + ring->status_page.page_addr = kmap(obj_priv->pages[0]); + if (ring->status_page.page_addr == NULL) { memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); - ret = -EINVAL; goto err_unpin; } + ring->status_page.obj = obj; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); - if (HAS_PIPE_CONTROL(dev)) { - ret = i915_gem_init_pipe_control(dev); - if (ret) - goto err_unpin; - } - - dev_priv->hws_obj = obj; - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - if (IS_GEN6(dev)) { - I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr); - I915_READ(HWS_PGA_GEN6); /* posting read */ - } else { - I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); - I915_READ(HWS_PGA); /* posting read */ - } - DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); + ring->setup_status_page(dev, ring); + DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", + ring->name, ring->status_page.gfx_addr); return 0; @@ -359,43 +456,42 @@ err_unpin: err_unref: drm_gem_object_unreference(obj); err: - return 0; + return ret; } -int -i915_gem_init_ringbuffer(struct drm_device *dev) + +int intel_init_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; - drm_i915_ring_buffer_t *ring = &dev_priv->render_ring; int ret; - u32 head; + struct drm_i915_gem_object *obj_priv; + struct drm_gem_object *obj; + ring->dev = dev; - ret = i915_gem_init_hws(dev); - if (ret != 0) - return ret; + if (I915_NEED_GFX_HWS(dev)) { + ret = init_status_page(dev, ring); + if (ret) + return ret; + } - obj = i915_gem_alloc_object(dev, 128 * 1024); + obj = i915_gem_alloc_object(dev, ring->size); if (obj == NULL) { DRM_ERROR("Failed to allocate ringbuffer\n"); - i915_gem_cleanup_hws(dev); - return -ENOMEM; + ret = -ENOMEM; + goto cleanup; } - obj_priv = to_intel_bo(obj); - ret = i915_gem_object_pin(obj, 4096); + ring->gem_object = obj; + + ret = i915_gem_object_pin(obj, ring->alignment); if (ret != 0) { drm_gem_object_unreference(obj); - i915_gem_cleanup_hws(dev); - return ret; + goto cleanup; } - /* Set up the kernel mapping for the ring. */ - ring->Size = obj->size; - + obj_priv = to_intel_bo(obj); + ring->map.size = ring->size; ring->map.offset = dev->agp->base + obj_priv->gtt_offset; - ring->map.size = obj->size; ring->map.type = 0; ring->map.flags = 0; ring->map.mtrr = 0; @@ -403,143 +499,85 @@ i915_gem_init_ringbuffer(struct drm_device *dev) drm_core_ioremap_wc(&ring->map, dev); if (ring->map.handle == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); - memset(&dev_priv->render_ring, 0, sizeof(dev_priv->render_ring)); i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); - i915_gem_cleanup_hws(dev); - return -EINVAL; - } - ring->ring_obj = obj; - ring->virtual_start = ring->map.handle; - - /* Stop the ring if it's running. */ - I915_WRITE(PRB0_CTL, 0); - I915_WRITE(PRB0_TAIL, 0); - I915_WRITE(PRB0_HEAD, 0); - - /* Initialize the ring. 
*/ - I915_WRITE(PRB0_START, obj_priv->gtt_offset); - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - - /* G45 ring initialization fails to reset head to zero */ - if (head != 0) { - DRM_ERROR("Ring head not reset to zero " - "ctl %08x head %08x tail %08x start %08x\n", - I915_READ(PRB0_CTL), - I915_READ(PRB0_HEAD), - I915_READ(PRB0_TAIL), - I915_READ(PRB0_START)); - I915_WRITE(PRB0_HEAD, 0); - - DRM_ERROR("Ring head forced to zero " - "ctl %08x head %08x tail %08x start %08x\n", - I915_READ(PRB0_CTL), - I915_READ(PRB0_HEAD), - I915_READ(PRB0_TAIL), - I915_READ(PRB0_START)); + ret = -EINVAL; + goto cleanup; } - I915_WRITE(PRB0_CTL, - ((obj->size - 4096) & RING_NR_PAGES) | - RING_NO_REPORT | - RING_VALID); - - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - - /* If the head is still not zero, the ring is dead */ - if (head != 0) { - DRM_ERROR("Ring initialization failed " - "ctl %08x head %08x tail %08x start %08x\n", - I915_READ(PRB0_CTL), - I915_READ(PRB0_HEAD), - I915_READ(PRB0_TAIL), - I915_READ(PRB0_START)); - return -EIO; + ring->virtual_start = ring->map.handle; + ret = ring->init(dev, ring); + if (ret != 0) { + intel_cleanup_ring_buffer(dev, ring); + return ret; } - /* Update our cache of the ring state */ if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_kernel_lost_context(dev); else { - ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + ring->head = ring->get_head(dev, ring); + ring->tail = ring->get_tail(dev, ring); ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) - ring->space += ring->Size; + ring->space += ring->size; } - - if (IS_I9XX(dev) && !IS_GEN3(dev)) { - I915_WRITE(MI_MODE, - (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); - } - - return 0; + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + return ret; +cleanup: + cleanup_status_page(dev, ring); + return ret; } -void -i915_gem_cleanup_ringbuffer(struct drm_device *dev) +void intel_cleanup_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - - if (dev_priv->render_ring.ring_obj == NULL) + if (ring->gem_object == NULL) return; - drm_core_ioremapfree(&dev_priv->render_ring.map, dev); - - i915_gem_object_unpin(dev_priv->render_ring.ring_obj); - drm_gem_object_unreference(dev_priv->render_ring.ring_obj); - dev_priv->render_ring.ring_obj = NULL; - memset(&dev_priv->render_ring, 0, sizeof(dev_priv->render_ring)); + drm_core_ioremapfree(&ring->map, dev); - i915_gem_cleanup_hws(dev); + i915_gem_object_unpin(ring->gem_object); + drm_gem_object_unreference(ring->gem_object); + ring->gem_object = NULL; + cleanup_status_page(dev, ring); } -/* As a ringbuffer is only allowed to wrap between instructions, fill - * the tail with NOOPs. 
- */ -int i915_wrap_ring(struct drm_device *dev) +int intel_wrap_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - volatile unsigned int *virt; + unsigned int *virt; int rem; + rem = ring->size - ring->tail; - rem = dev_priv->render_ring.Size - dev_priv->render_ring.tail; - if (dev_priv->render_ring.space < rem) { - int ret = i915_wait_ring(dev, rem, __func__); + if (ring->space < rem) { + int ret = intel_wait_ring_buffer(dev, ring, rem); if (ret) return ret; } - dev_priv->render_ring.space -= rem; - virt = (unsigned int *) - (dev_priv->render_ring.virtual_start + dev_priv->render_ring.tail); + virt = (unsigned int *)(ring->virtual_start + ring->tail); rem /= 4; while (rem--) *virt++ = MI_NOOP; - dev_priv->render_ring.tail = 0; + ring->tail = 0; return 0; } -int i915_wait_ring(struct drm_device * dev, int n, const char *caller) +int intel_wait_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring, int n) { - drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_ring_buffer_t *ring = &(dev_priv->render_ring); - u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; - u32 last_acthd = I915_READ(acthd_reg); - u32 acthd; - u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - int i; + unsigned long end; trace_i915_ring_wait_begin (dev); - - for (i = 0; i < 100000; i++) { - ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - acthd = I915_READ(acthd_reg); + end = jiffies + 3 * HZ; + do { + ring->head = ring->get_head(dev, ring); ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) - ring->space += ring->Size; + ring->space += ring->size; if (ring->space >= n) { trace_i915_ring_wait_end (dev); return 0; @@ -550,19 +588,97 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; } + yield(); + } while (!time_after(jiffies, end)); + trace_i915_ring_wait_end (dev); + return -EBUSY; +} +void intel_ring_begin(struct drm_device *dev, + struct intel_ring_buffer *ring, int n) +{ + if (unlikely(ring->tail + n > ring->size)) + intel_wrap_ring_buffer(dev, ring); + if (unlikely(ring->space < n)) + intel_wait_ring_buffer(dev, ring, n); +} - if (ring->head != last_head) - i = 0; - if (acthd != last_acthd) - i = 0; +void intel_ring_emit(struct drm_device *dev, + struct intel_ring_buffer *ring, unsigned int data) +{ + unsigned int *virt = ring->virtual_start + ring->tail; + *virt = data; + ring->tail += 4; + ring->tail &= ring->size - 1; + ring->space -= 4; +} - last_head = ring->head; - last_acthd = acthd; - msleep_interruptible(10); +void intel_ring_advance(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + ring->advance_ring(dev, ring); +} - } +void intel_fill_struct(struct drm_device *dev, + struct intel_ring_buffer *ring, + void *data, + unsigned int len) +{ + unsigned int *virt = ring->virtual_start + ring->tail; + BUG_ON((len&~(4-1)) != 0); + intel_ring_begin(dev, ring, len); + memcpy(virt, data, len); + ring->tail += len; + ring->tail &= ring->size - 1; + ring->space -= len; + intel_ring_advance(dev, ring); +} - trace_i915_ring_wait_end (dev); - return -EBUSY; +u32 intel_ring_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + u32 seqno; + seqno = ring->next_seqno; + + /* reserve 0 for non-seqno */ + if (++ring->next_seqno == 0) + ring->next_seqno = 1; + return seqno; } + +struct intel_ring_buffer render_ring = { + .name = "render ring", + .regs = { + .ctl = PRB0_CTL, + 
.head = PRB0_HEAD, + .tail = PRB0_TAIL, + .start = PRB0_START + }, + .ring_flag = I915_EXEC_RENDER, + .size = 32 * PAGE_SIZE, + .alignment = PAGE_SIZE, + .virtual_start = NULL, + .dev = NULL, + .gem_object = NULL, + .head = 0, + .tail = 0, + .space = 0, + .next_seqno = 1, + .user_irq_refcount = 0, + .irq_gem_seqno = 0, + .waiting_gem_seqno = 0, + .setup_status_page = render_setup_status_page, + .init = init_render_ring, + .get_head = render_ring_get_head, + .get_tail = render_ring_get_tail, + .get_active_head = render_ring_get_active_head, + .advance_ring = render_ring_advance_ring, + .flush = render_ring_flush, + .add_request = render_ring_add_request, + .get_gem_seqno = render_ring_get_gem_seqno, + .user_irq_get = render_ring_get_user_irq, + .user_irq_put = render_ring_put_user_irq, + .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, + .status_page = {NULL, 0, NULL}, + .map = {0,} +}; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h new file mode 100644 index 000000000000..d5568d3766de --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -0,0 +1,124 @@ +#ifndef _INTEL_RINGBUFFER_H_ +#define _INTEL_RINGBUFFER_H_ + +struct intel_hw_status_page { + void *page_addr; + unsigned int gfx_addr; + struct drm_gem_object *obj; +}; + +struct drm_i915_gem_execbuffer2; +struct intel_ring_buffer { + const char *name; + struct ring_regs { + u32 ctl; + u32 head; + u32 tail; + u32 start; + } regs; + unsigned int ring_flag; + unsigned long size; + unsigned int alignment; + void *virtual_start; + struct drm_device *dev; + struct drm_gem_object *gem_object; + + unsigned int head; + unsigned int tail; + unsigned int space; + u32 next_seqno; + struct intel_hw_status_page status_page; + + u32 irq_gem_seqno; /* last seq seem at irq time */ + u32 waiting_gem_seqno; + int user_irq_refcount; + void (*user_irq_get)(struct drm_device *dev, + struct intel_ring_buffer *ring); + void (*user_irq_put)(struct drm_device *dev, + struct intel_ring_buffer *ring); + void (*setup_status_page)(struct drm_device *dev, + struct intel_ring_buffer *ring); + + int (*init)(struct drm_device *dev, + struct intel_ring_buffer *ring); + + unsigned int (*get_head)(struct drm_device *dev, + struct intel_ring_buffer *ring); + unsigned int (*get_tail)(struct drm_device *dev, + struct intel_ring_buffer *ring); + unsigned int (*get_active_head)(struct drm_device *dev, + struct intel_ring_buffer *ring); + void (*advance_ring)(struct drm_device *dev, + struct intel_ring_buffer *ring); + void (*flush)(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains); + u32 (*add_request)(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_file *file_priv, + u32 flush_domains); + u32 (*get_gem_seqno)(struct drm_device *dev, + struct intel_ring_buffer *ring); + int (*dispatch_gem_execbuffer)(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset); + + /** + * List of objects currently involved in rendering from the + * ringbuffer. + * + * Includes buffers having the contents of their GPU caches + * flushed, not necessarily primitives. last_rendering_seqno + * represents when the rendering involved will be completed. + * + * A reference is held on the buffer while on this list. + */ + struct list_head active_list; + + /** + * List of breadcrumbs associated with GPU requests currently + * outstanding. 
+ */ + struct list_head request_list; + + wait_queue_head_t irq_queue; + drm_local_map_t map; +}; + +static inline u32 +intel_read_status_page(struct intel_ring_buffer *ring, + int reg) +{ + u32 *regs = ring->status_page.page_addr; + return regs[reg]; +} + +int intel_init_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring); +void intel_cleanup_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring); +int intel_wait_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring, int n); +int intel_wrap_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring); +void intel_ring_begin(struct drm_device *dev, + struct intel_ring_buffer *ring, int n); +void intel_ring_emit(struct drm_device *dev, + struct intel_ring_buffer *ring, u32 data); +void intel_fill_struct(struct drm_device *dev, + struct intel_ring_buffer *ring, + void *data, + unsigned int len); +void intel_ring_advance(struct drm_device *dev, + struct intel_ring_buffer *ring); + +u32 intel_ring_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring); + +extern struct intel_ring_buffer render_ring; +extern struct intel_ring_buffer bsd_ring; + +#endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index b64a8d7cdf6d..e9168704cabe 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -616,7 +616,9 @@ struct drm_i915_gem_execbuffer2 { __u32 num_cliprects; /** This is a struct drm_clip_rect *cliprects */ __u64 cliprects_ptr; - __u64 flags; /* currently unused */ +#define I915_EXEC_RENDER (1<<0) +#define I915_EXEC_BSD (1<<1) + __u64 flags; __u64 rsvd1; __u64 rsvd2; }; -- cgit v1.2.3 From 7fc74f17e6c9b4d86371c3a947afc32bd6bc9691 Mon Sep 17 00:00:00 2001 From: Marcin Kościelnicki Date: Sun, 23 May 2010 11:36:04 +0000 Subject: drm/nouveau: Add getparam for current PTIMER time. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will be useful for computing GPU-CPU latency, including the GL_ARB_timer_query extension. Signed-off-by: Marcin Kościelnicki Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 3 +++ include/drm/nouveau_drm.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index e632339c323e..a2544ffd02cc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -913,6 +913,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, case NOUVEAU_GETPARAM_VM_VRAM_BASE: getparam->value = dev_priv->vm_vram_base; break; + case NOUVEAU_GETPARAM_PTIMER_TIME: + getparam->value = dev_priv->engine.timer.read(dev); + break; case NOUVEAU_GETPARAM_GRAPH_UNITS: /* NV40 and NV50 versions are quite different, but register * address is the same.
User is supposed to know the card diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index a6a9f4af5ebd..fe917dee723a 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h @@ -79,6 +79,7 @@ struct drm_nouveau_gpuobj_free { #define NOUVEAU_GETPARAM_CHIPSET_ID 11 #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 +#define NOUVEAU_GETPARAM_PTIMER_TIME 14 struct drm_nouveau_getparam { uint64_t param; uint64_t value; -- cgit v1.2.3 From 487d9fc5016529d7d77dfe35b666fd3a090e2953 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 18 May 2010 14:42:51 +0000 Subject: sh: prepare MMCIF driver header file Update the MMCIF driver to include register information and register access functions in the header file. The MMCIF boot code builds on top of this. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- drivers/mmc/host/sh_mmcif.c | 125 +++++++++++++++++-------------------------- include/linux/mmc/sh_mmcif.h | 32 +++++++++++ 2 files changed, 82 insertions(+), 75 deletions(-) (limited to 'include') diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index eb97830c0344..5d3f824bb5a3 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -30,25 +30,6 @@ #define DRIVER_NAME "sh_mmcif" #define DRIVER_VERSION "2010-04-28" -#define MMCIF_CE_CMD_SET 0x00000000 -#define MMCIF_CE_ARG 0x00000008 -#define MMCIF_CE_ARG_CMD12 0x0000000C -#define MMCIF_CE_CMD_CTRL 0x00000010 -#define MMCIF_CE_BLOCK_SET 0x00000014 -#define MMCIF_CE_CLK_CTRL 0x00000018 -#define MMCIF_CE_BUF_ACC 0x0000001C -#define MMCIF_CE_RESP3 0x00000020 -#define MMCIF_CE_RESP2 0x00000024 -#define MMCIF_CE_RESP1 0x00000028 -#define MMCIF_CE_RESP0 0x0000002C -#define MMCIF_CE_RESP_CMD12 0x00000030 -#define MMCIF_CE_DATA 0x00000034 -#define MMCIF_CE_INT 0x00000040 -#define MMCIF_CE_INT_MASK 0x00000044 -#define MMCIF_CE_HOST_STS1 0x00000048 -#define MMCIF_CE_HOST_STS2 0x0000004C -#define MMCIF_CE_VERSION 0x0000007C - /* CE_CMD_SET */ #define CMD_MASK 0x3f000000 #define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22)) @@ -207,27 +188,17 @@ struct sh_mmcif_host { wait_queue_head_t intr_wait; }; -static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg) -{ - return readl(host->addr + reg); -} - -static inline void sh_mmcif_writel(struct sh_mmcif_host *host, - unsigned int reg, u32 val) -{ - writel(val, host->addr + reg); -} static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, unsigned int reg, u32 val) { - writel(val | sh_mmcif_readl(host, reg), host->addr + reg); + writel(val | readl(host->addr + reg), host->addr + reg); } static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, unsigned int reg, u32 val) { - writel(~val & sh_mmcif_readl(host, reg), host->addr + reg); + writel(~val & readl(host->addr + reg), host->addr + reg); } @@ -253,10 +224,10 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) { u32 tmp; - tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL); + tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL); - sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON); - sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF); + sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); + sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); /* byte swap on */ @@ -271,12 +242,10 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host) host->sd_error = 0; host->wait_int = 0; - 
state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1); - state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2); - pr_debug("%s: ERR HOST_STS1 = %08x\n", \ - DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)); - pr_debug("%s: ERR HOST_STS2 = %08x\n", \ - DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2)); + state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); + state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); + pr_debug("%s: ERR HOST_STS1 = %08x\n", DRIVER_NAME, state1); + pr_debug("%s: ERR HOST_STS2 = %08x\n", DRIVER_NAME, state2); if (state1 & STS1_CMDSEQ) { sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); @@ -288,7 +257,7 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host) "command sequence timeout err\n"); return -EIO; } - if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1) + if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) & STS1_CMDSEQ)) break; mdelay(1); @@ -330,9 +299,9 @@ static int sh_mmcif_single_read(struct sh_mmcif_host *host, host->wait_int = 0; blocksize = (BLOCK_SIZE_MASK & - sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3; + sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; for (i = 0; i < blocksize / 4; i++) - *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA); + *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); /* buffer read end */ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); @@ -353,7 +322,8 @@ static int sh_mmcif_multi_read(struct sh_mmcif_host *host, long time; u32 blocksize, i, j, sec, *p; - blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET); + blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr, + MMCIF_CE_BLOCK_SET); for (j = 0; j < data->sg_len; j++) { p = sg_virt(data->sg); host->wait_int = 0; @@ -370,7 +340,8 @@ static int sh_mmcif_multi_read(struct sh_mmcif_host *host, host->wait_int = 0; for (i = 0; i < blocksize / 4; i++) - *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA); + *p++ = sh_mmcif_readl(host->addr, + MMCIF_CE_DATA); } if (j < data->sg_len - 1) data->sg++; @@ -397,9 +368,9 @@ static int sh_mmcif_single_write(struct sh_mmcif_host *host, host->wait_int = 0; blocksize = (BLOCK_SIZE_MASK & - sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3; + sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; for (i = 0; i < blocksize / 4; i++) - sh_mmcif_writel(host, MMCIF_CE_DATA, *p++); + sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); /* buffer write end */ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); @@ -421,7 +392,8 @@ static int sh_mmcif_multi_write(struct sh_mmcif_host *host, long time; u32 i, sec, j, blocksize, *p; - blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET); + blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr, + MMCIF_CE_BLOCK_SET); for (j = 0; j < data->sg_len; j++) { p = sg_virt(data->sg); @@ -439,7 +411,8 @@ static int sh_mmcif_multi_write(struct sh_mmcif_host *host, host->wait_int = 0; for (i = 0; i < blocksize / 4; i++) - sh_mmcif_writel(host, MMCIF_CE_DATA, *p++); + sh_mmcif_writel(host->addr, + MMCIF_CE_DATA, *p++); } if (j < data->sg_len - 1) data->sg++; @@ -451,18 +424,18 @@ static void sh_mmcif_get_response(struct sh_mmcif_host *host, struct mmc_command *cmd) { if (cmd->flags & MMC_RSP_136) { - cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3); - cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2); - cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1); - cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0); + cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3); + cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2); + cmd->resp[2] = 
sh_mmcif_readl(host->addr, MMCIF_CE_RESP1); + cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); } else - cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0); + cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); } static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, struct mmc_command *cmd) { - cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12); + cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12); } static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, @@ -596,18 +569,19 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO; if (host->data) { - sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0); - sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz); + sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); + sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, + mrq->data->blksz); } opc = sh_mmcif_set_cmd(host, mrq, cmd, opc); - sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0); - sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask); + sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); /* set arg */ - sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg); + sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); host->wait_int = 0; /* set cmd */ - sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc); + sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); time = wait_event_interruptible_timeout(host->intr_wait, host->wait_int == 1 || host->sd_error == 1, host->timeout); @@ -752,43 +726,44 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) u32 state = 0; int err = 0; - state = sh_mmcif_readl(host, MMCIF_CE_INT); + state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); if (state & INT_RBSYE) { - sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE)); + sh_mmcif_writel(host->addr, MMCIF_CE_INT, + ~(INT_RBSYE | INT_CRSPE)); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE); } else if (state & INT_CRSPE) { - sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE); + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE); } else if (state & INT_BUFREN) { - sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN); + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); } else if (state & INT_BUFWEN) { - sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN); + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); } else if (state & INT_CMD12DRE) { - sh_mmcif_writel(host, MMCIF_CE_INT, + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(INT_CMD12DRE | INT_CMD12RBE | INT_CMD12CRE | INT_BUFRE)); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); } else if (state & INT_BUFRE) { - sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE); + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); } else if (state & INT_DTRANE) { - sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE); + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); } else if (state & INT_CMD12RBE) { - sh_mmcif_writel(host, MMCIF_CE_INT, + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(INT_CMD12RBE | INT_CMD12CRE)); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); } else if (state & INT_ERR_STS) { /* err interrupts */ - sh_mmcif_writel(host, MMCIF_CE_INT, ~state); + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); sh_mmcif_bitclr(host, 
MMCIF_CE_INT_MASK, state); err = 1; } else { pr_debug("%s: Not support int\n", DRIVER_NAME); - sh_mmcif_writel(host, MMCIF_CE_INT, ~state); + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); err = 1; } @@ -894,12 +869,12 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) goto clean_up2; } - sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); sh_mmcif_detect(host->mmc); pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION); pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME, - sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff); + sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); return ret; clean_up2: @@ -917,7 +892,7 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) struct sh_mmcif_host *host = platform_get_drvdata(pdev); int irq[2]; - sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); irq[0] = platform_get_irq(pdev, 0); irq[1] = platform_get_irq(pdev, 1); diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h index aafe832f18aa..e079c6beeb98 100644 --- a/include/linux/mmc/sh_mmcif.h +++ b/include/linux/mmc/sh_mmcif.h @@ -14,6 +14,9 @@ #ifndef __SH_MMCIF_H__ #define __SH_MMCIF_H__ +#include +#include + /* * MMCIF : CE_CLK_CTRL [19:16] * 1000 : Peripheral clock / 512 @@ -36,4 +39,33 @@ struct sh_mmcif_plat_data { u32 ocr; }; +#define MMCIF_CE_CMD_SET 0x00000000 +#define MMCIF_CE_ARG 0x00000008 +#define MMCIF_CE_ARG_CMD12 0x0000000C +#define MMCIF_CE_CMD_CTRL 0x00000010 +#define MMCIF_CE_BLOCK_SET 0x00000014 +#define MMCIF_CE_CLK_CTRL 0x00000018 +#define MMCIF_CE_BUF_ACC 0x0000001C +#define MMCIF_CE_RESP3 0x00000020 +#define MMCIF_CE_RESP2 0x00000024 +#define MMCIF_CE_RESP1 0x00000028 +#define MMCIF_CE_RESP0 0x0000002C +#define MMCIF_CE_RESP_CMD12 0x00000030 +#define MMCIF_CE_DATA 0x00000034 +#define MMCIF_CE_INT 0x00000040 +#define MMCIF_CE_INT_MASK 0x00000044 +#define MMCIF_CE_HOST_STS1 0x00000048 +#define MMCIF_CE_HOST_STS2 0x0000004C +#define MMCIF_CE_VERSION 0x0000007C + +extern inline u32 sh_mmcif_readl(void __iomem *addr, int reg) +{ + return readl(addr + reg); +} + +extern inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val) +{ + writel(val, addr + reg); +} + #endif /* __SH_MMCIF_H__ */ -- cgit v1.2.3 From 8a768952ca8cb5cad98cfa343e6fb131e3bbdc3e Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 18 May 2010 14:43:04 +0000 Subject: sh: add boot code to MMCIF driver header This patch adds a set of MMCIF functions for the romImage boot loader that allows the kernel to be booted directly from an MMC card. Thanks to Jeremy Baker for the initial prototype. 
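Illustrative sketch, not part of the patch: how a romImage-stage loader might drive the two entry points added below, sh_mmcif_boot_init() and sh_mmcif_boot_slurp(). The MMCIF base address and the destination buffer are placeholder assumptions, not values from this series.

#include <linux/mmc/sh_mmcif.h>

/* board-specific MMCIF base address: assumed value, for illustration only */
static void __iomem *mmcif_base = (void __iomem *)0xa4ca0000;

static void example_boot_load(unsigned char *dest, unsigned long nbytes)
{
        sh_mmcif_boot_init(mmcif_base);                /* reset + CMD0/1/2/3 card bring-up */
        sh_mmcif_boot_slurp(mmcif_base, dest, nbytes); /* CMD9/CMD7, then block reads */
}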
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- include/linux/mmc/sh_mmcif.h | 129 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) (limited to 'include') diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h index e079c6beeb98..d4a2ebbdab4b 100644 --- a/include/linux/mmc/sh_mmcif.h +++ b/include/linux/mmc/sh_mmcif.h @@ -68,4 +68,133 @@ extern inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val) writel(val, addr + reg); } +#define SH_MMCIF_BBS 512 /* boot block size */ + +extern inline void sh_mmcif_boot_cmd_send(void __iomem *base, + unsigned long cmd, unsigned long arg) +{ + sh_mmcif_writel(base, MMCIF_CE_INT, 0); + sh_mmcif_writel(base, MMCIF_CE_ARG, arg); + sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd); +} + +extern inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask) +{ + unsigned long tmp; + int cnt; + + for (cnt = 0; cnt < 1000000; cnt++) { + tmp = sh_mmcif_readl(base, MMCIF_CE_INT); + if (tmp & mask) { + sh_mmcif_writel(base, MMCIF_CE_INT, tmp & ~mask); + return 0; + } + } + + return -1; +} + +extern inline int sh_mmcif_boot_cmd(void __iomem *base, + unsigned long cmd, unsigned long arg) +{ + sh_mmcif_boot_cmd_send(base, cmd, arg); + return sh_mmcif_boot_cmd_poll(base, 0x00010000); +} + +extern inline int sh_mmcif_boot_do_read_single(void __iomem *base, + unsigned int block_nr, + unsigned long *buf) +{ + int k; + + /* CMD13 - Status */ + sh_mmcif_boot_cmd(base, 0x0d400000, 0x00010000); + + if (sh_mmcif_readl(base, MMCIF_CE_RESP0) != 0x0900) + return -1; + + /* CMD17 - Read */ + sh_mmcif_boot_cmd(base, 0x11480000, block_nr * SH_MMCIF_BBS); + if (sh_mmcif_boot_cmd_poll(base, 0x00100000) < 0) + return -1; + + for (k = 0; k < (SH_MMCIF_BBS / 4); k++) + buf[k] = sh_mmcif_readl(base, MMCIF_CE_DATA); + + return 0; +} + +extern inline int sh_mmcif_boot_do_read(void __iomem *base, + unsigned long first_block, + unsigned long nr_blocks, + void *buf) +{ + unsigned long k; + int ret = 0; + + /* CMD16 - Set the block size */ + sh_mmcif_boot_cmd(base, 0x10400000, SH_MMCIF_BBS); + + for (k = 0; !ret && k < nr_blocks; k++) + ret = sh_mmcif_boot_do_read_single(base, first_block + k, + buf + (k * SH_MMCIF_BBS)); + + return ret; +} + +extern inline void sh_mmcif_boot_init(void __iomem *base) +{ + unsigned long tmp; + + /* reset */ + tmp = sh_mmcif_readl(base, MMCIF_CE_VERSION); + sh_mmcif_writel(base, MMCIF_CE_VERSION, tmp | 0x80000000); + sh_mmcif_writel(base, MMCIF_CE_VERSION, tmp & ~0x80000000); + + /* byte swap */ + sh_mmcif_writel(base, MMCIF_CE_BUF_ACC, 0x00010000); + + /* Set block size in MMCIF hardware */ + sh_mmcif_writel(base, MMCIF_CE_BLOCK_SET, SH_MMCIF_BBS); + + /* Enable the clock, set it to Bus clock/256 (about 325Khz)*/ + sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, 0x01072fff); + + /* CMD0 */ + sh_mmcif_boot_cmd(base, 0x00000040, 0); + + /* CMD1 - Get OCR */ + do { + sh_mmcif_boot_cmd(base, 0x01405040, 0x40300000); /* CMD1 */ + } while ((sh_mmcif_readl(base, MMCIF_CE_RESP0) & 0x80000000) + != 0x80000000); + + /* CMD2 - Get CID */ + sh_mmcif_boot_cmd(base, 0x02806040, 0); + + /* CMD3 - Set card relative address */ + sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000); +} + +extern inline void sh_mmcif_boot_slurp(void __iomem *base, + unsigned char *buf, + unsigned long no_bytes) +{ + unsigned long tmp; + + /* In data transfer mode: Set clock to Bus clock/4 (about 20Mhz) */ + sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, 0x01012fff); + + /* CMD9 - Get CSD */ + sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000); 
+ + /* CMD7 - Select the card */ + sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000); + + tmp = no_bytes / SH_MMCIF_BBS; + tmp += (no_bytes % SH_MMCIF_BBS) ? 1 : 0; + + sh_mmcif_boot_do_read(base, 512, tmp, buf); +} + #endif /* __SH_MMCIF_H__ */ -- cgit v1.2.3 From ac9721f3f54b27a16c7e1afb2481e7ee95a70318 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 27 May 2010 12:54:41 +0200 Subject: perf_events: Fix races and clean up perf_event and perf_mmap_data interaction In order to move toward separate buffer objects, rework the whole perf_mmap_data construct to be a more self-sufficient entity, one with its own lifetime rules. This greatly sanitizes the whole output redirection code, which was riddled with bugs and races. Signed-off-by: Peter Zijlstra Cc: LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 5 +- kernel/perf_event.c | 224 +++++++++++++++++++++++++-------------------- 2 files changed, 129 insertions(+), 100 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index fb6c91eac7e3..490698590d6e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -585,6 +585,7 @@ enum perf_event_active_state { struct file; struct perf_mmap_data { + atomic_t refcount; struct rcu_head rcu_head; #ifdef CONFIG_PERF_USE_VMALLOC struct work_struct work; @@ -592,7 +593,6 @@ struct perf_mmap_data { #endif int nr_pages; /* nr of data pages */ int writable; /* are we writable */ - int nr_locked; /* nr pages mlocked */ atomic_t poll; /* POLL_ for wakeups */ @@ -643,7 +643,6 @@ struct perf_event { int nr_siblings; int group_flags; struct perf_event *group_leader; - struct perf_event *output; const struct pmu *pmu; enum perf_event_active_state state; @@ -704,6 +703,8 @@ struct perf_event { /* mmap bits */ struct mutex mmap_mutex; atomic_t mmap_count; + int mmap_locked; + struct user_struct *mmap_user; struct perf_mmap_data *data; /* poll related */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index bd7ce8ca5bb9..848d49a043e9 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1841,6 +1841,7 @@ static void free_event_rcu(struct rcu_head *head) } static void perf_pending_sync(struct perf_event *event); +static void perf_mmap_data_put(struct perf_mmap_data *data); static void free_event(struct perf_event *event) { @@ -1856,9 +1857,9 @@ static void free_event(struct perf_event *event) atomic_dec(&nr_task_events); } - if (event->output) { - fput(event->output->filp); - event->output = NULL; + if (event->data) { + perf_mmap_data_put(event->data); + event->data = NULL; } if (event->destroy) @@ -2175,7 +2176,27 @@ unlock: return ret; } -static int perf_event_set_output(struct perf_event *event, int output_fd); +static const struct file_operations perf_fops; + +static struct perf_event *perf_fget_light(int fd, int *fput_needed) +{ + struct file *file; + + file = fget_light(fd, fput_needed); + if (!file) + return ERR_PTR(-EBADF); + + if (file->f_op != &perf_fops) { + fput_light(file, *fput_needed); + *fput_needed = 0; + return ERR_PTR(-EBADF); + } + + return file->private_data; +} + +static int perf_event_set_output(struct perf_event *event, + struct perf_event *output_event); static int perf_event_set_filter(struct perf_event *event, void __user *arg); static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) @@ -2202,7 +2223,23 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return perf_event_period(event, (u64 __user *)arg); case 
PERF_EVENT_IOC_SET_OUTPUT: - return perf_event_set_output(event, arg); + { + struct perf_event *output_event = NULL; + int fput_needed = 0; + int ret; + + if (arg != -1) { + output_event = perf_fget_light(arg, &fput_needed); + if (IS_ERR(output_event)) + return PTR_ERR(output_event); + } + + ret = perf_event_set_output(event, output_event); + if (output_event) + fput_light(output_event->filp, fput_needed); + + return ret; + } case PERF_EVENT_IOC_SET_FILTER: return perf_event_set_filter(event, (void __user *)arg); @@ -2335,8 +2372,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages) unsigned long size; int i; - WARN_ON(atomic_read(&event->mmap_count)); - size = sizeof(struct perf_mmap_data); size += nr_pages * sizeof(void *); @@ -2452,8 +2487,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages) unsigned long size; void *all_buf; - WARN_ON(atomic_read(&event->mmap_count)); - size = sizeof(struct perf_mmap_data); size += sizeof(void *); @@ -2536,7 +2569,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) if (!data->watermark) data->watermark = max_size / 2; - + atomic_set(&data->refcount, 1); rcu_assign_pointer(event->data, data); } @@ -2548,13 +2581,26 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) perf_mmap_data_free(data); } -static void perf_mmap_data_release(struct perf_event *event) +static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event) { - struct perf_mmap_data *data = event->data; + struct perf_mmap_data *data; + + rcu_read_lock(); + data = rcu_dereference(event->data); + if (data) { + if (!atomic_inc_not_zero(&data->refcount)) + data = NULL; + } + rcu_read_unlock(); + + return data; +} - WARN_ON(atomic_read(&event->mmap_count)); +static void perf_mmap_data_put(struct perf_mmap_data *data) +{ + if (!atomic_dec_and_test(&data->refcount)) + return; - rcu_assign_pointer(event->data, NULL); call_rcu(&data->rcu_head, perf_mmap_data_free_rcu); } @@ -2569,15 +2615,18 @@ static void perf_mmap_close(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; - WARN_ON_ONCE(event->ctx->parent_ctx); if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { unsigned long size = perf_data_size(event->data); - struct user_struct *user = current_user(); + struct user_struct *user = event->mmap_user; + struct perf_mmap_data *data = event->data; atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); - vma->vm_mm->locked_vm -= event->data->nr_locked; - perf_mmap_data_release(event); + vma->vm_mm->locked_vm -= event->mmap_locked; + rcu_assign_pointer(event->data, NULL); mutex_unlock(&event->mmap_mutex); + + perf_mmap_data_put(data); + free_uid(user); } } @@ -2629,13 +2678,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) WARN_ON_ONCE(event->ctx->parent_ctx); mutex_lock(&event->mmap_mutex); - if (event->output) { - ret = -EINVAL; - goto unlock; - } - - if (atomic_inc_not_zero(&event->mmap_count)) { - if (nr_pages != event->data->nr_pages) + if (event->data) { + if (event->data->nr_pages == nr_pages) + atomic_inc(&event->data->refcount); + else ret = -EINVAL; goto unlock; } @@ -2667,21 +2713,23 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) WARN_ON(event->data); data = perf_mmap_data_alloc(event, nr_pages); - ret = -ENOMEM; - if (!data) + if (!data) { + ret = -ENOMEM; goto unlock; + } - ret = 0; perf_mmap_data_init(event, data); - - atomic_set(&event->mmap_count, 1); - atomic_long_add(user_extra, &user->locked_vm); - 
vma->vm_mm->locked_vm += extra; - event->data->nr_locked = extra; if (vma->vm_flags & VM_WRITE) event->data->writable = 1; + atomic_long_add(user_extra, &user->locked_vm); + event->mmap_locked = extra; + event->mmap_user = get_current_user(); + vma->vm_mm->locked_vm += event->mmap_locked; + unlock: + if (!ret) + atomic_inc(&event->mmap_count); mutex_unlock(&event->mmap_mutex); vma->vm_flags |= VM_RESERVED; @@ -2993,7 +3041,6 @@ int perf_output_begin(struct perf_output_handle *handle, struct perf_event *event, unsigned int size, int nmi, int sample) { - struct perf_event *output_event; struct perf_mmap_data *data; unsigned long tail, offset, head; int have_lost; @@ -3010,10 +3057,6 @@ int perf_output_begin(struct perf_output_handle *handle, if (event->parent) event = event->parent; - output_event = rcu_dereference(event->output); - if (output_event) - event = output_event; - data = rcu_dereference(event->data); if (!data) goto out; @@ -4912,39 +4955,17 @@ err_size: goto out; } -static int perf_event_set_output(struct perf_event *event, int output_fd) +static int +perf_event_set_output(struct perf_event *event, struct perf_event *output_event) { - struct perf_event *output_event = NULL; - struct file *output_file = NULL; - struct perf_event *old_output; - int fput_needed = 0; + struct perf_mmap_data *data = NULL, *old_data = NULL; int ret = -EINVAL; - /* - * Don't allow output of inherited per-task events. This would - * create performance issues due to cross cpu access. - */ - if (event->cpu == -1 && event->attr.inherit) - return -EINVAL; - - if (!output_fd) + if (!output_event) goto set; - output_file = fget_light(output_fd, &fput_needed); - if (!output_file) - return -EBADF; - - if (output_file->f_op != &perf_fops) - goto out; - - output_event = output_file->private_data; - - /* Don't chain output fds */ - if (output_event->output) - goto out; - - /* Don't set an output fd when we already have an output channel */ - if (event->data) + /* don't allow circular references */ + if (event == output_event) goto out; /* @@ -4959,26 +4980,28 @@ static int perf_event_set_output(struct perf_event *event, int output_fd) if (output_event->cpu == -1 && output_event->ctx != event->ctx) goto out; - atomic_long_inc(&output_file->f_count); - set: mutex_lock(&event->mmap_mutex); - old_output = event->output; - rcu_assign_pointer(event->output, output_event); - mutex_unlock(&event->mmap_mutex); + /* Can't redirect output if we've got an active mmap() */ + if (atomic_read(&event->mmap_count)) + goto unlock; - if (old_output) { - /* - * we need to make sure no existing perf_output_*() - * is still referencing this event. 
- */ - synchronize_rcu(); - fput(old_output->filp); + if (output_event) { + /* get the buffer we want to redirect to */ + data = perf_mmap_data_get(output_event); + if (!data) + goto unlock; } + old_data = event->data; + rcu_assign_pointer(event->data, data); ret = 0; +unlock: + mutex_unlock(&event->mmap_mutex); + + if (old_data) + perf_mmap_data_put(old_data); out: - fput_light(output_file, fput_needed); return ret; } @@ -4994,7 +5017,7 @@ SYSCALL_DEFINE5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) { - struct perf_event *event, *group_leader; + struct perf_event *event, *group_leader = NULL, *output_event = NULL; struct perf_event_attr attr; struct perf_event_context *ctx; struct file *event_file = NULL; @@ -5034,19 +5057,25 @@ SYSCALL_DEFINE5(perf_event_open, goto err_fd; } + if (group_fd != -1) { + group_leader = perf_fget_light(group_fd, &fput_needed); + if (IS_ERR(group_leader)) { + err = PTR_ERR(group_leader); + goto err_put_context; + } + group_file = group_leader->filp; + if (flags & PERF_FLAG_FD_OUTPUT) + output_event = group_leader; + if (flags & PERF_FLAG_FD_NO_GROUP) + group_leader = NULL; + } + /* * Look up the group leader (we will attach this event to it): */ - group_leader = NULL; - if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { + if (group_leader) { err = -EINVAL; - group_file = fget_light(group_fd, &fput_needed); - if (!group_file) - goto err_put_context; - if (group_file->f_op != &perf_fops) - goto err_put_context; - group_leader = group_file->private_data; /* * Do not allow a recursive hierarchy (this new sibling * becoming part of another group-sibling): @@ -5068,9 +5097,16 @@ SYSCALL_DEFINE5(perf_event_open, event = perf_event_alloc(&attr, cpu, ctx, group_leader, NULL, NULL, GFP_KERNEL); - err = PTR_ERR(event); - if (IS_ERR(event)) + if (IS_ERR(event)) { + err = PTR_ERR(event); goto err_put_context; + } + + if (output_event) { + err = perf_event_set_output(event, output_event); + if (err) + goto err_free_put_context; + } event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); if (IS_ERR(event_file)) { @@ -5078,12 +5114,6 @@ SYSCALL_DEFINE5(perf_event_open, goto err_free_put_context; } - if (flags & PERF_FLAG_FD_OUTPUT) { - err = perf_event_set_output(event, group_fd); - if (err) - goto err_fput_free_put_context; - } - event->filp = event_file; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); @@ -5101,8 +5131,6 @@ SYSCALL_DEFINE5(perf_event_open, fd_install(event_fd, event_file); return event_fd; -err_fput_free_put_context: - fput(event_file); err_free_put_context: free_event(event); err_put_context: -- cgit v1.2.3 From 8a49542c0554af7d0073aac0ee73ee65b807ef34 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 27 May 2010 15:47:49 +0200 Subject: perf_events: Fix races in group composition Group siblings don't pin each-other or the parent, so when we destroy events we must make sure to clean up all cross referencing pointers. In particular, for destruction of a group leader we must be able to find all its siblings and remove their reference to it. This means that detaching an event from its context must not detach it from the group, otherwise we can end up failing to clear all pointers. Solve this by clearly separating the attachment to a context and attachment to a group, and keep the group composed until we destroy the events. 
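Illustrative sketch, simplified from the patch below: context attachment and group attachment become independent bits in event->attach_state, so each detach path can detect and tolerate a double detach (exit or hot-unplug racing with close()). The struct here is a stand-in, not struct perf_event itself.

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02

struct example_event {
        unsigned int attach_state;      /* stands in for struct perf_event */
};

static void example_group_detach(struct example_event *event)
{
        if (!(event->attach_state & PERF_ATTACH_GROUP))
                return;                 /* already detached: nothing to do */
        event->attach_state &= ~PERF_ATTACH_GROUP;
        /* unlink from the sibling list / promote siblings here */
}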
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 4 ++ kernel/perf_event.c | 91 ++++++++++++++++++++++++++++++++++------------ 2 files changed, 71 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 490698590d6e..5d0266d94985 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -631,6 +631,9 @@ struct swevent_hlist { struct rcu_head rcu_head; }; +#define PERF_ATTACH_CONTEXT 0x01 +#define PERF_ATTACH_GROUP 0x02 + /** * struct perf_event - performance event kernel representation: */ @@ -646,6 +649,7 @@ struct perf_event { const struct pmu *pmu; enum perf_event_active_state state; + unsigned int attach_state; atomic64_t count; /* diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 848d49a043e9..10a1aee2309e 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -283,14 +283,15 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) static void list_add_event(struct perf_event *event, struct perf_event_context *ctx) { - struct perf_event *group_leader = event->group_leader; + WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); + event->attach_state |= PERF_ATTACH_CONTEXT; /* - * Depending on whether it is a standalone or sibling event, - * add it straight to the context's event list, or to the group - * leader's sibling list: + * If we're a stand alone event or group leader, we go to the context + * list, group events are kept attached to the group so that + * perf_group_detach can, at all times, locate all siblings. */ - if (group_leader == event) { + if (event->group_leader == event) { struct list_head *list; if (is_software_event(event)) @@ -298,13 +299,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) list = ctx_group_list(event, ctx); list_add_tail(&event->group_entry, list); - } else { - if (group_leader->group_flags & PERF_GROUP_SOFTWARE && - !is_software_event(event)) - group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; - - list_add_tail(&event->group_entry, &group_leader->sibling_list); - group_leader->nr_siblings++; } list_add_rcu(&event->event_entry, &ctx->event_list); @@ -313,6 +307,24 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) ctx->nr_stat++; } +static void perf_group_attach(struct perf_event *event) +{ + struct perf_event *group_leader = event->group_leader; + + WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP); + event->attach_state |= PERF_ATTACH_GROUP; + + if (group_leader == event) + return; + + if (group_leader->group_flags & PERF_GROUP_SOFTWARE && + !is_software_event(event)) + group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; + + list_add_tail(&event->group_entry, &group_leader->sibling_list); + group_leader->nr_siblings++; +} + /* * Remove a event from the lists for its context. * Must be called with ctx->mutex and ctx->lock held. @@ -320,17 +332,22 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) static void list_del_event(struct perf_event *event, struct perf_event_context *ctx) { - if (list_empty(&event->group_entry)) + /* + * We can have double detach due to exit/hot-unplug + close. 
+ */ + if (!(event->attach_state & PERF_ATTACH_CONTEXT)) return; + + event->attach_state &= ~PERF_ATTACH_CONTEXT; + ctx->nr_events--; if (event->attr.inherit_stat) ctx->nr_stat--; - list_del_init(&event->group_entry); list_del_rcu(&event->event_entry); - if (event->group_leader != event) - event->group_leader->nr_siblings--; + if (event->group_leader == event) + list_del_init(&event->group_entry); update_group_times(event); @@ -345,21 +362,39 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) event->state = PERF_EVENT_STATE_OFF; } -static void -perf_destroy_group(struct perf_event *event, struct perf_event_context *ctx) +static void perf_group_detach(struct perf_event *event) { struct perf_event *sibling, *tmp; + struct list_head *list = NULL; + + /* + * We can have double detach due to exit/hot-unplug + close. + */ + if (!(event->attach_state & PERF_ATTACH_GROUP)) + return; + + event->attach_state &= ~PERF_ATTACH_GROUP; + + /* + * If this is a sibling, remove it from its group. + */ + if (event->group_leader != event) { + list_del_init(&event->group_entry); + event->group_leader->nr_siblings--; + return; + } + + if (!list_empty(&event->group_entry)) + list = &event->group_entry; /* * If this was a group event with sibling events then * upgrade the siblings to singleton events by adding them - * to the context list directly: + * to whatever list we are on. */ list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { - struct list_head *list; - - list = ctx_group_list(event, ctx); - list_move_tail(&sibling->group_entry, list); + if (list) + list_move_tail(&sibling->group_entry, list); sibling->group_leader = sibling; /* Inherit group flags from the previous leader */ @@ -727,6 +762,7 @@ static void add_event_to_ctx(struct perf_event *event, struct perf_event_context *ctx) { list_add_event(event, ctx); + perf_group_attach(event); event->tstamp_enabled = ctx->time; event->tstamp_running = ctx->time; event->tstamp_stopped = ctx->time; @@ -1894,8 +1930,8 @@ int perf_event_release_kernel(struct perf_event *event) */ mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); raw_spin_lock_irq(&ctx->lock); + perf_group_detach(event); list_del_event(event, ctx); - perf_destroy_group(event, ctx); raw_spin_unlock_irq(&ctx->lock); mutex_unlock(&ctx->mutex); @@ -5127,6 +5163,12 @@ SYSCALL_DEFINE5(perf_event_open, list_add_tail(&event->owner_entry, ¤t->perf_event_list); mutex_unlock(¤t->perf_event_mutex); + /* + * Drop the reference on the group_event after placing the + * new event on the sibling_list. This ensures destruction + * of the group leader will find the pointer to itself in + * perf_group_detach(). + */ fput_light(group_file, fput_needed); fd_install(event_fd, event_file); return event_fd; @@ -5448,6 +5490,7 @@ static void perf_free_event(struct perf_event *event, fput(parent->filp); + perf_group_detach(event); list_del_event(event, ctx); free_event(event); } -- cgit v1.2.3 From 3771f0771154675d4a0ca780be2411f3cc357208 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 21 May 2010 12:31:09 +0200 Subject: perf_events, trace: Fix probe unregister race tracepoint_probe_unregister() does not synchronize against the probe callbacks, so do that explicitly. This properly serializes the callbacks and the free of the data used therein. Also, use this_cpu_ptr() where possible. 
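Illustrative sketch of the ordering this patch enforces; the probe, its data and the tracepoint name are hypothetical, while tracepoint_probe_unregister(), synchronize_sched() and free_percpu() are the interfaces actually used in the diff below.

#include <linux/tracepoint.h>
#include <linux/rcupdate.h>
#include <linux/percpu.h>

static void my_probe(void *data, long arg) { /* per-event work elided */ }
static void *my_data;
static void __percpu *my_buffers;

static void example_probe_teardown(void)
{
        tracepoint_probe_unregister("my_event", (void *)my_probe, my_data);

        /* probes run under the sched-RCU read side in __DO_TRACE() */
        synchronize_sched();

        /* only now is it safe to free what the callbacks dereference */
        free_percpu(my_buffers);
}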
Acked-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra LKML-Reference: <1274438476.1674.1702.camel@laptop> Signed-off-by: Ingo Molnar --- include/trace/ftrace.h | 2 +- kernel/trace/trace_event_perf.c | 10 ++++++++-- kernel/trace/trace_kprobe.c | 4 ++-- kernel/trace/trace_syscalls.c | 4 ++-- 4 files changed, 13 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 3d685d1f2a03..5a64905d7278 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -725,7 +725,7 @@ perf_trace_##call(void *__data, proto) \ \ { assign; } \ \ - head = per_cpu_ptr(event_call->perf_events, smp_processor_id());\ + head = this_cpu_ptr(event_call->perf_events); \ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ __count, &__regs, head); \ } diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index cb6f365016e4..49c7abf2ba5c 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -116,7 +116,7 @@ int perf_trace_enable(struct perf_event *p_event) if (WARN_ON_ONCE(!list)) return -EINVAL; - list = per_cpu_ptr(list, smp_processor_id()); + list = this_cpu_ptr(list); hlist_add_head_rcu(&p_event->hlist_entry, list); return 0; @@ -142,6 +142,12 @@ void perf_trace_destroy(struct perf_event *p_event) tp_event->class->perf_probe, tp_event); + /* + * Ensure our callback won't be called anymore. See + * tracepoint_probe_unregister() and __DO_TRACE(). + */ + synchronize_sched(); + free_percpu(tp_event->perf_events); tp_event->perf_events = NULL; @@ -169,7 +175,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, if (*rctxp < 0) return NULL; - raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id()); + raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]); /* zero the dead bytes from align to not leak stack to user */ memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64)); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index faf7cefd15da..f52b5f50299d 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1359,7 +1359,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); - head = per_cpu_ptr(call->perf_events, smp_processor_id()); + head = this_cpu_ptr(call->perf_events); perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); } @@ -1392,7 +1392,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); - head = per_cpu_ptr(call->perf_events, smp_processor_id()); + head = this_cpu_ptr(call->perf_events); perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); } diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index d2c859cec9ea..34e35804304b 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -519,7 +519,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - head = per_cpu_ptr(sys_data->enter_event->perf_events, smp_processor_id()); + head = this_cpu_ptr(sys_data->enter_event->perf_events); perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); } @@ -595,7 +595,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = 
syscall_get_return_value(current, regs); - head = per_cpu_ptr(sys_data->exit_event->perf_events, smp_processor_id()); + head = this_cpu_ptr(sys_data->exit_event->perf_events); perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); } -- cgit v1.2.3 From dcbe7bcfa32c5bc4f9bb6c75d4d41bb4db8c36fc Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 31 May 2010 13:35:36 +0200 Subject: ALSA: usb-audio: UAC2: clean up parsing of bmaControls Introduce two new static inline functions for a more readable parsing of UAC2 bmaControls. Signed-off-by: Daniel Mack Signed-off-by: Takashi Iwai --- include/linux/usb/audio-v2.h | 10 ++++++++++ sound/usb/mixer.c | 8 ++++---- 2 files changed, 14 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index 92f1d99f0f17..8f91be3599c6 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -18,6 +18,16 @@ /* v1.0 and v2.0 of this standard have many things in common. For the rest * of the definitions, please refer to audio.h */ +static inline bool uac2_control_is_readable(u32 bmControls, u8 control) +{ + return (bmControls >> (control * 2)) & 0x1; +} + +static inline bool uac2_control_is_writeable(u32 bmControls, u8 control) +{ + return (bmControls >> (control * 2)) & 0x2; +} + /* 4.7.2.1 Clock Source Descriptor */ struct uac_clock_source_descriptor { diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 03ce971e0027..43d6417b811e 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1188,9 +1188,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void for (j = 0; j < channels; j++) { unsigned int mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize); - if (mask & (1 << (i * 2))) { + if (uac2_control_is_readable(mask, i)) { ch_bits |= (1 << j); - if (~mask & (1 << ((i * 2) + 1))) + if (!uac2_control_is_writeable(mask, i)) ch_read_only |= (1 << j); } } @@ -1198,9 +1198,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void /* FIXME: the whole unit is read-only if any of the channels is marked read-only */ if (ch_bits & 1) /* the first channel must be set (for ease of programming) */ build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid, !!ch_read_only); - if (master_bits & (1 << i * 2)) + if (uac2_control_is_readable(master_bits, i)) build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, - ~master_bits & (1 << ((i * 2) + 1))); + !uac2_control_is_writeable(master_bits, i)); } } -- cgit v1.2.3 From 5dd360ebd8328affb22225141cece3a29403b965 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 31 May 2010 13:35:38 +0200 Subject: include/linux/usb/audio-v2.h: add more UAC2 details Also, remove the 'bmControl' field from uac_clock_selector_descriptor, which was at the wrong offset. This struct is currently unused. 
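Illustrative decode using the uac2_control_is_readable()/_writeable() helpers introduced two patches above: each control occupies a two-bit pair in bmaControls, bit 0 of the pair meaning readable and bit 1 meaning host-writeable. The value 0x0d used in the comment is a made-up example (pair 0 read-only, pair 1 read/write).

#include <linux/types.h>
#include <linux/usb/audio-v2.h>

static void example_decode(u32 bma)
{
        bool c0_rd = uac2_control_is_readable(bma, 0);
        bool c0_wr = uac2_control_is_writeable(bma, 0);
        bool c1_rd = uac2_control_is_readable(bma, 1);
        bool c1_wr = uac2_control_is_writeable(bma, 1);

        /* for bma == 0x0d: c0_rd=true, c0_wr=false, c1_rd=true, c1_wr=true */
        (void)c0_rd; (void)c0_wr; (void)c1_rd; (void)c1_wr;
}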
Signed-off-by: Daniel Mack Signed-off-by: Takashi Iwai --- include/linux/usb/audio-v2.h | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index 8f91be3599c6..383b94ba8c20 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -41,6 +41,13 @@ struct uac_clock_source_descriptor { __u8 iClockSource; } __attribute__((packed)); +/* bmAttribute fields */ +#define UAC_CLOCK_SOURCE_TYPE_EXT 0x0 +#define UAC_CLOCK_SOURCE_TYPE_INT_FIXED 0x1 +#define UAC_CLOCK_SOURCE_TYPE_INT_VAR 0x2 +#define UAC_CLOCK_SOURCE_TYPE_INT_PROG 0x3 +#define UAC_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 2) + /* 4.7.2.2 Clock Source Descriptor */ struct uac_clock_selector_descriptor { @@ -49,8 +56,20 @@ struct uac_clock_selector_descriptor { __u8 bDescriptorSubtype; __u8 bClockID; __u8 bNrInPins; - __u8 bmControls; __u8 baCSourceID[]; + /* bmControls, bAssocTerminal and iClockSource omitted */ +} __attribute__((packed)); + +/* 4.7.2.3 Clock Multiplier Descriptor */ + +struct uac_clock_multiplier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bClockID; + __u8 bCSourceID; + __u8 bmControls; + __u8 iClockMultiplier; } __attribute__((packed)); /* 4.7.2.4 Input terminal descriptor */ -- cgit v1.2.3 From 7176d37a28fa4ea7e32815007673f578cdcebf51 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 31 May 2010 13:35:39 +0200 Subject: ALSA: usb-audio: fix selector unit string index accessor This is another regression from the UAC2 code refactoring. Signed-off-by: Daniel Mack Signed-off-by: Takashi Iwai --- include/linux/usb/audio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index 5d646c388752..ed5cf92a3c0f 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -244,7 +244,7 @@ struct uac_selector_unit_descriptor { static inline __u8 uac_selector_unit_iSelector(struct uac_selector_unit_descriptor *desc) { __u8 *raw = (__u8 *) desc; - return raw[9 + desc->bLength - 1]; + return raw[desc->bLength - 1]; } /* 4.3.2.5 Feature Unit Descriptor */ -- cgit v1.2.3 From 65f25da44b51f55e3a74301c25f29263be2bf1ba Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 31 May 2010 13:35:41 +0200 Subject: ALSA: usb-audio: unify constants from specification Move more definitions from private enums to appropriate header files. 
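Illustrative use of the selectors and the UAC_CONTROL_BIT() macro this patch moves into <linux/usb/audio.h> (see the diff below); the bmaControls value is a caller-supplied UAC1 feature-unit bitmap, nothing here comes from a specific device.

#include <linux/types.h>
#include <linux/usb/audio.h>

/* UAC1 feature unit bmaControls: D0 = Mute, D1 = Volume, ... (1-based selectors) */
static bool example_has_mute(u32 bmaControls)
{
        return bmaControls & UAC_CONTROL_BIT(UAC_FU_MUTE);
}

static bool example_has_volume(u32 bmaControls)
{
        return bmaControls & UAC_CONTROL_BIT(UAC_FU_VOLUME);
}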
Signed-off-by: Daniel Mack Signed-off-by: Takashi Iwai --- include/linux/usb/audio.h | 88 +++++++++++++++++++++++++++++------------ sound/usb/mixer.c | 99 ++++++++++++++++------------------------------- sound/usb/mixer_maps.c | 4 +- 3 files changed, 98 insertions(+), 93 deletions(-) (limited to 'include') diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index ed5cf92a3c0f..c51200c715e5 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -47,6 +47,15 @@ #define UAC_FORMAT_TYPE 0x02 #define UAC_FORMAT_SPECIFIC 0x03 +/* A.7 Processing Unit Process Types */ +#define UAC_PROCESS_UNDEFINED 0x00 +#define UAC_PROCESS_UP_DOWNMIX 0x01 +#define UAC_PROCESS_DOLBY_PROLOGIC 0x02 +#define UAC_PROCESS_STEREO_EXTENDER 0x03 +#define UAC_PROCESS_REVERB 0x04 +#define UAC_PROCESS_CHORUS 0x05 +#define UAC_PROCESS_DYN_RANGE_COMP 0x06 + /* A.8 Audio Class-Specific Endpoint Descriptor Subtypes */ #define UAC_EP_GENERAL 0x01 @@ -73,6 +82,60 @@ #define UAC_GET_STAT 0xff +/* A.10 Control Selector Codes */ + +/* A.10.1 Terminal Control Selectors */ +#define UAC_TERM_COPY_PROTECT 0x01 + +/* A.10.2 Feature Unit Control Selectors */ +#define UAC_FU_MUTE 0x01 +#define UAC_FU_VOLUME 0x02 +#define UAC_FU_BASS 0x03 +#define UAC_FU_MID 0x04 +#define UAC_FU_TREBLE 0x05 +#define UAC_FU_GRAPHIC_EQUALIZER 0x06 +#define UAC_FU_AUTOMATIC_GAIN 0x07 +#define UAC_FU_DELAY 0x08 +#define UAC_FU_BASS_BOOST 0x09 +#define UAC_FU_LOUDNESS 0x0a + +#define UAC_CONTROL_BIT(CS) (1 << ((CS) - 1)) + +/* A.10.3.1 Up/Down-mix Processing Unit Controls Selectors */ +#define UAC_UD_ENABLE 0x01 +#define UAC_UD_MODE_SELECT 0x02 + +/* A.10.3.2 Dolby Prologic (tm) Processing Unit Controls Selectors */ +#define UAC_DP_ENABLE 0x01 +#define UAC_DP_MODE_SELECT 0x02 + +/* A.10.3.3 3D Stereo Extender Processing Unit Control Selectors */ +#define UAC_3D_ENABLE 0x01 +#define UAC_3D_SPACE 0x02 + +/* A.10.3.4 Reverberation Processing Unit Control Selectors */ +#define UAC_REVERB_ENABLE 0x01 +#define UAC_REVERB_LEVEL 0x02 +#define UAC_REVERB_TIME 0x03 +#define UAC_REVERB_FEEDBACK 0x04 + +/* A.10.3.5 Chorus Processing Unit Control Selectors */ +#define UAC_CHORUS_ENABLE 0x01 +#define UAC_CHORUS_LEVEL 0x02 +#define UAC_CHORUS_RATE 0x03 +#define UAC_CHORUS_DEPTH 0x04 + +/* A.10.3.6 Dynamic Range Compressor Unit Control Selectors */ +#define UAC_DCR_ENABLE 0x01 +#define UAC_DCR_RATE 0x02 +#define UAC_DCR_MAXAMPL 0x03 +#define UAC_DCR_THRESHOLD 0x04 +#define UAC_DCR_ATTACK_TIME 0x05 +#define UAC_DCR_RELEASE_TIME 0x06 + +/* A.10.4 Extension Unit Control Selectors */ +#define UAC_XU_ENABLE 0x01 + /* MIDI - A.1 MS Class-Specific Interface Descriptor Subtypes */ #define UAC_MS_HEADER 0x01 #define UAC_MIDI_IN_JACK 0x02 @@ -463,31 +526,6 @@ struct uac_iso_endpoint_descriptor { #define UAC_EP_CS_ATTR_PITCH_CONTROL 0x02 #define UAC_EP_CS_ATTR_FILL_MAX 0x80 -/* A.10.2 Feature Unit Control Selectors */ - -#define UAC_FU_CONTROL_UNDEFINED 0x00 -#define UAC_MUTE_CONTROL 0x01 -#define UAC_VOLUME_CONTROL 0x02 -#define UAC_BASS_CONTROL 0x03 -#define UAC_MID_CONTROL 0x04 -#define UAC_TREBLE_CONTROL 0x05 -#define UAC_GRAPHIC_EQUALIZER_CONTROL 0x06 -#define UAC_AUTOMATIC_GAIN_CONTROL 0x07 -#define UAC_DELAY_CONTROL 0x08 -#define UAC_BASS_BOOST_CONTROL 0x09 -#define UAC_LOUDNESS_CONTROL 0x0a - -#define UAC_FU_MUTE (1 << (UAC_MUTE_CONTROL - 1)) -#define UAC_FU_VOLUME (1 << (UAC_VOLUME_CONTROL - 1)) -#define UAC_FU_BASS (1 << (UAC_BASS_CONTROL - 1)) -#define UAC_FU_MID (1 << (UAC_MID_CONTROL - 1)) -#define UAC_FU_TREBLE (1 << (UAC_TREBLE_CONTROL - 1)) 
-#define UAC_FU_GRAPHIC_EQ (1 << (UAC_GRAPHIC_EQUALIZER_CONTROL - 1)) -#define UAC_FU_AUTO_GAIN (1 << (UAC_AUTOMATIC_GAIN_CONTROL - 1)) -#define UAC_FU_DELAY (1 << (UAC_DELAY_CONTROL - 1)) -#define UAC_FU_BASS_BOOST (1 << (UAC_BASS_BOOST_CONTROL - 1)) -#define UAC_FU_LOUDNESS (1 << (UAC_LOUDNESS_CONTROL - 1)) - /* status word format (3.7.1.1) */ #define UAC1_STATUS_TYPE_ORIG_MASK 0x0f diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 9149a84c716f..24428198ae1b 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -78,39 +78,6 @@ enum { USB_MIXER_U16, }; -enum { - USB_PROC_UPDOWN = 1, - USB_PROC_UPDOWN_SWITCH = 1, - USB_PROC_UPDOWN_MODE_SEL = 2, - - USB_PROC_PROLOGIC = 2, - USB_PROC_PROLOGIC_SWITCH = 1, - USB_PROC_PROLOGIC_MODE_SEL = 2, - - USB_PROC_3DENH = 3, - USB_PROC_3DENH_SWITCH = 1, - USB_PROC_3DENH_SPACE = 2, - - USB_PROC_REVERB = 4, - USB_PROC_REVERB_SWITCH = 1, - USB_PROC_REVERB_LEVEL = 2, - USB_PROC_REVERB_TIME = 3, - USB_PROC_REVERB_DELAY = 4, - - USB_PROC_CHORUS = 5, - USB_PROC_CHORUS_SWITCH = 1, - USB_PROC_CHORUS_LEVEL = 2, - USB_PROC_CHORUS_RATE = 3, - USB_PROC_CHORUS_DEPTH = 4, - - USB_PROC_DCR = 6, - USB_PROC_DCR_SWITCH = 1, - USB_PROC_DCR_RATIO = 2, - USB_PROC_DCR_MAX_AMP = 3, - USB_PROC_DCR_THRESHOLD = 4, - USB_PROC_DCR_ATTACK = 5, - USB_PROC_DCR_RELEASE = 6, -}; /*E-mu 0202(0404) eXtension Unit(XU) control*/ enum { @@ -980,7 +947,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc, control++; /* change from zero-based to 1-based value */ - if (control == UAC_GRAPHIC_EQUALIZER_CONTROL) { + if (control == UAC_FU_GRAPHIC_EQUALIZER) { /* FIXME: not supported yet */ return; } @@ -1036,8 +1003,8 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc, kctl->id.name, sizeof(kctl->id.name)); switch (control) { - case UAC_MUTE_CONTROL: - case UAC_VOLUME_CONTROL: + case UAC_FU_MUTE: + case UAC_FU_VOLUME: /* determine the control name. the rule is: * - if a name id is given in descriptor, use it. * - if the connected input can be determined, then use the name @@ -1064,9 +1031,9 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc, len = append_ctl_name(kctl, " Playback"); } } - append_ctl_name(kctl, control == UAC_MUTE_CONTROL ? + append_ctl_name(kctl, control == UAC_FU_MUTE ? 
" Switch" : " Volume"); - if (control == UAC_VOLUME_CONTROL) { + if (control == UAC_FU_VOLUME) { kctl->tlv.c = mixer_vol_tlv; kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ | @@ -1165,7 +1132,7 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void snd_printk(KERN_INFO "usbmixer: master volume quirk for PCM2702 chip\n"); /* disable non-functional volume control */ - master_bits &= ~UAC_FU_VOLUME; + master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME); break; } if (channels > 0) @@ -1410,51 +1377,51 @@ struct procunit_info { }; static struct procunit_value_info updown_proc_info[] = { - { USB_PROC_UPDOWN_SWITCH, "Switch", USB_MIXER_BOOLEAN }, - { USB_PROC_UPDOWN_MODE_SEL, "Mode Select", USB_MIXER_U8, 1 }, + { UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN }, + { UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static struct procunit_value_info prologic_proc_info[] = { - { USB_PROC_PROLOGIC_SWITCH, "Switch", USB_MIXER_BOOLEAN }, - { USB_PROC_PROLOGIC_MODE_SEL, "Mode Select", USB_MIXER_U8, 1 }, + { UAC_DP_ENABLE, "Switch", USB_MIXER_BOOLEAN }, + { UAC_DP_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static struct procunit_value_info threed_enh_proc_info[] = { - { USB_PROC_3DENH_SWITCH, "Switch", USB_MIXER_BOOLEAN }, - { USB_PROC_3DENH_SPACE, "Spaciousness", USB_MIXER_U8 }, + { UAC_3D_ENABLE, "Switch", USB_MIXER_BOOLEAN }, + { UAC_3D_SPACE, "Spaciousness", USB_MIXER_U8 }, { 0 } }; static struct procunit_value_info reverb_proc_info[] = { - { USB_PROC_REVERB_SWITCH, "Switch", USB_MIXER_BOOLEAN }, - { USB_PROC_REVERB_LEVEL, "Level", USB_MIXER_U8 }, - { USB_PROC_REVERB_TIME, "Time", USB_MIXER_U16 }, - { USB_PROC_REVERB_DELAY, "Delay", USB_MIXER_U8 }, + { UAC_REVERB_ENABLE, "Switch", USB_MIXER_BOOLEAN }, + { UAC_REVERB_LEVEL, "Level", USB_MIXER_U8 }, + { UAC_REVERB_TIME, "Time", USB_MIXER_U16 }, + { UAC_REVERB_FEEDBACK, "Feedback", USB_MIXER_U8 }, { 0 } }; static struct procunit_value_info chorus_proc_info[] = { - { USB_PROC_CHORUS_SWITCH, "Switch", USB_MIXER_BOOLEAN }, - { USB_PROC_CHORUS_LEVEL, "Level", USB_MIXER_U8 }, - { USB_PROC_CHORUS_RATE, "Rate", USB_MIXER_U16 }, - { USB_PROC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 }, + { UAC_CHORUS_ENABLE, "Switch", USB_MIXER_BOOLEAN }, + { UAC_CHORUS_LEVEL, "Level", USB_MIXER_U8 }, + { UAC_CHORUS_RATE, "Rate", USB_MIXER_U16 }, + { UAC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 }, { 0 } }; static struct procunit_value_info dcr_proc_info[] = { - { USB_PROC_DCR_SWITCH, "Switch", USB_MIXER_BOOLEAN }, - { USB_PROC_DCR_RATIO, "Ratio", USB_MIXER_U16 }, - { USB_PROC_DCR_MAX_AMP, "Max Amp", USB_MIXER_S16 }, - { USB_PROC_DCR_THRESHOLD, "Threshold", USB_MIXER_S16 }, - { USB_PROC_DCR_ATTACK, "Attack Time", USB_MIXER_U16 }, - { USB_PROC_DCR_RELEASE, "Release Time", USB_MIXER_U16 }, + { UAC_DCR_ENABLE, "Switch", USB_MIXER_BOOLEAN }, + { UAC_DCR_RATE, "Ratio", USB_MIXER_U16 }, + { UAC_DCR_MAXAMPL, "Max Amp", USB_MIXER_S16 }, + { UAC_DCR_THRESHOLD, "Threshold", USB_MIXER_S16 }, + { UAC_DCR_ATTACK_TIME, "Attack Time", USB_MIXER_U16 }, + { UAC_DCR_RELEASE_TIME, "Release Time", USB_MIXER_U16 }, { 0 } }; static struct procunit_info procunits[] = { - { USB_PROC_UPDOWN, "Up Down", updown_proc_info }, - { USB_PROC_PROLOGIC, "Dolby Prologic", prologic_proc_info }, - { USB_PROC_3DENH, "3D Stereo Extender", threed_enh_proc_info }, - { USB_PROC_REVERB, "Reverb", reverb_proc_info }, - { USB_PROC_CHORUS, "Chorus", chorus_proc_info }, - { USB_PROC_DCR, "DCR", dcr_proc_info }, + { UAC_PROCESS_UP_DOWNMIX, "Up Down", updown_proc_info }, + { 
UAC_PROCESS_DOLBY_PROLOGIC, "Dolby Prologic", prologic_proc_info }, + { UAC_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", threed_enh_proc_info }, + { UAC_PROCESS_REVERB, "Reverb", reverb_proc_info }, + { UAC_PROCESS_CHORUS, "Chorus", chorus_proc_info }, + { UAC_PROCESS_DYN_RANGE_COMP, "DCR", dcr_proc_info }, { 0 }, }; /* @@ -1542,7 +1509,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw cval->channels = 1; /* get min/max values */ - if (type == USB_PROC_UPDOWN && cval->control == USB_PROC_UPDOWN_MODE_SEL) { + if (type == UAC_PROCESS_UP_DOWNMIX && cval->control == UAC_UD_MODE_SELECT) { __u8 *control_spec = uac_processing_unit_specific(desc, state->mixer->protocol); /* FIXME: hard-coded */ cval->min = 1; diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index d93fc89beba8..f1324c423835 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -85,8 +85,8 @@ static struct usbmix_name_map extigy_map[] = { /* 16: MU (w/o controls) */ { 17, NULL, 1 }, /* DISABLED: PU-switch (any effect?) */ { 17, "Channel Routing", 2 }, /* PU: mode select */ - { 18, "Tone Control - Bass", UAC_BASS_CONTROL }, /* FU */ - { 18, "Tone Control - Treble", UAC_TREBLE_CONTROL }, /* FU */ + { 18, "Tone Control - Bass", UAC_FU_BASS }, /* FU */ + { 18, "Tone Control - Treble", UAC_FU_TREBLE }, /* FU */ { 18, "Master Playback" }, /* FU; others */ /* 19: OT speaker */ /* 20: OT headphone */ -- cgit v1.2.3 From fbf81762e385d3d45acad057b654d56972acf58c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 1 Jun 2010 09:09:06 +1000 Subject: drm/kms: disable/enable poll around switcheroo on/off Because we aren't in a suspend state the poll will still run when we have switcherooed a card off. Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc_helper.c | 28 ++++++++++++++++++++++------ drivers/gpu/drm/i915/i915_dma.c | 4 +++- drivers/gpu/drm/nouveau/nouveau_state.c | 3 +++ drivers/gpu/drm/radeon/radeon_device.c | 2 ++ include/drm/drm_crtc_helper.h | 3 +++ 5 files changed, 33 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 764401951041..9b2a54117c91 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -860,19 +860,24 @@ static void output_poll_execute(struct slow_work *work) } } -void drm_kms_helper_poll_init(struct drm_device *dev) +void drm_kms_helper_poll_disable(struct drm_device *dev) +{ + if (!dev->mode_config.poll_enabled) + return; + delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); +} +EXPORT_SYMBOL(drm_kms_helper_poll_disable); + +void drm_kms_helper_poll_enable(struct drm_device *dev) { - struct drm_connector *connector; bool poll = false; + struct drm_connector *connector; int ret; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->polled) poll = true; } - slow_work_register_user(THIS_MODULE); - delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, - &output_poll_ops); if (poll) { ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); @@ -880,11 +885,22 @@ void drm_kms_helper_poll_init(struct drm_device *dev) DRM_ERROR("delayed enqueue failed %d\n", ret); } } +EXPORT_SYMBOL(drm_kms_helper_poll_enable); + +void drm_kms_helper_poll_init(struct drm_device *dev) +{ + slow_work_register_user(THIS_MODULE); + delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, + &output_poll_ops); + dev->mode_config.poll_enabled = true; 
+ + drm_kms_helper_poll_enable(dev); +} EXPORT_SYMBOL(drm_kms_helper_poll_init); void drm_kms_helper_poll_fini(struct drm_device *dev) { - delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); + drm_kms_helper_poll_disable(dev); slow_work_unregister_user(THIS_MODULE); } EXPORT_SYMBOL(drm_kms_helper_poll_fini); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2a6b5de5ae5d..cc6e56a18edd 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1399,12 +1399,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ struct drm_device *dev = pci_get_drvdata(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { - printk(KERN_INFO "i915: switched off\n"); + printk(KERN_INFO "i915: switched on\n"); /* i915 resume handler doesn't set to D0 */ pci_set_power_state(dev->pdev, PCI_D0); i915_resume(dev); + drm_kms_helper_poll_enable(dev); } else { printk(KERN_ERR "i915: switched off\n"); + drm_kms_helper_poll_disable(dev); i915_suspend(dev, pmm); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index e632339c323e..5c468a4fef9a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -376,12 +376,15 @@ out_err: static void nouveau_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { + struct drm_device *dev = pci_get_drvdata(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); nouveau_pci_resume(pdev); + drm_kms_helper_poll_enable(dev); } else { printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); + drm_kms_helper_poll_disable(dev); nouveau_pci_suspend(pdev, pmm); } } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index fdc3fdf78acb..db338522191f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero /* don't suspend or resume card normally */ rdev->powered_down = false; radeon_resume_kms(dev); + drm_kms_helper_poll_enable(dev); } else { printk(KERN_INFO "radeon: switched off\n"); + drm_kms_helper_poll_disable(dev); radeon_suspend_kms(dev, pmm); /* don't suspend or resume card normally */ rdev->powered_down = true; diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index dc5873c21e45..1121f7799c6f 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -130,4 +130,7 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev); extern void drm_kms_helper_poll_init(struct drm_device *dev); extern void drm_kms_helper_poll_fini(struct drm_device *dev); extern void drm_helper_hpd_irq_event(struct drm_device *dev); + +extern void drm_kms_helper_poll_disable(struct drm_device *dev); +extern void drm_kms_helper_poll_enable(struct drm_device *dev); #endif -- cgit v1.2.3 From 3a21ceed7f373894a7c537b4dbbe484f36e7ae24 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sat, 8 May 2010 17:08:58 -0300 Subject: V4L/DVB: v4l2-mediabus.h: add two helper functions Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-mediabus.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include') diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index 
0dbe02ada259..d8e1608fee59 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -58,4 +58,24 @@ struct v4l2_mbus_framefmt { enum v4l2_colorspace colorspace; }; +static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, + const struct v4l2_mbus_framefmt *mbus_fmt) +{ + pix_fmt->width = mbus_fmt->width; + pix_fmt->height = mbus_fmt->height; + pix_fmt->field = mbus_fmt->field; + pix_fmt->colorspace = mbus_fmt->colorspace; +} + +static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, + const struct v4l2_pix_format *pix_fmt, + enum v4l2_mbus_pixelcode code) +{ + mbus_fmt->width = pix_fmt->width; + mbus_fmt->height = pix_fmt->height; + mbus_fmt->field = pix_fmt->field; + mbus_fmt->colorspace = pix_fmt->colorspace; + mbus_fmt->code = code; +} + #endif -- cgit v1.2.3 From 260bb38a21a19edc8a328f3ac8dd45c184d01216 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sat, 8 May 2010 19:02:11 -0300 Subject: V4L/DVB: v4l2-mediabus.h: added V4L2_MBUS_FMT_SGRBG8_1X8 Needed for mt9v011. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-mediabus.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index d8e1608fee59..865cda7cd611 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -40,6 +40,7 @@ enum v4l2_mbus_pixelcode { V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, + V4L2_MBUS_FMT_SGRBG8_1X8, }; /** -- cgit v1.2.3 From 3805f201934e5384f6e941222dc1968cb638a88c Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Sat, 8 May 2010 17:55:00 -0300 Subject: V4L/DVB: v4l2-subdev.h: fix enum_mbus_fmt prototype enum_mbus_fmt received an index argument that was defined as an int instead of an unsigned int. This is now fixed. This had the knock-on effect that the index argument in the callback get_formats in soc_camera.h also had to be changed to unsigned int. 
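Illustrative subdev callback using the corrected prototype (unsigned index), mirroring the pattern the diff below applies to mt9m111, ov772x and friends; the one-entry format table is a placeholder, though V4L2_MBUS_FMT_SGRBG8_1X8 itself is the code added by the preceding patch.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-mediabus.h>

static const enum v4l2_mbus_pixelcode example_codes[] = {
        V4L2_MBUS_FMT_SGRBG8_1X8,
};

static int example_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
                            enum v4l2_mbus_pixelcode *code)
{
        if (index >= ARRAY_SIZE(example_codes))
                return -EINVAL;

        *code = example_codes[index];
        return 0;
}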
Signed-off-by: Hans Verkuil Acked-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/ak881x.c | 2 +- drivers/media/video/mt9m001.c | 4 ++-- drivers/media/video/mt9m111.c | 4 ++-- drivers/media/video/mt9t031.c | 2 +- drivers/media/video/mt9t112.c | 4 ++-- drivers/media/video/mt9v022.c | 4 ++-- drivers/media/video/mx3_camera.c | 4 ++-- drivers/media/video/ov772x.c | 4 ++-- drivers/media/video/ov9640.c | 4 ++-- drivers/media/video/pxa_camera.c | 4 ++-- drivers/media/video/rj54n1cb0c.c | 4 ++-- drivers/media/video/sh_mobile_ceu_camera.c | 4 ++-- drivers/media/video/soc_camera.c | 3 ++- drivers/media/video/soc_camera_platform.c | 2 +- drivers/media/video/tw9910.c | 2 +- include/media/soc_camera.h | 2 +- include/media/v4l2-subdev.h | 2 +- 17 files changed, 28 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/drivers/media/video/ak881x.c b/drivers/media/video/ak881x.c index 35390d4717b9..3006a2c80f47 100644 --- a/drivers/media/video/ak881x.c +++ b/drivers/media/video/ak881x.c @@ -141,7 +141,7 @@ static int ak881x_s_mbus_fmt(struct v4l2_subdev *sd, return ak881x_try_g_mbus_fmt(sd, mf); } -static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, int index, +static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (index) diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c index b62c0bd3f8ea..992ab8cb89ae 100644 --- a/drivers/media/video/mt9m001.c +++ b/drivers/media/video/mt9m001.c @@ -701,13 +701,13 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = { #endif }; -static int mt9m001_enum_fmt(struct v4l2_subdev *sd, int index, +static int mt9m001_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { struct i2c_client *client = sd->priv; struct mt9m001 *mt9m001 = to_mt9m001(client); - if ((unsigned int)index >= mt9m001->num_fmts) + if (index >= mt9m001->num_fmts) return -EINVAL; *code = mt9m001->fmts[index].code; diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index d35f536f9fc3..bac0c5d4070c 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -999,10 +999,10 @@ static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = { #endif }; -static int mt9m111_enum_fmt(struct v4l2_subdev *sd, int index, +static int mt9m111_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { - if ((unsigned int)index >= ARRAY_SIZE(mt9m111_colour_fmts)) + if (index >= ARRAY_SIZE(mt9m111_colour_fmts)) return -EINVAL; *code = mt9m111_colour_fmts[index].code; diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c index 78b4e091d2d5..f26933a8fd8a 100644 --- a/drivers/media/video/mt9t031.c +++ b/drivers/media/video/mt9t031.c @@ -798,7 +798,7 @@ static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = { #endif }; -static int mt9t031_enum_fmt(struct v4l2_subdev *sd, int index, +static int mt9t031_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (index) diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c index 7438f8d775ba..abe5505b9f79 100644 --- a/drivers/media/video/mt9t112.c +++ b/drivers/media/video/mt9t112.c @@ -1017,10 +1017,10 @@ static int mt9t112_try_fmt(struct v4l2_subdev *sd, return 0; } -static int mt9t112_enum_fmt(struct v4l2_subdev *sd, int index, +static int mt9t112_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { - if ((unsigned 
int)index >= ARRAY_SIZE(mt9t112_cfmts)) + if (index >= ARRAY_SIZE(mt9t112_cfmts)) return -EINVAL; *code = mt9t112_cfmts[index].code; diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index e5bae4c9393b..c409d05d465a 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -838,13 +838,13 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = { #endif }; -static int mt9v022_enum_fmt(struct v4l2_subdev *sd, int index, +static int mt9v022_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { struct i2c_client *client = sd->priv; struct mt9v022 *mt9v022 = to_mt9v022(client); - if ((unsigned int)index >= mt9v022->num_fmts) + if (index >= mt9v022->num_fmts) return -EINVAL; *code = mt9v022->fmts[index].code; diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index d477e3058002..a9be14c23912 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -672,7 +672,7 @@ static bool mx3_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt) fmt->packing == SOC_MBUS_PACKING_EXTEND16); } -static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx, +static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); @@ -689,7 +689,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx, fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { dev_err(icd->dev.parent, - "Invalid format code #%d: %d\n", idx, code); + "Invalid format code #%u: %d\n", idx, code); return 0; } diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index 7f8ece30c77b..86b0186c360c 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -1092,10 +1092,10 @@ static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = { #endif }; -static int ov772x_enum_fmt(struct v4l2_subdev *sd, int index, +static int ov772x_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { - if ((unsigned int)index >= ARRAY_SIZE(ov772x_cfmts)) + if (index >= ARRAY_SIZE(ov772x_cfmts)) return -EINVAL; *code = ov772x_cfmts[index].code; diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c index 36599a65f548..58811c044c6a 100644 --- a/drivers/media/video/ov9640.c +++ b/drivers/media/video/ov9640.c @@ -614,10 +614,10 @@ static int ov9640_try_fmt(struct v4l2_subdev *sd, return 0; } -static int ov9640_enum_fmt(struct v4l2_subdev *sd, int index, +static int ov9640_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { - if ((unsigned int)index >= ARRAY_SIZE(ov9640_codes)) + if (index >= ARRAY_SIZE(ov9640_codes)) return -EINVAL; *code = ov9640_codes[index]; diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 7fe70e718656..fb242f6cfb1f 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -1247,7 +1247,7 @@ static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt) fmt->packing == SOC_MBUS_PACKING_EXTEND16); } -static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, +static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); @@ -1264,7 +1264,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx, fmt = 
soc_mbus_get_fmtdesc(code); if (!fmt) { - dev_err(dev, "Invalid format code #%d: %d\n", idx, code); + dev_err(dev, "Invalid format code #%u: %d\n", idx, code); return 0; } diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c index bbd9c11e2c5a..64eb05ce5603 100644 --- a/drivers/media/video/rj54n1cb0c.c +++ b/drivers/media/video/rj54n1cb0c.c @@ -481,10 +481,10 @@ static int reg_write_multiple(struct i2c_client *client, return 0; } -static int rj54n1_enum_fmt(struct v4l2_subdev *sd, int index, +static int rj54n1_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { - if ((unsigned int)index >= ARRAY_SIZE(rj54n1_colour_fmts)) + if (index >= ARRAY_SIZE(rj54n1_colour_fmts)) return -EINVAL; *code = rj54n1_colour_fmts[index].code; diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 4ac3b482fbb4..961bfa2fea97 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -878,7 +878,7 @@ static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt) static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect); -static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, +static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); @@ -897,7 +897,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx, fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { dev_err(icd->dev.parent, - "Invalid format code #%d: %d\n", idx, code); + "Invalid format code #%u: %d\n", idx, code); return -EINVAL; } diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index db1ca0e90d76..475757bfd7ba 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -200,7 +200,8 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - int i, fmts = 0, raw_fmts = 0, ret; + unsigned int i, fmts = 0, raw_fmts = 0; + int ret; enum v4l2_mbus_pixelcode code; while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, raw_fmts, &code)) diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c index 10b003a8be83..248c986f0989 100644 --- a/drivers/media/video/soc_camera_platform.c +++ b/drivers/media/video/soc_camera_platform.c @@ -71,7 +71,7 @@ static int soc_camera_platform_try_fmt(struct v4l2_subdev *sd, static struct v4l2_subdev_core_ops platform_subdev_core_ops; -static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, int index, +static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index 76be733eabfd..2f3b1942e027 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -903,7 +903,7 @@ static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = { #endif }; -static int tw9910_enum_fmt(struct v4l2_subdev *sd, int index, +static int tw9910_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (index) diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index c9a5bbfa6ab5..b8289c2f609b 100644 --- 
a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -66,7 +66,7 @@ struct soc_camera_host_ops { * .get_formats() fail, .put_formats() will not be called at all, the * failing .get_formats() must then clean up internally. */ - int (*get_formats)(struct soc_camera_device *, int, + int (*get_formats)(struct soc_camera_device *, unsigned int, struct soc_camera_format_xlate *); void (*put_formats)(struct soc_camera_device *); int (*cropcap)(struct soc_camera_device *, struct v4l2_cropcap *); diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index a88889355ae0..02c6f4d11ed3 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -246,7 +246,7 @@ struct v4l2_subdev_video_ops { struct v4l2_dv_timings *timings); int (*g_dv_timings)(struct v4l2_subdev *sd, struct v4l2_dv_timings *timings); - int (*enum_mbus_fmt)(struct v4l2_subdev *sd, int index, + int (*enum_mbus_fmt)(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code); int (*g_mbus_fmt)(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt); -- cgit v1.2.3 From 9e1d9e7bac5c2bafc3c0c51db88c15f3fbcec83f Mon Sep 17 00:00:00 2001 From: Herton Ronaldo Krzesinski Date: Sat, 8 May 2010 02:23:37 -0300 Subject: V4L/DVB: saa7134: add support for Avermedia M733A This change adds support for Avermedia M733A. The original version for linux 2.6.31 was sent to me from Avermedia, original author is unknown. I ported it to current kernels, expanded and fixed key code handling for RM-K6 remote control, and added an additional pci id also supported. [mchehab@redhat.com: make checkpatch.pl happier] Signed-off-by: Herton Ronaldo Krzesinski Signed-off-by: Mauro Carvalho Chehab --- Documentation/video4linux/CARDLIST.saa7134 | 5 +- drivers/media/IR/keymaps/Makefile | 1 + .../media/IR/keymaps/rc-avermedia-m733a-rm-k6.c | 95 ++++++++++++++++++++++ drivers/media/video/saa7134/saa7134-cards.c | 55 +++++++++++++ drivers/media/video/saa7134/saa7134-input.c | 7 ++ drivers/media/video/saa7134/saa7134.h | 1 + include/media/rc-map.h | 1 + 7 files changed, 163 insertions(+), 2 deletions(-) create mode 100644 drivers/media/IR/keymaps/rc-avermedia-m733a-rm-k6.c (limited to 'include') diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134 index 070f2576707e..1387a69ae3aa 100644 --- a/Documentation/video4linux/CARDLIST.saa7134 +++ b/Documentation/video4linux/CARDLIST.saa7134 @@ -176,5 +176,6 @@ 175 -> Leadtek Winfast DTV1000S [107d:6655] 176 -> Beholder BeholdTV 505 RDS [0000:5051] 177 -> Hawell HW-404M7 -179 -> Beholder BeholdTV H7 [5ace:7190] -180 -> Beholder BeholdTV A7 [5ace:7090] +178 -> Beholder BeholdTV H7 [5ace:7190] +179 -> Beholder BeholdTV A7 [5ace:7090] +180 -> Avermedia M733A [1461:4155,1461:4255] diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile index ec25258a955f..585f75c3f376 100644 --- a/drivers/media/IR/keymaps/Makefile +++ b/drivers/media/IR/keymaps/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-avermedia-cardbus.o \ rc-avermedia-dvbt.o \ rc-avermedia-m135a-rm-jx.o \ + rc-avermedia-m733a-rm-k6.o \ rc-avertv-303.o \ rc-behold.o \ rc-behold-columbus.o \ diff --git a/drivers/media/IR/keymaps/rc-avermedia-m733a-rm-k6.c b/drivers/media/IR/keymaps/rc-avermedia-m733a-rm-k6.c new file mode 100644 index 000000000000..cf8d45717cb3 --- /dev/null +++ b/drivers/media/IR/keymaps/rc-avermedia-m733a-rm-k6.c @@ -0,0 +1,95 @@ +/* avermedia-m733a-rm-k6.h - Keytable for avermedia_m733a_rm_k6 Remote 
Controller + * + * Copyright (c) 2010 by Herton Ronaldo Krzesinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +/* + * Avermedia M733A with IR model RM-K6 + * This is the stock remote controller used with Positivo machines with M733A + * Herton Ronaldo Krzesinski + */ + +static struct ir_scancode avermedia_m733a_rm_k6[] = { + { 0x0401, KEY_POWER2 }, + { 0x0406, KEY_MUTE }, + { 0x0408, KEY_MODE }, /* TV/FM */ + + { 0x0409, KEY_1 }, + { 0x040a, KEY_2 }, + { 0x040b, KEY_3 }, + { 0x040c, KEY_4 }, + { 0x040d, KEY_5 }, + { 0x040e, KEY_6 }, + { 0x040f, KEY_7 }, + { 0x0410, KEY_8 }, + { 0x0411, KEY_9 }, + { 0x044c, KEY_DOT }, /* '.' */ + { 0x0412, KEY_0 }, + { 0x0407, KEY_REFRESH }, /* Refresh/Reload */ + + { 0x0413, KEY_AUDIO }, + { 0x0440, KEY_SCREEN }, /* Full Screen toggle */ + { 0x0441, KEY_HOME }, + { 0x0442, KEY_BACK }, + { 0x0447, KEY_UP }, + { 0x0448, KEY_DOWN }, + { 0x0449, KEY_LEFT }, + { 0x044a, KEY_RIGHT }, + { 0x044b, KEY_OK }, + { 0x0404, KEY_VOLUMEUP }, + { 0x0405, KEY_VOLUMEDOWN }, + { 0x0402, KEY_CHANNELUP }, + { 0x0403, KEY_CHANNELDOWN }, + + { 0x0443, KEY_RED }, + { 0x0444, KEY_GREEN }, + { 0x0445, KEY_YELLOW }, + { 0x0446, KEY_BLUE }, + + { 0x0414, KEY_TEXT }, + { 0x0415, KEY_EPG }, + { 0x041a, KEY_TV2 }, /* PIP */ + { 0x041b, KEY_MHP }, /* Snapshot */ + + { 0x0417, KEY_RECORD }, + { 0x0416, KEY_PLAYPAUSE }, + { 0x0418, KEY_STOP }, + { 0x0419, KEY_PAUSE }, + + { 0x041f, KEY_PREVIOUS }, + { 0x041c, KEY_REWIND }, + { 0x041d, KEY_FORWARD }, + { 0x041e, KEY_NEXT }, +}; + +static struct rc_keymap avermedia_m733a_rm_k6_map = { + .map = { + .scan = avermedia_m733a_rm_k6, + .size = ARRAY_SIZE(avermedia_m733a_rm_k6), + .ir_type = IR_TYPE_NEC, + .name = RC_MAP_AVERMEDIA_M733A_RM_K6, + } +}; + +static int __init init_rc_map_avermedia_m733a_rm_k6(void) +{ + return ir_register_map(&avermedia_m733a_rm_k6_map); +} + +static void __exit exit_rc_map_avermedia_m733a_rm_k6(void) +{ + ir_unregister_map(&avermedia_m733a_rm_k6_map); +} + +module_init(init_rc_map_avermedia_m733a_rm_k6) +module_exit(exit_rc_map_avermedia_m733a_rm_k6) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mauro Carvalho Chehab "); diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index 72700d4e3941..07f6bb8ef9d9 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c @@ -3897,6 +3897,40 @@ struct saa7134_board saa7134_boards[] = { .gpio = 0x01, }, }, + [SAA7134_BOARD_AVERMEDIA_M733A] = { + .name = "Avermedia PCI M733A", + .audio_clock = 0x00187de7, + .tuner_type = TUNER_PHILIPS_TDA8290, + .radio_type = UNSET, + .tuner_addr = ADDR_UNSET, + .radio_addr = ADDR_UNSET, + .tuner_config = 0, + .gpiomask = 0x020200000, + .inputs = {{ + .name = name_tv, + .vmux = 1, + .amux = TV, + .tv = 1, + }, { + .name = name_comp1, + .vmux = 3, + .amux = LINE1, + }, { + .name = name_svideo, + .vmux = 8, + .amux = LINE1, + } }, + .radio = { + .name = name_radio, + .amux = TV, + .gpio = 0x00200000, + }, + .mute = { + .name = name_mute, + .amux = TV, + .gpio = 0x01, + }, + }, [SAA7134_BOARD_BEHOLD_401] = { /* Beholder Intl. Ltd. 
2008 */ /*Dmitry Belimov */ @@ -5820,6 +5854,18 @@ struct pci_device_id saa7134_pci_tbl[] = { .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf11d, .driver_data = SAA7134_BOARD_AVERMEDIA_M135A, + }, { + .vendor = PCI_VENDOR_ID_PHILIPS, + .device = PCI_DEVICE_ID_PHILIPS_SAA7133, + .subvendor = 0x1461, /* Avermedia Technologies Inc */ + .subdevice = 0x4155, + .driver_data = SAA7134_BOARD_AVERMEDIA_M733A, + }, { + .vendor = PCI_VENDOR_ID_PHILIPS, + .device = PCI_DEVICE_ID_PHILIPS_SAA7133, + .subvendor = 0x1461, /* Avermedia Technologies Inc */ + .subdevice = 0x4255, + .driver_data = SAA7134_BOARD_AVERMEDIA_M733A, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, @@ -6786,6 +6832,7 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev, switch (dev->board) { case SAA7134_BOARD_HAUPPAUGE_HVR1150: case SAA7134_BOARD_HAUPPAUGE_HVR1120: + case SAA7134_BOARD_AVERMEDIA_M733A: /* tda8290 + tda18271 */ ret = saa7134_tda8290_18271_callback(dev, command, arg); break; @@ -7087,6 +7134,14 @@ int saa7134_board_init1(struct saa7134_dev *dev) saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x0000C000, 0x0000C000); saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0000C000, 0x0000C000); break; + case SAA7134_BOARD_AVERMEDIA_M733A: + saa7134_set_gpio(dev, 1, 1); + msleep(10); + saa7134_set_gpio(dev, 1, 0); + msleep(10); + saa7134_set_gpio(dev, 1, 1); + dev->has_remote = SAA7134_REMOTE_GPIO; + break; } return 0; } diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c index e5565e2fd426..f9ed8048894d 100644 --- a/drivers/media/video/saa7134/saa7134-input.c +++ b/drivers/media/video/saa7134/saa7134-input.c @@ -663,6 +663,13 @@ int saa7134_input_init1(struct saa7134_dev *dev) mask_keycode = 0xffff; raw_decode = 1; break; + case SAA7134_BOARD_AVERMEDIA_M733A: + ir_codes = RC_MAP_AVERMEDIA_M733A_RM_K6; + mask_keydown = 0x0040000; + mask_keyup = 0x0040000; + mask_keycode = 0xffff; + raw_decode = 1; + break; case SAA7134_BOARD_AVERMEDIA_777: case SAA7134_BOARD_AVERMEDIA_A16AR: ir_codes = RC_MAP_AVERMEDIA; diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h index 3962534267be..756a1ca8833d 100644 --- a/drivers/media/video/saa7134/saa7134.h +++ b/drivers/media/video/saa7134/saa7134.h @@ -303,6 +303,7 @@ struct saa7134_format { #define SAA7134_BOARD_HAWELL_HW_404M7 177 #define SAA7134_BOARD_BEHOLD_H7 178 #define SAA7134_BOARD_BEHOLD_A7 179 +#define SAA7134_BOARD_AVERMEDIA_M733A 180 #define SAA7134_MAXBOARDS 32 #define SAA7134_INPUT_MAX 8 diff --git a/include/media/rc-map.h b/include/media/rc-map.h index 5833966a7100..6f1f9f76674e 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -56,6 +56,7 @@ void rc_map_init(void); #define RC_MAP_AVERMEDIA_CARDBUS "rc-avermedia-cardbus" #define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt" #define RC_MAP_AVERMEDIA_M135A_RM_JX "rc-avermedia-m135a-rm-jx" +#define RC_MAP_AVERMEDIA_M733A_RM_K6 "rc-avermedia-m733a-rm-k6" #define RC_MAP_AVERMEDIA "rc-avermedia" #define RC_MAP_AVERTV_303 "rc-avertv-303" #define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus" -- cgit v1.2.3 From 63fc31e8d0757574edb03ed73986be56e70a75c1 Mon Sep 17 00:00:00 2001 From: Herton Ronaldo Krzesinski Date: Mon, 10 May 2010 15:43:31 -0300 Subject: V4L/DVB: saa7134: add RM-K6 remote control support for Avermedia M135A This change adds support for one more remote control type for Avermedia M135A (model RM-K6), shipped with Positivo machines. 
Signed-off-by: Herton Ronaldo Krzesinski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/keymaps/Makefile | 2 +- .../media/IR/keymaps/rc-avermedia-m135a-rm-jx.c | 90 ------------- drivers/media/IR/keymaps/rc-avermedia-m135a.c | 147 +++++++++++++++++++++ drivers/media/video/saa7134/saa7134-input.c | 2 +- include/media/rc-map.h | 2 +- 5 files changed, 150 insertions(+), 93 deletions(-) delete mode 100644 drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c create mode 100644 drivers/media/IR/keymaps/rc-avermedia-m135a.c (limited to 'include') diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile index 585f75c3f376..aea649fbcf5a 100644 --- a/drivers/media/IR/keymaps/Makefile +++ b/drivers/media/IR/keymaps/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-avermedia.o \ rc-avermedia-cardbus.o \ rc-avermedia-dvbt.o \ - rc-avermedia-m135a-rm-jx.o \ + rc-avermedia-m135a.o \ rc-avermedia-m733a-rm-k6.o \ rc-avertv-303.o \ rc-behold.o \ diff --git a/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c b/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c deleted file mode 100644 index 101e7ea85941..000000000000 --- a/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c +++ /dev/null @@ -1,90 +0,0 @@ -/* avermedia-m135a-rm-jx.h - Keytable for avermedia_m135a_rm_jx Remote Controller - * - * keymap imported from ir-keymaps.c - * - * Copyright (c) 2010 by Mauro Carvalho Chehab - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include - -/* - * Avermedia M135A with IR model RM-JX - * The same codes exist on both Positivo (BR) and original IR - * Mauro Carvalho Chehab - */ - -static struct ir_scancode avermedia_m135a_rm_jx[] = { - { 0x0200, KEY_POWER2 }, - { 0x022e, KEY_DOT }, /* '.' 
*/ - { 0x0201, KEY_MODE }, /* TV/FM or SOURCE */ - - { 0x0205, KEY_1 }, - { 0x0206, KEY_2 }, - { 0x0207, KEY_3 }, - { 0x0209, KEY_4 }, - { 0x020a, KEY_5 }, - { 0x020b, KEY_6 }, - { 0x020d, KEY_7 }, - { 0x020e, KEY_8 }, - { 0x020f, KEY_9 }, - { 0x0211, KEY_0 }, - - { 0x0213, KEY_RIGHT }, /* -> or L */ - { 0x0212, KEY_LEFT }, /* <- or R */ - - { 0x0217, KEY_SLEEP }, /* Capturar Imagem or Snapshot */ - { 0x0210, KEY_SHUFFLE }, /* Amostra or 16 chan prev */ - - { 0x0303, KEY_CHANNELUP }, - { 0x0302, KEY_CHANNELDOWN }, - { 0x021f, KEY_VOLUMEUP }, - { 0x021e, KEY_VOLUMEDOWN }, - { 0x020c, KEY_ENTER }, /* Full Screen */ - - { 0x0214, KEY_MUTE }, - { 0x0208, KEY_AUDIO }, - - { 0x0203, KEY_TEXT }, /* Teletext */ - { 0x0204, KEY_EPG }, - { 0x022b, KEY_TV2 }, /* TV2 or PIP */ - - { 0x021d, KEY_RED }, - { 0x021c, KEY_YELLOW }, - { 0x0301, KEY_GREEN }, - { 0x0300, KEY_BLUE }, - - { 0x021a, KEY_PLAYPAUSE }, - { 0x0219, KEY_RECORD }, - { 0x0218, KEY_PLAY }, - { 0x021b, KEY_STOP }, -}; - -static struct rc_keymap avermedia_m135a_rm_jx_map = { - .map = { - .scan = avermedia_m135a_rm_jx, - .size = ARRAY_SIZE(avermedia_m135a_rm_jx), - .ir_type = IR_TYPE_NEC, - .name = RC_MAP_AVERMEDIA_M135A_RM_JX, - } -}; - -static int __init init_rc_map_avermedia_m135a_rm_jx(void) -{ - return ir_register_map(&avermedia_m135a_rm_jx_map); -} - -static void __exit exit_rc_map_avermedia_m135a_rm_jx(void) -{ - ir_unregister_map(&avermedia_m135a_rm_jx_map); -} - -module_init(init_rc_map_avermedia_m135a_rm_jx) -module_exit(exit_rc_map_avermedia_m135a_rm_jx) - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mauro Carvalho Chehab "); diff --git a/drivers/media/IR/keymaps/rc-avermedia-m135a.c b/drivers/media/IR/keymaps/rc-avermedia-m135a.c new file mode 100644 index 000000000000..e4471fb2ad1e --- /dev/null +++ b/drivers/media/IR/keymaps/rc-avermedia-m135a.c @@ -0,0 +1,147 @@ +/* avermedia-m135a.c - Keytable for Avermedia M135A Remote Controllers + * + * Copyright (c) 2010 by Mauro Carvalho Chehab + * Copyright (c) 2010 by Herton Ronaldo Krzesinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +/* + * Avermedia M135A with RM-JX and RM-K6 remote controls + * + * On Avermedia M135A with IR model RM-JX, the same codes exist on both + * Positivo (BR) and original IR, initial version and remote control codes + * added by Mauro Carvalho Chehab + * + * Positivo also ships Avermedia M135A with model RM-K6, extra control + * codes added by Herton Ronaldo Krzesinski + */ + +static struct ir_scancode avermedia_m135a[] = { + /* RM-JX */ + { 0x0200, KEY_POWER2 }, + { 0x022e, KEY_DOT }, /* '.' 
*/ + { 0x0201, KEY_MODE }, /* TV/FM or SOURCE */ + + { 0x0205, KEY_1 }, + { 0x0206, KEY_2 }, + { 0x0207, KEY_3 }, + { 0x0209, KEY_4 }, + { 0x020a, KEY_5 }, + { 0x020b, KEY_6 }, + { 0x020d, KEY_7 }, + { 0x020e, KEY_8 }, + { 0x020f, KEY_9 }, + { 0x0211, KEY_0 }, + + { 0x0213, KEY_RIGHT }, /* -> or L */ + { 0x0212, KEY_LEFT }, /* <- or R */ + + { 0x0217, KEY_SLEEP }, /* Capturar Imagem or Snapshot */ + { 0x0210, KEY_SHUFFLE }, /* Amostra or 16 chan prev */ + + { 0x0303, KEY_CHANNELUP }, + { 0x0302, KEY_CHANNELDOWN }, + { 0x021f, KEY_VOLUMEUP }, + { 0x021e, KEY_VOLUMEDOWN }, + { 0x020c, KEY_ENTER }, /* Full Screen */ + + { 0x0214, KEY_MUTE }, + { 0x0208, KEY_AUDIO }, + + { 0x0203, KEY_TEXT }, /* Teletext */ + { 0x0204, KEY_EPG }, + { 0x022b, KEY_TV2 }, /* TV2 or PIP */ + + { 0x021d, KEY_RED }, + { 0x021c, KEY_YELLOW }, + { 0x0301, KEY_GREEN }, + { 0x0300, KEY_BLUE }, + + { 0x021a, KEY_PLAYPAUSE }, + { 0x0219, KEY_RECORD }, + { 0x0218, KEY_PLAY }, + { 0x021b, KEY_STOP }, + + /* RM-K6 */ + { 0x0401, KEY_POWER2 }, + { 0x0406, KEY_MUTE }, + { 0x0408, KEY_MODE }, /* TV/FM */ + + { 0x0409, KEY_1 }, + { 0x040a, KEY_2 }, + { 0x040b, KEY_3 }, + { 0x040c, KEY_4 }, + { 0x040d, KEY_5 }, + { 0x040e, KEY_6 }, + { 0x040f, KEY_7 }, + { 0x0410, KEY_8 }, + { 0x0411, KEY_9 }, + { 0x044c, KEY_DOT }, /* '.' */ + { 0x0412, KEY_0 }, + { 0x0407, KEY_REFRESH }, /* Refresh/Reload */ + + { 0x0413, KEY_AUDIO }, + { 0x0440, KEY_SCREEN }, /* Full Screen toggle */ + { 0x0441, KEY_HOME }, + { 0x0442, KEY_BACK }, + { 0x0447, KEY_UP }, + { 0x0448, KEY_DOWN }, + { 0x0449, KEY_LEFT }, + { 0x044a, KEY_RIGHT }, + { 0x044b, KEY_OK }, + { 0x0404, KEY_VOLUMEUP }, + { 0x0405, KEY_VOLUMEDOWN }, + { 0x0402, KEY_CHANNELUP }, + { 0x0403, KEY_CHANNELDOWN }, + + { 0x0443, KEY_RED }, + { 0x0444, KEY_GREEN }, + { 0x0445, KEY_YELLOW }, + { 0x0446, KEY_BLUE }, + + { 0x0414, KEY_TEXT }, + { 0x0415, KEY_EPG }, + { 0x041a, KEY_TV2 }, /* PIP */ + { 0x041b, KEY_MHP }, /* Snapshot */ + + { 0x0417, KEY_RECORD }, + { 0x0416, KEY_PLAYPAUSE }, + { 0x0418, KEY_STOP }, + { 0x0419, KEY_PAUSE }, + + { 0x041f, KEY_PREVIOUS }, + { 0x041c, KEY_REWIND }, + { 0x041d, KEY_FORWARD }, + { 0x041e, KEY_NEXT }, +}; + +static struct rc_keymap avermedia_m135a_map = { + .map = { + .scan = avermedia_m135a, + .size = ARRAY_SIZE(avermedia_m135a), + .ir_type = IR_TYPE_NEC, + .name = RC_MAP_AVERMEDIA_M135A, + } +}; + +static int __init init_rc_map_avermedia_m135a(void) +{ + return ir_register_map(&avermedia_m135a_map); +} + +static void __exit exit_rc_map_avermedia_m135a(void) +{ + ir_unregister_map(&avermedia_m135a_map); +} + +module_init(init_rc_map_avermedia_m135a) +module_exit(exit_rc_map_avermedia_m135a) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mauro Carvalho Chehab "); diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c index f9ed8048894d..b36960dd7319 100644 --- a/drivers/media/video/saa7134/saa7134-input.c +++ b/drivers/media/video/saa7134/saa7134-input.c @@ -657,7 +657,7 @@ int saa7134_input_init1(struct saa7134_dev *dev) saa_setb(SAA7134_GPIO_GPSTATUS0, 0x4); break; case SAA7134_BOARD_AVERMEDIA_M135A: - ir_codes = RC_MAP_AVERMEDIA_M135A_RM_JX; + ir_codes = RC_MAP_AVERMEDIA_M135A; mask_keydown = 0x0040000; /* Enable GPIO18 line on both edges */ mask_keyup = 0x0040000; mask_keycode = 0xffff; diff --git a/include/media/rc-map.h b/include/media/rc-map.h index 6f1f9f76674e..c78e99a435b6 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -55,7 +55,7 @@ void rc_map_init(void); #define RC_MAP_AVERMEDIA_A16D 
"rc-avermedia-a16d" #define RC_MAP_AVERMEDIA_CARDBUS "rc-avermedia-cardbus" #define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt" -#define RC_MAP_AVERMEDIA_M135A_RM_JX "rc-avermedia-m135a-rm-jx" +#define RC_MAP_AVERMEDIA_M135A "rc-avermedia-m135a" #define RC_MAP_AVERMEDIA_M733A_RM_K6 "rc-avermedia-m733a-rm-k6" #define RC_MAP_AVERMEDIA "rc-avermedia" #define RC_MAP_AVERTV_303 "rc-avertv-303" -- cgit v1.2.3 From 02f726949f2be0967aa4871dd4e47d3967779b26 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 31 May 2010 18:13:25 +0200 Subject: sched, trace: Fix sched_switch() prev_state argument For CONFIG_PREEMPT=y kernels the sched_switch(.prev_state) argument isn't useful because we can get preempted with current->state != TASK_RUNNING without actually getting removed from the runqueue. Cure this by treating all preempted tasks as runnable from the tracer's point of view. Signed-off-by: Peter Zijlstra Cautiously-acked-by: Steven Rostedt LKML-Reference: <1275322715.27810.23323.camel@twins> Signed-off-by: Ingo Molnar --- include/trace/events/sched.h | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 4f733ecea46e..b9e1dd6c6208 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -115,6 +115,23 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, TP_PROTO(struct task_struct *p, int success), TP_ARGS(p, success)); +#ifdef CREATE_TRACE_POINTS +static inline long __trace_sched_switch_state(struct task_struct *p) +{ + long state = p->state; + +#ifdef CONFIG_PREEMPT + /* + * For all intents and purposes a preempted task is a running task. + */ + if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE) + state = TASK_RUNNING; +#endif + + return state; +} +#endif + /* * Tracepoint for task switches, performed by the scheduler: */ @@ -139,7 +156,7 @@ TRACE_EVENT(sched_switch, memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); __entry->prev_pid = prev->pid; __entry->prev_prio = prev->prio; - __entry->prev_state = prev->state; + __entry->prev_state = __trace_sched_switch_state(prev); memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); __entry->next_pid = next->pid; __entry->next_prio = next->prio; -- cgit v1.2.3 From 0e3c9a2284f5417f196e327c254d0b84c9ee8929 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 1 Jun 2010 11:08:43 +0200 Subject: Revert "writeback: fix WB_SYNC_NONE writeback from umount" This reverts commit e913fc825dc685a444cb4c1d0f9d32f372f59861. We are investigating a hang associated with the WB_SYNC_NONE changes, so revert them for now. Conflicts: fs/fs-writeback.c mm/page-writeback.c Signed-off-by: Jens Axboe --- fs/fs-writeback.c | 48 +++++++++++---------------------------------- fs/sync.c | 2 +- include/linux/backing-dev.h | 2 +- include/linux/writeback.h | 10 ---------- mm/page-writeback.c | 4 ++-- 5 files changed, 15 insertions(+), 51 deletions(-) (limited to 'include') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 6753912641b4..408a7877b79d 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -45,7 +45,6 @@ struct wb_writeback_args { unsigned int for_kupdate:1; unsigned int range_cyclic:1; unsigned int for_background:1; - unsigned int sb_pinned:1; }; /* @@ -231,11 +230,6 @@ static void bdi_sync_writeback(struct backing_dev_info *bdi, .sync_mode = WB_SYNC_ALL, .nr_pages = LONG_MAX, .range_cyclic = 0, - /* - * Setting sb_pinned is not necessary for WB_SYNC_ALL, but - * lets make it explicitly clear. 
- */ - .sb_pinned = 1, }; struct bdi_work work; @@ -251,23 +245,21 @@ static void bdi_sync_writeback(struct backing_dev_info *bdi, * @bdi: the backing device to write from * @sb: write inodes from this super_block * @nr_pages: the number of pages to write - * @sb_locked: caller already holds sb umount sem. * * Description: * This does WB_SYNC_NONE opportunistic writeback. The IO is only * started when this function returns, we make no guarentees on - * completion. Caller specifies whether sb umount sem is held already or not. + * completion. Caller need not hold sb s_umount semaphore. * */ void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, - long nr_pages, int sb_locked) + long nr_pages) { struct wb_writeback_args args = { .sb = sb, .sync_mode = WB_SYNC_NONE, .nr_pages = nr_pages, .range_cyclic = 1, - .sb_pinned = sb_locked, }; /* @@ -592,7 +584,7 @@ static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc, /* * Caller must already hold the ref for this */ - if (wbc->sync_mode == WB_SYNC_ALL || wbc->sb_pinned) { + if (wbc->sync_mode == WB_SYNC_ALL) { WARN_ON(!rwsem_is_locked(&sb->s_umount)); return SB_NOT_PINNED; } @@ -766,7 +758,6 @@ static long wb_writeback(struct bdi_writeback *wb, .for_kupdate = args->for_kupdate, .for_background = args->for_background, .range_cyclic = args->range_cyclic, - .sb_pinned = args->sb_pinned, }; unsigned long oldest_jif; long wrote = 0; @@ -1214,18 +1205,6 @@ static void wait_sb_inodes(struct super_block *sb) iput(old_inode); } -static void __writeback_inodes_sb(struct super_block *sb, int sb_locked) -{ - unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); - unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); - long nr_to_write; - - nr_to_write = nr_dirty + nr_unstable + - (inodes_stat.nr_inodes - inodes_stat.nr_unused); - - bdi_start_writeback(sb->s_bdi, sb, nr_to_write, sb_locked); -} - /** * writeback_inodes_sb - writeback dirty inodes from given super_block * @sb: the superblock @@ -1237,21 +1216,16 @@ static void __writeback_inodes_sb(struct super_block *sb, int sb_locked) */ void writeback_inodes_sb(struct super_block *sb) { - __writeback_inodes_sb(sb, 0); -} -EXPORT_SYMBOL(writeback_inodes_sb); + unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); + unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); + long nr_to_write; -/** - * writeback_inodes_sb_locked - writeback dirty inodes from given super_block - * @sb: the superblock - * - * Like writeback_inodes_sb(), except the caller already holds the - * sb umount sem. 
- */ -void writeback_inodes_sb_locked(struct super_block *sb) -{ - __writeback_inodes_sb(sb, 1); + nr_to_write = nr_dirty + nr_unstable + + (inodes_stat.nr_inodes - inodes_stat.nr_unused); + + bdi_start_writeback(sb->s_bdi, sb, nr_to_write); } +EXPORT_SYMBOL(writeback_inodes_sb); /** * writeback_inodes_sb_if_idle - start writeback if none underway diff --git a/fs/sync.c b/fs/sync.c index e8cbd415e50a..5a537ccd2e85 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -42,7 +42,7 @@ static int __sync_filesystem(struct super_block *sb, int wait) if (wait) sync_inodes_sb(sb); else - writeback_inodes_sb_locked(sb); + writeback_inodes_sb(sb); if (sb->s_op->sync_fs) sb->s_op->sync_fs(sb, wait); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index e6e0cb5437e6..aee5f6ce166e 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -106,7 +106,7 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); void bdi_unregister(struct backing_dev_info *bdi); int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, - long nr_pages, int sb_locked); + long nr_pages); int bdi_writeback_task(struct bdi_writeback *wb); int bdi_has_dirty_io(struct backing_dev_info *bdi); void bdi_arm_supers_timer(void); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index cc97d6caf2b3..f64134653a8c 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -65,15 +65,6 @@ struct writeback_control { * so we use a single control to update them */ unsigned no_nrwrite_index_update:1; - - /* - * For WB_SYNC_ALL, the sb must always be pinned. For WB_SYNC_NONE, - * the writeback code will pin the sb for the caller. However, - * for eg umount, the caller does WB_SYNC_NONE but already has - * the sb pinned. If the below is set, caller already has the - * sb pinned. 
- */ - unsigned sb_pinned:1; }; /* @@ -82,7 +73,6 @@ struct writeback_control { struct bdi_writeback; int inode_wait(void *); void writeback_inodes_sb(struct super_block *); -void writeback_inodes_sb_locked(struct super_block *); int writeback_inodes_sb_if_idle(struct super_block *); void sync_inodes_sb(struct super_block *); void writeback_inodes_wbc(struct writeback_control *wbc); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index b289310e2c89..5fa63bdf52e4 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping, (!laptop_mode && ((global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS)) > background_thresh))) - bdi_start_writeback(bdi, NULL, 0, 0); + bdi_start_writeback(bdi, NULL, 0); } void set_page_dirty_balance(struct page *page, int page_mkwrite) @@ -707,7 +707,7 @@ void laptop_mode_timer_fn(unsigned long data) */ if (bdi_has_dirty_io(&q->backing_dev_info)) - bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages, 0); + bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages); } /* -- cgit v1.2.3 From 099c5c310e9744bd0654881bb55c137051228e56 Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Thu, 27 May 2010 13:46:35 +0200 Subject: Preparing 8.3.8rc2 Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg Signed-off-by: Jens Axboe --- include/linux/drbd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 68530521ad00..30da4ae48972 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -53,7 +53,7 @@ extern const char *drbd_buildtag(void); -#define REL_VERSION "8.3.8rc1" +#define REL_VERSION "8.3.8rc2" #define API_VERSION 88 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 94 -- cgit v1.2.3 From 28f4197e5d4707311febeec8a0eb97cb5fd93c97 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 1 Jun 2010 12:23:18 +0200 Subject: block: disable preemption before using sched_clock() Commit 9195291e5f05e01d67f9a09c756b8aca8f009089 added calls to sched_clock() from preemptible code. sched_clock() is both the wrong interface AND cannot be called without preempt disabled. Apply a temporary fix to get rid of the warnings, a real patch is in the works. Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8b7f5e0914ad..09a840264d6f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1211,14 +1211,23 @@ struct work_struct; int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); #ifdef CONFIG_BLK_CGROUP +/* + * This should not be using sched_clock(). A real patch is in progress + * to fix this up, until that is in place we need to disable preemption + * around sched_clock() in this function and set_io_start_time_ns(). 
+ */ static inline void set_start_time_ns(struct request *req) { + preempt_disable(); req->start_time_ns = sched_clock(); + preempt_enable(); } static inline void set_io_start_time_ns(struct request *req) { + preempt_disable(); req->io_start_time_ns = sched_clock(); + preempt_enable(); } static inline uint64_t rq_start_time_ns(struct request *req) -- cgit v1.2.3 From e3a815fcd38043b8f1bb526123d8ab6ae01deb77 Mon Sep 17 00:00:00 2001 From: Zou Nan hai Date: Mon, 31 May 2010 13:58:47 +0800 Subject: drm/i915: add HAS_BSD check to i915_getparam This will let userland only try to use the new media decode functionality when the appropriate kernel is present. Signed-off-by: Zou Nan hai Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ include/drm/i915_drm.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6577798d3441..84ce95602f00 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -744,6 +744,9 @@ static int i915_getparam(struct drm_device *dev, void *data, /* depends on GEM */ value = dev_priv->has_gem; break; + case I915_PARAM_HAS_BSD: + value = HAS_BSD(dev); + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index e9168704cabe..7f0028e1010b 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -275,6 +275,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_OVERLAY 7 #define I915_PARAM_HAS_PAGEFLIPPING 8 #define I915_PARAM_HAS_EXECBUF2 9 +#define I915_PARAM_HAS_BSD 10 typedef struct drm_i915_getparam { int param; -- cgit v1.2.3 From 72ec24bd7725545bc149d80cbd21a7578d9aa206 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 15 May 2010 20:09:32 +0200 Subject: SCSI: implement sd_unlock_native_capacity() Implement sd_unlock_native_capacity() method which calls into hostt->unlock_native_capacity() if implemented. This will be invoked by block layer if partitions extend beyond the end of the device and can be used to implement, for example, on-demand ATA host protected area unlocking. Signed-off-by: Tejun Heo Cc: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/scsi/sd.c | 22 ++++++++++++++++++++++ include/scsi/scsi_host.h | 8 ++++++++ 2 files changed, 30 insertions(+) (limited to 'include') diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 829cc37abc41..8802e48bc063 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -97,6 +97,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); #endif static int sd_revalidate_disk(struct gendisk *); +static void sd_unlock_native_capacity(struct gendisk *disk); static int sd_probe(struct device *); static int sd_remove(struct device *); static void sd_shutdown(struct device *); @@ -1101,6 +1102,7 @@ static const struct block_device_operations sd_fops = { #endif .media_changed = sd_media_changed, .revalidate_disk = sd_revalidate_disk, + .unlock_native_capacity = sd_unlock_native_capacity, }; static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) @@ -2120,6 +2122,26 @@ static int sd_revalidate_disk(struct gendisk *disk) return 0; } +/** + * sd_unlock_native_capacity - unlock native capacity + * @disk: struct gendisk to set capacity for + * + * Block layer calls this function if it detects that partitions + * on @disk reach beyond the end of the device. If the SCSI host + * implements ->unlock_native_capacity() method, it's invoked to + * give it a chance to adjust the device capacity. 
+ * + * CONTEXT: + * Defined by block layer. Might sleep. + */ +static void sd_unlock_native_capacity(struct gendisk *disk) +{ + struct scsi_device *sdev = scsi_disk(disk)->device; + + if (sdev->host->hostt->unlock_native_capacity) + sdev->host->hostt->unlock_native_capacity(sdev); +} + /** * sd_format_disk_name - format disk name * @prefix: name prefix - ie. "sd" for SCSI disks diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index c50a97fc76f9..b7bdecb7b76e 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -326,6 +326,14 @@ struct scsi_host_template { int (* bios_param)(struct scsi_device *, struct block_device *, sector_t, int []); + /* + * This function is called when one or more partitions on the + * device reach beyond the end of the device. + * + * Status: OPTIONAL + */ + void (*unlock_native_capacity)(struct scsi_device *); + /* * Can be used to export driver statistics and other infos to the * world outside the kernel ie. userspace and it also provides an -- cgit v1.2.3 From d8d9129ea28e2177749627c82962feb26e8d11e9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 15 May 2010 20:09:34 +0200 Subject: libata: implement on-demand HPA unlocking Implement ata_scsi_unlock_native_capacity() which will be called through SCSI layer when block layer notices that partitions on a device extend beyond the end of the device. It requests EH to unlock HPA, waits for completion and returns the current device capacity. This allows libata to unlock HPA on demand instead of having to decide whether to unlock upfront. Unlocking on demand is safer than unlocking by upfront because some BIOSes write private data to the area beyond HPA limit. This was suggested by Ben Hutchings. Signed-off-by: Tejun Heo Suggested-by: Ben Hutchings Signed-off-by: Jeff Garzik --- drivers/ata/libata-core.c | 1 + drivers/ata/libata-scsi.c | 29 +++++++++++++++++++++++++++++ include/linux/libata.h | 2 ++ 3 files changed, 32 insertions(+) (limited to 'include') diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1e5d0a36a0a4..ddf8e4862787 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -6668,6 +6668,7 @@ EXPORT_SYMBOL_GPL(ata_dummy_port_info); EXPORT_SYMBOL_GPL(ata_link_next); EXPORT_SYMBOL_GPL(ata_dev_next); EXPORT_SYMBOL_GPL(ata_std_bios_param); +EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); EXPORT_SYMBOL_GPL(ata_host_init); EXPORT_SYMBOL_GPL(ata_host_alloc); EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index cfa9dd3d7253..a54273d2c3c6 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -414,6 +414,35 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, return 0; } +/** + * ata_scsi_unlock_native_capacity - unlock native capacity + * @sdev: SCSI device to adjust device capacity for + * + * This function is called if a partition on @sdev extends beyond + * the end of the device. It requests EH to unlock HPA. + * + * LOCKING: + * Defined by the SCSI layer. Might sleep. 
+ */ +void ata_scsi_unlock_native_capacity(struct scsi_device *sdev) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *dev; + unsigned long flags; + + spin_lock_irqsave(ap->lock, flags); + + dev = ata_scsi_find_dev(ap, sdev); + if (dev && dev->n_sectors < dev->n_native_sectors) { + dev->flags |= ATA_DFLAG_UNLOCK_HPA; + dev->link->eh_info.action |= ATA_EH_RESET; + ata_port_schedule_eh(ap); + } + + spin_unlock_irqrestore(ap->lock, flags); + ata_port_wait_eh(ap); +} + /** * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl * @ap: target port diff --git a/include/linux/libata.h b/include/linux/libata.h index 3bad2701bfa6..b85f3ff34d7d 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1023,6 +1023,7 @@ extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, extern int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]); +extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); extern int ata_scsi_slave_config(struct scsi_device *sdev); extern void ata_scsi_slave_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, @@ -1174,6 +1175,7 @@ extern struct device_attribute *ata_common_sdev_attrs[]; .slave_configure = ata_scsi_slave_config, \ .slave_destroy = ata_scsi_slave_destroy, \ .bios_param = ata_std_bios_param, \ + .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ .sdev_attrs = ata_common_sdev_attrs #define ATA_NCQ_SHT(drv_name) \ -- cgit v1.2.3 From c0db9cbc73338d8e2987a19a02388d67aeec0bfe Mon Sep 17 00:00:00 2001 From: Tiago Vignatti Date: Mon, 24 May 2010 18:24:31 +0300 Subject: vgaarb: use MIT license Signed-off-by: Tiago Vignatti Cc: Henry Zhao Signed-off-by: Dave Airlie --- drivers/gpu/vga/vgaarb.c | 26 +++++++++++++++++++++++--- include/linux/vgaarb.h | 21 +++++++++++++++++++++ 2 files changed, 44 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 290b0ccea63d..b87569e96b16 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -1,12 +1,32 @@ /* - * vgaarb.c + * vgaarb.c: Implements the VGA arbitration. For details refer to + * Documentation/vgaarbiter.txt + * * * (C) Copyright 2005 Benjamin Herrenschmidt * (C) Copyright 2007 Paulo R. Zanoni * (C) Copyright 2007, 2009 Tiago Vignatti * - * Implements the VGA arbitration. For details refer to - * Documentation/vgaarbiter.txt + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS + * IN THE SOFTWARE. + * */ #include diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 2dfaa293ae8c..c9a975976995 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -5,6 +5,27 @@ * (C) Copyright 2005 Benjamin Herrenschmidt * (C) Copyright 2007 Paulo R. Zanoni * (C) Copyright 2007, 2009 Tiago Vignatti + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS + * IN THE SOFTWARE. + * */ #ifndef LINUX_VGA_H -- cgit v1.2.3 From d8bd19d2aff95e52c7f356cc2fc722584a656065 Mon Sep 17 00:00:00 2001 From: Jakob Bornecrantz Date: Tue, 1 Jun 2010 11:54:20 +0200 Subject: drm/vmwgfx: Allow userspace to change default layout. Bump minor. The host may change the layout and, since the change is communicated to the master, the master needs a way to communicate the change to the kernel driver. The minor version number is bumped to advertize the availability of this feature. 
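As a usage sketch (not part of this patch), a hypothetical master client could push a two-output layout through the new ioctl roughly as follows; it assumes <sys/ioctl.h> plus the vmwgfx headers, and that user space builds DRM_IOCTL_VMW_UPDATE_LAYOUT from DRM_VMW_UPDATE_LAYOUT the same way vmwgfx_drv.c does below. The rectangle sizes and the set_layout() helper are made up:

/* Hypothetical helper: describe two side-by-side outputs; fd must be DRM master. */
static int set_layout(int fd)
{
	struct drm_vmw_rect rects[2] = {
		{    0, 0, 1280, 1024 },	/* first output */
		{ 1280, 0, 1024,  768 },	/* second output, to the right */
	};
	struct drm_vmw_update_layout_arg arg = {
		.num_outputs = 2,
		.rects = (unsigned long)rects,	/* user pointer carried in a u64 */
	};

	/* Kernels reporting minor version < 2 do not know this ioctl. */
	return ioctl(fd, DRM_IOCTL_VMW_UPDATE_LAYOUT, &arg);
}

Passing num_outputs == 0 instead falls back to a single 800x600 rectangle, as vmw_kms_update_layout_ioctl() in the patch below shows.
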
Signed-off-by: Jakob Bornecrantz Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7 +++- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 +- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 45 ++++++++++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4 +- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 83 ++++++++++++++++++++++++++++++++++--- include/drm/vmwgfx_drm.h | 26 ++++++++++++ 6 files changed, 160 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7597323d5a5a..b793c8c9acb3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -88,6 +88,9 @@ #define DRM_IOCTL_VMW_FENCE_WAIT \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ struct drm_vmw_fence_wait_arg) +#define DRM_IOCTL_VMW_UPDATE_LAYOUT \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ + struct drm_vmw_update_layout_arg) /** @@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, - DRM_AUTH | DRM_UNLOCKED) + DRM_AUTH | DRM_UNLOCKED), + VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, + DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) }; static struct pci_device_id vmw_pci_id_list[] = { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index e577af5bb3d6..eaad52095339 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -41,7 +41,7 @@ #define VMWGFX_DRIVER_DATE "20100209" #define VMWGFX_DRIVER_MAJOR 1 -#define VMWGFX_DRIVER_MINOR 1 +#define VMWGFX_DRIVER_MINOR 2 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) @@ -509,6 +509,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, void vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned width, unsigned height, unsigned pitch, unsigned bbp, unsigned depth); +int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /** * Overlay control - vmwgfx_overlay.c diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 648ba044f6f8..f1d626112415 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -938,3 +938,48 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv) return 0; } + +int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_vmw_update_layout_arg *arg = + (struct drm_vmw_update_layout_arg *)data; + struct vmw_master *vmaster = vmw_master(file_priv->master); + void __user *user_rects; + struct drm_vmw_rect *rects; + unsigned rects_size; + int ret; + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + + if (!arg->num_outputs) { + struct drm_vmw_rect def_rect = {0, 0, 800, 600}; + vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect); + goto out_unlock; + } + + rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); + rects = kzalloc(rects_size, GFP_KERNEL); + if (unlikely(!rects)) { + ret = -ENOMEM; + goto out_unlock; + } + + user_rects = (void __user *)(unsigned long)arg->rects; + ret = copy_from_user(rects, user_rects, rects_size); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to get rects.\n"); 
+ goto out_free; + } + + vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects); + +out_free: + kfree(rects); +out_unlock: + ttm_read_unlock(&vmaster->lock); + return ret; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 8b95249f0531..8a398a0339b6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); /* - * Legacy display unit functions - vmwgfx_ldu.h + * Legacy display unit functions - vmwgfx_ldu.c */ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); +int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, + struct drm_vmw_rect *rects); #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index f7094dde18f9..cfaf690a5b2f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -49,6 +49,11 @@ struct vmw_legacy_display { struct vmw_legacy_display_unit { struct vmw_display_unit base; + unsigned pref_width; + unsigned pref_height; + bool pref_active; + struct drm_display_mode *pref_mode; + struct list_head active; }; @@ -332,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector) static enum drm_connector_status vmw_ldu_connector_detect(struct drm_connector *connector) { - /* XXX vmwctrl should control connection status */ - if (vmw_connector_to_ldu(connector)->base.unit == 0) + if (vmw_connector_to_ldu(connector)->pref_active) return connector_status_connected; return connector_status_disconnected; } @@ -344,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { 752, 800, 0, 480, 489, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 800x600@60Hz */ - { DRM_MODE("800x600", - DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, - 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, - 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, + 968, 1056, 0, 600, 601, 605, 628, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@60Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, @@ -419,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = { static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, uint32_t max_width, uint32_t max_height) { + struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); struct drm_device *dev = connector->dev; struct drm_display_mode *mode = NULL; + struct drm_display_mode prefmode = { DRM_MODE("preferred", + DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) + }; int i; + /* Add preferred mode */ + { + mode = drm_mode_duplicate(dev, &prefmode); + if (!mode) + return 0; + mode->hdisplay = ldu->pref_width; + mode->vdisplay = ldu->pref_height; + mode->vrefresh = drm_mode_vrefresh(mode); + drm_mode_probed_add(connector, mode); + + if (ldu->pref_mode) { + list_del_init(&ldu->pref_mode->head); + drm_mode_destroy(dev, ldu->pref_mode); + } + + ldu->pref_mode = mode; + } + for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { if (vmw_ldu_connector_builtin[i].hdisplay > max_width || vmw_ldu_connector_builtin[i].vdisplay > 
max_height) @@ -482,6 +509,11 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) INIT_LIST_HEAD(&ldu->active); + ldu->pref_active = (unit == 0); + ldu->pref_width = 800; + ldu->pref_height = 600; + ldu->pref_mode = NULL; + drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, DRM_MODE_CONNECTOR_LVDS); connector->status = vmw_ldu_connector_detect(connector); @@ -546,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) return 0; } + +int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num, + struct drm_vmw_rect *rects) +{ + struct drm_device *dev = dev_priv->dev; + struct vmw_legacy_display_unit *ldu; + struct drm_connector *con; + int i; + + mutex_lock(&dev->mode_config.mutex); + +#if 0 + DRM_INFO("%s: new layout ", __func__); + for (i = 0; i < (int)num; i++) + DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, + rects[i].w, rects[i].h); + DRM_INFO("\n"); +#else + (void)i; +#endif + + list_for_each_entry(con, &dev->mode_config.connector_list, head) { + ldu = vmw_connector_to_ldu(con); + if (num > ldu->base.unit) { + ldu->pref_width = rects[ldu->base.unit].w; + ldu->pref_height = rects[ldu->base.unit].h; + ldu->pref_active = true; + } else { + ldu->pref_width = 800; + ldu->pref_height = 600; + ldu->pref_active = false; + } + con->status = vmw_ldu_connector_detect(con); + } + + mutex_unlock(&dev->mode_config.mutex); + + return 0; +} diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h index c7645f480d12..4d0842391edc 100644 --- a/include/drm/vmwgfx_drm.h +++ b/include/drm/vmwgfx_drm.h @@ -50,6 +50,8 @@ #define DRM_VMW_EXECBUF 12 #define DRM_VMW_FIFO_DEBUG 13 #define DRM_VMW_FENCE_WAIT 14 +/* guarded by minor version >= 2 */ +#define DRM_VMW_UPDATE_LAYOUT 15 /*************************************************************************/ @@ -585,4 +587,28 @@ struct drm_vmw_stream_arg { * sure that the stream has been stopped. */ +/*************************************************************************/ +/** + * DRM_VMW_UPDATE_LAYOUT - Update layout + * + * Updates the prefered modes and connection status for connectors. The + * command conisits of one drm_vmw_update_layout_arg pointing out a array + * of num_outputs drm_vmw_rect's. + */ + +/** + * struct drm_vmw_update_layout_arg + * + * @num_outputs: number of active + * @rects: pointer to array of drm_vmw_rect + * + * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. + */ + +struct drm_vmw_update_layout_arg { + uint32_t num_outputs; + uint32_t pad64; + uint64_t rects; +}; + #endif -- cgit v1.2.3 From ff9da691c0498ff81fdd014e7a0731dab2337dac Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 3 Jun 2010 14:54:39 +0200 Subject: pipe: change /proc/sys/fs/pipe-max-pages to byte sized interface This changes the interface to be based on bytes instead. The API matches that of F_SETPIPE_SZ in that it rounds up the passed in size so that the resulting page array is a power-of-2 in size. The proc file is renamed to /proc/sys/fs/pipe-max-size to reflect this change. Signed-off-by: Jens Axboe --- fs/pipe.c | 54 ++++++++++++++++++++++++++++++++++++----------- include/linux/pipe_fs_i.h | 4 +++- kernel/sysctl.c | 8 +++---- 3 files changed, 49 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/fs/pipe.c b/fs/pipe.c index f98fae3e36b0..69c4c7c13ea9 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -26,9 +26,14 @@ /* * The max size that a non-root user is allowed to grow the pipe. 
Can - * be set by root in /proc/sys/fs/pipe-max-pages + * be set by root in /proc/sys/fs/pipe-max-size */ -unsigned int pipe_max_pages = PIPE_DEF_BUFFERS * 16; +unsigned int pipe_max_size = 1048576; + +/* + * Minimum pipe size, as required by POSIX + */ +unsigned int pipe_min_size = PAGE_SIZE; /* * We use a start+len construction, which provides full use of the @@ -1156,6 +1161,35 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) return nr_pages * PAGE_SIZE; } +/* + * Currently we rely on the pipe array holding a power-of-2 number + * of pages. + */ +static inline unsigned int round_pipe_size(unsigned int size) +{ + unsigned long nr_pages; + + nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; +} + +/* + * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax + * will return an error. + */ +int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, + size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec_minmax(table, write, buf, lenp, ppos); + if (ret < 0 || !write) + return ret; + + pipe_max_size = round_pipe_size(pipe_max_size); + return ret; +} + long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { struct pipe_inode_info *pipe; @@ -1169,23 +1203,19 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case F_SETPIPE_SZ: { - unsigned long nr_pages; + unsigned int size, nr_pages; - /* - * Currently the array must be a power-of-2 size, so adjust - * upwards if needed. - */ - nr_pages = (arg + PAGE_SIZE - 1) >> PAGE_SHIFT; - nr_pages = roundup_pow_of_two(nr_pages); + size = round_pipe_size(arg); + nr_pages = size >> PAGE_SHIFT; - if (!capable(CAP_SYS_RESOURCE) && nr_pages > pipe_max_pages) { + if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) { ret = -EPERM; goto out; - } else if (nr_pages < 1) { + } else if (nr_pages < PAGE_SIZE) { ret = -EINVAL; goto out; } - ret = pipe_set_size(pipe, arg); + ret = pipe_set_size(pipe, nr_pages); break; } case F_GETPIPE_SZ: diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 16de3933c45e..445796945ac9 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -139,7 +139,9 @@ void pipe_lock(struct pipe_inode_info *); void pipe_unlock(struct pipe_inode_info *); void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); -extern unsigned int pipe_max_pages; +extern unsigned int pipe_max_size, pipe_min_size; +int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *); + /* Drop the inode semaphore and wait for a pipe event, atomically */ void pipe_wait(struct pipe_inode_info *pipe); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 997080f00e0b..d24f761f4876 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1471,12 +1471,12 @@ static struct ctl_table fs_table[] = { }, #endif { - .procname = "pipe-max-pages", - .data = &pipe_max_pages, + .procname = "pipe-max-size", + .data = &pipe_max_size, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .extra1 = &two, + .proc_handler = &pipe_proc_fn, + .extra1 = &pipe_min_size, }, /* * NOTE: do not add new entries to this table unless you have read -- cgit v1.2.3 From 724df615928b7050d33b6243f60b12bd87484fc7 Mon Sep 17 00:00:00 2001 From: "Justin P. Mattock" Date: Wed, 26 May 2010 09:22:40 -0700 Subject: fix comment typo in netdevice.h Fix missing "of" in comment. Signed-off-by: Justin P. 
Mattock Signed-off-by: Jiri Kosina --- include/linux/netdevice.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a1bff6518166..c761c903772e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -775,7 +775,7 @@ struct net_device { /* * This is the first field of the "visible" part of this structure * (i.e. as seen by users in the "Space.c" file). It is the name - * the interface. + * of the interface. */ char name[IFNAMSIZ]; -- cgit v1.2.3 From 7d683a09990ff095a91b6e724ecee0ff8733274a Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Thu, 3 Jun 2010 11:58:28 +0200 Subject: wrong type for 'magic' argument in simple_fill_super() It's used to superblock ->s_magic, which is unsigned long. Signed-off-by: Roberto Sassu Reviewed-by: Mimi Zohar Signed-off-by: Eric Paris CC: stable@kernel.org Signed-off-by: Al Viro --- fs/libfs.c | 3 ++- include/linux/fs.h | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/libfs.c b/fs/libfs.c index 09e1016eb774..dcaf972cbf1b 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -489,7 +489,8 @@ int simple_write_end(struct file *file, struct address_space *mapping, * unique inode values later for this filesystem, then you must take care * to pass it an appropriate max_reserved value to avoid collisions. */ -int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files) +int simple_fill_super(struct super_block *s, unsigned long magic, + struct tree_descr *files) { struct inode *inode; struct dentry *root; diff --git a/include/linux/fs.h b/include/linux/fs.h index 3428393942a6..471e1ff5079a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2388,7 +2388,7 @@ extern const struct file_operations simple_dir_operations; extern const struct inode_operations simple_dir_inode_operations; struct tree_descr { char *name; const struct file_operations *ops; int mode; }; struct dentry *d_alloc_name(struct dentry *, const char *); -extern int simple_fill_super(struct super_block *, int, struct tree_descr *); +extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); extern void simple_release_fs(struct vfsmount **mount, int *count); -- cgit v1.2.3 From 485d527686850d68a0e9006dd9904f19f122485e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 4 Jun 2010 14:14:58 -0700 Subject: sys_personality: change sys_personality() to accept "unsigned int" instead of u_long task_struct->pesonality is "unsigned int", but sys_personality() paths use "unsigned long pesonality". This means that every assignment or comparison is not right. In particular, if this argument does not fit into "unsigned int" __set_personality() changes the caller's personality and then sys_personality() returns -EINVAL. Turn this argument into "unsigned int" and avoid overflows. Obviously, this is the user-visible change, we just ignore the upper bits. But this can't break the sane application. There is another thing which can confuse the poorly written applications. User-space thinks that this syscall returns int, not long. This means that the returned value can be negative and look like the error code. But note that libc won't be confused and thus errno won't be set, and with this patch the user-space can never get -1 unless sys_personality() really fails. 
And, most importantly, the negative RET != -1 is only possible if that app previously called personality(RET). Pointed-out-by: Wenming Zhang Suggested-by: Linus Torvalds Signed-off-by: Oleg Nesterov Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/personality.h | 2 +- include/linux/syscalls.h | 2 +- kernel/exec_domain.c | 18 +++++++++--------- 3 files changed, 11 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/personality.h b/include/linux/personality.h index 126120819a0d..eec3bae164d4 100644 --- a/include/linux/personality.h +++ b/include/linux/personality.h @@ -12,7 +12,7 @@ struct pt_regs; extern int register_exec_domain(struct exec_domain *); extern int unregister_exec_domain(struct exec_domain *); -extern int __set_personality(unsigned long); +extern int __set_personality(unsigned int); #endif /* __KERNEL__ */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a1a86a53bc73..7f614ce274a9 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -289,7 +289,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr); asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data); -asmlinkage long sys_personality(u_long personality); +asmlinkage long sys_personality(unsigned int personality); asmlinkage long sys_sigpending(old_sigset_t __user *set); asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set, diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c index c35452cadded..dd62f8e714ca 100644 --- a/kernel/exec_domain.c +++ b/kernel/exec_domain.c @@ -27,7 +27,7 @@ static struct exec_domain *exec_domains = &default_exec_domain; static DEFINE_RWLOCK(exec_domains_lock); -static u_long ident_map[32] = { +static unsigned long ident_map[32] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, @@ -56,10 +56,10 @@ default_handler(int segment, struct pt_regs *regp) } static struct exec_domain * -lookup_exec_domain(u_long personality) +lookup_exec_domain(unsigned int personality) { - struct exec_domain * ep; - u_long pers = personality(personality); + unsigned int pers = personality(personality); + struct exec_domain *ep; read_lock(&exec_domains_lock); for (ep = exec_domains; ep; ep = ep->next) { @@ -70,7 +70,7 @@ lookup_exec_domain(u_long personality) #ifdef CONFIG_MODULES read_unlock(&exec_domains_lock); - request_module("personality-%ld", pers); + request_module("personality-%d", pers); read_lock(&exec_domains_lock); for (ep = exec_domains; ep; ep = ep->next) { @@ -135,7 +135,7 @@ unregister: } int -__set_personality(u_long personality) +__set_personality(unsigned int personality) { struct exec_domain *ep, *oep; @@ -188,9 +188,9 @@ static int __init proc_execdomains_init(void) module_init(proc_execdomains_init); #endif -SYSCALL_DEFINE1(personality, u_long, personality) +SYSCALL_DEFINE1(personality, unsigned int, personality) { - u_long old = current->personality; + unsigned int old = current->personality; if (personality != 0xffffffff) { set_personality(personality); @@ -198,7 +198,7 @@ SYSCALL_DEFINE1(personality, u_long, personality) return -EINVAL; } - return (long)old; + return old; } -- cgit v1.2.3 From 2c02dfe7fe3fba97a5665d329d039d2415ea5607 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 31 May 2010 12:19:37 -0700 Subject: module: Make the 'usage' lists be two-way When adding a module that depends on another one, we used to create a one-way list of 
"modules_which_use_me", so that module unloading could see who needs a module. It's actually quite simple to make that list go both ways: so that we not only can see "who uses me", but also see a list of modules that are "used by me". In fact, we always wanted that list in "module_unload_free()": when we unload a module, we want to also release all the other modules that are used by that module. But because we didn't have that list, we used to first iterate over all modules, and then iterate over each "used by me" list of that module. By making the list two-way, we simplify module_unload_free(), and it allows for some trivial fixes later too. Signed-off-by: Linus Torvalds Signed-off-by: Rusty Russell (cleaned & rebased) --- include/linux/module.h | 4 ++- kernel/module.c | 79 ++++++++++++++++++++++++++++++-------------------- 2 files changed, 51 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index 6914fcad4673..680db9e2ac36 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -359,7 +359,9 @@ struct module #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? */ - struct list_head modules_which_use_me; + struct list_head source_list; + /* What modules do I depend on? */ + struct list_head target_list; /* Who is waiting for us to be unloaded */ struct task_struct *waiter; diff --git a/kernel/module.c b/kernel/module.c index 0129769301e3..be18c3e34684 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -523,7 +523,8 @@ static void module_unload_init(struct module *mod) { int cpu; - INIT_LIST_HEAD(&mod->modules_which_use_me); + INIT_LIST_HEAD(&mod->source_list); + INIT_LIST_HEAD(&mod->target_list); for_each_possible_cpu(cpu) { per_cpu_ptr(mod->refptr, cpu)->incs = 0; per_cpu_ptr(mod->refptr, cpu)->decs = 0; @@ -538,8 +539,9 @@ static void module_unload_init(struct module *mod) /* modules using other modules */ struct module_use { - struct list_head list; - struct module *module_which_uses; + struct list_head source_list; + struct list_head target_list; + struct module *source, *target; }; /* Does a already use b? */ @@ -547,8 +549,8 @@ static int already_uses(struct module *a, struct module *b) { struct module_use *use; - list_for_each_entry(use, &b->modules_which_use_me, list) { - if (use->module_which_uses == a) { + list_for_each_entry(use, &b->source_list, source_list) { + if (use->source == a) { DEBUGP("%s uses %s!\n", a->name, b->name); return 1; } @@ -557,6 +559,33 @@ static int already_uses(struct module *a, struct module *b) return 0; } +/* + * Module a uses b + * - we add 'a' as a "source", 'b' as a "target" of module use + * - the module_use is added to the list of 'b' sources (so + * 'b' can walk the list to see who sourced them), and of 'a' + * targets (so 'a' can see what modules it targets). 
+ */ +static int add_module_usage(struct module *a, struct module *b) +{ + int no_warn; + struct module_use *use; + + DEBUGP("Allocating new usage for %s.\n", a->name); + use = kmalloc(sizeof(*use), GFP_ATOMIC); + if (!use) { + printk(KERN_WARNING "%s: out of memory loading\n", a->name); + return -ENOMEM; + } + + use->source = a; + use->target = b; + list_add(&use->source_list, &b->source_list); + list_add(&use->target_list, &a->target_list); + no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name); + return 0; +} + /* Module a uses b */ int use_module(struct module *a, struct module *b) { @@ -578,17 +607,11 @@ int use_module(struct module *a, struct module *b) if (err) return 0; - DEBUGP("Allocating new usage for %s.\n", a->name); - use = kmalloc(sizeof(*use), GFP_ATOMIC); - if (!use) { - printk("%s: out of memory loading\n", a->name); + err = add_module_usage(a, b); + if (err) { module_put(b); return 0; } - - use->module_which_uses = a; - list_add(&use->list, &b->modules_which_use_me); - no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name); return 1; } EXPORT_SYMBOL_GPL(use_module); @@ -596,22 +619,16 @@ EXPORT_SYMBOL_GPL(use_module); /* Clear the unload stuff of the module. */ static void module_unload_free(struct module *mod) { - struct module *i; - - list_for_each_entry(i, &modules, list) { - struct module_use *use; + struct module_use *use, *tmp; - list_for_each_entry(use, &i->modules_which_use_me, list) { - if (use->module_which_uses == mod) { - DEBUGP("%s unusing %s\n", mod->name, i->name); - module_put(i); - list_del(&use->list); - kfree(use); - sysfs_remove_link(i->holders_dir, mod->name); - /* There can be at most one match. */ - break; - } - } + list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) { + struct module *i = use->target; + DEBUGP("%s unusing %s\n", mod->name, i->name); + module_put(i); + list_del(&use->source_list); + list_del(&use->target_list); + kfree(use); + sysfs_remove_link(i->holders_dir, mod->name); } } @@ -735,7 +752,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user, goto out; } - if (!list_empty(&mod->modules_which_use_me)) { + if (!list_empty(&mod->source_list)) { /* Other modules depend on us: get rid of them first. */ ret = -EWOULDBLOCK; goto out; @@ -799,9 +816,9 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod) /* Always include a trailing , so userspace can differentiate between this and the old multi-field proc format. */ - list_for_each_entry(use, &mod->modules_which_use_me, list) { + list_for_each_entry(use, &mod->source_list, source_list) { printed_something = 1; - seq_printf(m, "%s,", use->module_which_uses->name); + seq_printf(m, "%s,", use->source->name); } if (mod->init != NULL && mod->exit == NULL) { -- cgit v1.2.3 From c8e21ced08b39ef8dfe7236fb2a923a95f645262 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 5 Jun 2010 11:17:35 -0600 Subject: module: fix kdb's illicit use of struct module_use. Linus changed the structure, and luckily this didn't compile any more. 
Reported-by: Stephen Rothwell Signed-off-by: Rusty Russell Cc: Jason Wessel Cc: Martin Hicks --- include/linux/module.h | 7 +++++++ kernel/debug/kdb/kdb_main.c | 12 +++--------- kernel/module.c | 11 +---------- 3 files changed, 11 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index 680db9e2ac36..5d8fca5dcff5 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -181,6 +181,13 @@ void *__symbol_get(const char *symbol); void *__symbol_get_gpl(const char *symbol); #define symbol_get(x) ((typeof(&x))(__symbol_get(MODULE_SYMBOL_PREFIX #x))) +/* modules using other modules: kdb wants to see this. */ +struct module_use { + struct list_head source_list; + struct list_head target_list; + struct module *source, *target; +}; + #ifndef __GENKSYMS__ #ifdef CONFIG_MODVERSIONS /* Mark the CRC weak since genksyms apparently decides not to diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index b724c791b6d4..184cd8209c36 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1857,12 +1857,6 @@ static int kdb_ef(int argc, const char **argv) } #if defined(CONFIG_MODULES) -/* modules using other modules */ -struct module_use { - struct list_head list; - struct module *module_which_uses; -}; - /* * kdb_lsmod - This function implements the 'lsmod' command. Lists * currently loaded kernel modules. @@ -1894,9 +1888,9 @@ static int kdb_lsmod(int argc, const char **argv) { struct module_use *use; kdb_printf(" [ "); - list_for_each_entry(use, &mod->modules_which_use_me, - list) - kdb_printf("%s ", use->module_which_uses->name); + list_for_each_entry(use, &mod->source_list, + source_list) + kdb_printf("%s ", use->target->name); kdb_printf("]\n"); } #endif diff --git a/kernel/module.c b/kernel/module.c index be18c3e34684..bbb1d812c79c 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -536,14 +536,6 @@ static void module_unload_init(struct module *mod) mod->waiter = current; } -/* modules using other modules */ -struct module_use -{ - struct list_head source_list; - struct list_head target_list; - struct module *source, *target; -}; - /* Does a already use b? */ static int already_uses(struct module *a, struct module *b) { @@ -589,8 +581,7 @@ static int add_module_usage(struct module *a, struct module *b) /* Module a uses b */ int use_module(struct module *a, struct module *b) { - struct module_use *use; - int no_warn, err; + int err; if (b == NULL || already_uses(a, b)) return 1; -- cgit v1.2.3 From 6407ebb271fc34440b306f305e1efb7685eece26 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 5 Jun 2010 11:17:36 -0600 Subject: module: Make module sysfs functions private. These were placed in the header in ef665c1a06 to get the various SYSFS/MODULE config combintations to compile. That may have been necessary then, but it's not now. These functions are all local to module.c. 
Signed-off-by: Rusty Russell Cc: Randy Dunlap --- include/linux/module.h | 33 --------------------------------- kernel/module.c | 29 +++++++++++++++++++++++++---- 2 files changed, 25 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/include/linux/module.h b/include/linux/module.h index 5d8fca5dcff5..8a6b9fdc7ffa 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -672,43 +672,10 @@ static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter) #endif /* CONFIG_MODULES */ -struct device_driver; #ifdef CONFIG_SYSFS -struct module; - extern struct kset *module_kset; extern struct kobj_type module_ktype; extern int module_sysfs_initialized; - -int mod_sysfs_init(struct module *mod); -int mod_sysfs_setup(struct module *mod, - struct kernel_param *kparam, - unsigned int num_params); -int module_add_modinfo_attrs(struct module *mod); -void module_remove_modinfo_attrs(struct module *mod); - -#else /* !CONFIG_SYSFS */ - -static inline int mod_sysfs_init(struct module *mod) -{ - return 0; -} - -static inline int mod_sysfs_setup(struct module *mod, - struct kernel_param *kparam, - unsigned int num_params) -{ - return 0; -} - -static inline int module_add_modinfo_attrs(struct module *mod) -{ - return 0; -} - -static inline void module_remove_modinfo_attrs(struct module *mod) -{ } - #endif /* CONFIG_SYSFS */ #define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x) diff --git a/kernel/module.c b/kernel/module.c index c690d9885797..808aa18dd661 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1323,7 +1323,7 @@ static void del_usage_links(struct module *mod) #endif } -int module_add_modinfo_attrs(struct module *mod) +static int module_add_modinfo_attrs(struct module *mod) { struct module_attribute *attr; struct module_attribute *temp_attr; @@ -1349,7 +1349,7 @@ int module_add_modinfo_attrs(struct module *mod) return error; } -void module_remove_modinfo_attrs(struct module *mod) +static void module_remove_modinfo_attrs(struct module *mod) { struct module_attribute *attr; int i; @@ -1365,7 +1365,7 @@ void module_remove_modinfo_attrs(struct module *mod) kfree(mod->modinfo_attrs); } -int mod_sysfs_init(struct module *mod) +static int mod_sysfs_init(struct module *mod) { int err; struct kobject *kobj; @@ -1399,7 +1399,7 @@ out: return err; } -int mod_sysfs_setup(struct module *mod, +static int mod_sysfs_setup(struct module *mod, struct kernel_param *kparam, unsigned int num_params) { @@ -1445,6 +1445,27 @@ static void mod_sysfs_fini(struct module *mod) #else /* CONFIG_SYSFS */ +static inline int mod_sysfs_init(struct module *mod) +{ + return 0; +} + +static inline int mod_sysfs_setup(struct module *mod, + struct kernel_param *kparam, + unsigned int num_params) +{ + return 0; +} + +static inline int module_add_modinfo_attrs(struct module *mod) +{ + return 0; +} + +static inline void module_remove_modinfo_attrs(struct module *mod) +{ +} + static void mod_sysfs_fini(struct module *mod) { } -- cgit v1.2.3 From abbceff7d7a884968e876e52578da1db4a4f6b54 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Mon, 10 May 2010 15:15:12 -0400 Subject: swiotlb: add the swiotlb initialization function with iotlb memory This enables the caller to initialize swiotlb with its own iotlb memory. See "swiotlb: swiotlb: add swiotlb_tbl_map_single library function" for full description of patchset. 
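A minimal sketch of how a platform that owns its own bounce-buffer memory might use the new entry point. The example_ function name, the 64 MB size and the bootmem allocation are illustrative assumptions, not part of this patch; only swiotlb_init_with_tbl(), IO_TLB_SHIFT and IO_TLB_SEGSIZE come from the kernel headers:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/swiotlb.h>

void __init example_swiotlb_setup(void)
{
	/* Illustrative: carve out 64 MB worth of IO TLB slabs, segment aligned. */
	unsigned long nslabs = ALIGN((64UL << 20) >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	char *tlb = alloc_bootmem_low_pages(nslabs << IO_TLB_SHIFT);

	if (!tlb)
		panic("example: cannot allocate SWIOTLB buffer");

	/* Hand the caller-provided buffer to swiotlb instead of letting it allocate. */
	swiotlb_init_with_tbl(tlb, nslabs, 1 /* verbose */);
}
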
[v2: changed ..with_tlb to ..with_tbl] Signed-off-by: FUJITA Tomonori Reviewed-by: Konrad Rzeszutek Wilk Tested-by: Albert Herranz --- include/linux/swiotlb.h | 1 + lib/swiotlb.c | 48 ++++++++++++++++++++++++++++++------------------ 2 files changed, 31 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 81a4e213c6cf..b406261d8887 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -23,6 +23,7 @@ extern int swiotlb_force; #define IO_TLB_SHIFT 11 extern void swiotlb_init(int verbose); +extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 783aff00024c..ec61e1507d0a 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -140,28 +140,14 @@ void swiotlb_print_info(void) (unsigned long long)pend); } -/* - * Statically reserve bounce buffer space and initialize bounce buffer data - * structures for the software IO TLB used to implement the DMA API. - */ -void __init -swiotlb_init_with_default_size(size_t default_size, int verbose) +void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) { unsigned long i, bytes; - if (!io_tlb_nslabs) { - io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); - } + bytes = nslabs << IO_TLB_SHIFT; - bytes = io_tlb_nslabs << IO_TLB_SHIFT; - - /* - * Get IO TLB memory from the low pages - */ - io_tlb_start = alloc_bootmem_low_pages(bytes); - if (!io_tlb_start) - panic("Cannot allocate SWIOTLB buffer"); + io_tlb_nslabs = nslabs; + io_tlb_start = tlb; io_tlb_end = io_tlb_start + bytes; /* @@ -185,6 +171,32 @@ swiotlb_init_with_default_size(size_t default_size, int verbose) swiotlb_print_info(); } +/* + * Statically reserve bounce buffer space and initialize bounce buffer data + * structures for the software IO TLB used to implement the DMA API. + */ +void __init +swiotlb_init_with_default_size(size_t default_size, int verbose) +{ + unsigned long bytes; + + if (!io_tlb_nslabs) { + io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + } + + bytes = io_tlb_nslabs << IO_TLB_SHIFT; + + /* + * Get IO TLB memory from the low pages + */ + io_tlb_start = alloc_bootmem_low_pages(bytes); + if (!io_tlb_start) + panic("Cannot allocate SWIOTLB buffer"); + + swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose); +} + void __init swiotlb_init(int verbose) { -- cgit v1.2.3 From 22d48269984fc93a71f65a54aa422aacf5fdb926 Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Mon, 10 May 2010 15:01:15 -0500 Subject: swiotlb: search and replace "int dir" with "enum dma_data_direction dir" .. to catch anybody doing something funky. See "swiotlb: swiotlb: add swiotlb_tbl_map_single library function" for full description of patchset. 
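For reference, a hedged sketch of what a caller looks like after the change: the direction argument is now spelled with the DMA API's enum dma_data_direction values rather than a bare int. The example_ wrapper and its error handling are illustrative assumptions only:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

static int example_map_for_device(struct device *hwdev,
				  struct scatterlist *sgl, int nents)
{
	/* Direction is now an enum dma_data_direction value, not an int. */
	if (!swiotlb_map_sg(hwdev, sgl, nents, DMA_TO_DEVICE))
		return -ENOMEM;

	/* ... point the hardware at the (possibly bounced) buffers ... */

	swiotlb_unmap_sg(hwdev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
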
[v2: swiotlb_sync_single_range_* no more - removed usage] [v3: enum dma_data_direction direction -> enum dma_data_direction dir] Signed-off-by: Konrad Rzeszutek Wilk Acked-by: FUJITA Tomonori Tested-by: Albert Herranz --- include/linux/swiotlb.h | 4 ++-- lib/swiotlb.c | 23 +++++++++++++---------- 2 files changed, 15 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index b406261d8887..250d766f17f7 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -43,11 +43,11 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, - int direction); + enum dma_data_direction dir); extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, - int direction); + enum dma_data_direction dir); extern int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 1fc15bf63945..5f60157f31d8 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -374,7 +374,8 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, } void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, - phys_addr_t phys, size_t size, int dir) + phys_addr_t phys, size_t size, + enum dma_data_direction dir) { unsigned long flags; char *dma_addr; @@ -481,7 +482,8 @@ found: */ static void * -map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir) +map_single(struct device *hwdev, phys_addr_t phys, size_t size, + enum dma_data_direction dir) { dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start); @@ -493,7 +495,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir) */ static void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size, - int dir) + enum dma_data_direction dir) { unsigned long flags; int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; @@ -534,7 +536,7 @@ swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size, static void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size, - int dir, int target) + enum dma_data_direction dir, int target) { int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; phys_addr_t phys = io_tlb_orig_addr[index]; @@ -624,7 +626,8 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, EXPORT_SYMBOL(swiotlb_free_coherent); static void -swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) +swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, + int do_panic) { /* * Ran out of IOMMU space for this operation. This is very bad. @@ -702,7 +705,7 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); * whatever the device wrote there. 
*/ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) + size_t size, enum dma_data_direction dir) { phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); @@ -745,7 +748,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page); */ static void swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir, int target) + size_t size, enum dma_data_direction dir, int target) { phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); @@ -832,7 +835,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs); int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, - int dir) + enum dma_data_direction dir) { return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); } @@ -859,7 +862,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, - int dir) + enum dma_data_direction dir) { return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); } @@ -874,7 +877,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg); */ static void swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, - int nelems, int dir, int target) + int nelems, enum dma_data_direction dir, int target) { struct scatterlist *sg; int i; -- cgit v1.2.3 From d7ef1533a90f432615d25729c2477bac9e72051d Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Fri, 28 May 2010 11:37:10 -0400 Subject: swiotlb: Make swiotlb bookkeeping functions visible in the header file. We put the functions dealing with the operations on the SWIOTLB buffer in the header and make those functions non-static. And also make the functions exported via EXPORT_SYMBOL_GPL. See "swiotlb: swiotlb: add swiotlb_tbl_map_single library function" for full description of patchset. [v2: swiotlb_sync_single_range_for_* no more. Remove usage.] Signed-off-by: Konrad Rzeszutek Wilk Acked-by: FUJITA Tomonori Tested-by: Albert Herranz --- include/linux/swiotlb.h | 22 ++++++++++++++++++++++ lib/swiotlb.c | 29 ++++++++++++++--------------- 2 files changed, 36 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 250d766f17f7..8c0e349f4a6c 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -25,6 +25,28 @@ extern int swiotlb_force; extern void swiotlb_init(int verbose); extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); +/* + * Enumeration for sync targets + */ +enum dma_sync_target { + SYNC_FOR_CPU = 0, + SYNC_FOR_DEVICE = 1, +}; +extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, + phys_addr_t phys, size_t size, + enum dma_data_direction dir); + +extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, + size_t size, enum dma_data_direction dir); + +extern void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, + size_t size, enum dma_data_direction dir, + enum dma_sync_target target); + +/* Accessory functions. 
*/ +extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, + enum dma_data_direction dir); + extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags); diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 5f60157f31d8..34e3082632d8 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -50,14 +50,6 @@ */ #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) -/* - * Enumeration for sync targets - */ -enum dma_sync_target { - SYNC_FOR_CPU = 0, - SYNC_FOR_DEVICE = 1, -}; - int swiotlb_force; /* @@ -335,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr) /* * Bounce: copy the swiotlb buffer back to the original dma location */ -static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, - enum dma_data_direction dir) +void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, + enum dma_data_direction dir) { unsigned long pfn = PFN_DOWN(phys); @@ -372,6 +364,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, memcpy(phys_to_virt(phys), dma_addr, size); } } +EXPORT_SYMBOL_GPL(swiotlb_bounce); void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, phys_addr_t phys, size_t size, @@ -476,6 +469,7 @@ found: return dma_addr; } +EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); /* * Allocates bounce buffer and returns its kernel virtual address. @@ -493,7 +487,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, /* * dma_addr is the kernel virtual address of the bounce buffer to unmap. */ -static void +void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size, enum dma_data_direction dir) { @@ -533,10 +527,12 @@ swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size, } spin_unlock_irqrestore(&io_tlb_lock, flags); } +EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single); -static void +void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size, - enum dma_data_direction dir, int target) + enum dma_data_direction dir, + enum dma_sync_target target) { int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; phys_addr_t phys = io_tlb_orig_addr[index]; @@ -560,6 +556,7 @@ swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size, BUG(); } } +EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single); void * swiotlb_alloc_coherent(struct device *hwdev, size_t size, @@ -748,7 +745,8 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page); */ static void swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, int target) + size_t size, enum dma_data_direction dir, + enum dma_sync_target target) { phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); @@ -877,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg); */ static void swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, int target) + int nelems, enum dma_data_direction dir, + enum dma_sync_target target) { struct scatterlist *sg; int i; -- cgit v1.2.3 From 4daedcfe8c6851aa01cc1997220f2577f4039c13 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 3 Jun 2010 11:57:04 +0200 Subject: ahci: add pci quirk for JMB362 JMB362 is a new variant of jmicron controller which is similar to JMB360 but has two SATA ports instead of one. As there is no PATA port, single function AHCI mode can be used as in JMB360. Add pci quirk for JMB362. 
Signed-off-by: Tejun Heo Reported-by: Aries Lee Cc: stable@kernel.org Signed-off-by: Jeff Garzik --- drivers/pci/quirks.c | 5 ++++- include/linux/pci_ids.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b7512cf08c58..477345d41641 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1457,7 +1457,8 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) conf5 &= ~(1 << 24); /* Clear bit 24 */ switch (pdev->device) { - case PCI_DEVICE_ID_JMICRON_JMB360: + case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */ + case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */ /* The controller should be in single function ahci mode */ conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */ break; @@ -1493,12 +1494,14 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f149dd10908b..4eb467910a45 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2321,6 +2321,7 @@ #define PCI_VENDOR_ID_JMICRON 0x197B #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 +#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 -- cgit v1.2.3 From 148a03bc0b0e3ef153d0cade7bc88e9b14edfb7a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 3 Jun 2010 19:00:03 -0400 Subject: drm/radeon/kms/evergreen: set accel_enabled This is needed to enable accel in the ddx. However, due to a bug in older versions of the ddx, it relies on accel being disabled in order to load properly on evergreen chips. To maintain compatility, we add a new get accel param and call that from the ddx. The old one always returns false for evergreen cards. [this fixes a regression with older userspaces on newer kernels]. 
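A rough sketch of how user space might query the new parameter; the libdrm plumbing shown here (drmCommandWriteRead() and the header paths) is an assumption for illustration, not code carried by this patch — only RADEON_INFO_ACCEL_WORKING2 and struct drm_radeon_info come from the kernel header:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/radeon_drm.h>

/* Returns 0 on success; *value is nonzero when acceleration really works. */
static int example_query_accel_working2(int fd, uint32_t *value)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_ACCEL_WORKING2;
	/* The kernel writes the result back through this user pointer. */
	info.value = (uint64_t)(uintptr_t)value;

	return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}
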
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 2 +- drivers/gpu/drm/radeon/radeon_drv.c | 3 ++- drivers/gpu/drm/radeon/radeon_kms.c | 9 ++++++++- include/drm/radeon_drm.h | 1 + 4 files changed, 12 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 0440c0939bdd..49c94aef0dda 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2153,7 +2153,7 @@ int evergreen_init(struct radeon_device *rdev) if (r) return r; - rdev->accel_working = false; + rdev->accel_working = true; r = evergreen_startup(rdev); if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 902d1731a652..e166fe4d7c30 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -45,9 +45,10 @@ * - 2.2.0 - add r6xx/r7xx const buffer support * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs * - 2.4.0 - add crtc id query + * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 4 +#define KMS_DRIVER_MINOR 5 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 04068352ccd2..6a70c0dc7f92 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -118,7 +118,11 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) value = rdev->num_z_pipes; break; case RADEON_INFO_ACCEL_WORKING: - value = rdev->accel_working; + /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ + if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) + value = false; + else + value = rdev->accel_working; break; case RADEON_INFO_CRTC_FROM_ID: for (i = 0, found = 0; i < rdev->num_crtc; i++) { @@ -134,6 +138,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } break; + case RADEON_INFO_ACCEL_WORKING2: + value = rdev->accel_working; + break; default: DRM_DEBUG("Invalid request %d\n", info->request); return -EINVAL; diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 3ff9fc071dfe..5347063e9d5a 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -903,6 +903,7 @@ struct drm_radeon_cs { #define RADEON_INFO_NUM_Z_PIPES 0x02 #define RADEON_INFO_ACCEL_WORKING 0x03 #define RADEON_INFO_CRTC_FROM_ID 0x04 +#define RADEON_INFO_ACCEL_WORKING2 0x05 struct drm_radeon_info { uint32_t request; -- cgit v1.2.3 From dc61b1d65e353d638b2445f71fb8e5b5630f2415 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 8 Jun 2010 11:40:42 +0200 Subject: sched: Fix PROVE_RCU vs cpu_cgroup PROVE_RCU has a few issues with the cpu_cgroup because the scheduler typically holds rq->lock around the css rcu derefs but the generic cgroup code doesn't (and can't) know about that lock. Provide means to add extra checks to the css dereference and use that in the scheduler to annotate its users. The addition of rq->lock to these checks is correct because the cgroup_subsys::attach() method takes the rq->lock for each task it moves, therefore by holding that lock, we ensure the task is pinned to the current cgroup and the RCU derefence is valid. 
That leaves one genuine race in __sched_setscheduler() where we used task_group() without holding any of the required locks and thus raced with the cgroup code. Solve this by moving the check under the appropriate lock. Signed-off-by: Peter Zijlstra Cc: "Paul E. McKenney" LKML-Reference: Signed-off-by: Ingo Molnar --- include/linux/cgroup.h | 20 ++++++--- kernel/sched.c | 115 +++++++++++++++++++++++++------------------------ 2 files changed, 73 insertions(+), 62 deletions(-) (limited to 'include') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0c621604baa1..e3d00fdb858d 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -525,13 +525,21 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state( return cgrp->subsys[subsys_id]; } -static inline struct cgroup_subsys_state *task_subsys_state( - struct task_struct *task, int subsys_id) +/* + * function to get the cgroup_subsys_state which allows for extra + * rcu_dereference_check() conditions, such as locks used during the + * cgroup_subsys::attach() methods. + */ +#define task_subsys_state_check(task, subsys_id, __c) \ + rcu_dereference_check(task->cgroups->subsys[subsys_id], \ + rcu_read_lock_held() || \ + lockdep_is_held(&task->alloc_lock) || \ + cgroup_lock_is_held() || (__c)) + +static inline struct cgroup_subsys_state * +task_subsys_state(struct task_struct *task, int subsys_id) { - return rcu_dereference_check(task->cgroups->subsys[subsys_id], - rcu_read_lock_held() || - lockdep_is_held(&task->alloc_lock) || - cgroup_lock_is_held()); + return task_subsys_state_check(task, subsys_id, false); } static inline struct cgroup* task_cgroup(struct task_struct *task, diff --git a/kernel/sched.c b/kernel/sched.c index f8b8996228dd..2aaceebd484c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -306,52 +306,6 @@ static int init_task_group_load = INIT_TASK_GROUP_LOAD; */ struct task_group init_task_group; -/* return group to which a task belongs */ -static inline struct task_group *task_group(struct task_struct *p) -{ - struct task_group *tg; - -#ifdef CONFIG_CGROUP_SCHED - tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), - struct task_group, css); -#else - tg = &init_task_group; -#endif - return tg; -} - -/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ -static inline void set_task_rq(struct task_struct *p, unsigned int cpu) -{ - /* - * Strictly speaking this rcu_read_lock() is not needed since the - * task_group is tied to the cgroup, which in turn can never go away - * as long as there are tasks attached to it. - * - * However since task_group() uses task_subsys_state() which is an - * rcu_dereference() user, this quiets CONFIG_PROVE_RCU. - */ - rcu_read_lock(); -#ifdef CONFIG_FAIR_GROUP_SCHED - p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; - p->se.parent = task_group(p)->se[cpu]; -#endif - -#ifdef CONFIG_RT_GROUP_SCHED - p->rt.rt_rq = task_group(p)->rt_rq[cpu]; - p->rt.parent = task_group(p)->rt_se[cpu]; -#endif - rcu_read_unlock(); -} - -#else - -static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } -static inline struct task_group *task_group(struct task_struct *p) -{ - return NULL; -} - #endif /* CONFIG_CGROUP_SCHED */ /* CFS-related fields in a runqueue */ @@ -644,6 +598,49 @@ static inline int cpu_of(struct rq *rq) #define cpu_curr(cpu) (cpu_rq(cpu)->curr) #define raw_rq() (&__raw_get_cpu_var(runqueues)) +#ifdef CONFIG_CGROUP_SCHED + +/* + * Return the group to which this tasks belongs. 
+ * + * We use task_subsys_state_check() and extend the RCU verification + * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach() + * holds that lock for each task it moves into the cgroup. Therefore + * by holding that lock, we pin the task to the current cgroup. + */ +static inline struct task_group *task_group(struct task_struct *p) +{ + struct cgroup_subsys_state *css; + + css = task_subsys_state_check(p, cpu_cgroup_subsys_id, + lockdep_is_held(&task_rq(p)->lock)); + return container_of(css, struct task_group, css); +} + +/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ +static inline void set_task_rq(struct task_struct *p, unsigned int cpu) +{ +#ifdef CONFIG_FAIR_GROUP_SCHED + p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; + p->se.parent = task_group(p)->se[cpu]; +#endif + +#ifdef CONFIG_RT_GROUP_SCHED + p->rt.rt_rq = task_group(p)->rt_rq[cpu]; + p->rt.parent = task_group(p)->rt_se[cpu]; +#endif +} + +#else /* CONFIG_CGROUP_SCHED */ + +static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } +static inline struct task_group *task_group(struct task_struct *p) +{ + return NULL; +} + +#endif /* CONFIG_CGROUP_SCHED */ + inline void update_rq_clock(struct rq *rq) { if (!rq->skip_clock_update) @@ -4465,16 +4462,6 @@ recheck: } if (user) { -#ifdef CONFIG_RT_GROUP_SCHED - /* - * Do not allow realtime tasks into groups that have no runtime - * assigned. - */ - if (rt_bandwidth_enabled() && rt_policy(policy) && - task_group(p)->rt_bandwidth.rt_runtime == 0) - return -EPERM; -#endif - retval = security_task_setscheduler(p, policy, param); if (retval) return retval; @@ -4490,6 +4477,22 @@ recheck: * runqueue lock must be held. */ rq = __task_rq_lock(p); + +#ifdef CONFIG_RT_GROUP_SCHED + if (user) { + /* + * Do not allow realtime tasks into groups that have no runtime + * assigned. + */ + if (rt_bandwidth_enabled() && rt_policy(policy) && + task_group(p)->rt_bandwidth.rt_runtime == 0) { + __task_rq_unlock(rq); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + return -EPERM; + } + } +#endif + /* recheck policy now with rq lock held */ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; -- cgit v1.2.3 From b9b76dfaac6fa2c289ee8a005be637afd2da7e2f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 3 Jun 2010 23:34:09 +0200 Subject: tracing: Fix null pointer deref with SEND_SIG_FORCED BUG: unable to handle kernel NULL pointer dereference at 0000000000000006 IP: [] ftrace_raw_event_signal_generate+0x87/0x140 TP_STORE_SIGINFO() forgets about SEND_SIG_FORCED, fix. We should probably export is_si_special() and change TP_STORE_SIGINFO() to use it in the longer term. 
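For context, a hedged sketch of the longer-term cleanup hinted at above, assuming is_si_special() (true for SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED) were made visible to this header; it is illustrative only and is meant to be equivalent to what the one-line fix below achieves:

#define TP_STORE_SIGINFO(__entry, info)				\
	do {							\
		if (is_si_special(info)) {			\
			__entry->errno = 0;			\
			__entry->code = (info == SEND_SIG_PRIV) ?	\
					SI_KERNEL : SI_USER;	\
		} else {					\
			__entry->errno = info->si_errno;	\
			__entry->code  = info->si_code;		\
		}						\
	} while (0)
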
Signed-off-by: Oleg Nesterov Acked-by: Roland McGrath Cc: Steven Rostedt Cc: Andrew Morton Cc: Jason Baron Cc: Masami Hiramatsu Cc: 2.6.33.x-2.6.34.x LKML-Reference: <20100603213409.GA8307@redhat.com> Signed-off-by: Frederic Weisbecker --- include/trace/events/signal.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index 814566c99d29..17df43464df0 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h @@ -10,7 +10,8 @@ #define TP_STORE_SIGINFO(__entry, info) \ do { \ - if (info == SEND_SIG_NOINFO) { \ + if (info == SEND_SIG_NOINFO || \ + info == SEND_SIG_FORCED) { \ __entry->errno = 0; \ __entry->code = SI_USER; \ } else if (info == SEND_SIG_PRIV) { \ -- cgit v1.2.3 From 0b5649278e39a068aaf91399941bab1b4a4a3cc2 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 9 Jun 2010 10:37:18 +1000 Subject: writeback: pay attention to wbc->nr_to_write in write_cache_pages If a filesystem writes more than one page in ->writepage, write_cache_pages fails to notice this and continues to attempt writeback when wbc->nr_to_write has gone negative - this trace was captured from XFS: wbc_writeback_start: towrt=1024 wbc_writepage: towrt=1024 wbc_writepage: towrt=0 wbc_writepage: towrt=-1 wbc_writepage: towrt=-5 wbc_writepage: towrt=-21 wbc_writepage: towrt=-85 This has adverse effects on filesystem writeback behaviour. write_cache_pages() needs to terminate after a certain number of pages are written, not after a certain number of calls to ->writepage are made. This is a regression introduced by 17bc6c30cf6bfffd816bdc53682dd46fc34a2cf4 ("vfs: Add no_nrwrite_index_update writeback control flag"), but cannot be reverted directly due to subsequent bug fixes that have gone in on top of it. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Linus Torvalds --- include/linux/writeback.h | 9 --------- include/trace/events/ext4.h | 5 +---- mm/page-writeback.c | 15 +++++---------- 3 files changed, 6 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/writeback.h b/include/linux/writeback.h index f64134653a8c..d63ef8f9609f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -56,15 +56,6 @@ struct writeback_control { unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ unsigned more_io:1; /* more io to be dispatched */ - /* - * write_cache_pages() won't update wbc->nr_to_write and - * mapping->writeback_index if no_nrwrite_index_update - * is set. 
write_cache_pages() may write more than we - * requested and we want to make sure nr_to_write and - * writeback_index are updated in a consistent manner - * so we use a single control to update them - */ - unsigned no_nrwrite_index_update:1; }; /* diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index f5b1ba90e952..f3865c7b4166 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -306,7 +306,6 @@ TRACE_EVENT(ext4_da_writepages_result, __field( int, pages_written ) __field( long, pages_skipped ) __field( char, more_io ) - __field( char, no_nrwrite_index_update ) __field( pgoff_t, writeback_index ) ), @@ -317,16 +316,14 @@ TRACE_EVENT(ext4_da_writepages_result, __entry->pages_written = pages_written; __entry->pages_skipped = wbc->pages_skipped; __entry->more_io = wbc->more_io; - __entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update; __entry->writeback_index = inode->i_mapping->writeback_index; ), - TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld more_io %d no_nrwrite_index_update %d writeback_index %lu", + TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld more_io %d writeback_index %lu", jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, __entry->ret, __entry->pages_written, __entry->pages_skipped, __entry->more_io, - __entry->no_nrwrite_index_update, (unsigned long) __entry->writeback_index) ); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5fa63bdf52e4..b3dbb8040ed5 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -835,7 +835,6 @@ int write_cache_pages(struct address_space *mapping, pgoff_t done_index; int cycled; int range_whole = 0; - long nr_to_write = wbc->nr_to_write; pagevec_init(&pvec, 0); if (wbc->range_cyclic) { @@ -935,11 +934,10 @@ continue_unlock: done = 1; break; } - } + } - if (nr_to_write > 0) { - nr_to_write--; - if (nr_to_write == 0 && + if (wbc->nr_to_write > 0) { + if (--wbc->nr_to_write == 0 && wbc->sync_mode == WB_SYNC_NONE) { /* * We stop writing back only if we are @@ -970,11 +968,8 @@ continue_unlock: end = writeback_index - 1; goto retry; } - if (!wbc->no_nrwrite_index_update) { - if (wbc->range_cyclic || (range_whole && nr_to_write > 0)) - mapping->writeback_index = done_index; - wbc->nr_to_write = nr_to_write; - } + if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) + mapping->writeback_index = done_index; return ret; } -- cgit v1.2.3 From 79907d89c397b8bc2e05b347ec94e928ea919d33 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Wed, 9 Jun 2010 09:39:49 +0100 Subject: misc: Fix allocation 'borrowed' by vhost_net 10, 233 is allocated officially to /dev/kmview which is shipping in Ubuntu and Debian distributions. vhost_net seem to have borrowed it without making a proper request and this causes regressions in the other distributions. vhost_net can use a dynamic minor so use that instead. Also update the file with a comment to try and avoid future misunderstandings. cc: stable@kernel.org Signed-off-by: Alan Cox [ We should have caught this before 2.6.34 got released. 
- Linus ] Signed-off-by: Linus Torvalds --- drivers/vhost/net.c | 2 +- include/linux/miscdevice.h | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 0f41c9195e9b..df5b6b971f26 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -637,7 +637,7 @@ const static struct file_operations vhost_net_fops = { }; static struct miscdevice vhost_net_misc = { - VHOST_NET_MINOR, + MISC_DYNAMIC_MINOR, "vhost-net", &vhost_net_fops, }; diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index b631c46cffd9..f6c9b7dcb9fd 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -3,6 +3,12 @@ #include #include +/* + * These allocations are managed by device@lanana.org. If you use an + * entry that is not in assigned your entry may well be moved and + * reassigned, or set dynamic if a fixed value is not justified. + */ + #define PSMOUSE_MINOR 1 #define MS_BUSMOUSE_MINOR 2 #define ATIXL_BUSMOUSE_MINOR 3 @@ -30,7 +36,6 @@ #define HPET_MINOR 228 #define FUSE_MINOR 229 #define KVM_MINOR 232 -#define VHOST_NET_MINOR 233 #define BTRFS_MINOR 234 #define AUTOFS_MINOR 235 #define MISC_DYNAMIC_MINOR 255 -- cgit v1.2.3 From dd4c4f17d722ffeb2515bf781400675a30fcead7 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Fri, 28 May 2010 16:32:14 -0400 Subject: suspend: Move NVS save/restore code to generic suspend functionality Saving platform non-volatile state may be required for suspend to RAM as well as hibernation. Move it to more generic code. Signed-off-by: Matthew Garrett Acked-by: Rafael J. Wysocki Tested-by: Maxim Levitsky Signed-off-by: Len Brown --- arch/x86/kernel/e820.c | 2 +- drivers/acpi/sleep.c | 12 ++-- include/linux/suspend.h | 26 ++++----- kernel/power/Kconfig | 9 +-- kernel/power/Makefile | 2 +- kernel/power/hibernate_nvs.c | 136 ------------------------------------------- kernel/power/nvs.c | 136 +++++++++++++++++++++++++++++++++++++++++++ kernel/power/suspend.c | 6 ++ 8 files changed, 168 insertions(+), 161 deletions(-) delete mode 100644 kernel/power/hibernate_nvs.c create mode 100644 kernel/power/nvs.c (limited to 'include') diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 7bca3c6a02fb..0d6fc71bedb1 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -729,7 +729,7 @@ static int __init e820_mark_nvs_memory(void) struct e820entry *ei = &e820.map[i]; if (ei->type == E820_NVS) - hibernate_nvs_register(ei->addr, ei->size); + suspend_nvs_register(ei->addr, ei->size); } return 0; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 4ab2275b4461..bcaa6efa8136 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -393,7 +393,7 @@ static int acpi_hibernation_begin(void) { int error; - error = s4_no_nvs ? 0 : hibernate_nvs_alloc(); + error = s4_no_nvs ? 
0 : suspend_nvs_alloc(); if (!error) { acpi_target_sleep_state = ACPI_STATE_S4; acpi_sleep_tts_switch(acpi_target_sleep_state); @@ -407,7 +407,7 @@ static int acpi_hibernation_pre_snapshot(void) int error = acpi_pm_prepare(); if (!error) - hibernate_nvs_save(); + suspend_nvs_save(); return error; } @@ -432,7 +432,7 @@ static int acpi_hibernation_enter(void) static void acpi_hibernation_finish(void) { - hibernate_nvs_free(); + suspend_nvs_free(); acpi_pm_finish(); } @@ -452,7 +452,7 @@ static void acpi_hibernation_leave(void) panic("ACPI S4 hardware signature mismatch"); } /* Restore the NVS memory area */ - hibernate_nvs_restore(); + suspend_nvs_restore(); } static int acpi_pm_pre_restore(void) @@ -501,7 +501,7 @@ static int acpi_hibernation_begin_old(void) if (!error) { if (!s4_no_nvs) - error = hibernate_nvs_alloc(); + error = suspend_nvs_alloc(); if (!error) acpi_target_sleep_state = ACPI_STATE_S4; } @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void) int error = acpi_pm_disable_gpes(); if (!error) - hibernate_nvs_save(); + suspend_nvs_save(); return error; } diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5e781d824e6d..bc7d6bb4cd8e 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -256,22 +256,22 @@ static inline int hibernate(void) { return -ENOSYS; } static inline bool system_entering_hibernation(void) { return false; } #endif /* CONFIG_HIBERNATION */ -#ifdef CONFIG_HIBERNATION_NVS -extern int hibernate_nvs_register(unsigned long start, unsigned long size); -extern int hibernate_nvs_alloc(void); -extern void hibernate_nvs_free(void); -extern void hibernate_nvs_save(void); -extern void hibernate_nvs_restore(void); -#else /* CONFIG_HIBERNATION_NVS */ -static inline int hibernate_nvs_register(unsigned long a, unsigned long b) +#ifdef CONFIG_SUSPEND_NVS +extern int suspend_nvs_register(unsigned long start, unsigned long size); +extern int suspend_nvs_alloc(void); +extern void suspend_nvs_free(void); +extern void suspend_nvs_save(void); +extern void suspend_nvs_restore(void); +#else /* CONFIG_SUSPEND_NVS */ +static inline int suspend_nvs_register(unsigned long a, unsigned long b) { return 0; } -static inline int hibernate_nvs_alloc(void) { return 0; } -static inline void hibernate_nvs_free(void) {} -static inline void hibernate_nvs_save(void) {} -static inline void hibernate_nvs_restore(void) {} -#endif /* CONFIG_HIBERNATION_NVS */ +static inline int suspend_nvs_alloc(void) { return 0; } +static inline void suspend_nvs_free(void) {} +static inline void suspend_nvs_save(void) {} +static inline void suspend_nvs_restore(void) {} +#endif /* CONFIG_SUSPEND_NVS */ #ifdef CONFIG_PM_SLEEP void save_processor_state(void); diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 5c36ea9d55d2..ca6066a6952e 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -99,9 +99,13 @@ config PM_SLEEP_ADVANCED_DEBUG depends on PM_ADVANCED_DEBUG default n +config SUSPEND_NVS + bool + config SUSPEND bool "Suspend to RAM and standby" depends on PM && ARCH_SUSPEND_POSSIBLE + select SUSPEND_NVS if HAS_IOMEM default y ---help--- Allow the system to enter sleep states in which main memory is @@ -130,13 +134,10 @@ config SUSPEND_FREEZER Turning OFF this setting is NOT recommended! If in doubt, say Y. 
-config HIBERNATION_NVS - bool - config HIBERNATION bool "Hibernation (aka 'suspend to disk')" depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE - select HIBERNATION_NVS if HAS_IOMEM + select SUSPEND_NVS if HAS_IOMEM ---help--- Enable the suspend to disk (STD) functionality, which is usually called "hibernation" in user interfaces. STD checkpoints the diff --git a/kernel/power/Makefile b/kernel/power/Makefile index 524e058dcf06..f9063c6b185d 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -10,6 +10,6 @@ obj-$(CONFIG_SUSPEND) += suspend.o obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ block_io.o -obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o +obj-$(CONFIG_SUSPEND_NVS) += nvs.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/hibernate_nvs.c b/kernel/power/hibernate_nvs.c deleted file mode 100644 index fdcad9ed5a7b..000000000000 --- a/kernel/power/hibernate_nvs.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory - * - * Copyright (C) 2008,2009 Rafael J. Wysocki , Novell Inc. - * - * This file is released under the GPLv2. - */ - -#include -#include -#include -#include -#include -#include - -/* - * Platforms, like ACPI, may want us to save some memory used by them during - * hibernation and to restore the contents of this memory during the subsequent - * resume. The code below implements a mechanism allowing us to do that. - */ - -struct nvs_page { - unsigned long phys_start; - unsigned int size; - void *kaddr; - void *data; - struct list_head node; -}; - -static LIST_HEAD(nvs_list); - -/** - * hibernate_nvs_register - register platform NVS memory region to save - * @start - physical address of the region - * @size - size of the region - * - * The NVS region need not be page-aligned (both ends) and we arrange - * things so that the data from page-aligned addresses in this region will - * be copied into separate RAM pages. - */ -int hibernate_nvs_register(unsigned long start, unsigned long size) -{ - struct nvs_page *entry, *next; - - while (size > 0) { - unsigned int nr_bytes; - - entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); - if (!entry) - goto Error; - - list_add_tail(&entry->node, &nvs_list); - entry->phys_start = start; - nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); - entry->size = (size < nr_bytes) ? 
size : nr_bytes; - - start += entry->size; - size -= entry->size; - } - return 0; - - Error: - list_for_each_entry_safe(entry, next, &nvs_list, node) { - list_del(&entry->node); - kfree(entry); - } - return -ENOMEM; -} - -/** - * hibernate_nvs_free - free data pages allocated for saving NVS regions - */ -void hibernate_nvs_free(void) -{ - struct nvs_page *entry; - - list_for_each_entry(entry, &nvs_list, node) - if (entry->data) { - free_page((unsigned long)entry->data); - entry->data = NULL; - if (entry->kaddr) { - iounmap(entry->kaddr); - entry->kaddr = NULL; - } - } -} - -/** - * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions - */ -int hibernate_nvs_alloc(void) -{ - struct nvs_page *entry; - - list_for_each_entry(entry, &nvs_list, node) { - entry->data = (void *)__get_free_page(GFP_KERNEL); - if (!entry->data) { - hibernate_nvs_free(); - return -ENOMEM; - } - } - return 0; -} - -/** - * hibernate_nvs_save - save NVS memory regions - */ -void hibernate_nvs_save(void) -{ - struct nvs_page *entry; - - printk(KERN_INFO "PM: Saving platform NVS memory\n"); - - list_for_each_entry(entry, &nvs_list, node) - if (entry->data) { - entry->kaddr = ioremap(entry->phys_start, entry->size); - memcpy(entry->data, entry->kaddr, entry->size); - } -} - -/** - * hibernate_nvs_restore - restore NVS memory regions - * - * This function is going to be called with interrupts disabled, so it - * cannot iounmap the virtual addresses used to access the NVS region. - */ -void hibernate_nvs_restore(void) -{ - struct nvs_page *entry; - - printk(KERN_INFO "PM: Restoring platform NVS memory\n"); - - list_for_each_entry(entry, &nvs_list, node) - if (entry->data) - memcpy(entry->kaddr, entry->data, entry->size); -} diff --git a/kernel/power/nvs.c b/kernel/power/nvs.c new file mode 100644 index 000000000000..1836db60bbb6 --- /dev/null +++ b/kernel/power/nvs.c @@ -0,0 +1,136 @@ +/* + * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory + * + * Copyright (C) 2008,2009 Rafael J. Wysocki , Novell Inc. + * + * This file is released under the GPLv2. + */ + +#include +#include +#include +#include +#include +#include + +/* + * Platforms, like ACPI, may want us to save some memory used by them during + * suspend and to restore the contents of this memory during the subsequent + * resume. The code below implements a mechanism allowing us to do that. + */ + +struct nvs_page { + unsigned long phys_start; + unsigned int size; + void *kaddr; + void *data; + struct list_head node; +}; + +static LIST_HEAD(nvs_list); + +/** + * suspend_nvs_register - register platform NVS memory region to save + * @start - physical address of the region + * @size - size of the region + * + * The NVS region need not be page-aligned (both ends) and we arrange + * things so that the data from page-aligned addresses in this region will + * be copied into separate RAM pages. + */ +int suspend_nvs_register(unsigned long start, unsigned long size) +{ + struct nvs_page *entry, *next; + + while (size > 0) { + unsigned int nr_bytes; + + entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL); + if (!entry) + goto Error; + + list_add_tail(&entry->node, &nvs_list); + entry->phys_start = start; + nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); + entry->size = (size < nr_bytes) ? 
size : nr_bytes; + + start += entry->size; + size -= entry->size; + } + return 0; + + Error: + list_for_each_entry_safe(entry, next, &nvs_list, node) { + list_del(&entry->node); + kfree(entry); + } + return -ENOMEM; +} + +/** + * suspend_nvs_free - free data pages allocated for saving NVS regions + */ +void suspend_nvs_free(void) +{ + struct nvs_page *entry; + + list_for_each_entry(entry, &nvs_list, node) + if (entry->data) { + free_page((unsigned long)entry->data); + entry->data = NULL; + if (entry->kaddr) { + iounmap(entry->kaddr); + entry->kaddr = NULL; + } + } +} + +/** + * suspend_nvs_alloc - allocate memory necessary for saving NVS regions + */ +int suspend_nvs_alloc(void) +{ + struct nvs_page *entry; + + list_for_each_entry(entry, &nvs_list, node) { + entry->data = (void *)__get_free_page(GFP_KERNEL); + if (!entry->data) { + suspend_nvs_free(); + return -ENOMEM; + } + } + return 0; +} + +/** + * suspend_nvs_save - save NVS memory regions + */ +void suspend_nvs_save(void) +{ + struct nvs_page *entry; + + printk(KERN_INFO "PM: Saving platform NVS memory\n"); + + list_for_each_entry(entry, &nvs_list, node) + if (entry->data) { + entry->kaddr = ioremap(entry->phys_start, entry->size); + memcpy(entry->data, entry->kaddr, entry->size); + } +} + +/** + * suspend_nvs_restore - restore NVS memory regions + * + * This function is going to be called with interrupts disabled, so it + * cannot iounmap the virtual addresses used to access the NVS region. + */ +void suspend_nvs_restore(void) +{ + struct nvs_page *entry; + + printk(KERN_INFO "PM: Restoring platform NVS memory\n"); + + list_for_each_entry(entry, &nvs_list, node) + if (entry->data) + memcpy(entry->kaddr, entry->data, entry->size); +} diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 56e7dbb8b996..f37cb7dd4402 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -16,6 +16,12 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include "power.h" -- cgit v1.2.3 From fb76dd10b91146e9cefbb3cd4e6812c5a95ee43b Mon Sep 17 00:00:00 2001 From: Luotao Fu Date: Thu, 10 Jun 2010 12:05:23 -0700 Subject: Input: matrix_keypad - add support for clustered irq This one adds support of a combined irq source for the whole matrix keypad. This can be useful if all rows and columns of the keypad are e.g. connected to a GPIO expander, which only has one interrupt line for all events on every single GPIO. 
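As a rough usage sketch (the IRQ macro and the values below are hypothetical, not part of this patch), a board whose keypad sits behind such an expander would populate the two new platform-data fields and leave the per-row interrupt path unused; with clustered_irq left at zero the driver keeps requesting one interrupt per row GPIO as before:

	static struct matrix_keypad_platform_data expander_keypad_pdata = {
		/* keymap and row/col GPIO arrays omitted for brevity */
		.debounce_ms		= 10,
		.clustered_irq		= EXPANDER_KEYPAD_IRQ,	/* hypothetical: one IRQ for the whole matrix */
		.clustered_irq_flags	= IRQF_TRIGGER_FALLING,
		.active_low		= true,
		.wakeup			= true,
	};
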
Signed-off-by: Luotao Fu Acked-by: Eric Miao Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/matrix_keypad.c | 108 ++++++++++++++++++++++++--------- include/linux/input/matrix_keypad.h | 6 ++ 2 files changed, 86 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index b443e088fd3c..b02e4268e18f 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -37,6 +37,7 @@ struct matrix_keypad { spinlock_t lock; bool scan_pending; bool stopped; + bool gpio_all_disabled; }; /* @@ -87,8 +88,12 @@ static void enable_row_irqs(struct matrix_keypad *keypad) const struct matrix_keypad_platform_data *pdata = keypad->pdata; int i; - for (i = 0; i < pdata->num_row_gpios; i++) - enable_irq(gpio_to_irq(pdata->row_gpios[i])); + if (pdata->clustered_irq > 0) + enable_irq(pdata->clustered_irq); + else { + for (i = 0; i < pdata->num_row_gpios; i++) + enable_irq(gpio_to_irq(pdata->row_gpios[i])); + } } static void disable_row_irqs(struct matrix_keypad *keypad) @@ -96,8 +101,12 @@ static void disable_row_irqs(struct matrix_keypad *keypad) const struct matrix_keypad_platform_data *pdata = keypad->pdata; int i; - for (i = 0; i < pdata->num_row_gpios; i++) - disable_irq_nosync(gpio_to_irq(pdata->row_gpios[i])); + if (pdata->clustered_irq > 0) + disable_irq_nosync(pdata->clustered_irq); + else { + for (i = 0; i < pdata->num_row_gpios; i++) + disable_irq_nosync(gpio_to_irq(pdata->row_gpios[i])); + } } /* @@ -216,45 +225,69 @@ static void matrix_keypad_stop(struct input_dev *dev) } #ifdef CONFIG_PM -static int matrix_keypad_suspend(struct device *dev) +static void matrix_keypad_enable_wakeup(struct matrix_keypad *keypad) { - struct platform_device *pdev = to_platform_device(dev); - struct matrix_keypad *keypad = platform_get_drvdata(pdev); const struct matrix_keypad_platform_data *pdata = keypad->pdata; + unsigned int gpio; int i; - matrix_keypad_stop(keypad->input_dev); + if (pdata->clustered_irq > 0) { + if (enable_irq_wake(pdata->clustered_irq) == 0) + keypad->gpio_all_disabled = true; + } else { - if (device_may_wakeup(&pdev->dev)) { for (i = 0; i < pdata->num_row_gpios; i++) { if (!test_bit(i, keypad->disabled_gpios)) { - unsigned int gpio = pdata->row_gpios[i]; + gpio = pdata->row_gpios[i]; if (enable_irq_wake(gpio_to_irq(gpio)) == 0) __set_bit(i, keypad->disabled_gpios); } } } - - return 0; } -static int matrix_keypad_resume(struct device *dev) +static void matrix_keypad_disable_wakeup(struct matrix_keypad *keypad) { - struct platform_device *pdev = to_platform_device(dev); - struct matrix_keypad *keypad = platform_get_drvdata(pdev); const struct matrix_keypad_platform_data *pdata = keypad->pdata; + unsigned int gpio; int i; - if (device_may_wakeup(&pdev->dev)) { + if (pdata->clustered_irq > 0) { + if (keypad->gpio_all_disabled) { + disable_irq_wake(pdata->clustered_irq); + keypad->gpio_all_disabled = false; + } + } else { for (i = 0; i < pdata->num_row_gpios; i++) { if (test_and_clear_bit(i, keypad->disabled_gpios)) { - unsigned int gpio = pdata->row_gpios[i]; - + gpio = pdata->row_gpios[i]; disable_irq_wake(gpio_to_irq(gpio)); } } } +} + +static int matrix_keypad_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct matrix_keypad *keypad = platform_get_drvdata(pdev); + + matrix_keypad_stop(keypad->input_dev); + + if (device_may_wakeup(&pdev->dev)) + matrix_keypad_enable_wakeup(keypad); + + return 0; +} + +static int 
matrix_keypad_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct matrix_keypad *keypad = platform_get_drvdata(pdev); + + if (device_may_wakeup(&pdev->dev)) + matrix_keypad_disable_wakeup(keypad); matrix_keypad_start(keypad->input_dev); @@ -296,17 +329,31 @@ static int __devinit init_matrix_gpio(struct platform_device *pdev, gpio_direction_input(pdata->row_gpios[i]); } - for (i = 0; i < pdata->num_row_gpios; i++) { - err = request_irq(gpio_to_irq(pdata->row_gpios[i]), + if (pdata->clustered_irq > 0) { + err = request_irq(pdata->clustered_irq, matrix_keypad_interrupt, - IRQF_DISABLED | - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + pdata->clustered_irq_flags, "matrix-keypad", keypad); if (err) { dev_err(&pdev->dev, - "Unable to acquire interrupt for GPIO line %i\n", - pdata->row_gpios[i]); - goto err_free_irqs; + "Unable to acquire clustered interrupt\n"); + goto err_free_rows; + } + } else { + for (i = 0; i < pdata->num_row_gpios; i++) { + err = request_irq(gpio_to_irq(pdata->row_gpios[i]), + matrix_keypad_interrupt, + IRQF_DISABLED | + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + "matrix-keypad", keypad); + if (err) { + dev_err(&pdev->dev, + "Unable to acquire interrupt " + "for GPIO line %i\n", + pdata->row_gpios[i]); + goto err_free_irqs; + } } } @@ -418,11 +465,16 @@ static int __devexit matrix_keypad_remove(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 0); - for (i = 0; i < pdata->num_row_gpios; i++) { - free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad); - gpio_free(pdata->row_gpios[i]); + if (pdata->clustered_irq > 0) { + free_irq(pdata->clustered_irq, keypad); + } else { + for (i = 0; i < pdata->num_row_gpios; i++) + free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad); } + for (i = 0; i < pdata->num_row_gpios; i++) + gpio_free(pdata->row_gpios[i]); + for (i = 0; i < pdata->num_col_gpios; i++) gpio_free(pdata->col_gpios[i]); diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index c964cd7f436a..80352ad6581a 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h @@ -41,6 +41,9 @@ struct matrix_keymap_data { * @col_scan_delay_us: delay, measured in microseconds, that is * needed before we can keypad after activating column gpio * @debounce_ms: debounce interval in milliseconds + * @clustered_irq: may be specified if interrupts of all row/column GPIOs + * are bundled to one single irq + * @clustered_irq_flags: flags that are needed for the clustered irq * @active_low: gpio polarity * @wakeup: controls whether the device should be set up as wakeup * source @@ -63,6 +66,9 @@ struct matrix_keypad_platform_data { /* key debounce interval in milli-second */ unsigned int debounce_ms; + unsigned int clustered_irq; + unsigned int clustered_irq_flags; + bool active_low; bool wakeup; bool no_autorepeat; -- cgit v1.2.3 From c5444198ca210498e8ac0ba121b4cd3537aa12f7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 8 Jun 2010 18:15:15 +0200 Subject: writeback: simplify and split bdi_start_writeback bdi_start_writeback now never gets a superblock passed, so we can just remove that case. And to further untangle the code and flatten the call stack split it into two trivial helpers for it's two callers. 
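In other words, the old bdi_start_writeback(bdi, NULL, 0) background case becomes an explicit helper call; a sketch of the two call sites after the split (conditions abbreviated, see the mm/page-writeback.c hunks below):

	/* balance_dirty_pages(): kick background writeback past the threshold */
	if (nr_reclaimable > background_thresh)		/* abbreviated check */
		bdi_start_background_writeback(bdi);

	/* laptop_mode_timer_fn(): write out a fixed number of pages */
	bdi_start_writeback(&q->backing_dev_info, nr_pages);
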
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/fs-writeback.c | 32 ++++++++++++++++++++------------ include/linux/backing-dev.h | 4 ++-- mm/page-writeback.c | 5 ++--- 3 files changed, 24 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 4fcca4f74940..0079bf59b583 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -200,7 +200,6 @@ static void bdi_queue_work_onstack(struct wb_writeback_args *args) /** * bdi_start_writeback - start writeback * @bdi: the backing device to write from - * @sb: write inodes from this super_block * @nr_pages: the number of pages to write * * Description: @@ -209,25 +208,34 @@ static void bdi_queue_work_onstack(struct wb_writeback_args *args) * completion. Caller need not hold sb s_umount semaphore. * */ -void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, - long nr_pages) +void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) { struct wb_writeback_args args = { - .sb = sb, .sync_mode = WB_SYNC_NONE, .nr_pages = nr_pages, .range_cyclic = 1, }; - /* - * We treat @nr_pages=0 as the special case to do background writeback, - * ie. to sync pages until the background dirty threshold is reached. - */ - if (!nr_pages) { - args.nr_pages = LONG_MAX; - args.for_background = 1; - } + bdi_alloc_queue_work(bdi, &args); +} +/** + * bdi_start_background_writeback - start background writeback + * @bdi: the backing device to write from + * + * Description: + * This does WB_SYNC_NONE background writeback. The IO is only + * started when this function returns, we make no guarentees on + * completion. Caller need not hold sb s_umount semaphore. + */ +void bdi_start_background_writeback(struct backing_dev_info *bdi) +{ + struct wb_writeback_args args = { + .sync_mode = WB_SYNC_NONE, + .nr_pages = LONG_MAX, + .for_background = 1, + .range_cyclic = 1, + }; bdi_alloc_queue_work(bdi, &args); } diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index aee5f6ce166e..9ae2889096b6 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -105,8 +105,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent, int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); void bdi_unregister(struct backing_dev_info *bdi); int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); -void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, - long nr_pages); +void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); +void bdi_start_background_writeback(struct backing_dev_info *bdi); int bdi_writeback_task(struct bdi_writeback *wb); int bdi_has_dirty_io(struct backing_dev_info *bdi); void bdi_arm_supers_timer(void); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index bbd396ac9546..54f28bd493d3 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping, (!laptop_mode && ((global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS)) > background_thresh))) - bdi_start_writeback(bdi, NULL, 0); + bdi_start_background_writeback(bdi); } void set_page_dirty_balance(struct page *page, int page_mkwrite) @@ -705,9 +705,8 @@ void laptop_mode_timer_fn(unsigned long data) * We want to write everything out, not just down to the dirty * threshold */ - if (bdi_has_dirty_io(&q->backing_dev_info)) - bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages); + 
bdi_start_writeback(&q->backing_dev_info, nr_pages); } /* -- cgit v1.2.3 From fd247447c1d94a79d5cfc647430784306b3a8323 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 8 Jun 2010 10:49:08 +0200 Subject: ACPI / ACPICA: Fix low-level GPE manipulation code ACPICA uses acpi_ev_enable_gpe() for enabling GPEs at the low level, which is incorrect, because this function only enables the GPE if the corresponding bit in its enable register's enable_for_run mask is set. This causes acpi_set_gpe() to work incorrectly if used for enabling GPEs that were not previously enabled with acpi_enable_gpe(). As a result, among other things, wakeup-only GPEs are never enabled by acpi_enable_wakeup_device(), so the devices that use them are unable to wake up the system. To fix this issue remove acpi_ev_enable_gpe() and its counterpart acpi_ev_disable_gpe() and replace acpi_hw_low_disable_gpe() with acpi_hw_low_set_gpe() that will be used instead to manipulate GPE enable bits at the low level. Make the users of acpi_ev_enable_gpe() and acpi_ev_disable_gpe() call acpi_hw_low_set_gpe() instead and make sure that GPE enable masks are only updated by acpi_enable_gpe() and acpi_disable_gpe() when GPE reference counters change from 0 to 1 and from 1 to 0, respectively. Signed-off-by: Rafael J. Wysocki Signed-off-by: Len Brown --- drivers/acpi/acpica/acevents.h | 4 -- drivers/acpi/acpica/achware.h | 3 +- drivers/acpi/acpica/evgpe.c | 108 +---------------------------------------- drivers/acpi/acpica/evxfevnt.c | 59 +++++++++++++++++++--- drivers/acpi/acpica/hwgpe.c | 26 ++++++++-- include/acpi/actypes.h | 2 +- 6 files changed, 79 insertions(+), 123 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index 5e094a28cf54..138bbb521930 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -78,10 +78,6 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node, acpi_status acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info); -acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); - -acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); - struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, u32 gpe_number); diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index c46277d179f0..32391588e163 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -93,7 +93,8 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width); u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, struct acpi_gpe_register_info *gpe_register_info); -acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); +acpi_status +acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action); acpi_status acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info); diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 57eeb3bde41e..66cd03835d6e 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -99,106 +99,6 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info) return_ACPI_STATUS(AE_OK); } -/******************************************************************************* - * - * FUNCTION: acpi_ev_enable_gpe - * - * PARAMETERS: gpe_event_info - GPE to enable - * - * RETURN: Status - * - * DESCRIPTION: Hardware-enable a GPE. 
Always enables the GPE, regardless - * of type or number of references. - * - * Note: The GPE lock should be already acquired when this function is called. - * - ******************************************************************************/ - -acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) -{ - acpi_status status; - - - ACPI_FUNCTION_TRACE(ev_enable_gpe); - - - /* - * We will only allow a GPE to be enabled if it has either an - * associated method (_Lxx/_Exx) or a handler. Otherwise, the - * GPE will be immediately disabled by acpi_ev_gpe_dispatch the - * first time it fires. - */ - if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { - return_ACPI_STATUS(AE_NO_HANDLER); - } - - /* Ensure the HW enable masks are current */ - - status = acpi_ev_update_gpe_enable_masks(gpe_event_info); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* Clear the GPE (of stale events) */ - - status = acpi_hw_clear_gpe(gpe_event_info); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* Enable the requested GPE */ - - status = acpi_hw_write_gpe_enable_reg(gpe_event_info); - return_ACPI_STATUS(status); -} - -/******************************************************************************* - * - * FUNCTION: acpi_ev_disable_gpe - * - * PARAMETERS: gpe_event_info - GPE to disable - * - * RETURN: Status - * - * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE, - * regardless of the type or number of references. - * - * Note: The GPE lock should be already acquired when this function is called. - * - ******************************************************************************/ - -acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) -{ - acpi_status status; - - ACPI_FUNCTION_TRACE(ev_disable_gpe); - - - /* - * Note: Always disable the GPE, even if we think that that it is already - * disabled. It is possible that the AML or some other code has enabled - * the GPE behind our back. - */ - - /* Ensure the HW enable masks are current */ - - status = acpi_ev_update_gpe_enable_masks(gpe_event_info); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* - * Always H/W disable this GPE, even if we don't know the GPE type. - * Simply clear the enable bit for this particular GPE, but do not - * write out the current GPE enable mask since this may inadvertently - * enable GPEs too early. An example is a rogue GPE that has arrived - * during ACPICA initialization - possibly because AML or other code - * has enabled the GPE. - */ - status = acpi_hw_low_disable_gpe(gpe_event_info); - return_ACPI_STATUS(status); -} - /******************************************************************************* * @@ -450,10 +350,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) return_VOID; } - /* Update the GPE register masks for return to enabled state */ - - (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); - /* * Take a snapshot of the GPE info for this level - we copy the info to * prevent a race condition with remove_handler/remove_block. @@ -606,7 +502,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) * Disable the GPE, so it doesn't keep firing before the method has a * chance to run (it runs asynchronously with interrupts enabled). 
*/ - status = acpi_ev_disable_gpe(gpe_event_info); + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to disable GPE[0x%2X]", @@ -643,7 +539,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) * Disable the GPE. The GPE will remain disabled a handler * is installed or ACPICA is restarted. */ - status = acpi_ev_disable_gpe(gpe_event_info); + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to disable GPE[0x%2X]", diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 7c7bbb4d402c..e3d9f5c8e53d 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -199,6 +199,44 @@ acpi_status acpi_enable_event(u32 event, u32 flags) ACPI_EXPORT_SYMBOL(acpi_enable_event) +/******************************************************************************* + * + * FUNCTION: acpi_clear_and_enable_gpe + * + * PARAMETERS: gpe_event_info - GPE to enable + * + * RETURN: Status + * + * DESCRIPTION: Clear the given GPE from stale events and enable it. + * + ******************************************************************************/ +static acpi_status +acpi_clear_and_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) +{ + acpi_status status; + + /* + * We will only allow a GPE to be enabled if it has either an + * associated method (_Lxx/_Exx) or a handler. Otherwise, the + * GPE will be immediately disabled by acpi_ev_gpe_dispatch the + * first time it fires. + */ + if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { + return_ACPI_STATUS(AE_NO_HANDLER); + } + + /* Clear the GPE (of stale events) */ + status = acpi_hw_clear_gpe(gpe_event_info); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Enable the requested GPE */ + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); + + return_ACPI_STATUS(status); +} + /******************************************************************************* * * FUNCTION: acpi_set_gpe @@ -240,11 +278,11 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) switch (action) { case ACPI_GPE_ENABLE: - status = acpi_ev_enable_gpe(gpe_event_info); + status = acpi_clear_and_enable_gpe(gpe_event_info); break; case ACPI_GPE_DISABLE: - status = acpi_ev_disable_gpe(gpe_event_info); + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); break; default: @@ -307,7 +345,11 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type) gpe_event_info->runtime_count++; if (gpe_event_info->runtime_count == 1) { - status = acpi_ev_enable_gpe(gpe_event_info); + status = acpi_ev_update_gpe_enable_masks(gpe_event_info); + if (ACPI_SUCCESS(status)) { + status = acpi_clear_and_enable_gpe(gpe_event_info); + } + if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count--; goto unlock_and_exit; @@ -334,7 +376,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type) */ gpe_event_info->wakeup_count++; if (gpe_event_info->wakeup_count == 1) { - (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); + status = acpi_ev_update_gpe_enable_masks(gpe_event_info); } } @@ -394,7 +436,12 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type gpe_event_info->runtime_count--; if (!gpe_event_info->runtime_count) { - status = acpi_ev_disable_gpe(gpe_event_info); + status = 
acpi_ev_update_gpe_enable_masks(gpe_event_info); + if (ACPI_SUCCESS(status)) { + status = acpi_hw_low_set_gpe(gpe_event_info, + ACPI_GPE_DISABLE); + } + if (ACPI_FAILURE(status)) { gpe_event_info->runtime_count++; goto unlock_and_exit; @@ -415,7 +462,7 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type gpe_event_info->wakeup_count--; if (!gpe_event_info->wakeup_count) { - (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); + status = acpi_ev_update_gpe_enable_masks(gpe_event_info); } } diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index d989b8e786cc..40388e23e103 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -78,23 +78,27 @@ u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, /****************************************************************************** * - * FUNCTION: acpi_hw_low_disable_gpe + * FUNCTION: acpi_hw_low_set_gpe * * PARAMETERS: gpe_event_info - Info block for the GPE to be disabled + * action - Enable or disable * * RETURN: Status * - * DESCRIPTION: Disable a single GPE in the enable register. + * DESCRIPTION: Enable or disable a single GPE in its enable register. * ******************************************************************************/ -acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) +acpi_status +acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) { struct acpi_gpe_register_info *gpe_register_info; acpi_status status; u32 enable_mask; u32 register_bit; + ACPI_FUNCTION_ENTRY(); + /* Get the info block for the entire GPE register */ gpe_register_info = gpe_event_info->register_info; @@ -109,11 +113,23 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) return (status); } - /* Clear just the bit that corresponds to this GPE */ + /* Set ot clear just the bit that corresponds to this GPE */ register_bit = acpi_hw_gpe_register_bit(gpe_event_info, gpe_register_info); - ACPI_CLEAR_BIT(enable_mask, register_bit); + switch (action) { + case ACPI_GPE_ENABLE: + ACPI_SET_BIT(enable_mask, register_bit); + break; + + case ACPI_GPE_DISABLE: + ACPI_CLEAR_BIT(enable_mask, register_bit); + break; + + default: + ACPI_ERROR((AE_INFO, "Invalid action\n")); + return (AE_BAD_PARAMETER); + } /* Write the updated enable mask */ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index de5e99a99530..6881f5b7b7be 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -663,7 +663,7 @@ typedef u32 acpi_event_status; #define ACPI_GPE_MAX 0xFF #define ACPI_NUM_GPE 256 -/* Actions for acpi_set_gpe */ +/* Actions for acpi_set_gpe and acpi_hw_low_set_gpe */ #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 -- cgit v1.2.3 From c9a8bbb7704cbf515c0fc68970abbe4e91d68521 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 8 Jun 2010 10:49:45 +0200 Subject: ACPI / ACPICA: Avoid writing full enable masks to GPE registers ACPICA uses acpi_hw_write_gpe_enable_reg() to re-enable a GPE after an event signaled by it has been handled. However, this function writes the entire GPE enable mask to the GPE's enable register which may not be correct. Namely, if one of the other GPEs in the same register was previously enabled by acpi_enable_gpe() and subsequently disabled using acpi_set_gpe(), acpi_hw_write_gpe_enable_reg() will re-enable it along with the target GPE. 
To fix this issue rework acpi_hw_write_gpe_enable_reg() so that it calls acpi_hw_low_set_gpe() with a special action value, ACPI_GPE_COND_ENABLE, that will make it only enable the GPE if the corresponding bit in its register's enable_for_run mask is set. Signed-off-by: Rafael J. Wysocki Signed-off-by: Len Brown --- drivers/acpi/acpica/hwgpe.c | 18 +++++------------- include/acpi/actypes.h | 1 + 2 files changed, 6 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 40388e23e103..3450309c2786 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -118,6 +118,10 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) register_bit = acpi_hw_gpe_register_bit(gpe_event_info, gpe_register_info); switch (action) { + case ACPI_GPE_COND_ENABLE: + if (!(register_bit & gpe_register_info->enable_for_run)) + return (AE_BAD_PARAMETER); + case ACPI_GPE_ENABLE: ACPI_SET_BIT(enable_mask, register_bit); break; @@ -154,23 +158,11 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) acpi_status acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info) { - struct acpi_gpe_register_info *gpe_register_info; acpi_status status; ACPI_FUNCTION_ENTRY(); - /* Get the info block for the entire GPE register */ - - gpe_register_info = gpe_event_info->register_info; - if (!gpe_register_info) { - return (AE_NOT_EXIST); - } - - /* Write the entire GPE (runtime) enable register */ - - status = acpi_hw_write(gpe_register_info->enable_for_run, - &gpe_register_info->enable_address); - + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE); return (status); } diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 6881f5b7b7be..15a4c68fad3b 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -667,6 +667,7 @@ typedef u32 acpi_event_status; #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 +#define ACPI_GPE_COND_ENABLE 2 /* gpe_types for acpi_enable_gpe and acpi_disable_gpe */ -- cgit v1.2.3 From d70326689b70b35527765bd3decbb1229459e928 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Wed, 26 May 2010 11:06:12 +0800 Subject: ACPICA: Fix namestring associated with AE_NO_HANDLER exception Was incorrectly AE_WAKE_ONLY_GPE. Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- include/acpi/acexcep.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 5958d7845bd5..17714beb868e 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -212,7 +212,7 @@ char const *acpi_gbl_exception_names_env[] = { "AE_NO_GLOBAL_LOCK", "AE_ABORT_METHOD", "AE_SAME_HANDLER", - "AE_WAKE_ONLY_GPE", + "AE_NO_HANDLER", "AE_OWNER_ID_LIMIT" }; -- cgit v1.2.3 From b681f7d9ab4d697a214fa4428795790c3a937a89 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Wed, 26 May 2010 11:50:48 +0800 Subject: ACPICA: Truncate I/O addresses to 16 bits for Windows compatibility This feature is optional and is enabled if the BIOS requests any Windows OSI strings. It can also be enabled by the host OS. 
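The mechanism itself is just a mask on the port I/O path plus a flag that the host OS or the OSI check during namespace init can flip; roughly, per the hwvalid.c and nsinit.c hunks that follow:

	/* acpi_hw_read_port() / acpi_hw_write_port() */
	if (acpi_gbl_truncate_io_addresses)
		address &= ACPI_UINT16_MAX;	/* clamp to the 16-bit I/O space */

	/* acpi_ns_initialize_devices(): BIOS requested a Windows OSI string */
	if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000)
		acpi_gbl_truncate_io_addresses = TRUE;
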
Signed-off-by: Matthew Garrett Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Len Brown --- drivers/acpi/acpica/acglobal.h | 8 ++++++++ drivers/acpi/acpica/hwvalid.c | 12 ++++++++++++ drivers/acpi/acpica/nsinit.c | 9 +++++++++ include/acpi/acpixf.h | 1 + 4 files changed, 30 insertions(+) (limited to 'include') diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 9070f1fe8f17..899d68afc3c5 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -125,6 +125,14 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE); */ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE); +/* + * Optionally truncate I/O addresses to 16 bits. Provides compatibility + * with other ACPI implementations. NOTE: During ACPICA initialization, + * this value is set to TRUE if any Windows OSI strings have been + * requested by the BIOS. + */ +u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE); + /* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */ struct acpi_table_fadt acpi_gbl_FADT; diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index c10d587c1641..e1d9c777b213 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -222,6 +222,12 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width) u32 one_byte; u32 i; + /* Truncate address to 16 bits if requested */ + + if (acpi_gbl_truncate_io_addresses) { + address &= ACPI_UINT16_MAX; + } + /* Validate the entire request and perform the I/O */ status = acpi_hw_validate_io_request(address, width); @@ -279,6 +285,12 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width) acpi_status status; u32 i; + /* Truncate address to 16 bits if requested */ + + if (acpi_gbl_truncate_io_addresses) { + address &= ACPI_UINT16_MAX; + } + /* Validate the entire request and perform the I/O */ status = acpi_hw_validate_io_request(address, width); diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 9bd6f050f299..4e5272c313e0 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -193,6 +193,15 @@ acpi_status acpi_ns_initialize_devices(void) acpi_ns_init_one_device, NULL, &info, NULL); + /* + * Any _OSI requests should be completed by now. If the BIOS has + * requested any Windows OSI strings, we will always truncate + * I/O addresses to 16 bits -- for Windows compatibility. + */ + if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) { + acpi_gbl_truncate_io_addresses = TRUE; + } + ACPI_FREE(info.evaluate_info); if (ACPI_FAILURE(status)) { goto error_exit; diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 0e4ab1fe5966..1371cc997393 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -69,6 +69,7 @@ extern acpi_name acpi_gbl_trace_method_name; extern u32 acpi_gbl_trace_flags; extern u8 acpi_gbl_enable_aml_debug_object; extern u8 acpi_gbl_copy_dsdt_locally; +extern u8 acpi_gbl_truncate_io_addresses; extern u32 acpi_current_gpe_count; extern struct acpi_table_fadt acpi_gbl_FADT; -- cgit v1.2.3 From dc66c74de6f4238020db3e2041d4aca5c5b3e9bc Mon Sep 17 00:00:00 2001 From: Philipp Reisner Date: Wed, 2 Jun 2010 14:31:29 +0200 Subject: drbd: Fixed a race between disk-attach and unexpected state changes This was a very hard to trigger race condition. 
If we got a state packet from the peer, after drbd_nl_disk() has already changed the disk state to D_NEGOTIATING but after_state_ch() was not yet run by the worker, then receive_state() might called drbd_sync_handshake(), which in turn crashed when accessing p_uuid. Signed-off-by: Philipp Reisner Signed-off-by: Lars Ellenberg --- drivers/block/drbd/drbd_main.c | 2 -- drivers/block/drbd/drbd_nl.c | 6 ++++++ include/linux/drbd.h | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 6b077f93acc6..7258c95e895e 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1236,8 +1236,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, /* Last part of the attaching process ... */ if (ns.conn >= C_CONNECTED && os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { - kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */ - mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */ drbd_send_sizes(mdev, 0, 0); /* to start sync... */ drbd_send_uuids(mdev); drbd_send_state(mdev); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 632e3245d1bb..2151f18b21de 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1114,6 +1114,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp mdev->new_state_tmp.i = ns.i; ns.i = os.i; ns.disk = D_NEGOTIATING; + + /* We expect to receive up-to-date UUIDs soon. + To avoid a race in receive_state, free p_uuid while + holding req_lock. I.e. atomic with the state change */ + kfree(mdev->p_uuid); + mdev->p_uuid = NULL; } rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 30da4ae48972..b8d2516668aa 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -53,7 +53,7 @@ extern const char *drbd_buildtag(void); -#define REL_VERSION "8.3.8rc2" +#define REL_VERSION "8.3.8" #define API_VERSION 88 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 94 -- cgit v1.2.3 From da931a931da85218add949266238c54b5fecd37f Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 15 Jun 2010 09:52:37 +1000 Subject: agp: drop vmalloc flag. Since the code that was too ugly to live is upstream, we can use it now, instead of rolling our own. 
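The pattern being adopted is the usual kmalloc-with-vmalloc-fallback idiom, where the free path asks the pointer itself how it was allocated instead of carrying a flag around; a minimal sketch mirroring the generic.c hunk below:

	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
	if (!buf)
		buf = vmalloc(size);

	/* ... use buf ... */

	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		kfree(buf);
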
Signed-off-by: Dave Airlie --- drivers/char/agp/generic.c | 4 +--- include/linux/agp_backend.h | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 4b51982fd23a..4e414417730b 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -97,20 +97,18 @@ EXPORT_SYMBOL(agp_flush_chipset); void agp_alloc_page_array(size_t size, struct agp_memory *mem) { mem->pages = NULL; - mem->vmalloc_flag = false; if (size <= 2*PAGE_SIZE) mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY); if (mem->pages == NULL) { mem->pages = vmalloc(size); - mem->vmalloc_flag = true; } } EXPORT_SYMBOL(agp_alloc_page_array); void agp_free_page_array(struct agp_memory *mem) { - if (mem->vmalloc_flag) { + if (is_vmalloc_addr(mem->pages)) { vfree(mem->pages); } else { kfree(mem->pages); diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 9101ed64f803..09ea4a1e9505 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -79,7 +79,6 @@ struct agp_memory { u32 physical; bool is_bound; bool is_flushed; - bool vmalloc_flag; /* list of agp_memory mapped to the aperture */ struct list_head mapped_list; /* DMA-mapped addresses */ -- cgit v1.2.3 From 46cd09a7de52cad464d35a75924b79984646288d Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Fri, 11 Jun 2010 12:16:57 +0200 Subject: fix typos concerning "acquire" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Uwe Kleine-König Signed-off-by: Jiri Kosina --- arch/arm/mach-omap2/sleep34xx.S | 2 +- include/linux/lru_cache.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index d522cd70bf53..ba53191ae4c5 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -60,7 +60,7 @@ #define SDRC_DLLA_CTRL_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL) .text -/* Function to aquire the semaphore in scratchpad */ +/* Function to acquire the semaphore in scratchpad */ ENTRY(lock_scratchpad_sem) stmfd sp!, {lr} @ save registers on stack wait_sem: diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h index de48d167568b..78fbf24f357a 100644 --- a/include/linux/lru_cache.h +++ b/include/linux/lru_cache.h @@ -262,7 +262,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char * @lc: the lru cache to operate on * * Note that the reference counts and order on the active and lru lists may - * still change. Returns true if we aquired the lock. + * still change. Returns true if we acquired the lock. 
*/ static inline int lc_try_lock(struct lru_cache *lc) { -- cgit v1.2.3 From 65155b3708137fabee865dc4da822763c0c41208 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Fri, 11 Jun 2010 12:17:01 +0200 Subject: fix typos concerning "management" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Uwe Kleine-König Signed-off-by: Jiri Kosina --- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2 +- drivers/media/dvb/siano/smscoreapi.c | 2 +- drivers/net/benet/be_hw.h | 2 +- drivers/scsi/fcoe/fcoe.c | 4 ++-- drivers/scsi/mpt2sas/mpt2sas_base.h | 2 +- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 4 ++-- drivers/scsi/pm8001/pm8001_hwi.c | 2 +- drivers/scsi/qla2xxx/qla_iocb.c | 2 +- drivers/scsi/qla2xxx/qla_nx.h | 2 +- include/linux/ide.h | 2 +- include/linux/if_link.h | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index f8fbbc67a406..7745394c3e63 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1013,7 +1013,7 @@ int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id) } /* - * Stream managment + * Stream management */ static void vmw_stream_destroy(struct vmw_resource *res) diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c index 0c87a3c3899a..a19f649666d5 100644 --- a/drivers/media/dvb/siano/smscoreapi.c +++ b/drivers/media/dvb/siano/smscoreapi.c @@ -1297,7 +1297,7 @@ int smsclient_sendrequest(struct smscore_client_t *client, EXPORT_SYMBOL_GPL(smsclient_sendrequest); -/* old GPIO managments implementation */ +/* old GPIO managements implementation */ int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin, struct smscore_config_gpio *pinconfig) { diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index 063026de4957..3f1b7c3965bb 100644 --- a/drivers/net/benet/be_hw.h +++ b/drivers/net/benet/be_hw.h @@ -52,7 +52,7 @@ */ #define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */ -/********* Power managment (WOL) **********/ +/********* Power management (WOL) **********/ #define PCICFG_PM_CONTROL_OFFSET 0x44 #define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */ diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 9276121db1ef..bc39542481a4 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2452,7 +2452,7 @@ module_exit(fcoe_exit); * @fp: response frame, or error encoded in a pointer (timeout) * @arg: pointer the the fcoe_ctlr structure * - * This handles MAC address managment for FCoE, then passes control on to + * This handles MAC address management for FCoE, then passes control on to * the libfc FLOGI response handler. */ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) @@ -2484,7 +2484,7 @@ done: * @fp: response frame, or error encoded in a pointer (timeout) * @arg: pointer the the fcoe_ctlr structure * - * This handles MAC address managment for FCoE, then passes control on to + * This handles MAC address management for FCoE, then passes control on to * the libfc LOGO response handler. 
*/ static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index b4afe431ac1e..41c29a86e834 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -474,7 +474,7 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); * @shost_recovery: host reset in progress * @ioc_reset_in_progress_lock: * @ioc_link_reset_in_progress: phy/hard reset in progress - * @ignore_loginfos: ignore loginfos during task managment + * @ignore_loginfos: ignore loginfos during task management * @remove_host: flag for when driver unloads, to avoid sending dev resets * @wait_for_port_enable_to_complete: * @msix_enable: flag indicating msix is enabled diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index c5ff26a2a51d..06d645a36f1b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2979,7 +2979,7 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) /* host recovery or link resets sent via IOCTLs */ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) return SCSI_MLQUEUE_HOST_BUSY; - /* device busy with task managment */ + /* device busy with task management */ else if (sas_device_priv_data->block || sas_target_priv_data->tm_busy) return SCSI_MLQUEUE_DEVICE_BUSY; /* device has been deleted */ @@ -6845,7 +6845,7 @@ _scsih_init(void) /* queuecommand callback hander */ scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done); - /* task managment callback handler */ + /* task management callback handler */ tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done); /* base internal commands callback handler */ diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 5ff8261c5d67..0e05e8a22167 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -4152,7 +4152,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_chip_ssp_tm_req - built the task managment command. + * pm8001_chip_ssp_tm_req - built the task management command. * @pm8001_ha: our hba card information. * @ccb: the ccb information. * @tmf: task management function. 
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 8ef945365412..782b30d0eea1 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1129,7 +1129,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32( MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); fcp_cmnd->task_attribute = 0; - fcp_cmnd->task_managment = 0; + fcp_cmnd->task_management = 0; cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index f8f99a5ea532..1b44d013f151 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -832,7 +832,7 @@ struct fcp_cmnd { struct scsi_lun lun; uint8_t crn; uint8_t task_attribute; - uint8_t task_managment; + uint8_t task_management; uint8_t additional_cdb_len; uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */ }; diff --git a/include/linux/ide.h b/include/linux/ide.h index 3239d1c10acb..c2c598ed4eed 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -458,7 +458,7 @@ enum { IDE_DFLAG_DOORLOCKING = (1 << 15), /* disallow DMA */ IDE_DFLAG_NODMA = (1 << 16), - /* powermanagment told us not to do anything, so sleep nicely */ + /* powermanagement told us not to do anything, so sleep nicely */ IDE_DFLAG_BLOCKED = (1 << 17), /* sleeping & sleep field valid */ IDE_DFLAG_SLEEPING = (1 << 18), diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 85c812db5a3f..9d8f0807daed 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -233,7 +233,7 @@ enum macvlan_mode { MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */ }; -/* SR-IOV virtual function managment section */ +/* SR-IOV virtual function management section */ enum { IFLA_VF_INFO_UNSPEC, -- cgit v1.2.3 From b70e4f0529c089b00d0a6da13106db4de1ada4c7 Mon Sep 17 00:00:00 2001 From: Wu Zhangjin Date: Mon, 21 Jun 2010 19:09:09 +0800 Subject: tracing: Fix undeclared ENOSYS in include/linux/tracepoint.h The header file include/linux/tracepoint.h may be included without include/linux/errno.h and then the compiler will fail on building for undelcared ENOSYS. This patch fixes this problem via including to include/linux/tracepoint.h. Signed-off-by: Wu Zhangjin LKML-Reference: <1277118549-622-1-git-send-email-wuzhangjin@gmail.com> Signed-off-by: Steven Rostedt --- include/linux/tracepoint.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 9a59d1f98cd4..103d1b61aacb 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -14,6 +14,7 @@ * See the file COPYING for more details. */ +#include #include #include -- cgit v1.2.3 From 63a6404d8ae693e71ab27c4f9c4032aa29113e92 Mon Sep 17 00:00:00 2001 From: Henrik Rydberg Date: Thu, 10 Jun 2010 12:05:24 -0700 Subject: Input: evdev - use driver hint to compute size of event buffer Some devices, in particular MT devices, produce a lot of data. This may lead to overflowing of the event queues in evdev driver, which by default are fairly small. Let the drivers hint the average number of events per packet generated by the device, and use that information when computing the buffer size evdev should use for the device. 
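For instance (the driver and the numbers here are purely illustrative), a multitouch driver reporting around ten contacts with half a dozen events each could hint ~60 events per packet, and evdev would then round the buffer up to a power of two:

	/* in the driver's setup path */
	input_set_events_per_packet(input_dev, 60);

	/* evdev_compute_buffer_size() then yields
	 * roundup_pow_of_two(max(60 * 8, 64)) == 512 buffered events
	 * instead of the old fixed 64.
	 */
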
Signed-off-by: Henrik Rydberg Acked-by: Chase Douglas Signed-off-by: Dmitry Torokhov --- drivers/input/evdev.c | 9 +++++++-- include/linux/input.h | 21 +++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index cff7bf9351a8..30836c05edd7 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -10,7 +10,8 @@ #define EVDEV_MINOR_BASE 64 #define EVDEV_MINORS 32 -#define EVDEV_MIN_BUFFER_SIZE 64 +#define EVDEV_MIN_BUFFER_SIZE 64U +#define EVDEV_BUF_PACKETS 8 #include #include @@ -245,7 +246,11 @@ static int evdev_release(struct inode *inode, struct file *file) static unsigned int evdev_compute_buffer_size(struct input_dev *dev) { - return EVDEV_MIN_BUFFER_SIZE; + unsigned int n_events = + max(dev->hint_events_per_packet * EVDEV_BUF_PACKETS, + EVDEV_MIN_BUFFER_SIZE); + + return roundup_pow_of_two(n_events); } static int evdev_open(struct inode *inode, struct file *file) diff --git a/include/linux/input.h b/include/linux/input.h index 6fcc9101beeb..cc524c8b6703 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1063,6 +1063,10 @@ struct ff_effect { * @sndbit: bitmap of sound effects supported by the device * @ffbit: bitmap of force feedback effects supported by the device * @swbit: bitmap of switches present on the device + * @hint_events_per_packet: average number of events generated by the + * device in a packet (between EV_SYN/SYN_REPORT events). Used by + * event handlers to estimate size of the buffer needed to hold + * events. * @keycodemax: size of keycode table * @keycodesize: size of elements in keycode table * @keycode: map of scancodes to keycodes for this device @@ -1140,6 +1144,8 @@ struct input_dev { unsigned long ffbit[BITS_TO_LONGS(FF_CNT)]; unsigned long swbit[BITS_TO_LONGS(SW_CNT)]; + unsigned int hint_events_per_packet; + unsigned int keycodemax; unsigned int keycodesize; void *keycode; @@ -1408,6 +1414,21 @@ static inline void input_mt_sync(struct input_dev *dev) void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); +/** + * input_set_events_per_packet - tell handlers about the driver event rate + * @dev: the input device used by the driver + * @n_events: the average number of events between calls to input_sync() + * + * If the event rate sent from a device is unusually large, use this + * function to set the expected event rate. This will allow handlers + * to set up an appropriate buffer size for the event stream, in order + * to minimize information loss. + */ +static inline void input_set_events_per_packet(struct input_dev *dev, int n_events) +{ + dev->hint_events_per_packet = n_events; +} + static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat) { dev->absmin[axis] = min; -- cgit v1.2.3 From 69a4af606ed4836faa2ec69b1d217f384b8235e7 Mon Sep 17 00:00:00 2001 From: Xiaolong CHEN Date: Thu, 24 Jun 2010 19:10:40 -0700 Subject: Input: adp5588-keys - support GPI events for ADP5588 devices A column or row configured as a GPI can be programmed to be part of the key event table and therefore also capable of generating a key event interrupt. A key event interrupt caused by a GPI follows the same process flow as a key event interrupt caused by a key press. GPIs configured as part of the key event table allow single key switches and other GPI interrupts to be monitored. 
As part of the event table, GPIs are represented by the decimal value 97 (0x61 or 1100001) through the decimal value 114 (0x72 or 1110010). See table below for GPI event number assignments for rows and columns. GPI Event Number Assignments for Rows Row0 Row1 Row2 Row3 Row4 Row5 Row6 Row7 97 98 99 100 101 102 103 104 GPI Event Number Assignments for Cols Col0 Col1 Col2 Col3 Col4 Col5 Col6 Col7 Col8 Col9 105 106 107 108 109 110 111 112 113 114 Signed-off-by: Xiaolong Chen Signed-off-by: Yuanbo Ye Signed-off-by: Tao Hu Acked-by: Michael Hennerich Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/adp5588-keys.c | 134 ++++++++++++++++++++++++++++++++-- include/linux/i2c/adp5588.h | 36 +++++++++ 2 files changed, 163 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index 4771ab172b59..4ef789ef1042 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -67,6 +67,8 @@ struct adp5588_kpad { struct delayed_work work; unsigned long delay; unsigned short keycode[ADP5588_KEYMAPSIZE]; + const struct adp5588_gpi_map *gpimap; + unsigned short gpimapsize; }; static int adp5588_read(struct i2c_client *client, u8 reg) @@ -84,12 +86,37 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val) return i2c_smbus_write_byte_data(client, reg, val); } +static void adp5588_report_events(struct adp5588_kpad *kpad, int ev_cnt) +{ + int i, j; + + for (i = 0; i < ev_cnt; i++) { + int key = adp5588_read(kpad->client, Key_EVENTA + i); + int key_val = key & KEY_EV_MASK; + + if (key_val >= GPI_PIN_BASE && key_val <= GPI_PIN_END) { + for (j = 0; j < kpad->gpimapsize; j++) { + if (key_val == kpad->gpimap[j].pin) { + input_report_switch(kpad->input, + kpad->gpimap[j].sw_evt, + key & KEY_EV_PRESSED); + break; + } + } + } else { + input_report_key(kpad->input, + kpad->keycode[key_val - 1], + key & KEY_EV_PRESSED); + } + } +} + static void adp5588_work(struct work_struct *work) { struct adp5588_kpad *kpad = container_of(work, struct adp5588_kpad, work.work); struct i2c_client *client = kpad->client; - int i, key, status, ev_cnt; + int status, ev_cnt; status = adp5588_read(client, INT_STAT); @@ -99,12 +126,7 @@ static void adp5588_work(struct work_struct *work) if (status & KE_INT) { ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC; if (ev_cnt) { - for (i = 0; i < ev_cnt; i++) { - key = adp5588_read(client, Key_EVENTA + i); - input_report_key(kpad->input, - kpad->keycode[(key & KEY_EV_MASK) - 1], - key & KEY_EV_PRESSED); - } + adp5588_report_events(kpad, ev_cnt); input_sync(kpad->input); } } @@ -130,6 +152,7 @@ static int __devinit adp5588_setup(struct i2c_client *client) { struct adp5588_kpad_platform_data *pdata = client->dev.platform_data; int i, ret; + unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0; ret = adp5588_write(client, KP_GPIO1, KP_SEL(pdata->rows)); ret |= adp5588_write(client, KP_GPIO2, KP_SEL(pdata->cols) & 0xFF); @@ -144,6 +167,23 @@ static int __devinit adp5588_setup(struct i2c_client *client) for (i = 0; i < KEYP_MAX_EVENT; i++) ret |= adp5588_read(client, Key_EVENTA); + for (i = 0; i < pdata->gpimapsize; i++) { + unsigned short pin = pdata->gpimap[i].pin; + + if (pin <= GPI_PIN_ROW_END) { + evt_mode1 |= (1 << (pin - GPI_PIN_ROW_BASE)); + } else { + evt_mode2 |= ((1 << (pin - GPI_PIN_COL_BASE)) & 0xFF); + evt_mode3 |= ((1 << (pin - GPI_PIN_COL_BASE)) >> 8); + } + } + + if (pdata->gpimapsize) { + ret |= adp5588_write(client, GPI_EM1, 
evt_mode1); + ret |= adp5588_write(client, GPI_EM2, evt_mode2); + ret |= adp5588_write(client, GPI_EM3, evt_mode3); + } + ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT | OVR_FLOW_INT | K_LCK_INT | GPI_INT | KE_INT); /* Status is W1C */ @@ -158,6 +198,44 @@ static int __devinit adp5588_setup(struct i2c_client *client) return 0; } +static void __devinit adp5588_report_switch_state(struct adp5588_kpad *kpad) +{ + int gpi_stat1 = adp5588_read(kpad->client, GPIO_DAT_STAT1); + int gpi_stat2 = adp5588_read(kpad->client, GPIO_DAT_STAT2); + int gpi_stat3 = adp5588_read(kpad->client, GPIO_DAT_STAT3); + int gpi_stat_tmp, pin_loc; + int i; + + for (i = 0; i < kpad->gpimapsize; i++) { + unsigned short pin = kpad->gpimap[i].pin; + + if (pin <= GPI_PIN_ROW_END) { + gpi_stat_tmp = gpi_stat1; + pin_loc = pin - GPI_PIN_ROW_BASE; + } else if ((pin - GPI_PIN_COL_BASE) < 8) { + gpi_stat_tmp = gpi_stat2; + pin_loc = pin - GPI_PIN_COL_BASE; + } else { + gpi_stat_tmp = gpi_stat3; + pin_loc = pin - GPI_PIN_COL_BASE - 8; + } + + if (gpi_stat_tmp < 0) { + dev_err(&kpad->client->dev, + "Can't read GPIO_DAT_STAT switch %d default to OFF\n", + pin); + gpi_stat_tmp = 0; + } + + input_report_switch(kpad->input, + kpad->gpimap[i].sw_evt, + !(gpi_stat_tmp & (1 << pin_loc))); + } + + input_sync(kpad->input); +} + + static int __devinit adp5588_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -189,6 +267,37 @@ static int __devinit adp5588_probe(struct i2c_client *client, return -EINVAL; } + if (!pdata->gpimap && pdata->gpimapsize) { + dev_err(&client->dev, "invalid gpimap from pdata\n"); + return -EINVAL; + } + + if (pdata->gpimapsize > ADP5588_GPIMAPSIZE_MAX) { + dev_err(&client->dev, "invalid gpimapsize\n"); + return -EINVAL; + } + + for (i = 0; i < pdata->gpimapsize; i++) { + unsigned short pin = pdata->gpimap[i].pin; + + if (pin < GPI_PIN_BASE || pin > GPI_PIN_END) { + dev_err(&client->dev, "invalid gpi pin data\n"); + return -EINVAL; + } + + if (pin <= GPI_PIN_ROW_END) { + if (pin - GPI_PIN_ROW_BASE + 1 <= pdata->rows) { + dev_err(&client->dev, "invalid gpi row data\n"); + return -EINVAL; + } + } else { + if (pin - GPI_PIN_COL_BASE + 1 <= pdata->cols) { + dev_err(&client->dev, "invalid gpi col data\n"); + return -EINVAL; + } + } + } + if (!client->irq) { dev_err(&client->dev, "no IRQ?\n"); return -EINVAL; @@ -233,6 +342,9 @@ static int __devinit adp5588_probe(struct i2c_client *client, memcpy(kpad->keycode, pdata->keymap, pdata->keymapsize * input->keycodesize); + kpad->gpimap = pdata->gpimap; + kpad->gpimapsize = pdata->gpimapsize; + /* setup input device */ __set_bit(EV_KEY, input->evbit); @@ -243,6 +355,11 @@ static int __devinit adp5588_probe(struct i2c_client *client, __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); __clear_bit(KEY_RESERVED, input->keybit); + if (kpad->gpimapsize) + __set_bit(EV_SW, input->evbit); + for (i = 0; i < kpad->gpimapsize; i++) + __set_bit(kpad->gpimap[i].sw_evt, input->swbit); + error = input_register_device(input); if (error) { dev_err(&client->dev, "unable to register input device\n"); @@ -261,6 +378,9 @@ static int __devinit adp5588_probe(struct i2c_client *client, if (error) goto err_free_irq; + if (kpad->gpimapsize) + adp5588_report_switch_state(kpad); + device_init_wakeup(&client->dev, 1); i2c_set_clientdata(client, kpad); diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h index 02c9af374741..b5f57c498e24 100644 --- a/include/linux/i2c/adp5588.h +++ b/include/linux/i2c/adp5588.h @@ -78,6 +78,40 @@ #define ADP5588_KEYMAPSIZE 
80 +#define GPI_PIN_ROW0 97 +#define GPI_PIN_ROW1 98 +#define GPI_PIN_ROW2 99 +#define GPI_PIN_ROW3 100 +#define GPI_PIN_ROW4 101 +#define GPI_PIN_ROW5 102 +#define GPI_PIN_ROW6 103 +#define GPI_PIN_ROW7 104 +#define GPI_PIN_COL0 105 +#define GPI_PIN_COL1 106 +#define GPI_PIN_COL2 107 +#define GPI_PIN_COL3 108 +#define GPI_PIN_COL4 109 +#define GPI_PIN_COL5 110 +#define GPI_PIN_COL6 111 +#define GPI_PIN_COL7 112 +#define GPI_PIN_COL8 113 +#define GPI_PIN_COL9 114 + +#define GPI_PIN_ROW_BASE GPI_PIN_ROW0 +#define GPI_PIN_ROW_END GPI_PIN_ROW7 +#define GPI_PIN_COL_BASE GPI_PIN_COL0 +#define GPI_PIN_COL_END GPI_PIN_COL9 + +#define GPI_PIN_BASE GPI_PIN_ROW_BASE +#define GPI_PIN_END GPI_PIN_COL_END + +#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1) + +struct adp5588_gpi_map { + unsigned short pin; + unsigned short sw_evt; +}; + struct adp5588_kpad_platform_data { int rows; /* Number of rows */ int cols; /* Number of columns */ @@ -87,6 +121,8 @@ struct adp5588_kpad_platform_data { unsigned en_keylock:1; /* Enable Key Lock feature */ unsigned short unlock_key1; /* Unlock Key 1 */ unsigned short unlock_key2; /* Unlock Key 2 */ + const struct adp5588_gpi_map *gpimap; + unsigned short gpimapsize; }; struct adp5588_gpio_platform_data { -- cgit v1.2.3 From e27c729219ad24c8ac9a4b34cf192e56917565c5 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Fri, 25 Jun 2010 08:44:10 -0700 Subject: Input: add driver for ADXL345/346 Digital Accelerometers This is a driver for the ADXL345/346 Three-Axis Digital Accelerometers. Signed-off-by: Michael Hennerich Signed-off-by: Chris Verges Signed-off-by: Luotao Fu Signed-off-by: Barry Song Signed-off-by: Mike Frysinger Signed-off-by: Dmitry Torokhov --- drivers/input/misc/Kconfig | 37 ++ drivers/input/misc/Makefile | 3 + drivers/input/misc/adxl34x-i2c.c | 163 ++++++++ drivers/input/misc/adxl34x-spi.c | 145 +++++++ drivers/input/misc/adxl34x.c | 840 +++++++++++++++++++++++++++++++++++++++ drivers/input/misc/adxl34x.h | 30 ++ include/linux/input/adxl34x.h | 293 ++++++++++++++ 7 files changed, 1511 insertions(+) create mode 100644 drivers/input/misc/adxl34x-i2c.c create mode 100644 drivers/input/misc/adxl34x-spi.c create mode 100644 drivers/input/misc/adxl34x.c create mode 100644 drivers/input/misc/adxl34x.h create mode 100644 include/linux/input/adxl34x.h (limited to 'include') diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index c44b9eafc556..ede6d52fe95c 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -390,4 +390,41 @@ config INPUT_PCAP To compile this driver as a module, choose M here: the module will be called pcap_keys. +config INPUT_ADXL34X + tristate "Analog Devices ADXL34x Three-Axis Digital Accelerometer" + default n + help + Say Y here if you have a Accelerometer interface using the + ADXL345/6 controller, and your board-specific initialization + code includes that in its table of devices. + + This driver can use either I2C or SPI communication to the + ADXL345/6 controller. Select the appropriate method for + your system. + + If unsure, say N (but it's safe to say "Y"). + + To compile this driver as a module, choose M here: the + module will be called adxl34x. + +config INPUT_ADXL34X_I2C + tristate "support I2C bus connection" + depends on INPUT_ADXL34X && I2C + default y + help + Say Y here if you have ADXL345/6 hooked to an I2C bus. + + To compile this driver as a module, choose M here: the + module will be called adxl34x-i2c. 
+ +config INPUT_ADXL34X_SPI + tristate "support SPI bus connection" + depends on INPUT_ADXL34X && SPI + default y + help + Say Y here if you have ADXL345/6 hooked to a SPI bus. + + To compile this driver as a module, choose M here: the + module will be called adxl34x-spi. + endif diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 71fe57d8023f..97b5dc32df19 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -8,6 +8,9 @@ obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o obj-$(CONFIG_INPUT_AD714X) += ad714x.o obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o obj-$(CONFIG_INPUT_AD714X_SPI) += ad714x-spi.o +obj-$(CONFIG_INPUT_ADXL34X) += adxl34x.o +obj-$(CONFIG_INPUT_ADXL34X_I2C) += adxl34x-i2c.o +obj-$(CONFIG_INPUT_ADXL34X_SPI) += adxl34x-spi.o obj-$(CONFIG_INPUT_APANEL) += apanel.o obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c new file mode 100644 index 000000000000..76194b58bd0b --- /dev/null +++ b/drivers/input/misc/adxl34x-i2c.c @@ -0,0 +1,163 @@ +/* + * ADLX345/346 Three-Axis Digital Accelerometers (I2C Interface) + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Copyright (C) 2009 Michael Hennerich, Analog Devices Inc. + * Licensed under the GPL-2 or later. + */ + +#include /* BUS_I2C */ +#include +#include +#include +#include "adxl34x.h" + +static int adxl34x_smbus_read(struct device *dev, unsigned char reg) +{ + struct i2c_client *client = to_i2c_client(dev); + + return i2c_smbus_read_byte_data(client, reg); +} + +static int adxl34x_smbus_write(struct device *dev, + unsigned char reg, unsigned char val) +{ + struct i2c_client *client = to_i2c_client(dev); + + return i2c_smbus_write_byte_data(client, reg, val); +} + +static int adxl34x_smbus_read_block(struct device *dev, + unsigned char reg, int count, + void *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + + return i2c_smbus_read_i2c_block_data(client, reg, count, buf); +} + +static int adxl34x_i2c_read_block(struct device *dev, + unsigned char reg, int count, + void *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + int ret; + + ret = i2c_master_send(client, ®, 1); + if (ret < 0) + return ret; + + ret = i2c_master_recv(client, buf, count); + if (ret < 0) + return ret; + + if (ret != count) + return -EIO; + + return 0; +} + +static const struct adxl34x_bus_ops adx134x_smbus_bops = { + .bustype = BUS_I2C, + .write = adxl34x_smbus_write, + .read = adxl34x_smbus_read, + .read_block = adxl34x_smbus_read_block, +}; + +static const struct adxl34x_bus_ops adx134x_i2c_bops = { + .bustype = BUS_I2C, + .write = adxl34x_smbus_write, + .read = adxl34x_smbus_read, + .read_block = adxl34x_i2c_read_block, +}; + +static int __devinit adxl34x_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct adxl34x *ac; + int error; + + error = i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA); + if (!error) { + dev_err(&client->dev, "SMBUS Byte Data not Supported\n"); + return -EIO; + } + + ac = adxl34x_probe(&client->dev, client->irq, false, + i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_I2C_BLOCK) ? 
+ &adx134x_smbus_bops : &adx134x_i2c_bops); + if (IS_ERR(ac)) + return PTR_ERR(ac); + + i2c_set_clientdata(client, ac); + + return 0; +} + +static int __devexit adxl34x_i2c_remove(struct i2c_client *client) +{ + struct adxl34x *ac = i2c_get_clientdata(client); + + return adxl34x_remove(ac); +} + +#ifdef CONFIG_PM +static int adxl34x_suspend(struct i2c_client *client, pm_message_t message) +{ + struct adxl34x *ac = i2c_get_clientdata(client); + + adxl34x_disable(ac); + + return 0; +} + +static int adxl34x_resume(struct i2c_client *client) +{ + struct adxl34x *ac = i2c_get_clientdata(client); + + adxl34x_enable(ac); + + return 0; +} +#else +# define adxl34x_suspend NULL +# define adxl34x_resume NULL +#endif + +static const struct i2c_device_id adxl34x_id[] = { + { "adxl34x", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, adxl34x_id); + +static struct i2c_driver adxl34x_driver = { + .driver = { + .name = "adxl34x", + .owner = THIS_MODULE, + }, + .probe = adxl34x_i2c_probe, + .remove = __devexit_p(adxl34x_i2c_remove), + .suspend = adxl34x_suspend, + .resume = adxl34x_resume, + .id_table = adxl34x_id, +}; + +static int __init adxl34x_i2c_init(void) +{ + return i2c_add_driver(&adxl34x_driver); +} +module_init(adxl34x_i2c_init); + +static void __exit adxl34x_i2c_exit(void) +{ + i2c_del_driver(&adxl34x_driver); +} +module_exit(adxl34x_i2c_exit); + +MODULE_AUTHOR("Michael Hennerich "); +MODULE_DESCRIPTION("ADXL345/346 Three-Axis Digital Accelerometer I2C Bus Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c new file mode 100644 index 000000000000..7f992353ffdd --- /dev/null +++ b/drivers/input/misc/adxl34x-spi.c @@ -0,0 +1,145 @@ +/* + * ADLX345/346 Three-Axis Digital Accelerometers (SPI Interface) + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Copyright (C) 2009 Michael Hennerich, Analog Devices Inc. + * Licensed under the GPL-2 or later. + */ + +#include /* BUS_SPI */ +#include +#include +#include +#include "adxl34x.h" + +#define MAX_SPI_FREQ_HZ 5000000 +#define MAX_FREQ_NO_FIFODELAY 1500000 +#define ADXL34X_CMD_MULTB (1 << 6) +#define ADXL34X_CMD_READ (1 << 7) +#define ADXL34X_WRITECMD(reg) (reg & 0x3F) +#define ADXL34X_READCMD(reg) (ADXL34X_CMD_READ | (reg & 0x3F)) +#define ADXL34X_READMB_CMD(reg) (ADXL34X_CMD_READ | ADXL34X_CMD_MULTB \ + | (reg & 0x3F)) + +static int adxl34x_spi_read(struct device *dev, unsigned char reg) +{ + struct spi_device *spi = to_spi_device(dev); + unsigned char cmd; + + cmd = ADXL34X_READCMD(reg); + + return spi_w8r8(spi, cmd); +} + +static int adxl34x_spi_write(struct device *dev, + unsigned char reg, unsigned char val) +{ + struct spi_device *spi = to_spi_device(dev); + unsigned char buf[2]; + + buf[0] = ADXL34X_WRITECMD(reg); + buf[1] = val; + + return spi_write(spi, buf, sizeof(buf)); +} + +static int adxl34x_spi_read_block(struct device *dev, + unsigned char reg, int count, + void *buf) +{ + struct spi_device *spi = to_spi_device(dev); + ssize_t status; + + reg = ADXL34X_READMB_CMD(reg); + status = spi_write_then_read(spi, ®, 1, buf, count); + + return (status < 0) ? 
status : 0; +} + +static const struct adxl34x_bus_ops adx134x_spi_bops = { + .bustype = BUS_SPI, + .write = adxl34x_spi_write, + .read = adxl34x_spi_read, + .read_block = adxl34x_spi_read_block, +}; + +static int __devinit adxl34x_spi_probe(struct spi_device *spi) +{ + struct adxl34x *ac; + + /* don't exceed max specified SPI CLK frequency */ + if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) { + dev_err(&spi->dev, "SPI CLK %d Hz too fast\n", spi->max_speed_hz); + return -EINVAL; + } + + ac = adxl34x_probe(&spi->dev, spi->irq, + spi->max_speed_hz > MAX_FREQ_NO_FIFODELAY, + &adx134x_spi_bops); + + if (IS_ERR(ac)) + return PTR_ERR(ac); + + spi_set_drvdata(spi, ac); + + return 0; +} + +static int __devexit adxl34x_spi_remove(struct spi_device *spi) +{ + struct adxl34x *ac = dev_get_drvdata(&spi->dev); + + return adxl34x_remove(ac); +} + +#ifdef CONFIG_PM +static int adxl34x_suspend(struct spi_device *spi, pm_message_t message) +{ + struct adxl34x *ac = dev_get_drvdata(&spi->dev); + + adxl34x_disable(ac); + + return 0; +} + +static int adxl34x_resume(struct spi_device *spi) +{ + struct adxl34x *ac = dev_get_drvdata(&spi->dev); + + adxl34x_enable(ac); + + return 0; +} +#else +# define adxl34x_suspend NULL +# define adxl34x_resume NULL +#endif + +static struct spi_driver adxl34x_driver = { + .driver = { + .name = "adxl34x", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = adxl34x_spi_probe, + .remove = __devexit_p(adxl34x_spi_remove), + .suspend = adxl34x_suspend, + .resume = adxl34x_resume, +}; + +static int __init adxl34x_spi_init(void) +{ + return spi_register_driver(&adxl34x_driver); +} +module_init(adxl34x_spi_init); + +static void __exit adxl34x_spi_exit(void) +{ + spi_unregister_driver(&adxl34x_driver); +} +module_exit(adxl34x_spi_exit); + +MODULE_AUTHOR("Michael Hennerich "); +MODULE_DESCRIPTION("ADXL345/346 Three-Axis Digital Accelerometer SPI Bus Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c new file mode 100644 index 000000000000..07f9ef631540 --- /dev/null +++ b/drivers/input/misc/adxl34x.c @@ -0,0 +1,840 @@ +/* + * ADXL345/346 Three-Axis Digital Accelerometers + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Copyright (C) 2009 Michael Hennerich, Analog Devices Inc. + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adxl34x.h" + +/* ADXL345/6 Register Map */ +#define DEVID 0x00 /* R Device ID */ +#define THRESH_TAP 0x1D /* R/W Tap threshold */ +#define OFSX 0x1E /* R/W X-axis offset */ +#define OFSY 0x1F /* R/W Y-axis offset */ +#define OFSZ 0x20 /* R/W Z-axis offset */ +#define DUR 0x21 /* R/W Tap duration */ +#define LATENT 0x22 /* R/W Tap latency */ +#define WINDOW 0x23 /* R/W Tap window */ +#define THRESH_ACT 0x24 /* R/W Activity threshold */ +#define THRESH_INACT 0x25 /* R/W Inactivity threshold */ +#define TIME_INACT 0x26 /* R/W Inactivity time */ +#define ACT_INACT_CTL 0x27 /* R/W Axis enable control for activity and */ + /* inactivity detection */ +#define THRESH_FF 0x28 /* R/W Free-fall threshold */ +#define TIME_FF 0x29 /* R/W Free-fall time */ +#define TAP_AXES 0x2A /* R/W Axis control for tap/double tap */ +#define ACT_TAP_STATUS 0x2B /* R Source of tap/double tap */ +#define BW_RATE 0x2C /* R/W Data rate and power mode control */ +#define POWER_CTL 0x2D /* R/W Power saving features control */ +#define INT_ENABLE 0x2E /* R/W Interrupt enable control */ +#define INT_MAP 0x2F /* R/W Interrupt mapping control */ +#define INT_SOURCE 0x30 /* R Source of interrupts */ +#define DATA_FORMAT 0x31 /* R/W Data format control */ +#define DATAX0 0x32 /* R X-Axis Data 0 */ +#define DATAX1 0x33 /* R X-Axis Data 1 */ +#define DATAY0 0x34 /* R Y-Axis Data 0 */ +#define DATAY1 0x35 /* R Y-Axis Data 1 */ +#define DATAZ0 0x36 /* R Z-Axis Data 0 */ +#define DATAZ1 0x37 /* R Z-Axis Data 1 */ +#define FIFO_CTL 0x38 /* R/W FIFO control */ +#define FIFO_STATUS 0x39 /* R FIFO status */ +#define TAP_SIGN 0x3A /* R Sign and source for tap/double tap */ +/* Orientation ADXL346 only */ +#define ORIENT_CONF 0x3B /* R/W Orientation configuration */ +#define ORIENT 0x3C /* R Orientation status */ + +/* DEVIDs */ +#define ID_ADXL345 0xE5 +#define ID_ADXL346 0xE6 + +/* INT_ENABLE/INT_MAP/INT_SOURCE Bits */ +#define DATA_READY (1 << 7) +#define SINGLE_TAP (1 << 6) +#define DOUBLE_TAP (1 << 5) +#define ACTIVITY (1 << 4) +#define INACTIVITY (1 << 3) +#define FREE_FALL (1 << 2) +#define WATERMARK (1 << 1) +#define OVERRUN (1 << 0) + +/* ACT_INACT_CONTROL Bits */ +#define ACT_ACDC (1 << 7) +#define ACT_X_EN (1 << 6) +#define ACT_Y_EN (1 << 5) +#define ACT_Z_EN (1 << 4) +#define INACT_ACDC (1 << 3) +#define INACT_X_EN (1 << 2) +#define INACT_Y_EN (1 << 1) +#define INACT_Z_EN (1 << 0) + +/* TAP_AXES Bits */ +#define SUPPRESS (1 << 3) +#define TAP_X_EN (1 << 2) +#define TAP_Y_EN (1 << 1) +#define TAP_Z_EN (1 << 0) + +/* ACT_TAP_STATUS Bits */ +#define ACT_X_SRC (1 << 6) +#define ACT_Y_SRC (1 << 5) +#define ACT_Z_SRC (1 << 4) +#define ASLEEP (1 << 3) +#define TAP_X_SRC (1 << 2) +#define TAP_Y_SRC (1 << 1) +#define TAP_Z_SRC (1 << 0) + +/* BW_RATE Bits */ +#define LOW_POWER (1 << 4) +#define RATE(x) ((x) & 0xF) + +/* POWER_CTL Bits */ +#define PCTL_LINK (1 << 5) +#define PCTL_AUTO_SLEEP (1 << 4) +#define PCTL_MEASURE (1 << 3) +#define PCTL_SLEEP (1 << 2) +#define PCTL_WAKEUP(x) ((x) & 0x3) + +/* DATA_FORMAT Bits */ +#define SELF_TEST (1 << 7) +#define SPI (1 << 6) +#define INT_INVERT (1 << 5) +#define FULL_RES (1 << 3) +#define JUSTIFY (1 << 2) +#define RANGE(x) ((x) & 0x3) +#define RANGE_PM_2g 0 +#define RANGE_PM_4g 1 +#define RANGE_PM_8g 2 +#define RANGE_PM_16g 3 + +/* + * Maximum value our axis may get in full res mode for the input device + * (signed 13 bits) + */ +#define ADXL_FULLRES_MAX_VAL 4096 + +/* + * Maximum value 
our axis may get in fixed res mode for the input device + * (signed 10 bits) + */ +#define ADXL_FIXEDRES_MAX_VAL 512 + +/* FIFO_CTL Bits */ +#define FIFO_MODE(x) (((x) & 0x3) << 6) +#define FIFO_BYPASS 0 +#define FIFO_FIFO 1 +#define FIFO_STREAM 2 +#define FIFO_TRIGGER 3 +#define TRIGGER (1 << 5) +#define SAMPLES(x) ((x) & 0x1F) + +/* FIFO_STATUS Bits */ +#define FIFO_TRIG (1 << 7) +#define ENTRIES(x) ((x) & 0x3F) + +/* TAP_SIGN Bits ADXL346 only */ +#define XSIGN (1 << 6) +#define YSIGN (1 << 5) +#define ZSIGN (1 << 4) +#define XTAP (1 << 3) +#define YTAP (1 << 2) +#define ZTAP (1 << 1) + +/* ORIENT_CONF ADXL346 only */ +#define ORIENT_DEADZONE(x) (((x) & 0x7) << 4) +#define ORIENT_DIVISOR(x) ((x) & 0x7) + +/* ORIENT ADXL346 only */ +#define ADXL346_2D_VALID (1 << 6) +#define ADXL346_2D_ORIENT(x) (((x) & 0x3) >> 4) +#define ADXL346_3D_VALID (1 << 3) +#define ADXL346_3D_ORIENT(x) ((x) & 0x7) +#define ADXL346_2D_PORTRAIT_POS 0 /* +X */ +#define ADXL346_2D_PORTRAIT_NEG 1 /* -X */ +#define ADXL346_2D_LANDSCAPE_POS 2 /* +Y */ +#define ADXL346_2D_LANDSCAPE_NEG 3 /* -Y */ + +#define ADXL346_3D_FRONT 3 /* +X */ +#define ADXL346_3D_BACK 4 /* -X */ +#define ADXL346_3D_RIGHT 2 /* +Y */ +#define ADXL346_3D_LEFT 5 /* -Y */ +#define ADXL346_3D_TOP 1 /* +Z */ +#define ADXL346_3D_BOTTOM 6 /* -Z */ + +#undef ADXL_DEBUG + +#define ADXL_X_AXIS 0 +#define ADXL_Y_AXIS 1 +#define ADXL_Z_AXIS 2 + +#define AC_READ(ac, reg) ((ac)->bops->read((ac)->dev, reg)) +#define AC_WRITE(ac, reg, val) ((ac)->bops->write((ac)->dev, reg, val)) + +struct axis_triple { + int x; + int y; + int z; +}; + +struct adxl34x { + struct device *dev; + struct input_dev *input; + struct mutex mutex; /* reentrant protection for struct */ + struct adxl34x_platform_data pdata; + struct axis_triple swcal; + struct axis_triple hwcal; + struct axis_triple saved; + char phys[32]; + bool disabled; /* P: mutex */ + bool opened; /* P: mutex */ + bool fifo_delay; + int irq; + unsigned model; + unsigned int_mask; + + const struct adxl34x_bus_ops *bops; +}; + +static const struct adxl34x_platform_data adxl34x_default_init = { + .tap_threshold = 35, + .tap_duration = 3, + .tap_latency = 20, + .tap_window = 20, + .tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN, + .act_axis_control = 0xFF, + .activity_threshold = 6, + .inactivity_threshold = 4, + .inactivity_time = 3, + .free_fall_threshold = 8, + .free_fall_time = 0x20, + .data_rate = 8, + .data_range = ADXL_FULL_RES, + + .ev_type = EV_ABS, + .ev_code_x = ABS_X, /* EV_REL */ + .ev_code_y = ABS_Y, /* EV_REL */ + .ev_code_z = ABS_Z, /* EV_REL */ + + .ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY {x,y,z} */ + .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK, + .fifo_mode = FIFO_STREAM, + .watermark = 0, +}; + +static void adxl34x_get_triple(struct adxl34x *ac, struct axis_triple *axis) +{ + short buf[3]; + + ac->bops->read_block(ac->dev, DATAX0, DATAZ1 - DATAX0 + 1, buf); + + mutex_lock(&ac->mutex); + ac->saved.x = (s16) le16_to_cpu(buf[0]); + axis->x = ac->saved.x; + + ac->saved.y = (s16) le16_to_cpu(buf[1]); + axis->y = ac->saved.y; + + ac->saved.z = (s16) le16_to_cpu(buf[2]); + axis->z = ac->saved.z; + mutex_unlock(&ac->mutex); +} + +static void adxl34x_service_ev_fifo(struct adxl34x *ac) +{ + struct adxl34x_platform_data *pdata = &ac->pdata; + struct axis_triple axis; + + adxl34x_get_triple(ac, &axis); + + input_event(ac->input, pdata->ev_type, pdata->ev_code_x, + axis.x - ac->swcal.x); + input_event(ac->input, pdata->ev_type, pdata->ev_code_y, + axis.y - ac->swcal.y); + 
input_event(ac->input, pdata->ev_type, pdata->ev_code_z, + axis.z - ac->swcal.z); +} + +static void adxl34x_report_key_single(struct input_dev *input, int key) +{ + input_report_key(input, key, true); + input_sync(input); + input_report_key(input, key, false); +} + +static void adxl34x_send_key_events(struct adxl34x *ac, + struct adxl34x_platform_data *pdata, int status, int press) +{ + int i; + + for (i = ADXL_X_AXIS; i <= ADXL_Z_AXIS; i++) { + if (status & (1 << (ADXL_Z_AXIS - i))) + input_report_key(ac->input, + pdata->ev_code_tap[i], press); + } +} + +static void adxl34x_do_tap(struct adxl34x *ac, + struct adxl34x_platform_data *pdata, int status) +{ + adxl34x_send_key_events(ac, pdata, status, true); + input_sync(ac->input); + adxl34x_send_key_events(ac, pdata, status, false); +} + +static irqreturn_t adxl34x_irq(int irq, void *handle) +{ + struct adxl34x *ac = handle; + struct adxl34x_platform_data *pdata = &ac->pdata; + int int_stat, tap_stat, samples; + + /* + * ACT_TAP_STATUS should be read before clearing the interrupt + * Avoid reading ACT_TAP_STATUS in case TAP detection is disabled + */ + + if (pdata->tap_axis_control & (TAP_X_EN | TAP_Y_EN | TAP_Z_EN)) + tap_stat = AC_READ(ac, ACT_TAP_STATUS); + else + tap_stat = 0; + + int_stat = AC_READ(ac, INT_SOURCE); + + if (int_stat & FREE_FALL) + adxl34x_report_key_single(ac->input, pdata->ev_code_ff); + + if (int_stat & OVERRUN) + dev_dbg(ac->dev, "OVERRUN\n"); + + if (int_stat & (SINGLE_TAP | DOUBLE_TAP)) { + adxl34x_do_tap(ac, pdata, tap_stat); + + if (int_stat & DOUBLE_TAP) + adxl34x_do_tap(ac, pdata, tap_stat); + } + + if (pdata->ev_code_act_inactivity) { + if (int_stat & ACTIVITY) + input_report_key(ac->input, + pdata->ev_code_act_inactivity, 1); + if (int_stat & INACTIVITY) + input_report_key(ac->input, + pdata->ev_code_act_inactivity, 0); + } + + if (int_stat & (DATA_READY | WATERMARK)) { + + if (pdata->fifo_mode) + samples = ENTRIES(AC_READ(ac, FIFO_STATUS)) + 1; + else + samples = 1; + + for (; samples > 0; samples--) { + adxl34x_service_ev_fifo(ac); + /* + * To ensure that the FIFO has + * completely popped, there must be at least 5 us between + * the end of reading the data registers, signified by the + * transition to register 0x38 from 0x37 or the CS pin + * going high, and the start of new reads of the FIFO or + * reading the FIFO_STATUS register. For SPI operation at + * 1.5 MHz or lower, the register addressing portion of the + * transmission is sufficient delay to ensure the FIFO has + * completely popped. It is necessary for SPI operation + * greater than 1.5 MHz to de-assert the CS pin to ensure a + * total of 5 us, which is at most 3.4 us at 5 MHz + * operation. + */ + if (ac->fifo_delay && (samples > 1)) + udelay(3); + } + } + + input_sync(ac->input); + + return IRQ_HANDLED; +} + +static void __adxl34x_disable(struct adxl34x *ac) +{ + if (!ac->disabled && ac->opened) { + /* + * A '0' places the ADXL34x into standby mode + * with minimum power consumption. 
+ */ + AC_WRITE(ac, POWER_CTL, 0); + + ac->disabled = true; + } +} + +static void __adxl34x_enable(struct adxl34x *ac) +{ + if (ac->disabled && ac->opened) { + AC_WRITE(ac, POWER_CTL, ac->pdata.power_mode | PCTL_MEASURE); + ac->disabled = false; + } +} + +void adxl34x_disable(struct adxl34x *ac) +{ + mutex_lock(&ac->mutex); + __adxl34x_disable(ac); + mutex_unlock(&ac->mutex); +} +EXPORT_SYMBOL_GPL(adxl34x_disable); + +void adxl34x_enable(struct adxl34x *ac) +{ + mutex_lock(&ac->mutex); + __adxl34x_enable(ac); + mutex_unlock(&ac->mutex); +} + +EXPORT_SYMBOL_GPL(adxl34x_enable); + +static ssize_t adxl34x_disable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", ac->disabled); +} + +static ssize_t adxl34x_disable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 10, &val); + if (error) + return error; + + if (val) + adxl34x_disable(ac); + else + adxl34x_enable(ac); + + return count; +} + +static DEVICE_ATTR(disable, 0664, adxl34x_disable_show, adxl34x_disable_store); + +static ssize_t adxl34x_calibrate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + ssize_t count; + + mutex_lock(&ac->mutex); + count = sprintf(buf, "%d,%d,%d\n", + ac->hwcal.x * 4 + ac->swcal.x, + ac->hwcal.y * 4 + ac->swcal.y, + ac->hwcal.z * 4 + ac->swcal.z); + mutex_unlock(&ac->mutex); + + return count; +} + +static ssize_t adxl34x_calibrate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + + /* + * Hardware offset calibration has a resolution of 15.6 mg/LSB. + * We use HW calibration and handle the remaining bits in SW. (4mg/LSB) + */ + + mutex_lock(&ac->mutex); + ac->hwcal.x -= (ac->saved.x / 4); + ac->swcal.x = ac->saved.x % 4; + + ac->hwcal.y -= (ac->saved.y / 4); + ac->swcal.y = ac->saved.y % 4; + + ac->hwcal.z -= (ac->saved.z / 4); + ac->swcal.z = ac->saved.z % 4; + + AC_WRITE(ac, OFSX, (s8) ac->hwcal.x); + AC_WRITE(ac, OFSY, (s8) ac->hwcal.y); + AC_WRITE(ac, OFSZ, (s8) ac->hwcal.z); + mutex_unlock(&ac->mutex); + + return count; +} + +static DEVICE_ATTR(calibrate, 0664, + adxl34x_calibrate_show, adxl34x_calibrate_store); + +static ssize_t adxl34x_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", RATE(ac->pdata.data_rate)); +} + +static ssize_t adxl34x_rate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 10, &val); + if (error) + return error; + + mutex_lock(&ac->mutex); + + ac->pdata.data_rate = RATE(val); + AC_WRITE(ac, BW_RATE, + ac->pdata.data_rate | + (ac->pdata.low_power_mode ? LOW_POWER : 0)); + + mutex_unlock(&ac->mutex); + + return count; +} + +static DEVICE_ATTR(rate, 0664, adxl34x_rate_show, adxl34x_rate_store); + +static ssize_t adxl34x_autosleep_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", + ac->pdata.power_mode & (PCTL_AUTO_SLEEP | PCTL_LINK) ? 
1 : 0); +} + +static ssize_t adxl34x_autosleep_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 10, &val); + if (error) + return error; + + mutex_lock(&ac->mutex); + + if (val) + ac->pdata.power_mode |= (PCTL_AUTO_SLEEP | PCTL_LINK); + else + ac->pdata.power_mode &= ~(PCTL_AUTO_SLEEP | PCTL_LINK); + + if (!ac->disabled && ac->opened) + AC_WRITE(ac, POWER_CTL, ac->pdata.power_mode | PCTL_MEASURE); + + mutex_unlock(&ac->mutex); + + return count; +} + +static DEVICE_ATTR(autosleep, 0664, + adxl34x_autosleep_show, adxl34x_autosleep_store); + +static ssize_t adxl34x_position_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + ssize_t count; + + mutex_lock(&ac->mutex); + count = sprintf(buf, "(%d, %d, %d)\n", + ac->saved.x, ac->saved.y, ac->saved.z); + mutex_unlock(&ac->mutex); + + return count; +} + +static DEVICE_ATTR(position, S_IRUGO, adxl34x_position_show, NULL); + +#ifdef ADXL_DEBUG +static ssize_t adxl34x_write_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adxl34x *ac = dev_get_drvdata(dev); + unsigned long val; + int error; + + /* + * This allows basic ADXL register write access for debug purposes. + */ + error = strict_strtoul(buf, 16, &val); + if (error) + return error; + + mutex_lock(&ac->mutex); + AC_WRITE(ac, val >> 8, val & 0xFF); + mutex_unlock(&ac->mutex); + + return count; +} + +static DEVICE_ATTR(write, 0664, NULL, adxl34x_write_store); +#endif + +static struct attribute *adxl34x_attributes[] = { + &dev_attr_disable.attr, + &dev_attr_calibrate.attr, + &dev_attr_rate.attr, + &dev_attr_autosleep.attr, + &dev_attr_position.attr, +#ifdef ADXL_DEBUG + &dev_attr_write.attr, +#endif + NULL +}; + +static const struct attribute_group adxl34x_attr_group = { + .attrs = adxl34x_attributes, +}; + +static int adxl34x_input_open(struct input_dev *input) +{ + struct adxl34x *ac = input_get_drvdata(input); + + mutex_lock(&ac->mutex); + ac->opened = true; + __adxl34x_enable(ac); + mutex_unlock(&ac->mutex); + + return 0; +} + +static void adxl34x_input_close(struct input_dev *input) +{ + struct adxl34x *ac = input_get_drvdata(input); + + mutex_lock(&ac->mutex); + __adxl34x_disable(ac); + ac->opened = false; + mutex_unlock(&ac->mutex); +} + +struct adxl34x *adxl34x_probe(struct device *dev, int irq, + bool fifo_delay_default, + const struct adxl34x_bus_ops *bops) +{ + struct adxl34x *ac; + struct input_dev *input_dev; + const struct adxl34x_platform_data *pdata; + int err, range; + unsigned char revid; + + if (!irq) { + dev_err(dev, "no IRQ?\n"); + err = -ENODEV; + goto err_out; + } + + ac = kzalloc(sizeof(*ac), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!ac || !input_dev) { + err = -ENOMEM; + goto err_out; + } + + ac->fifo_delay = fifo_delay_default; + + pdata = dev->platform_data; + if (!pdata) { + dev_dbg(dev, + "No platfrom data: Using default initialization\n"); + pdata = &adxl34x_default_init; + } + + ac->pdata = *pdata; + pdata = &ac->pdata; + + ac->input = input_dev; + ac->disabled = true; + ac->dev = dev; + ac->irq = irq; + ac->bops = bops; + + mutex_init(&ac->mutex); + + input_dev->name = "ADXL34x accelerometer"; + revid = ac->bops->read(dev, DEVID); + + switch (revid) { + case ID_ADXL345: + ac->model = 345; + break; + case ID_ADXL346: + ac->model = 346; + break; + default: + dev_err(dev, "Failed 
to probe %s\n", input_dev->name); + err = -ENODEV; + goto err_free_mem; + } + + snprintf(ac->phys, sizeof(ac->phys), "%s/input0", dev_name(dev)); + + input_dev->phys = ac->phys; + input_dev->dev.parent = dev; + input_dev->id.product = ac->model; + input_dev->id.bustype = bops->bustype; + input_dev->open = adxl34x_input_open; + input_dev->close = adxl34x_input_close; + + input_set_drvdata(input_dev, ac); + + __set_bit(ac->pdata.ev_type, input_dev->evbit); + + if (ac->pdata.ev_type == EV_REL) { + __set_bit(REL_X, input_dev->relbit); + __set_bit(REL_Y, input_dev->relbit); + __set_bit(REL_Z, input_dev->relbit); + } else { + /* EV_ABS */ + __set_bit(ABS_X, input_dev->absbit); + __set_bit(ABS_Y, input_dev->absbit); + __set_bit(ABS_Z, input_dev->absbit); + + if (pdata->data_range & FULL_RES) + range = ADXL_FULLRES_MAX_VAL; /* Signed 13-bit */ + else + range = ADXL_FIXEDRES_MAX_VAL; /* Signed 10-bit */ + + input_set_abs_params(input_dev, ABS_X, -range, range, 3, 3); + input_set_abs_params(input_dev, ABS_Y, -range, range, 3, 3); + input_set_abs_params(input_dev, ABS_Z, -range, range, 3, 3); + } + + __set_bit(EV_KEY, input_dev->evbit); + __set_bit(pdata->ev_code_tap[ADXL_X_AXIS], input_dev->keybit); + __set_bit(pdata->ev_code_tap[ADXL_Y_AXIS], input_dev->keybit); + __set_bit(pdata->ev_code_tap[ADXL_Z_AXIS], input_dev->keybit); + + if (pdata->ev_code_ff) { + ac->int_mask = FREE_FALL; + __set_bit(pdata->ev_code_ff, input_dev->keybit); + } + + if (pdata->ev_code_act_inactivity) + __set_bit(pdata->ev_code_act_inactivity, input_dev->keybit); + + ac->int_mask |= ACTIVITY | INACTIVITY; + + if (pdata->watermark) { + ac->int_mask |= WATERMARK; + if (!FIFO_MODE(pdata->fifo_mode)) + ac->pdata.fifo_mode |= FIFO_STREAM; + } else { + ac->int_mask |= DATA_READY; + } + + if (pdata->tap_axis_control & (TAP_X_EN | TAP_Y_EN | TAP_Z_EN)) + ac->int_mask |= SINGLE_TAP | DOUBLE_TAP; + + if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS) + ac->fifo_delay = false; + + ac->bops->write(dev, POWER_CTL, 0); + + err = request_threaded_irq(ac->irq, NULL, adxl34x_irq, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + dev_name(dev), ac); + if (err) { + dev_err(dev, "irq %d busy?\n", ac->irq); + goto err_free_mem; + } + + err = sysfs_create_group(&dev->kobj, &adxl34x_attr_group); + if (err) + goto err_free_irq; + + err = input_register_device(input_dev); + if (err) + goto err_remove_attr; + + AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold); + AC_WRITE(ac, OFSX, pdata->x_axis_offset); + ac->hwcal.x = pdata->x_axis_offset; + AC_WRITE(ac, OFSY, pdata->y_axis_offset); + ac->hwcal.y = pdata->y_axis_offset; + AC_WRITE(ac, OFSZ, pdata->z_axis_offset); + ac->hwcal.z = pdata->z_axis_offset; + AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold); + AC_WRITE(ac, DUR, pdata->tap_duration); + AC_WRITE(ac, LATENT, pdata->tap_latency); + AC_WRITE(ac, WINDOW, pdata->tap_window); + AC_WRITE(ac, THRESH_ACT, pdata->activity_threshold); + AC_WRITE(ac, THRESH_INACT, pdata->inactivity_threshold); + AC_WRITE(ac, TIME_INACT, pdata->inactivity_time); + AC_WRITE(ac, THRESH_FF, pdata->free_fall_threshold); + AC_WRITE(ac, TIME_FF, pdata->free_fall_time); + AC_WRITE(ac, TAP_AXES, pdata->tap_axis_control); + AC_WRITE(ac, ACT_INACT_CTL, pdata->act_axis_control); + AC_WRITE(ac, BW_RATE, RATE(ac->pdata.data_rate) | + (pdata->low_power_mode ? 
LOW_POWER : 0)); + AC_WRITE(ac, DATA_FORMAT, pdata->data_range); + AC_WRITE(ac, FIFO_CTL, FIFO_MODE(pdata->fifo_mode) | + SAMPLES(pdata->watermark)); + + if (pdata->use_int2) + /* Map all INTs to INT2 */ + AC_WRITE(ac, INT_MAP, ac->int_mask | OVERRUN); + else + /* Map all INTs to INT1 */ + AC_WRITE(ac, INT_MAP, 0); + + AC_WRITE(ac, INT_ENABLE, ac->int_mask | OVERRUN); + + ac->pdata.power_mode &= (PCTL_AUTO_SLEEP | PCTL_LINK); + + return ac; + + err_remove_attr: + sysfs_remove_group(&dev->kobj, &adxl34x_attr_group); + err_free_irq: + free_irq(ac->irq, ac); + err_free_mem: + input_free_device(input_dev); + kfree(ac); + err_out: + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(adxl34x_probe); + +int adxl34x_remove(struct adxl34x *ac) +{ + adxl34x_disable(ac); + sysfs_remove_group(&ac->dev->kobj, &adxl34x_attr_group); + free_irq(ac->irq, ac); + input_unregister_device(ac->input); + kfree(ac); + + dev_dbg(ac->dev, "unregistered accelerometer\n"); + return 0; +} +EXPORT_SYMBOL_GPL(adxl34x_remove); + +MODULE_AUTHOR("Michael Hennerich "); +MODULE_DESCRIPTION("ADXL345/346 Three-Axis Digital Accelerometer Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/adxl34x.h b/drivers/input/misc/adxl34x.h new file mode 100644 index 000000000000..ea9093c15c81 --- /dev/null +++ b/drivers/input/misc/adxl34x.h @@ -0,0 +1,30 @@ +/* + * ADXL345/346 Three-Axis Digital Accelerometers (I2C/SPI Interface) + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Copyright (C) 2009 Michael Hennerich, Analog Devices Inc. + * Licensed under the GPL-2 or later. + */ + +#ifndef _ADXL34X_H_ +#define _ADXL34X_H_ + +struct device; +struct adxl34x; + +struct adxl34x_bus_ops { + u16 bustype; + int (*read)(struct device *, unsigned char); + int (*read_block)(struct device *, unsigned char, int, void *); + int (*write)(struct device *, unsigned char, unsigned char); +}; + +void adxl34x_disable(struct adxl34x *ac); +void adxl34x_enable(struct adxl34x *ac); +struct adxl34x *adxl34x_probe(struct device *dev, int irq, + bool fifo_delay_default, + const struct adxl34x_bus_ops *bops); +int adxl34x_remove(struct adxl34x *ac); + +#endif diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h new file mode 100644 index 000000000000..712118238038 --- /dev/null +++ b/include/linux/input/adxl34x.h @@ -0,0 +1,293 @@ +/* + * include/linux/input/adxl34x.h + * + * Digital Accelerometer characteristics are highly application specific + * and may vary between boards and models. The platform_data for the + * device's "struct device" holds this information. + * + * Copyright 2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __LINUX_INPUT_ADXL34X_H__ +#define __LINUX_INPUT_ADXL34X_H__ + +struct adxl34x_platform_data { + + /* + * X,Y,Z Axis Offset: + * offer user offset adjustments in twoscompliment + * form with a scale factor of 15.6 mg/LSB (i.e. 0x7F = +2 g) + */ + + s8 x_axis_offset; + s8 y_axis_offset; + s8 z_axis_offset; + + /* + * TAP_X/Y/Z Enable: Setting TAP_X, Y, or Z Enable enables X, + * Y, or Z participation in Tap detection. A '0' excludes the + * selected axis from participation in Tap detection. + * Setting the SUPPRESS bit suppresses Double Tap detection if + * acceleration greater than tap_threshold is present between + * taps. + */ + +#define ADXL_SUPPRESS (1 << 3) +#define ADXL_TAP_X_EN (1 << 2) +#define ADXL_TAP_Y_EN (1 << 1) +#define ADXL_TAP_Z_EN (1 << 0) + + u8 tap_axis_control; + + /* + * tap_threshold: + * holds the threshold value for tap detection/interrupts. 
+ * The data format is unsigned. The scale factor is 62.5 mg/LSB + * (i.e. 0xFF = +16 g). A zero value may result in undesirable + * behavior if Tap/Double Tap is enabled. + */ + + u8 tap_threshold; + + /* + * tap_duration: + * is an unsigned time value representing the maximum + * time that an event must be above the tap_threshold threshold + * to qualify as a tap event. The scale factor is 625 us/LSB. A zero + * value will prevent Tap/Double Tap functions from working. + */ + + u8 tap_duration; + + /* + * tap_latency: + * is an unsigned time value representing the wait time + * from the detection of a tap event to the opening of the time + * window tap_window for a possible second tap event. The scale + * factor is 1.25 ms/LSB. A zero value will disable the Double Tap + * function. + */ + + u8 tap_latency; + + /* + * tap_window: + * is an unsigned time value representing the amount + * of time after the expiration of tap_latency during which a second + * tap can begin. The scale factor is 1.25 ms/LSB. A zero value will + * disable the Double Tap function. + */ + + u8 tap_window; + + /* + * act_axis_control: + * X/Y/Z Enable: A '1' enables X, Y, or Z participation in activity + * or inactivity detection. A '0' excludes the selected axis from + * participation. If all of the axes are excluded, the function is + * disabled. + * AC/DC: A '0' = DC coupled operation and a '1' = AC coupled + * operation. In DC coupled operation, the current acceleration is + * compared with activity_threshold and inactivity_threshold directly + * to determine whether activity or inactivity is detected. In AC + * coupled operation for activity detection, the acceleration value + * at the start of activity detection is taken as a reference value. + * New samples of acceleration are then compared to this + * reference value and if the magnitude of the difference exceeds + * activity_threshold the device will trigger an activity interrupt. In + * AC coupled operation for inactivity detection, a reference value + * is used again for comparison and is updated whenever the + * device exceeds the inactivity threshold. Once the reference + * value is selected, the device compares the magnitude of the + * difference between the reference value and the current + * acceleration with inactivity_threshold. If the difference is below + * inactivity_threshold for a total of inactivity_time, the device is + * considered inactive and the inactivity interrupt is triggered. + */ + +#define ADXL_ACT_ACDC (1 << 7) +#define ADXL_ACT_X_EN (1 << 6) +#define ADXL_ACT_Y_EN (1 << 5) +#define ADXL_ACT_Z_EN (1 << 4) +#define ADXL_INACT_ACDC (1 << 3) +#define ADXL_INACT_X_EN (1 << 2) +#define ADXL_INACT_Y_EN (1 << 1) +#define ADXL_INACT_Z_EN (1 << 0) + + u8 act_axis_control; + + /* + * activity_threshold: + * holds the threshold value for activity detection. + * The data format is unsigned. The scale factor is + * 62.5 mg/LSB. A zero value may result in undesirable behavior if + * Activity interrupt is enabled. + */ + + u8 activity_threshold; + + /* + * inactivity_threshold: + * holds the threshold value for inactivity + * detection. The data format is unsigned. The scale + * factor is 62.5 mg/LSB. A zero value may result in undesirable + * behavior if Inactivity interrupt is enabled. + */ + + u8 inactivity_threshold; + + /* + * inactivity_time: + * is an unsigned time value representing the + * amount of time that acceleration must be below the value in + * inactivity_threshold for inactivity to be declared. 
The scale factor + * is 1 second/LSB. Unlike the other interrupt functions, which + * operate on unfiltered data, the inactivity function operates on the + * filtered output data. At least one output sample must be + * generated for the inactivity interrupt to be triggered. This will + * result in the function appearing un-responsive if the + * inactivity_time register is set with a value less than the time + * constant of the Output Data Rate. A zero value will result in an + * interrupt when the output data is below inactivity_threshold. + */ + + u8 inactivity_time; + + /* + * free_fall_threshold: + * holds the threshold value for Free-Fall detection. + * The data format is unsigned. The root-sum-square(RSS) value + * of all axes is calculated and compared to the value in + * free_fall_threshold to determine if a free fall event may be + * occurring. The scale factor is 62.5 mg/LSB. A zero value may + * result in undesirable behavior if Free-Fall interrupt is + * enabled. Values between 300 and 600 mg (0x05 to 0x09) are + * recommended. + */ + + u8 free_fall_threshold; + + /* + * free_fall_time: + * is an unsigned time value representing the minimum + * time that the RSS value of all axes must be less than + * free_fall_threshold to generate a Free-Fall interrupt. The + * scale factor is 5 ms/LSB. A zero value may result in + * undesirable behavior if Free-Fall interrupt is enabled. + * Values between 100 to 350 ms (0x14 to 0x46) are recommended. + */ + + u8 free_fall_time; + + /* + * data_rate: + * Selects device bandwidth and output data rate. + * RATE = 3200 Hz / (2^(15 - x)). Default value is 0x0A, or 100 Hz + * Output Data Rate. An Output Data Rate should be selected that + * is appropriate for the communication protocol and frequency + * selected. Selecting too high of an Output Data Rate with a low + * communication speed will result in samples being discarded. + */ + + u8 data_rate; + + /* + * data_range: + * FULL_RES: When this bit is set with the device is + * in Full-Resolution Mode, where the output resolution increases + * with RANGE to maintain a 4 mg/LSB scale factor. When this + * bit is cleared the device is in 10-bit Mode and RANGE determine the + * maximum g-Range and scale factor. + */ + +#define ADXL_FULL_RES (1 << 3) +#define ADXL_RANGE_PM_2g 0 +#define ADXL_RANGE_PM_4g 1 +#define ADXL_RANGE_PM_8g 2 +#define ADXL_RANGE_PM_16g 3 + + u8 data_range; + + /* + * low_power_mode: + * A '0' = Normal operation and a '1' = Reduced + * power operation with somewhat higher noise. + */ + + u8 low_power_mode; + + /* + * power_mode: + * LINK: A '1' with both the activity and inactivity functions + * enabled will delay the start of the activity function until + * inactivity is detected. Once activity is detected, inactivity + * detection will begin and prevent the detection of activity. This + * bit serially links the activity and inactivity functions. When '0' + * the inactivity and activity functions are concurrent. Additional + * information can be found in the Application section under Link + * Mode. + * AUTO_SLEEP: A '1' sets the ADXL34x to switch to Sleep Mode + * when inactivity (acceleration has been below inactivity_threshold + * for at least inactivity_time) is detected and the LINK bit is set. + * A '0' disables automatic switching to Sleep Mode. See SLEEP + * for further description. 
+ */ + +#define ADXL_LINK (1 << 5) +#define ADXL_AUTO_SLEEP (1 << 4) + + u8 power_mode; + + /* + * fifo_mode: + * BYPASS The FIFO is bypassed + * FIFO FIFO collects up to 32 values then stops collecting data + * STREAM FIFO holds the last 32 data values. Once full, the FIFO's + * oldest data is lost as it is replaced with newer data + * + * DEFAULT should be ADXL_FIFO_STREAM + */ + +#define ADXL_FIFO_BYPASS 0 +#define ADXL_FIFO_FIFO 1 +#define ADXL_FIFO_STREAM 2 + + u8 fifo_mode; + + /* + * watermark: + * The Watermark feature can be used to reduce the interrupt load + * of the system. The FIFO fills up to the value stored in watermark + * [1..32] and then generates an interrupt. + * A '0' disables the watermark feature. + */ + + u8 watermark; + + u32 ev_type; /* EV_ABS or EV_REL */ + + u32 ev_code_x; /* ABS_X,Y,Z or REL_X,Y,Z */ + u32 ev_code_y; /* ABS_X,Y,Z or REL_X,Y,Z */ + u32 ev_code_z; /* ABS_X,Y,Z or REL_X,Y,Z */ + + /* + * A valid BTN or KEY Code; use tap_axis_control to disable + * event reporting + */ + + u32 ev_code_tap[3]; /* EV_KEY {X-Axis, Y-Axis, Z-Axis} */ + + /* + * A valid BTN or KEY Code for Free-Fall or Activity enables + * input event reporting. A '0' disables the Free-Fall or + * Activity reporting. + */ + + u32 ev_code_ff; /* EV_KEY */ + u32 ev_code_act_inactivity; /* EV_KEY */ + + u8 use_int2; +}; +#endif -- cgit v1.2.3 From 671386bb23c57e5448f386a41101ed65ad1d488c Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Fri, 25 Jun 2010 08:44:10 -0700 Subject: Input: adxl34x - add support for ADXL346 orientation sensing Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger Signed-off-by: Dmitry Torokhov --- drivers/input/misc/adxl34x.c | 62 ++++++++++++++++++++++++++++++++++++++++--- include/linux/input/adxl34x.h | 56 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 114 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c index 07f9ef631540..77fb40987059 100644 --- a/drivers/input/misc/adxl34x.c +++ b/drivers/input/misc/adxl34x.c @@ -196,6 +196,8 @@ struct adxl34x { struct axis_triple hwcal; struct axis_triple saved; char phys[32]; + unsigned orient2d_saved; + unsigned orient3d_saved; bool disabled; /* P: mutex */ bool opened; /* P: mutex */ bool fifo_delay; @@ -296,7 +298,7 @@ static irqreturn_t adxl34x_irq(int irq, void *handle) { struct adxl34x *ac = handle; struct adxl34x_platform_data *pdata = &ac->pdata; - int int_stat, tap_stat, samples; + int int_stat, tap_stat, samples, orient, orient_code; /* * ACT_TAP_STATUS should be read before clearing the interrupt @@ -332,6 +334,36 @@ static irqreturn_t adxl34x_irq(int irq, void *handle) pdata->ev_code_act_inactivity, 0); } + /* + * ORIENTATION SENSING ADXL346 only + */ + if (pdata->orientation_enable) { + orient = AC_READ(ac, ORIENT); + if ((pdata->orientation_enable & ADXL_EN_ORIENTATION_2D) && + (orient & ADXL346_2D_VALID)) { + + orient_code = ADXL346_2D_ORIENT(orient); + /* Report orientation only when it changes */ + if (ac->orient2d_saved != orient_code) { + ac->orient2d_saved = orient_code; + adxl34x_report_key_single(ac->input, + pdata->ev_codes_orient_2d[orient_code]); + } + } + + if ((pdata->orientation_enable & ADXL_EN_ORIENTATION_3D) && + (orient & ADXL346_3D_VALID)) { + + orient_code = ADXL346_3D_ORIENT(orient) - 1; + /* Report orientation only when it changes */ + if (ac->orient3d_saved != orient_code) { + ac->orient3d_saved = orient_code; + adxl34x_report_key_single(ac->input, + 
pdata->ev_codes_orient_3d[orient_code]); + } + } + } + if (int_stat & (DATA_READY | WATERMARK)) { if (pdata->fifo_mode) @@ -641,7 +673,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq, struct adxl34x *ac; struct input_dev *input_dev; const struct adxl34x_platform_data *pdata; - int err, range; + int err, range, i; unsigned char revid; if (!irq) { @@ -797,12 +829,34 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq, AC_WRITE(ac, FIFO_CTL, FIFO_MODE(pdata->fifo_mode) | SAMPLES(pdata->watermark)); - if (pdata->use_int2) + if (pdata->use_int2) { /* Map all INTs to INT2 */ AC_WRITE(ac, INT_MAP, ac->int_mask | OVERRUN); - else + } else { /* Map all INTs to INT1 */ AC_WRITE(ac, INT_MAP, 0); + } + + if (ac->model == 346 && ac->pdata.orientation_enable) { + AC_WRITE(ac, ORIENT_CONF, + ORIENT_DEADZONE(ac->pdata.deadzone_angle) | + ORIENT_DIVISOR(ac->pdata.divisor_length)); + + ac->orient2d_saved = 1234; + ac->orient3d_saved = 1234; + + if (pdata->orientation_enable & ADXL_EN_ORIENTATION_3D) + for (i = 0; i < ARRAY_SIZE(pdata->ev_codes_orient_3d); i++) + __set_bit(pdata->ev_codes_orient_3d[i], + input_dev->keybit); + + if (pdata->orientation_enable & ADXL_EN_ORIENTATION_2D) + for (i = 0; i < ARRAY_SIZE(pdata->ev_codes_orient_2d); i++) + __set_bit(pdata->ev_codes_orient_2d[i], + input_dev->keybit); + } else { + ac->pdata.orientation_enable = 0; + } AC_WRITE(ac, INT_ENABLE, ac->int_mask | OVERRUN); diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h index 712118238038..df00d998a44a 100644 --- a/include/linux/input/adxl34x.h +++ b/include/linux/input/adxl34x.h @@ -288,6 +288,62 @@ struct adxl34x_platform_data { u32 ev_code_ff; /* EV_KEY */ u32 ev_code_act_inactivity; /* EV_KEY */ + /* + * Use ADXL34x INT2 instead of INT1 + */ u8 use_int2; + + /* + * ADXL346 only ORIENTATION SENSING feature + * The orientation function of the ADXL346 reports both 2-D and + * 3-D orientation concurrently. + */ + +#define ADXL_EN_ORIENTATION_2D 1 +#define ADXL_EN_ORIENTATION_3D 2 +#define ADXL_EN_ORIENTATION_2D_3D 3 + + u8 orientation_enable; + + /* + * The width of the deadzone region between two or more + * orientation positions is determined by setting the Deadzone + * value. The deadzone region size can be specified with a + * resolution of 3.6deg. The deadzone angle represents the total + * angle where the orientation is considered invalid. + */ + +#define ADXL_DEADZONE_ANGLE_0p0 0 /* !!!0.0 [deg] */ +#define ADXL_DEADZONE_ANGLE_3p6 1 /* 3.6 [deg] */ +#define ADXL_DEADZONE_ANGLE_7p2 2 /* 7.2 [deg] */ +#define ADXL_DEADZONE_ANGLE_10p8 3 /* 10.8 [deg] */ +#define ADXL_DEADZONE_ANGLE_14p4 4 /* 14.4 [deg] */ +#define ADXL_DEADZONE_ANGLE_18p0 5 /* 18.0 [deg] */ +#define ADXL_DEADZONE_ANGLE_21p6 6 /* 21.6 [deg] */ +#define ADXL_DEADZONE_ANGLE_25p2 7 /* 25.2 [deg] */ + + u8 deadzone_angle; + + /* + * To eliminate most human motion such as walking or shaking, + * a Divisor value should be selected to effectively limit the + * orientation bandwidth. 
Set the depth of the filter used to + * low-pass filter the measured acceleration for stable + * orientation sensing + */ + +#define ADXL_LP_FILTER_DIVISOR_2 0 +#define ADXL_LP_FILTER_DIVISOR_4 1 +#define ADXL_LP_FILTER_DIVISOR_8 2 +#define ADXL_LP_FILTER_DIVISOR_16 3 +#define ADXL_LP_FILTER_DIVISOR_32 4 +#define ADXL_LP_FILTER_DIVISOR_64 5 +#define ADXL_LP_FILTER_DIVISOR_128 6 +#define ADXL_LP_FILTER_DIVISOR_256 7 + + u8 divisor_length; + + u32 ev_codes_orient_2d[4]; /* EV_KEY {+X, -X, +Y, -Y} */ + u32 ev_codes_orient_3d[6]; /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */ }; #endif -- cgit v1.2.3 From 4ba6ce250e406b20bcd6f0f3aed6b3d80965e6c2 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 27 Jun 2010 18:49:59 +0200 Subject: percpu: make @dyn_size always mean min dyn_size in first chunk init functions In pcpu_build_alloc_info() and pcpu_embed_first_chunk(), @dyn_size was ssize_t, -1 meant auto-size, 0 forced 0 and positive meant minimum size. There's no use case for forcing 0 and the upcoming early alloc support always requires non-zero dynamic size. Make @dyn_size always mean minimum dyn_size. While at it, make pcpu_build_alloc_info() static which doesn't have any external caller as suggested by David Rientjes. Signed-off-by: Tejun Heo Cc: David Rientjes --- include/linux/percpu.h | 7 +------ mm/percpu.c | 35 ++++++++++------------------------- 2 files changed, 11 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index d3a38d687104..3ffd05e550de 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -104,16 +104,11 @@ extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, int nr_units); extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai); -extern struct pcpu_alloc_info * __init pcpu_build_alloc_info( - size_t reserved_size, ssize_t dyn_size, - size_t atom_size, - pcpu_fc_cpu_distance_fn_t cpu_distance_fn); - extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr); #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK -extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size, +extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn, pcpu_fc_alloc_fn_t alloc_fn, diff --git a/mm/percpu.c b/mm/percpu.c index 6470e7710231..c3e7010c6d71 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1013,20 +1013,6 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr) return page_to_phys(pcpu_addr_to_page(addr)); } -static inline size_t pcpu_calc_fc_sizes(size_t static_size, - size_t reserved_size, - ssize_t *dyn_sizep) -{ - size_t size_sum; - - size_sum = PFN_ALIGN(static_size + reserved_size + - (*dyn_sizep >= 0 ? 
*dyn_sizep : 0)); - if (*dyn_sizep != 0) - *dyn_sizep = size_sum - static_size - reserved_size; - - return size_sum; -} - /** * pcpu_alloc_alloc_info - allocate percpu allocation info * @nr_groups: the number of groups @@ -1085,7 +1071,7 @@ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) /** * pcpu_build_alloc_info - build alloc_info considering distances between CPUs * @reserved_size: the size of reserved percpu area in bytes - * @dyn_size: free size for dynamic allocation in bytes, -1 for auto + * @dyn_size: minimum free size for dynamic allocation in bytes * @atom_size: allocation atom size * @cpu_distance_fn: callback to determine distance between cpus, optional * @@ -1103,8 +1089,8 @@ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) * On success, pointer to the new allocation_info is returned. On * failure, ERR_PTR value is returned. */ -struct pcpu_alloc_info * __init pcpu_build_alloc_info( - size_t reserved_size, ssize_t dyn_size, +static struct pcpu_alloc_info * __init pcpu_build_alloc_info( + size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn) { @@ -1123,13 +1109,15 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info( memset(group_map, 0, sizeof(group_map)); memset(group_cnt, 0, sizeof(group_cnt)); + size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size); + dyn_size = size_sum - static_size - reserved_size; + /* * Determine min_unit_size, alloc_size and max_upa such that * alloc_size is multiple of atom_size and is the smallest * which can accomodate 4k aligned segments which are equal to * or larger than min_unit_size. */ - size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size); min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); alloc_size = roundup(min_unit_size, atom_size); @@ -1532,7 +1520,7 @@ early_param("percpu_alloc", percpu_alloc_setup); /** * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem * @reserved_size: the size of reserved percpu area in bytes - * @dyn_size: free size for dynamic allocation in bytes, -1 for auto + * @dyn_size: minimum free size for dynamic allocation in bytes * @atom_size: allocation atom size * @cpu_distance_fn: callback to determine distance between cpus, optional * @alloc_fn: function to allocate percpu page @@ -1553,10 +1541,7 @@ early_param("percpu_alloc", percpu_alloc_setup); * vmalloc space is not orders of magnitude larger than distances * between node memory addresses (ie. 32bit NUMA machines). * - * When @dyn_size is positive, dynamic area might be larger than - * specified to fill page alignment. When @dyn_size is auto, - * @dyn_size is just big enough to fill page alignment after static - * and reserved areas. + * @dyn_size specifies the minimum dynamic area size. * * If the needed size is smaller than the minimum or specified unit * size, the leftover is returned using @free_fn. @@ -1564,7 +1549,7 @@ early_param("percpu_alloc", percpu_alloc_setup); * RETURNS: * 0 on success, -errno on failure. 
*/ -int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size, +int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn, pcpu_fc_alloc_fn_t alloc_fn, @@ -1695,7 +1680,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); - ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL); + ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); if (IS_ERR(ai)) return PTR_ERR(ai); BUG_ON(ai->nr_groups != 1); -- cgit v1.2.3 From 099a19d91ca429944743d51bef8fee240e94d8e3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 27 Jun 2010 18:50:00 +0200 Subject: percpu: allow limited allocation before slab is online This patch updates percpu allocator such that it can serve limited amount of allocation before slab comes online. This is primarily to allow slab to depend on working percpu allocator. Two parameters, PERCPU_DYNAMIC_EARLY_SIZE and SLOTS, determine how much memory space and allocation map slots are reserved. If this reserved area is exhausted, WARN_ON_ONCE() will trigger and allocation will fail till slab comes online. The following changes are made to implement early alloc. * pcpu_mem_alloc() now checks slab_is_available() * Chunks are allocated using pcpu_mem_alloc() * Init paths make sure ai->dyn_size is at least as large as PERCPU_DYNAMIC_EARLY_SIZE. * Initial alloc maps are allocated in __initdata and copied to kmalloc'd areas once slab is online. Signed-off-by: Tejun Heo Cc: Christoph Lameter --- include/linux/percpu.h | 13 +++++++++++++ init/main.c | 1 + mm/percpu.c | 52 ++++++++++++++++++++++++++++++++++++++------------ 3 files changed, 54 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 3ffd05e550de..b8b9084527b1 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -44,6 +44,16 @@ /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) +/* + * Percpu allocator can serve percpu allocations before slab is + * initialized which allows slab to depend on the percpu allocator. + * The following two parameters decide how much resource to + * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or + * larger than PERCPU_DYNAMIC_EARLY_SIZE. 
+ */ +#define PERCPU_DYNAMIC_EARLY_SLOTS 128 +#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10) + /* * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy * back on the first chunk for dynamic percpu allocation if arch is @@ -135,6 +145,7 @@ extern bool is_kernel_percpu_address(unsigned long addr); #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA extern void __init setup_per_cpu_areas(void); #endif +extern void __init percpu_init_late(void); #else /* CONFIG_SMP */ @@ -148,6 +159,8 @@ static inline bool is_kernel_percpu_address(unsigned long addr) static inline void __init setup_per_cpu_areas(void) { } +static inline void __init percpu_init_late(void) { } + static inline void *pcpu_lpage_remapped(void *kaddr) { return NULL; diff --git a/init/main.c b/init/main.c index 3bdb152f412f..3ff8dd0fb512 100644 --- a/init/main.c +++ b/init/main.c @@ -522,6 +522,7 @@ static void __init mm_init(void) page_cgroup_init_flatmem(); mem_init(); kmem_cache_init(); + percpu_init_late(); pgtable_cache_init(); vmalloc_init(); } diff --git a/mm/percpu.c b/mm/percpu.c index c3e7010c6d71..e61dc2cc5873 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -282,6 +282,9 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, */ static void *pcpu_mem_alloc(size_t size) { + if (WARN_ON_ONCE(!slab_is_available())) + return NULL; + if (size <= PAGE_SIZE) return kzalloc(size, GFP_KERNEL); else { @@ -392,13 +395,6 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) old_size = chunk->map_alloc * sizeof(chunk->map[0]); memcpy(new, chunk->map, old_size); - /* - * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is - * one of the first chunks and still using static map. - */ - if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC) - old = chunk->map; - chunk->map_alloc = new_alloc; chunk->map = new; new = NULL; @@ -604,7 +600,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void) { struct pcpu_chunk *chunk; - chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL); + chunk = pcpu_mem_alloc(pcpu_chunk_struct_size); if (!chunk) return NULL; @@ -1109,7 +1105,9 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info( memset(group_map, 0, sizeof(group_map)); memset(group_cnt, 0, sizeof(group_cnt)); - size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size); + /* calculate size_sum and ensure dyn_size is enough for early alloc */ + size_sum = PFN_ALIGN(static_size + reserved_size + + max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); dyn_size = size_sum - static_size - reserved_size; /* @@ -1338,7 +1336,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr) { static char cpus_buf[4096] __initdata; - static int smap[2], dmap[2]; + static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; + static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; size_t dyn_size = ai->dyn_size; size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; struct pcpu_chunk *schunk, *dchunk = NULL; @@ -1361,14 +1360,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, } while (0) /* sanity checks */ - BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || - ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); PCPU_SETUP_BUG_ON(!ai->static_size); PCPU_SETUP_BUG_ON(!base_addr); PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); + PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); /* 
process group information and build config tables accordingly */ @@ -1806,3 +1804,33 @@ void __init setup_per_cpu_areas(void) __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; } #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ + +/* + * First and reserved chunks are initialized with temporary allocation + * map in initdata so that they can be used before slab is online. + * This function is called after slab is brought up and replaces those + * with properly allocated maps. + */ +void __init percpu_init_late(void) +{ + struct pcpu_chunk *target_chunks[] = + { pcpu_first_chunk, pcpu_reserved_chunk, NULL }; + struct pcpu_chunk *chunk; + unsigned long flags; + int i; + + for (i = 0; (chunk = target_chunks[i]); i++) { + int *map; + const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); + + BUILD_BUG_ON(size > PAGE_SIZE); + + map = pcpu_mem_alloc(size); + BUG_ON(!map); + + spin_lock_irqsave(&pcpu_lock, flags); + memcpy(map, chunk->map, size); + chunk->map = map; + spin_unlock_irqrestore(&pcpu_lock, flags); + } +} -- cgit v1.2.3 From 7804302b14032d357d889e4a23e463eb6a6c5136 Mon Sep 17 00:00:00 2001 From: Anatolij Gustschin Date: Mon, 28 Jun 2010 01:25:19 -0700 Subject: Input: ads7846 - allow specifying irq trigger type in platform data On some platforms, for example with GPIO interrupts on mpc5121, it is not possible to configure falling edge interrupts. Specifying irq trigger type in platform data structure allows using ads7846 driver on such platforms. Signed-off-by: Anatolij Gustschin Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/ads7846.c | 5 ++++- include/linux/spi/ads7846.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index a9fdf55c0238..69210cb56c54 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1174,7 +1174,10 @@ static int __devinit ads7846_probe(struct spi_device *spi) goto err_put_regulator; } - if (request_irq(spi->irq, ads7846_irq, IRQF_TRIGGER_FALLING, + if (!pdata->irq_flags) + pdata->irq_flags = IRQF_TRIGGER_FALLING; + + if (request_irq(spi->irq, ads7846_irq, pdata->irq_flags, spi->dev.driver->name, ts)) { dev_info(&spi->dev, "trying pin change workaround on irq %d\n", spi->irq); diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index b4ae570d3c98..95d36bfb34bc 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h @@ -54,5 +54,6 @@ struct ads7846_platform_data { void (*filter_cleanup)(void *filter_data); void (*wait_for_sync)(void); bool wakeup; + unsigned long irq_flags; }; -- cgit v1.2.3 From 57439f878afafefad8836ebf5c49da2a0a746105 Mon Sep 17 00:00:00 2001 From: "npiggin@suse.de" Date: Thu, 24 Jun 2010 13:02:14 +1000 Subject: fs: fix superblock iteration race list_for_each_entry_safe is not suitable to protect against concurrent modification of the list. 6754af6 introduced a race in sb walking. list_for_each_entry can use the trick of pinning the current entry in the list before we drop and retake the lock because it subsequently follows cur->next. However list_for_each_entry_safe saves n=cur->next for following before entering the loop body, so when the lock is dropped, n may be deleted. 
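The idiom the fix relies on can be sketched roughly as follows; this is an illustration of the pattern used in fs/super.c rather than a verbatim copy, and do_sleeping_work() is a placeholder for whatever the loop body does while sb_lock is dropped:

    struct super_block *sb, *n;

    spin_lock(&sb_lock);
    list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
            sb->s_count++;                  /* pin the cursor: it stays put while we hold the ref */
            spin_unlock(&sb_lock);

            do_sleeping_work(sb);           /* placeholder for the real work */

            spin_lock(&sb_lock);
            /* lock was dropped, the cached next pointer may be stale */
            list_safe_reset_next(sb, n, s_list);
            __put_super(sb);                /* unpin */
    }
    spin_unlock(&sb_lock);

Because the cursor sb is pinned across the unlocked region it cannot go away, so recomputing n from it after re-taking the lock is safe; that is exactly the constraint spelled out in the new list_safe_reset_next() kerneldoc added below.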
Signed-off-by: Nick Piggin Cc: Christoph Hellwig Cc: John Stultz Cc: Frank Mayhar Cc: Al Viro Signed-off-by: Linus Torvalds --- fs/dcache.c | 2 ++ fs/super.c | 6 ++++++ include/linux/list.h | 15 +++++++++++++++ 3 files changed, 23 insertions(+) (limited to 'include') diff --git a/fs/dcache.c b/fs/dcache.c index d96047b4a633..c8c78ba07827 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -590,6 +590,8 @@ static void prune_dcache(int count) up_read(&sb->s_umount); } spin_lock(&sb_lock); + /* lock was dropped, must reset next */ + list_safe_reset_next(sb, n, s_list); count -= pruned; __put_super(sb); /* more work left to do? */ diff --git a/fs/super.c b/fs/super.c index 5c35bc7a499e..938119ab8dcb 100644 --- a/fs/super.c +++ b/fs/super.c @@ -374,6 +374,8 @@ void sync_supers(void) up_read(&sb->s_umount); spin_lock(&sb_lock); + /* lock was dropped, must reset next */ + list_safe_reset_next(sb, n, s_list); __put_super(sb); } } @@ -405,6 +407,8 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg) up_read(&sb->s_umount); spin_lock(&sb_lock); + /* lock was dropped, must reset next */ + list_safe_reset_next(sb, n, s_list); __put_super(sb); } spin_unlock(&sb_lock); @@ -585,6 +589,8 @@ static void do_emergency_remount(struct work_struct *work) } up_write(&sb->s_umount); spin_lock(&sb_lock); + /* lock was dropped, must reset next */ + list_safe_reset_next(sb, n, s_list); __put_super(sb); } spin_unlock(&sb_lock); diff --git a/include/linux/list.h b/include/linux/list.h index 8392884a2977..5d57a3a1fa1b 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -544,6 +544,21 @@ static inline void list_splice_tail_init(struct list_head *list, &pos->member != (head); \ pos = n, n = list_entry(n->member.prev, typeof(*n), member)) +/** + * list_safe_reset_next - reset a stale list_for_each_entry_safe loop + * @pos: the loop cursor used in the list_for_each_entry_safe loop + * @n: temporary storage used in list_for_each_entry_safe + * @member: the name of the list_struct within the struct. + * + * list_safe_reset_next is not safe to use in general if the list may be + * modified concurrently (eg. the lock is dropped in the loop body). An + * exception to this is if the cursor element (pos) is pinned in the list, + * and list_safe_reset_next is called after re-taking the lock and before + * completing the current iteration of the loop body. + */ +#define list_safe_reset_next(pos, n, member) \ + n = list_entry(pos->member.next, typeof(*pos), member) + /* * Double linked lists with a single pointer list head. * Mostly useful for hash tables where the two pointer list head is -- cgit v1.2.3 From 9c695203a7ddbe49dba5f22f4c941d24f47475df Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Tue, 29 Jun 2010 15:05:25 -0700 Subject: compiler-gcc.h: gcc-4.5 needs noclone and noinline on __naked functions A __naked function is defined in C but with a body completely implemented by asm(), including any prologue and epilogue. These asm() bodies expect standard calling conventions for parameter passing. Older GCCs implement that correctly, but 4.[56] currently do not, see GCC PR44290. In the Linux kernel this breaks ARM, causing most arch/arm/mm/copypage-*.c modules to get miscompiled, resulting in kernel crashes during bootup. Part of the kernel fix is to augment the __naked function attribute to also imply noinline and noclone. This patch implements that, and has been verified to fix boot failures with gcc-4.5 compiled 2.6.34 and 2.6.35-rc1 kernels. The patch is a no-op with older GCCs. 
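For context, a __naked function looks roughly like the sketch below; the name and body are illustrative only (loosely modelled on the arch/arm/mm/copypage-*.c helpers) and assume the ARM convention of passing the first two arguments in r0 and r1:

    /*
     * No compiler-generated prologue or epilogue: the asm body itself
     * expects 'to' in r0 and 'from' in r1 and returns explicitly.
     * If the compiler inlines or clones the function, the arguments
     * may no longer arrive in those registers, which is why __naked
     * must also imply noinline and noclone on the affected compilers.
     */
    static void __naked example_copy_word(void *to, const void *from)
    {
            asm volatile(
            "       ldr     r3, [r1]\n"
            "       str     r3, [r0]\n"
            "       bx      lr\n");
    }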
Signed-off-by: Mikael Pettersson Signed-off-by: Khem Raj Cc: Russell King Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compiler-gcc.h | 10 +++++++++- include/linux/compiler-gcc4.h | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 73dcf804bc94..0da5b187f124 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -58,8 +58,12 @@ * naked functions because then mcount is called without stack and frame pointer * being set up and there is no chance to restore the lr register to the value * before mcount was called. + * + * The asm() bodies of naked functions often depend on standard calling conventions, + * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce + * this, so we must do so ourselves. See GCC PR44290. */ -#define __naked __attribute__((naked)) notrace +#define __naked __attribute__((naked)) noinline __noclone notrace #define __noreturn __attribute__((noreturn)) @@ -85,3 +89,7 @@ #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) #define gcc_header(x) _gcc_header(x) #include gcc_header(__GNUC__) + +#if !defined(__noclone) +#define __noclone /* not needed */ +#endif diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 94dea3ffbfa1..fcfa5b9a4317 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -48,6 +48,10 @@ * unreleased. Really, we need to have autoconf for the kernel. */ #define unreachable() __builtin_unreachable() + +/* Mark a function definition as prohibited from being cloned. */ +#define __noclone __attribute__((__noclone__)) + #endif #endif -- cgit v1.2.3 From c59690fa484c04ab96fe932241b569a09755a4d2 Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Wed, 30 Jun 2010 00:53:53 -0700 Subject: Input: i8042 - mark stubs in i8042.h "static inline" Otherwise we may run into following: drivers/platform/built-in.o: In function `i8042_lock_chip': /home/test/ws2/projects/linux-2.6/include/linux/i8042.h:50: multiple definition of `i8042_lock_chip' drivers/input/serio/built-in.o:/home/test/ws2/projects/linux-2.6/include/linux/i8042.h:50: first defined here ... 
make[1]: *** [drivers/built-in.o] Error 1 make: *** [drivers] Error 2 Signed-off-by: Feng Tang Signed-off-by: Dmitry Torokhov --- include/linux/i8042.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/i8042.h b/include/linux/i8042.h index 9bf6870ee5f4..a986ff588944 100644 --- a/include/linux/i8042.h +++ b/include/linux/i8042.h @@ -46,31 +46,31 @@ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, #else -void i8042_lock_chip(void) +static inline void i8042_lock_chip(void) { } -void i8042_unlock_chip(void) +static inline void i8042_unlock_chip(void) { } -int i8042_command(unsigned char *param, int command) +static inline int i8042_command(unsigned char *param, int command) { return -ENODEV; } -bool i8042_check_port_owner(const struct serio *serio) +static inline bool i8042_check_port_owner(const struct serio *serio) { return false; } -int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, +static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)) { return -ENODEV; } -int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, +static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)) { return -ENODEV; -- cgit v1.2.3 From b26c949755c06ec79e55a75817210083bd78fc9a Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 23 Jun 2010 11:35:41 +1000 Subject: fb: fix colliding defines for fb flags. When I added the flags I must have been using a 25 line terminal and missed the following flags. The collided with flag has one user in staging despite being in-tree for 5 years. I'm happy to push this via my drm tree unless someone really wants to do it. Signed-off-by: Dave Airlie Cc: stable@kernel.org --- include/linux/fb.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/fb.h b/include/linux/fb.h index 907ace3a64c8..8e5a9dfb76bf 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -786,8 +786,6 @@ struct fb_tile_ops { #define FBINFO_MISC_USEREVENT 0x10000 /* event request from userspace */ #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ -#define FBINFO_MISC_FIRMWARE 0x40000 /* a replaceable firmware - inited framebuffer */ /* A driver may set this flag to indicate that it does want a set_par to be * called every time when fbcon_switch is executed. The advantage is that with @@ -801,6 +799,8 @@ struct fb_tile_ops { */ #define FBINFO_MISC_ALWAYS_SETPAR 0x40000 +/* where the fb is a firmware driver, and can be replaced with a proper one */ +#define FBINFO_MISC_FIRMWARE 0x80000 /* * Host and GPU endianness differ. */ -- cgit v1.2.3 From 8c215bd3890c347dfb6a2db4779755f8b9c298a9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 1 Jul 2010 09:07:17 +0200 Subject: sched: Cure nr_iowait_cpu() users Commit 0224cf4c5e (sched: Intoduce get_cpu_iowait_time_us()) broke things by not making sure preemption was indeed disabled by the callers of nr_iowait_cpu() which took the iowait value of the current cpu. This resulted in a heap of preempt warnings. Cure this by making nr_iowait_cpu() take a cpu number and fix up the callers to pass in the right number. Signed-off-by: Peter Zijlstra Cc: Arjan van de Ven Cc: Sergey Senozhatsky Cc: Rafael J. 
Wysocki Cc: Maxim Levitsky Cc: Len Brown Cc: Pavel Machek Cc: Jiri Slaby Cc: linux-pm@lists.linux-foundation.org LKML-Reference: <1277968037.1868.120.camel@laptop> Signed-off-by: Ingo Molnar --- drivers/cpuidle/governors/menu.c | 4 ++-- include/linux/sched.h | 2 +- kernel/sched.c | 4 ++-- kernel/time/tick-sched.c | 16 ++++++++-------- 4 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 52ff8aa63f84..1b128702d300 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -143,7 +143,7 @@ static inline int which_bucket(unsigned int duration) * This allows us to calculate * E(duration)|iowait */ - if (nr_iowait_cpu()) + if (nr_iowait_cpu(smp_processor_id())) bucket = BUCKETS/2; if (duration < 10) @@ -175,7 +175,7 @@ static inline int performance_multiplier(void) mult += 2 * get_loadavg(); /* for IO wait tasks (per cpu!) we add 5x each */ - mult += 10 * nr_iowait_cpu(); + mult += 10 * nr_iowait_cpu(smp_processor_id()); return mult; } diff --git a/include/linux/sched.h b/include/linux/sched.h index f118809c953f..747fcaedddb7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -139,7 +139,7 @@ extern int nr_processes(void); extern unsigned long nr_running(void); extern unsigned long nr_uninterruptible(void); extern unsigned long nr_iowait(void); -extern unsigned long nr_iowait_cpu(void); +extern unsigned long nr_iowait_cpu(int cpu); extern unsigned long this_cpu_load(void); diff --git a/kernel/sched.c b/kernel/sched.c index a24d6d5d83f6..f87abe3b0176 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2864,9 +2864,9 @@ unsigned long nr_iowait(void) return sum; } -unsigned long nr_iowait_cpu(void) +unsigned long nr_iowait_cpu(int cpu) { - struct rq *this = this_rq(); + struct rq *this = cpu_rq(cpu); return atomic_read(&this->nr_iowait); } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1d7b9bc1c034..1a6f828e57a0 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now) * Updates the per cpu time idle statistics counters */ static void -update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time) +update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) { ktime_t delta; if (ts->idle_active) { delta = ktime_sub(now, ts->idle_entrytime); ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); - if (nr_iowait_cpu() > 0) + if (nr_iowait_cpu(cpu) > 0) ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); ts->idle_entrytime = now; } @@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); - update_ts_time_stats(ts, now, NULL); + update_ts_time_stats(cpu, ts, now, NULL); ts->idle_active = 0; sched_clock_idle_wakeup_event(0); } -static ktime_t tick_nohz_start_idle(struct tick_sched *ts) +static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) { ktime_t now; now = ktime_get(); - update_ts_time_stats(ts, now, NULL); + update_ts_time_stats(cpu, ts, now, NULL); ts->idle_entrytime = now; ts->idle_active = 1; @@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) if (!tick_nohz_enabled) return -1; - update_ts_time_stats(ts, ktime_get(), last_update_time); + update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); return ktime_to_us(ts->idle_sleeptime); } @@ -242,7 +242,7 @@ u64 
get_cpu_iowait_time_us(int cpu, u64 *last_update_time) if (!tick_nohz_enabled) return -1; - update_ts_time_stats(ts, ktime_get(), last_update_time); + update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); return ktime_to_us(ts->iowait_sleeptime); } @@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle) */ ts->inidle = 1; - now = tick_nohz_start_idle(ts); + now = tick_nohz_start_idle(cpu, ts); /* * If this cpu is offline and it is the one which updates -- cgit v1.2.3 From c6353b4520788e34098bbf61c73fb9618ca7fdd6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 17 Jun 2010 11:42:22 +0200 Subject: ahci,ata_generic: let ata_generic handle new MBP w/ MCP89 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For yet unknown reason, MCP89 on MBP 7,1 doesn't work w/ ahci under linux but the controller doesn't require explicit mode setting and works fine with ata_generic. Make ahci ignore the controller on MBP 7,1 and let ata_generic take it for now. Reported in bko#15923. https://bugzilla.kernel.org/show_bug.cgi?id=15923 NVIDIA is investigating why ahci mode doesn't work. Signed-off-by: Tejun Heo Cc: Peer Chen Cc: stable@kernel.org Reported-by: Anders Østhus Reported-by: Andreas Graf Reported-by: Benoit Gschwind Reported-by: Damien Cassou Reported-by: tixetsal@juno.com Signed-off-by: Jeff Garzik --- drivers/ata/ahci.c | 10 ++++++++++ drivers/ata/ata_generic.c | 6 ++++++ include/linux/pci_ids.h | 1 + 3 files changed, 17 insertions(+) (limited to 'include') diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 8ca16f54e1ed..f2522534ae63 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1053,6 +1053,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) return -ENODEV; + /* + * For some reason, MCP89 on MacBook 7,1 doesn't work with + * ahci, use ata_generic instead. + */ + if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && + pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA && + pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && + pdev->subsystem_device == 0xcb89) + return -ENODEV; + /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. * At the moment, we can only use the AHCI mode. Let the users know * that for SAS drives they're out of luck. diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index 573158a9668d..d4ccf74c4c94 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c @@ -168,6 +168,12 @@ static struct pci_device_id ata_generic[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), }, + /* + * For some reason, MCP89 on MacBook 7,1 doesn't work with + * ahci, use ata_generic instead. 
+ */ + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA, + PCI_VENDOR_ID_APPLE, 0xcb89, }, #if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4eb467910a45..3bedcc149c84 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1261,6 +1261,7 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 #define PCI_VENDOR_ID_IMS 0x10e0 #define PCI_DEVICE_ID_IMS_TT128 0x9128 -- cgit v1.2.3 From 0f622bf465e78c390e13c5f4a14d0b3f8fb7c7e5 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Thu, 1 Jul 2010 09:01:50 -0700 Subject: Input: ads7846 - do not allow altering platform data Tested-by: Anatolij Gustschin Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/ads7846.c | 35 +++++++++++++++++++---------------- include/linux/spi/ads7846.h | 2 +- 2 files changed, 20 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 69210cb56c54..a3771607ead5 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -878,14 +878,15 @@ static int __devinit setup_pendown(struct spi_device *spi, struct ads7846 *ts) static int __devinit ads7846_probe(struct spi_device *spi) { - struct ads7846 *ts; - struct ads7846_packet *packet; - struct input_dev *input_dev; - struct ads7846_platform_data *pdata = spi->dev.platform_data; - struct spi_message *m; - struct spi_transfer *x; - int vref; - int err; + struct ads7846 *ts; + struct ads7846_packet *packet; + struct input_dev *input_dev; + const struct ads7846_platform_data *pdata = spi->dev.platform_data; + struct spi_message *m; + struct spi_transfer *x; + unsigned long irq_flags; + int vref; + int err; if (!spi->irq) { dev_dbg(&spi->dev, "no IRQ?\n"); @@ -1174,20 +1175,22 @@ static int __devinit ads7846_probe(struct spi_device *spi) goto err_put_regulator; } - if (!pdata->irq_flags) - pdata->irq_flags = IRQF_TRIGGER_FALLING; + irq_flags = pdata->irq_flags ? 
: IRQF_TRIGGER_FALLING; - if (request_irq(spi->irq, ads7846_irq, pdata->irq_flags, - spi->dev.driver->name, ts)) { + err = request_irq(spi->irq, ads7846_irq, irq_flags, + spi->dev.driver->name, ts); + + if (err && !pdata->irq_flags) { dev_info(&spi->dev, "trying pin change workaround on irq %d\n", spi->irq); err = request_irq(spi->irq, ads7846_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, spi->dev.driver->name, ts); - if (err) { - dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); - goto err_disable_regulator; - } + } + + if (err) { + dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); + goto err_disable_regulator; } err = ads784x_hwmon_register(spi, ts); diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index 95d36bfb34bc..92bd0839d5b4 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h @@ -48,7 +48,7 @@ struct ads7846_platform_data { * state if get_pendown_state == NULL */ int (*get_pendown_state)(void); - int (*filter_init) (struct ads7846_platform_data *pdata, + int (*filter_init) (const struct ads7846_platform_data *pdata, void **filter_data); int (*filter) (void *filter_data, int data_idx, int *val); void (*filter_cleanup)(void *filter_data); -- cgit v1.2.3 From 312e8e8a9e2471b0ada7366497fffb3ff1a40e2c Mon Sep 17 00:00:00 2001 From: Joonyoung Shim Date: Sun, 4 Jul 2010 01:21:25 -0700 Subject: Input: mcs - Add MCS touchkey driver This adds support for MELPAS MCS5000/MSC5080 touch key controllers. Signed-off-by: Joonyoung Shim Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/Kconfig | 12 ++ drivers/input/keyboard/Makefile | 1 + drivers/input/keyboard/mcs_touchkey.c | 239 +++++++++++++++++++++++++++++++++ drivers/input/touchscreen/mcs5000_ts.c | 6 +- include/linux/i2c/mcs.h | 34 +++++ include/linux/i2c/mcs5000_ts.h | 24 ---- 6 files changed, 289 insertions(+), 27 deletions(-) create mode 100644 drivers/input/keyboard/mcs_touchkey.c create mode 100644 include/linux/i2c/mcs.h delete mode 100644 include/linux/i2c/mcs5000_ts.h (limited to 'include') diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index d8fa5d724c57..c7480934ceeb 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -297,6 +297,18 @@ config KEYBOARD_MAX7359 To compile this driver as a module, choose M here: the module will be called max7359_keypad. +config KEYBOARD_MCS + tristate "MELFAS MCS Touchkey" + depends on I2C + help + Say Y here if you have the MELFAS MCS5000/5080 touchkey controller + chip in your system. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called mcs_touchkey. 
+ config KEYBOARD_IMX tristate "IMX keypad support" depends on ARCH_MXC diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 4596d0c6f922..0a16674ed3d2 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o +obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c new file mode 100644 index 000000000000..63b849d7e90b --- /dev/null +++ b/drivers/input/keyboard/mcs_touchkey.c @@ -0,0 +1,239 @@ +/* + * mcs_touchkey.c - Touchkey driver for MELFAS MCS5000/5080 controller + * + * Copyright (C) 2010 Samsung Electronics Co.Ltd + * Author: HeungJun Kim + * Author: Joonyoung Shim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* MCS5000 Touchkey */ +#define MCS5000_TOUCHKEY_STATUS 0x04 +#define MCS5000_TOUCHKEY_STATUS_PRESS 7 +#define MCS5000_TOUCHKEY_FW 0x0a +#define MCS5000_TOUCHKEY_BASE_VAL 0x61 + +/* MCS5080 Touchkey */ +#define MCS5080_TOUCHKEY_STATUS 0x00 +#define MCS5080_TOUCHKEY_STATUS_PRESS 3 +#define MCS5080_TOUCHKEY_FW 0x01 +#define MCS5080_TOUCHKEY_BASE_VAL 0x1 + +enum mcs_touchkey_type { + MCS5000_TOUCHKEY, + MCS5080_TOUCHKEY, +}; + +struct mcs_touchkey_chip { + unsigned int status_reg; + unsigned int pressbit; + unsigned int press_invert; + unsigned int baseval; +}; + +struct mcs_touchkey_data { + struct i2c_client *client; + struct input_dev *input_dev; + struct mcs_touchkey_chip chip; + unsigned int key_code; + unsigned int key_val; + unsigned short keycodes[]; +}; + +static irqreturn_t mcs_touchkey_interrupt(int irq, void *dev_id) +{ + struct mcs_touchkey_data *data = dev_id; + struct mcs_touchkey_chip *chip = &data->chip; + struct i2c_client *client = data->client; + struct input_dev *input = data->input_dev; + unsigned int key_val; + unsigned int pressed; + int val; + + val = i2c_smbus_read_byte_data(client, chip->status_reg); + if (val < 0) { + dev_err(&client->dev, "i2c read error [%d]\n", val); + goto out; + } + + pressed = (val & (1 << chip->pressbit)) >> chip->pressbit; + if (chip->press_invert) + pressed ^= chip->press_invert; + + /* key_val is 0 when released, so we should use key_val of press. */ + if (pressed) { + key_val = val & (0xff >> (8 - chip->pressbit)); + if (!key_val) + goto out; + key_val -= chip->baseval; + data->key_code = data->keycodes[key_val]; + data->key_val = key_val; + } + + input_event(input, EV_MSC, MSC_SCAN, data->key_val); + input_report_key(input, data->key_code, pressed); + input_sync(input); + + dev_dbg(&client->dev, "key %d %d %s\n", data->key_val, data->key_code, + pressed ? 
"pressed" : "released"); + + out: + return IRQ_HANDLED; +} + +static int __devinit mcs_touchkey_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + const struct mcs_platform_data *pdata; + struct mcs_touchkey_data *data; + struct input_dev *input_dev; + unsigned int fw_reg; + int fw_ver; + int error; + int i; + + pdata = client->dev.platform_data; + if (!pdata) { + dev_err(&client->dev, "no platform data defined\n"); + return -EINVAL; + } + + data = kzalloc(sizeof(struct mcs_touchkey_data) + + sizeof(data->keycodes[0]) * (pdata->key_maxval + 1), + GFP_KERNEL); + input_dev = input_allocate_device(); + if (!data || !input_dev) { + dev_err(&client->dev, "Failed to allocate memory\n"); + error = -ENOMEM; + goto err_free_mem; + } + + data->client = client; + data->input_dev = input_dev; + + if (id->driver_data == MCS5000_TOUCHKEY) { + data->chip.status_reg = MCS5000_TOUCHKEY_STATUS; + data->chip.pressbit = MCS5000_TOUCHKEY_STATUS_PRESS; + data->chip.baseval = MCS5000_TOUCHKEY_BASE_VAL; + fw_reg = MCS5000_TOUCHKEY_FW; + } else { + data->chip.status_reg = MCS5080_TOUCHKEY_STATUS; + data->chip.pressbit = MCS5080_TOUCHKEY_STATUS_PRESS; + data->chip.press_invert = 1; + data->chip.baseval = MCS5080_TOUCHKEY_BASE_VAL; + fw_reg = MCS5080_TOUCHKEY_FW; + } + + fw_ver = i2c_smbus_read_byte_data(client, fw_reg); + if (fw_ver < 0) { + error = fw_ver; + dev_err(&client->dev, "i2c read error[%d]\n", error); + goto err_free_mem; + } + dev_info(&client->dev, "Firmware version: %d\n", fw_ver); + + input_dev->name = "MELPAS MCS Touchkey"; + input_dev->id.bustype = BUS_I2C; + input_dev->dev.parent = &client->dev; + input_dev->evbit[0] = BIT_MASK(EV_KEY); + if (!pdata->no_autorepeat) + input_dev->evbit[0] |= BIT_MASK(EV_REP); + input_dev->keycode = data->keycodes; + input_dev->keycodesize = sizeof(data->keycodes[0]); + input_dev->keycodemax = pdata->key_maxval + 1; + + for (i = 0; i < pdata->keymap_size; i++) { + unsigned int val = MCS_KEY_VAL(pdata->keymap[i]); + unsigned int code = MCS_KEY_CODE(pdata->keymap[i]); + + data->keycodes[val] = code; + __set_bit(code, input_dev->keybit); + } + + input_set_capability(input_dev, EV_MSC, MSC_SCAN); + input_set_drvdata(input_dev, data); + + if (pdata->cfg_pin) + pdata->cfg_pin(); + + error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt, + IRQF_TRIGGER_FALLING, client->dev.driver->name, data); + if (error) { + dev_err(&client->dev, "Failed to register interrupt\n"); + goto err_free_mem; + } + + error = input_register_device(input_dev); + if (error) + goto err_free_irq; + + i2c_set_clientdata(client, data); + return 0; + +err_free_irq: + free_irq(client->irq, data); +err_free_mem: + input_free_device(input_dev); + kfree(data); + return error; +} + +static int __devexit mcs_touchkey_remove(struct i2c_client *client) +{ + struct mcs_touchkey_data *data = i2c_get_clientdata(client); + + free_irq(client->irq, data); + input_unregister_device(data->input_dev); + kfree(data); + + return 0; +} + +static const struct i2c_device_id mcs_touchkey_id[] = { + { "mcs5000_touchkey", MCS5000_TOUCHKEY }, + { "mcs5080_touchkey", MCS5080_TOUCHKEY }, + { } +}; +MODULE_DEVICE_TABLE(i2c, mcs_touchkey_id); + +static struct i2c_driver mcs_touchkey_driver = { + .driver = { + .name = "mcs_touchkey", + .owner = THIS_MODULE, + }, + .probe = mcs_touchkey_probe, + .remove = __devexit_p(mcs_touchkey_remove), + .id_table = mcs_touchkey_id, +}; + +static int __init mcs_touchkey_init(void) +{ + return i2c_add_driver(&mcs_touchkey_driver); +} + +static void __exit 
mcs_touchkey_exit(void) +{ + i2c_del_driver(&mcs_touchkey_driver); +} + +module_init(mcs_touchkey_init); +module_exit(mcs_touchkey_exit); + +/* Module information */ +MODULE_AUTHOR("Joonyoung Shim "); +MODULE_AUTHOR("HeungJun Kim "); +MODULE_DESCRIPTION("Touchkey driver for MELFAS MCS5000/5080 controller"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c index 1fb0c2f06a44..6ee9940aaf5b 100644 --- a/drivers/input/touchscreen/mcs5000_ts.c +++ b/drivers/input/touchscreen/mcs5000_ts.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -105,7 +105,7 @@ enum mcs5000_ts_read_offset { struct mcs5000_ts_data { struct i2c_client *client; struct input_dev *input_dev; - const struct mcs5000_ts_platform_data *platform_data; + const struct mcs_platform_data *platform_data; }; static irqreturn_t mcs5000_ts_interrupt(int irq, void *dev_id) @@ -164,7 +164,7 @@ static irqreturn_t mcs5000_ts_interrupt(int irq, void *dev_id) static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data) { - const struct mcs5000_ts_platform_data *platform_data = + const struct mcs_platform_data *platform_data = data->platform_data; struct i2c_client *client = data->client; diff --git a/include/linux/i2c/mcs.h b/include/linux/i2c/mcs.h new file mode 100644 index 000000000000..725ae7c313ff --- /dev/null +++ b/include/linux/i2c/mcs.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * Author: HeungJun Kim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MCS_H +#define __LINUX_MCS_H + +#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff)) +#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff) +#define MCS_KEY_CODE(v) ((v) & 0xffff) + +struct mcs_platform_data { + void (*cfg_pin)(void); + + /* touchscreen */ + unsigned int x_size; + unsigned int y_size; + + /* touchkey */ + const u32 *keymap; + unsigned int keymap_size; + unsigned int key_maxval; + bool no_autorepeat; +}; + +#endif /* __LINUX_MCS_H */ diff --git a/include/linux/i2c/mcs5000_ts.h b/include/linux/i2c/mcs5000_ts.h deleted file mode 100644 index 5a117b5ca15e..000000000000 --- a/include/linux/i2c/mcs5000_ts.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * mcs5000_ts.h - * - * Copyright (C) 2009 Samsung Electronics Co.Ltd - * Author: Joonyoung Shim - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - */ - -#ifndef __LINUX_MCS5000_TS_H -#define __LINUX_MCS5000_TS_H - -/* platform data for the MELFAS MCS-5000 touchscreen driver */ -struct mcs5000_ts_platform_data { - void (*cfg_pin)(void); - int x_size; - int y_size; -}; - -#endif /* __LINUX_MCS5000_TS_H */ -- cgit v1.2.3 From ff49d74ad383f54041378144ca1a229ee9aeaa59 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Sat, 3 Jul 2010 13:07:35 +1000 Subject: module: initialize module dynamic debug later We should initialize the module dynamic debug datastructures only after determining that the module is not loaded yet. 
This fixes a bug introduced in 2.6.35-rc2, where, when trying to load a module twice, we also load its dynamic printing data twice, which causes all sorts of nasty issues. Also handle the dynamic debug cleanup later on failure. Signed-off-by: Yehuda Sadeh Signed-off-by: Rusty Russell (removed a #ifdef) Signed-off-by: Linus Torvalds --- include/linux/dynamic_debug.h | 4 ++-- kernel/module.c | 23 +++++++++++++++-------- lib/dynamic_debug.c | 2 +- 3 files changed, 18 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index b3cd4de9432b..52c0da4bdd18 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -40,7 +40,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname); #if defined(CONFIG_DYNAMIC_DEBUG) -extern int ddebug_remove_module(char *mod_name); +extern int ddebug_remove_module(const char *mod_name); #define __dynamic_dbg_enabled(dd) ({ \ int __ret = 0; \ @@ -73,7 +73,7 @@ extern int ddebug_remove_module(char *mod_name); #else -static inline int ddebug_remove_module(char *mod) +static inline int ddebug_remove_module(const char *mod) { return 0; } diff --git a/kernel/module.c b/kernel/module.c index 8c6b42840dd1..5d2d28197c82 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2062,6 +2062,12 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num) #endif } +static void dynamic_debug_remove(struct _ddebug *debug) +{ + if (debug) + ddebug_remove_module(debug->modname); +} + static void *module_alloc_update_bounds(unsigned long size) { void *ret = module_alloc(size); @@ -2124,6 +2130,8 @@ static noinline struct module *load_module(void __user *umod, void *ptr = NULL; /* Stops spurious gcc warning */ unsigned long symoffs, stroffs, *strmap; void __percpu *percpu; + struct _ddebug *debug = NULL; + unsigned int num_debug = 0; mm_segment_t old_fs; @@ -2476,15 +2484,9 @@ static noinline struct module *load_module(void __user *umod, kfree(strmap); strmap = NULL; - if (!mod->taints) { - struct _ddebug *debug; - unsigned int num_debug; - + if (!mod->taints) debug = section_objs(hdr, sechdrs, secstrings, "__verbose", sizeof(*debug), &num_debug); - if (debug) - dynamic_debug_setup(debug, num_debug); - } err = module_finalize(hdr, sechdrs, mod); if (err < 0) @@ -2526,10 +2528,13 @@ static noinline struct module *load_module(void __user *umod, goto unlock; } + if (debug) + dynamic_debug_setup(debug, num_debug); + /* Find duplicate symbols */ err = verify_export_symbols(mod); if (err < 0) - goto unlock; + goto ddebug; list_add_rcu(&mod->list, &modules); mutex_unlock(&module_mutex); @@ -2557,6 +2562,8 @@ static noinline struct module *load_module(void __user *umod, mutex_lock(&module_mutex); /* Unlink carefully: kallsyms could be walking list. */ list_del_rcu(&mod->list); + ddebug: + dynamic_debug_remove(debug); unlock: mutex_unlock(&module_mutex); synchronize_sched(); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 3df8eb17a607..02afc2533728 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -692,7 +692,7 @@ static void ddebug_table_free(struct ddebug_table *dt) * Called in response to a module being unloaded. Removes * any ddebug_table's which point at the module. 
*/ -int ddebug_remove_module(char *mod_name) +int ddebug_remove_module(const char *mod_name) { struct ddebug_table *dt, *nextdt; int ret = -ENOENT; -- cgit v1.2.3 From b945d6b2554d550fe95caadc61e521c0ad71fb9c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 29 May 2010 15:31:43 +0200 Subject: rbtree: Undo augmented trees performance damage and regression Reimplement augmented RB-trees without sprinkling extra branches all over the RB-tree code (which lives in the scheduler hot path). This approach is 'borrowed' from Fabio's BFQ implementation and relies on traversing the rebalance path after the RB-tree-op to correct the heap property for insertion/removal and make up for the damage done by the tree rotations. For insertion the rebalance path is trivially that from the new node upwards to the root, for removal it is that from the deepest node in the path from the to be removed node that will still be around after the removal. [ This patch also fixes a video driver regression reported by Ali Gholami Rudi - the memtype->subtree_max_end was updated incorrectly. ] Acked-by: Suresh Siddha Acked-by: Venkatesh Pallipadi Signed-off-by: Peter Zijlstra Tested-by: Ali Gholami Rudi Cc: Fabio Checconi Cc: "H. Peter Anvin" Cc: Andrew Morton Cc: Linus Torvalds LKML-Reference: <1275414172.27810.27961.camel@twins> Signed-off-by: Ingo Molnar --- arch/x86/mm/pat_rbtree.c | 34 +++----------- include/linux/rbtree.h | 13 ++++-- lib/rbtree.c | 116 +++++++++++++++++++++++++++++------------------ 3 files changed, 87 insertions(+), 76 deletions(-) (limited to 'include') diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c index f20eeec85a86..8acaddd0fb21 100644 --- a/arch/x86/mm/pat_rbtree.c +++ b/arch/x86/mm/pat_rbtree.c @@ -34,8 +34,7 @@ * memtype_lock protects the rbtree. 
*/ -static void memtype_rb_augment_cb(struct rb_node *node); -static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb); +static struct rb_root memtype_rbroot = RB_ROOT; static int is_node_overlap(struct memtype *node, u64 start, u64 end) { @@ -56,7 +55,7 @@ static u64 get_subtree_max_end(struct rb_node *node) } /* Update 'subtree_max_end' for a node, based on node and its children */ -static void update_node_max_end(struct rb_node *node) +static void memtype_rb_augment_cb(struct rb_node *node, void *__unused) { struct memtype *data; u64 max_end, child_max_end; @@ -78,25 +77,6 @@ static void update_node_max_end(struct rb_node *node) data->subtree_max_end = max_end; } -/* Update 'subtree_max_end' for a node and all its ancestors */ -static void update_path_max_end(struct rb_node *node) -{ - u64 old_max_end, new_max_end; - - while (node) { - struct memtype *data = container_of(node, struct memtype, rb); - - old_max_end = data->subtree_max_end; - update_node_max_end(node); - new_max_end = data->subtree_max_end; - - if (new_max_end == old_max_end) - break; - - node = rb_parent(node); - } -} - /* Find the first (lowest start addr) overlapping range from rb tree */ static struct memtype *memtype_rb_lowest_match(struct rb_root *root, u64 start, u64 end) @@ -190,12 +170,6 @@ failure: return -EBUSY; } -static void memtype_rb_augment_cb(struct rb_node *node) -{ - if (node) - update_path_max_end(node); -} - static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata) { struct rb_node **node = &(root->rb_node); @@ -213,6 +187,7 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata) rb_link_node(&newdata->rb, parent, node); rb_insert_color(&newdata->rb, root); + rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL); } int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type) @@ -234,13 +209,16 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type) struct memtype *rbt_memtype_erase(u64 start, u64 end) { + struct rb_node *deepest; struct memtype *data; data = memtype_rb_exact_match(&memtype_rbroot, start, end); if (!data) goto out; + deepest = rb_augment_erase_begin(&data->rb); rb_erase(&data->rb, &memtype_rbroot); + rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL); out: return data; } diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index fe1872e5b37e..7066acb2c530 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -110,7 +110,6 @@ struct rb_node struct rb_root { struct rb_node *rb_node; - void (*augment_cb)(struct rb_node *node); }; @@ -130,9 +129,7 @@ static inline void rb_set_color(struct rb_node *rb, int color) rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; } -#define RB_ROOT (struct rb_root) { NULL, NULL, } -#define RB_AUGMENT_ROOT(x) (struct rb_root) { NULL, x} - +#define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) @@ -142,6 +139,14 @@ static inline void rb_set_color(struct rb_node *rb, int color) extern void rb_insert_color(struct rb_node *, struct rb_root *); extern void rb_erase(struct rb_node *, struct rb_root *); +typedef void (*rb_augment_f)(struct rb_node *node, void *data); + +extern void rb_augment_insert(struct rb_node *node, + rb_augment_f func, void *data); +extern struct rb_node *rb_augment_erase_begin(struct rb_node *node); +extern void rb_augment_erase_end(struct rb_node *node, + rb_augment_f func, void *data); + /* 
Find logical next and previous nodes in a tree */ extern struct rb_node *rb_next(const struct rb_node *); extern struct rb_node *rb_prev(const struct rb_node *); diff --git a/lib/rbtree.c b/lib/rbtree.c index 15e10b1afdd2..4693f79195d3 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -44,11 +44,6 @@ static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) else root->rb_node = right; rb_set_parent(node, right); - - if (root->augment_cb) { - root->augment_cb(node); - root->augment_cb(right); - } } static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) @@ -72,20 +67,12 @@ static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) else root->rb_node = left; rb_set_parent(node, left); - - if (root->augment_cb) { - root->augment_cb(node); - root->augment_cb(left); - } } void rb_insert_color(struct rb_node *node, struct rb_root *root) { struct rb_node *parent, *gparent; - if (root->augment_cb) - root->augment_cb(node); - while ((parent = rb_parent(node)) && rb_is_red(parent)) { gparent = rb_parent(parent); @@ -240,15 +227,12 @@ void rb_erase(struct rb_node *node, struct rb_root *root) else { struct rb_node *old = node, *left; - int old_parent_cb = 0; - int successor_parent_cb = 0; node = node->rb_right; while ((left = node->rb_left) != NULL) node = left; if (rb_parent(old)) { - old_parent_cb = 1; if (rb_parent(old)->rb_left == old) rb_parent(old)->rb_left = node; else @@ -263,10 +247,8 @@ void rb_erase(struct rb_node *node, struct rb_root *root) if (parent == old) { parent = node; } else { - successor_parent_cb = 1; if (child) rb_set_parent(child, parent); - parent->rb_left = child; node->rb_right = old->rb_right; @@ -277,24 +259,6 @@ void rb_erase(struct rb_node *node, struct rb_root *root) node->rb_left = old->rb_left; rb_set_parent(old->rb_left, node); - if (root->augment_cb) { - /* - * Here, three different nodes can have new children. - * The parent of the successor node that was selected - * to replace the node to be erased. - * The node that is getting erased and is now replaced - * by its successor. - * The parent of the node getting erased-replaced. 
- */ - if (successor_parent_cb) - root->augment_cb(parent); - - root->augment_cb(node); - - if (old_parent_cb) - root->augment_cb(rb_parent(old)); - } - goto color; } @@ -303,19 +267,15 @@ void rb_erase(struct rb_node *node, struct rb_root *root) if (child) rb_set_parent(child, parent); - - if (parent) { + if (parent) + { if (parent->rb_left == node) parent->rb_left = child; else parent->rb_right = child; - - if (root->augment_cb) - root->augment_cb(parent); - - } else { - root->rb_node = child; } + else + root->rb_node = child; color: if (color == RB_BLACK) @@ -323,6 +283,74 @@ void rb_erase(struct rb_node *node, struct rb_root *root) } EXPORT_SYMBOL(rb_erase); +static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data) +{ + struct rb_node *parent; + +up: + func(node, data); + parent = rb_parent(node); + if (!parent) + return; + + if (node == parent->rb_left && parent->rb_right) + func(parent->rb_right, data); + else if (parent->rb_left) + func(parent->rb_left, data); + + node = parent; + goto up; +} + +/* + * after inserting @node into the tree, update the tree to account for + * both the new entry and any damage done by rebalance + */ +void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data) +{ + if (node->rb_left) + node = node->rb_left; + else if (node->rb_right) + node = node->rb_right; + + rb_augment_path(node, func, data); +} + +/* + * before removing the node, find the deepest node on the rebalance path + * that will still be there after @node gets removed + */ +struct rb_node *rb_augment_erase_begin(struct rb_node *node) +{ + struct rb_node *deepest; + + if (!node->rb_right && !node->rb_left) + deepest = rb_parent(node); + else if (!node->rb_right) + deepest = node->rb_left; + else if (!node->rb_left) + deepest = node->rb_right; + else { + deepest = rb_next(node); + if (deepest->rb_right) + deepest = deepest->rb_right; + else if (rb_parent(deepest) != node) + deepest = rb_parent(deepest); + } + + return deepest; +} + +/* + * after removal, update the tree to account for the removed entry + * and any rebalance damage. + */ +void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data) +{ + if (node) + rb_augment_path(node, func, data); +} + /* * This function returns the first node (in sort order) of the tree. */ -- cgit v1.2.3 From 9c3a8ee8a1d72c5c0d7fbdf426d80e270ddfa54c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 10 Jun 2010 12:07:27 +0200 Subject: writeback: remove writeback_inodes_wbc This was just an odd wrapper around writeback_inodes_wb. Removing this also allows to get rid of the bdi member of struct writeback_control which was rather out of place there. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/afs/write.c | 1 - fs/btrfs/extent_io.c | 2 -- fs/fs-writeback.c | 12 ++---------- include/linux/writeback.h | 5 ++--- mm/backing-dev.c | 3 +-- mm/page-writeback.c | 3 +-- 6 files changed, 6 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/fs/afs/write.c b/fs/afs/write.c index 3dab9e9948d0..722743b152d8 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -680,7 +680,6 @@ int afs_writeback_all(struct afs_vnode *vnode) { struct address_space *mapping = vnode->vfs_inode.i_mapping; struct writeback_control wbc = { - .bdi = mapping->backing_dev_info, .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, .range_cyclic = 1, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a4080c21ec55..d74e6af9b53a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2594,7 +2594,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, .sync_io = wbc->sync_mode == WB_SYNC_ALL, }; struct writeback_control wbc_writepages = { - .bdi = wbc->bdi, .sync_mode = wbc->sync_mode, .older_than_this = NULL, .nr_to_write = 64, @@ -2628,7 +2627,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, .sync_io = mode == WB_SYNC_ALL, }; struct writeback_control wbc_writepages = { - .bdi = inode->i_mapping->backing_dev_info, .sync_mode = mode, .older_than_this = NULL, .nr_to_write = nr_pages * 2, diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 6981e4b7c148..94a602e98bb5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -614,8 +614,8 @@ static int writeback_sb_inodes(struct super_block *sb, return 1; } -static void writeback_inodes_wb(struct bdi_writeback *wb, - struct writeback_control *wbc) +void writeback_inodes_wb(struct bdi_writeback *wb, + struct writeback_control *wbc) { int ret = 0; @@ -660,13 +660,6 @@ static void writeback_inodes_wb(struct bdi_writeback *wb, /* Leave any unwritten inodes on b_io */ } -void writeback_inodes_wbc(struct writeback_control *wbc) -{ - struct backing_dev_info *bdi = wbc->bdi; - - writeback_inodes_wb(&bdi->wb, wbc); -} - /* * The maximum number of pages to writeout in a single bdi flush/kupdate * operation. We do this so we don't hold I_SYNC against an inode for @@ -705,7 +698,6 @@ static long wb_writeback(struct bdi_writeback *wb, struct wb_writeback_args *args) { struct writeback_control wbc = { - .bdi = wb->bdi, .sb = args->sb, .sync_mode = args->sync_mode, .older_than_this = NULL, diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d63ef8f9609f..f6756f6a610c 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -27,8 +27,6 @@ enum writeback_sync_modes { * in a manner such that unspecified fields are set to zero. 
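At the call sites the conversion is mechanical; the before/after fragments below are a sketch based on the bdi_flush_io() hunk in mm/backing-dev.c further down:

    /* before: the target queue travelled inside the control structure */
    struct writeback_control wbc = {
            .bdi             = bdi,
            .sync_mode       = WB_SYNC_NONE,
            .older_than_this = NULL,
            .range_cyclic    = 1,
            .nr_to_write     = 1024,
    };
    writeback_inodes_wbc(&wbc);

    /* after: the bdi_writeback to operate on is named explicitly */
    struct writeback_control wbc = {
            .sync_mode       = WB_SYNC_NONE,
            .older_than_this = NULL,
            .range_cyclic    = 1,
            .nr_to_write     = 1024,
    };
    writeback_inodes_wb(&bdi->wb, &wbc);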
*/ struct writeback_control { - struct backing_dev_info *bdi; /* If !NULL, only write back this - queue */ struct super_block *sb; /* if !NULL, only write inodes from this super_block */ enum writeback_sync_modes sync_mode; @@ -66,7 +64,8 @@ int inode_wait(void *); void writeback_inodes_sb(struct super_block *); int writeback_inodes_sb_if_idle(struct super_block *); void sync_inodes_sb(struct super_block *); -void writeback_inodes_wbc(struct writeback_control *wbc); +void writeback_inodes_wb(struct bdi_writeback *wb, + struct writeback_control *wbc); long wb_do_writeback(struct bdi_writeback *wb, int force_wait); void wakeup_flusher_threads(long nr_pages); diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 660a87a22511..6e0b09a1ec2c 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -340,14 +340,13 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi) static void bdi_flush_io(struct backing_dev_info *bdi) { struct writeback_control wbc = { - .bdi = bdi, .sync_mode = WB_SYNC_NONE, .older_than_this = NULL, .range_cyclic = 1, .nr_to_write = 1024, }; - writeback_inodes_wbc(&wbc); + writeback_inodes_wb(&bdi->wb, &wbc); } /* diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 54f28bd493d3..37498ef61548 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -495,7 +495,6 @@ static void balance_dirty_pages(struct address_space *mapping, for (;;) { struct writeback_control wbc = { - .bdi = bdi, .sync_mode = WB_SYNC_NONE, .older_than_this = NULL, .nr_to_write = write_chunk, @@ -537,7 +536,7 @@ static void balance_dirty_pages(struct address_space *mapping, * up. */ if (bdi_nr_reclaimable > bdi_thresh) { - writeback_inodes_wbc(&wbc); + writeback_inodes_wb(&bdi->wb, &wbc); pages_written += write_chunk - wbc.nr_to_write; get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi); -- cgit v1.2.3 From edadfb10ba35da7253541e4155aa92eff758ebe6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 10 Jun 2010 12:07:54 +0200 Subject: writeback: split writeback_inodes_wb The case where we have a superblock doesn't require a loop here as we scan over all inodes in writeback_sb_inodes. Split it out into a separate helper to make the code simpler. This also allows to get rid of the sb member in struct writeback_control, which was rather out of place there. Also update the comments in writeback_sb_inodes that explain the handling of inodes from wrong superblocks. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/fs-writeback.c | 82 ++++++++++++++++++++++++++--------------------- include/linux/writeback.h | 2 -- 2 files changed, 46 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 94a602e98bb5..8cc06d5432b5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -554,29 +554,41 @@ static bool pin_sb_for_writeback(struct super_block *sb) /* * Write a portion of b_io inodes which belong to @sb. - * If @wbc->sb != NULL, then find and write all such + * + * If @only_this_sb is true, then find and write all such * inodes. Otherwise write only ones which go sequentially * in reverse order. + * * Return 1, if the caller writeback routine should be * interrupted. Otherwise return 0. 
*/ -static int writeback_sb_inodes(struct super_block *sb, - struct bdi_writeback *wb, - struct writeback_control *wbc) +static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, + struct writeback_control *wbc, bool only_this_sb) { while (!list_empty(&wb->b_io)) { long pages_skipped; struct inode *inode = list_entry(wb->b_io.prev, struct inode, i_list); - if (wbc->sb && sb != inode->i_sb) { - /* super block given and doesn't - match, skip this inode */ - redirty_tail(inode); - continue; - } - if (sb != inode->i_sb) - /* finish with this superblock */ + + if (inode->i_sb != sb) { + if (only_this_sb) { + /* + * We only want to write back data for this + * superblock, move all inodes not belonging + * to it back onto the dirty list. + */ + redirty_tail(inode); + continue; + } + + /* + * The inode belongs to a different superblock. + * Bounce back to the caller to unpin this and + * pin the next superblock. + */ return 0; + } + if (inode->i_state & (I_NEW | I_WILL_FREE)) { requeue_io(inode); continue; @@ -629,29 +641,12 @@ void writeback_inodes_wb(struct bdi_writeback *wb, struct inode, i_list); struct super_block *sb = inode->i_sb; - if (wbc->sb) { - /* - * We are requested to write out inodes for a specific - * superblock. This means we already have s_umount - * taken by the caller which also waits for us to - * complete the writeout. - */ - if (sb != wbc->sb) { - redirty_tail(inode); - continue; - } - - WARN_ON(!rwsem_is_locked(&sb->s_umount)); - - ret = writeback_sb_inodes(sb, wb, wbc); - } else { - if (!pin_sb_for_writeback(sb)) { - requeue_io(inode); - continue; - } - ret = writeback_sb_inodes(sb, wb, wbc); - drop_super(sb); + if (!pin_sb_for_writeback(sb)) { + requeue_io(inode); + continue; } + ret = writeback_sb_inodes(sb, wb, wbc, false); + drop_super(sb); if (ret) break; @@ -660,6 +655,19 @@ void writeback_inodes_wb(struct bdi_writeback *wb, /* Leave any unwritten inodes on b_io */ } +static void __writeback_inodes_sb(struct super_block *sb, + struct bdi_writeback *wb, struct writeback_control *wbc) +{ + WARN_ON(!rwsem_is_locked(&sb->s_umount)); + + wbc->wb_start = jiffies; /* livelock avoidance */ + spin_lock(&inode_lock); + if (!wbc->for_kupdate || list_empty(&wb->b_io)) + queue_io(wb, wbc->older_than_this); + writeback_sb_inodes(sb, wb, wbc, true); + spin_unlock(&inode_lock); +} + /* * The maximum number of pages to writeout in a single bdi flush/kupdate * operation. We do this so we don't hold I_SYNC against an inode for @@ -698,7 +706,6 @@ static long wb_writeback(struct bdi_writeback *wb, struct wb_writeback_args *args) { struct writeback_control wbc = { - .sb = args->sb, .sync_mode = args->sync_mode, .older_than_this = NULL, .for_kupdate = args->for_kupdate, @@ -736,7 +743,10 @@ static long wb_writeback(struct bdi_writeback *wb, wbc.more_io = 0; wbc.nr_to_write = MAX_WRITEBACK_PAGES; wbc.pages_skipped = 0; - writeback_inodes_wb(wb, &wbc); + if (args->sb) + __writeback_inodes_sb(args->sb, wb, &wbc); + else + writeback_inodes_wb(wb, &wbc); args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; diff --git a/include/linux/writeback.h b/include/linux/writeback.h index f6756f6a610c..c24eca71e80c 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -27,8 +27,6 @@ enum writeback_sync_modes { * in a manner such that unspecified fields are set to zero. 
*/ struct writeback_control { - struct super_block *sb; /* if !NULL, only write inodes from - this super_block */ enum writeback_sync_modes sync_mode; unsigned long *older_than_this; /* If !NULL, only write back inodes older than this */ -- cgit v1.2.3 From 83ba7b071f30f7c01f72518ad72d5cd203c27502 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 6 Jul 2010 08:59:53 +0200 Subject: writeback: simplify the write back thread queue First remove items from work_list as soon as we start working on them. This means we don't have to track any pending or visited state and can get rid of all the RCU magic freeing the work items - we can simply free them once the operation has finished. Second use a real completion for tracking synchronous requests - if the caller sets the completion pointer we complete it, otherwise use it as a boolean indicator that we can free the work item directly. Third unify struct wb_writeback_args and struct bdi_work into a single data structure, wb_writeback_work. Previous we set all parameters into a struct wb_writeback_args, copied it into struct bdi_work, copied it again on the stack to use it there. Instead of just allocate one structure dynamically or on the stack and use it all the way through the stack. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/fs-writeback.c | 253 ++++++++++++-------------------------------- include/linux/backing-dev.h | 2 - mm/backing-dev.c | 14 +-- 3 files changed, 72 insertions(+), 197 deletions(-) (limited to 'include') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8cc06d5432b5..d5be1693ac93 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -38,43 +38,18 @@ int nr_pdflush_threads; /* * Passed into wb_writeback(), essentially a subset of writeback_control */ -struct wb_writeback_args { +struct wb_writeback_work { long nr_pages; struct super_block *sb; enum writeback_sync_modes sync_mode; unsigned int for_kupdate:1; unsigned int range_cyclic:1; unsigned int for_background:1; -}; -/* - * Work items for the bdi_writeback threads - */ -struct bdi_work { struct list_head list; /* pending work list */ - struct rcu_head rcu_head; /* for RCU free/clear of work */ - - unsigned long seen; /* threads that have seen this work */ - atomic_t pending; /* number of threads still to do work */ - - struct wb_writeback_args args; /* writeback arguments */ - - unsigned long state; /* flag bits, see WS_* */ -}; - -enum { - WS_INPROGRESS = 0, - WS_ONSTACK, + struct completion *done; /* set if the caller waits */ }; -static inline void bdi_work_init(struct bdi_work *work, - struct wb_writeback_args *args) -{ - INIT_RCU_HEAD(&work->rcu_head); - work->args = *args; - __set_bit(WS_INPROGRESS, &work->state); -} - /** * writeback_in_progress - determine whether there is writeback in progress * @bdi: the device's backing_dev_info structure. @@ -87,49 +62,11 @@ int writeback_in_progress(struct backing_dev_info *bdi) return !list_empty(&bdi->work_list); } -static void bdi_work_free(struct rcu_head *head) -{ - struct bdi_work *work = container_of(head, struct bdi_work, rcu_head); - - clear_bit(WS_INPROGRESS, &work->state); - smp_mb__after_clear_bit(); - wake_up_bit(&work->state, WS_INPROGRESS); - - if (!test_bit(WS_ONSTACK, &work->state)) - kfree(work); -} - -static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work) -{ - /* - * The caller has retrieved the work arguments from this work, - * drop our reference. 
If this is the last ref, delete and free it - */ - if (atomic_dec_and_test(&work->pending)) { - struct backing_dev_info *bdi = wb->bdi; - - spin_lock(&bdi->wb_lock); - list_del_rcu(&work->list); - spin_unlock(&bdi->wb_lock); - - call_rcu(&work->rcu_head, bdi_work_free); - } -} - -static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) +static void bdi_queue_work(struct backing_dev_info *bdi, + struct wb_writeback_work *work) { - work->seen = bdi->wb_mask; - BUG_ON(!work->seen); - atomic_set(&work->pending, bdi->wb_cnt); - BUG_ON(!bdi->wb_cnt); - - /* - * list_add_tail_rcu() contains the necessary barriers to - * make sure the above stores are seen before the item is - * noticed on the list - */ spin_lock(&bdi->wb_lock); - list_add_tail_rcu(&work->list, &bdi->work_list); + list_add_tail(&work->list, &bdi->work_list); spin_unlock(&bdi->wb_lock); /* @@ -146,55 +83,29 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) } } -/* - * Used for on-stack allocated work items. The caller needs to wait until - * the wb threads have acked the work before it's safe to continue. - */ -static void bdi_wait_on_work_done(struct bdi_work *work) -{ - wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait, - TASK_UNINTERRUPTIBLE); -} - -static void bdi_alloc_queue_work(struct backing_dev_info *bdi, - struct wb_writeback_args *args) +static void +__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, + bool range_cyclic, bool for_background) { - struct bdi_work *work; + struct wb_writeback_work *work; /* * This is WB_SYNC_NONE writeback, so if allocation fails just * wakeup the thread for old dirty data writeback */ - work = kmalloc(sizeof(*work), GFP_ATOMIC); - if (work) { - bdi_work_init(work, args); - bdi_queue_work(bdi, work); - } else { - struct bdi_writeback *wb = &bdi->wb; - - if (wb->task) - wake_up_process(wb->task); + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + if (bdi->wb.task) + wake_up_process(bdi->wb.task); + return; } -} -/** - * bdi_queue_work_onstack - start and wait for writeback - * @args: parameters to control the work queue writeback - * - * Description: - * This function initiates writeback and waits for the operation to - * complete. Callers must hold the sb s_umount semaphore for - * reading, to avoid having the super disappear before we are done. 
- */ -static void bdi_queue_work_onstack(struct wb_writeback_args *args) -{ - struct bdi_work work; + work->sync_mode = WB_SYNC_NONE; + work->nr_pages = nr_pages; + work->range_cyclic = range_cyclic; + work->for_background = for_background; - bdi_work_init(&work, args); - __set_bit(WS_ONSTACK, &work.state); - - bdi_queue_work(args->sb->s_bdi, &work); - bdi_wait_on_work_done(&work); + bdi_queue_work(bdi, work); } /** @@ -210,13 +121,7 @@ static void bdi_queue_work_onstack(struct wb_writeback_args *args) */ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) { - struct wb_writeback_args args = { - .sync_mode = WB_SYNC_NONE, - .nr_pages = nr_pages, - .range_cyclic = 1, - }; - - bdi_alloc_queue_work(bdi, &args); + __bdi_start_writeback(bdi, nr_pages, true, false); } /** @@ -230,13 +135,7 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) */ void bdi_start_background_writeback(struct backing_dev_info *bdi) { - struct wb_writeback_args args = { - .sync_mode = WB_SYNC_NONE, - .nr_pages = LONG_MAX, - .for_background = 1, - .range_cyclic = 1, - }; - bdi_alloc_queue_work(bdi, &args); + __bdi_start_writeback(bdi, LONG_MAX, true, true); } /* @@ -703,14 +602,14 @@ static inline bool over_bground_thresh(void) * all dirty pages if they are all attached to "old" mappings. */ static long wb_writeback(struct bdi_writeback *wb, - struct wb_writeback_args *args) + struct wb_writeback_work *work) { struct writeback_control wbc = { - .sync_mode = args->sync_mode, + .sync_mode = work->sync_mode, .older_than_this = NULL, - .for_kupdate = args->for_kupdate, - .for_background = args->for_background, - .range_cyclic = args->range_cyclic, + .for_kupdate = work->for_kupdate, + .for_background = work->for_background, + .range_cyclic = work->range_cyclic, }; unsigned long oldest_jif; long wrote = 0; @@ -730,24 +629,24 @@ static long wb_writeback(struct bdi_writeback *wb, /* * Stop writeback when nr_pages has been consumed */ - if (args->nr_pages <= 0) + if (work->nr_pages <= 0) break; /* * For background writeout, stop when we are below the * background dirty threshold */ - if (args->for_background && !over_bground_thresh()) + if (work->for_background && !over_bground_thresh()) break; wbc.more_io = 0; wbc.nr_to_write = MAX_WRITEBACK_PAGES; wbc.pages_skipped = 0; - if (args->sb) - __writeback_inodes_sb(args->sb, wb, &wbc); + if (work->sb) + __writeback_inodes_sb(work->sb, wb, &wbc); else writeback_inodes_wb(wb, &wbc); - args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; + work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; /* @@ -783,31 +682,21 @@ static long wb_writeback(struct bdi_writeback *wb, } /* - * Return the next bdi_work struct that hasn't been processed by this - * wb thread yet. ->seen is initially set for each thread that exists - * for this device, when a thread first notices a piece of work it - * clears its bit. Depending on writeback type, the thread will notify - * completion on either receiving the work (WB_SYNC_NONE) or after - * it is done (WB_SYNC_ALL). + * Return the next wb_writeback_work struct that hasn't been processed yet. 
*/ -static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi, - struct bdi_writeback *wb) +static struct wb_writeback_work * +get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb) { - struct bdi_work *work, *ret = NULL; - - rcu_read_lock(); - - list_for_each_entry_rcu(work, &bdi->work_list, list) { - if (!test_bit(wb->nr, &work->seen)) - continue; - clear_bit(wb->nr, &work->seen); + struct wb_writeback_work *work = NULL; - ret = work; - break; + spin_lock(&bdi->wb_lock); + if (!list_empty(&bdi->work_list)) { + work = list_entry(bdi->work_list.next, + struct wb_writeback_work, list); + list_del_init(&work->list); } - - rcu_read_unlock(); - return ret; + spin_unlock(&bdi->wb_lock); + return work; } static long wb_check_old_data_flush(struct bdi_writeback *wb) @@ -832,14 +721,14 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) (inodes_stat.nr_inodes - inodes_stat.nr_unused); if (nr_pages) { - struct wb_writeback_args args = { + struct wb_writeback_work work = { .nr_pages = nr_pages, .sync_mode = WB_SYNC_NONE, .for_kupdate = 1, .range_cyclic = 1, }; - return wb_writeback(wb, &args); + return wb_writeback(wb, &work); } return 0; @@ -851,33 +740,27 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) long wb_do_writeback(struct bdi_writeback *wb, int force_wait) { struct backing_dev_info *bdi = wb->bdi; - struct bdi_work *work; + struct wb_writeback_work *work; long wrote = 0; while ((work = get_next_work_item(bdi, wb)) != NULL) { - struct wb_writeback_args args = work->args; - /* * Override sync mode, in case we must wait for completion + * because this thread is exiting now. */ if (force_wait) - work->args.sync_mode = args.sync_mode = WB_SYNC_ALL; + work->sync_mode = WB_SYNC_ALL; - /* - * If this isn't a data integrity operation, just notify - * that we have seen this work and we are now starting it. - */ - if (!test_bit(WS_ONSTACK, &work->state)) - wb_clear_pending(wb, work); - - wrote += wb_writeback(wb, &args); + wrote += wb_writeback(wb, work); /* - * This is a data integrity writeback, so only do the - * notification when we have completed the work. + * Notify the caller of completion if this is a synchronous + * work item, otherwise just free it. 
*/ - if (test_bit(WS_ONSTACK, &work->state)) - wb_clear_pending(wb, work); + if (work->done) + complete(work->done); + else + kfree(work); } /* @@ -940,14 +823,9 @@ int bdi_writeback_task(struct bdi_writeback *wb) void wakeup_flusher_threads(long nr_pages) { struct backing_dev_info *bdi; - struct wb_writeback_args args = { - .sync_mode = WB_SYNC_NONE, - }; - if (nr_pages) { - args.nr_pages = nr_pages; - } else { - args.nr_pages = global_page_state(NR_FILE_DIRTY) + + if (!nr_pages) { + nr_pages = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); } @@ -955,7 +833,7 @@ void wakeup_flusher_threads(long nr_pages) list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { if (!bdi_has_dirty_io(bdi)) continue; - bdi_alloc_queue_work(bdi, &args); + __bdi_start_writeback(bdi, nr_pages, false, false); } rcu_read_unlock(); } @@ -1164,17 +1042,20 @@ void writeback_inodes_sb(struct super_block *sb) { unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); - struct wb_writeback_args args = { + DECLARE_COMPLETION_ONSTACK(done); + struct wb_writeback_work work = { .sb = sb, .sync_mode = WB_SYNC_NONE, + .done = &done, }; WARN_ON(!rwsem_is_locked(&sb->s_umount)); - args.nr_pages = nr_dirty + nr_unstable + + work.nr_pages = nr_dirty + nr_unstable + (inodes_stat.nr_inodes - inodes_stat.nr_unused); - bdi_queue_work_onstack(&args); + bdi_queue_work(sb->s_bdi, &work); + wait_for_completion(&done); } EXPORT_SYMBOL(writeback_inodes_sb); @@ -1206,16 +1087,20 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle); */ void sync_inodes_sb(struct super_block *sb) { - struct wb_writeback_args args = { + DECLARE_COMPLETION_ONSTACK(done); + struct wb_writeback_work work = { .sb = sb, .sync_mode = WB_SYNC_ALL, .nr_pages = LONG_MAX, .range_cyclic = 0, + .done = &done, }; WARN_ON(!rwsem_is_locked(&sb->s_umount)); - bdi_queue_work_onstack(&args); + bdi_queue_work(sb->s_bdi, &work); + wait_for_completion(&done); + wait_sb_inodes(sb); } EXPORT_SYMBOL(sync_inodes_sb); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 9ae2889096b6..e9aec0d099df 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -82,8 +82,6 @@ struct backing_dev_info { struct bdi_writeback wb; /* default writeback info for this bdi */ spinlock_t wb_lock; /* protects update side of wb_list */ struct list_head wb_list; /* the flusher threads hanging off this bdi */ - unsigned long wb_mask; /* bitmask of registered tasks */ - unsigned int wb_cnt; /* number of registered tasks */ struct list_head work_list; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6e0b09a1ec2c..123bcef13e51 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -104,15 +104,13 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) "b_more_io: %8lu\n" "bdi_list: %8u\n" "state: %8lx\n" - "wb_mask: %8lx\n" - "wb_list: %8u\n" - "wb_cnt: %8u\n", + "wb_list: %8u\n", (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)), (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)), K(bdi_thresh), K(dirty_thresh), K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io, - !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask, - !list_empty(&bdi->wb_list), bdi->wb_cnt); + !list_empty(&bdi->bdi_list), bdi->state, + !list_empty(&bdi->wb_list)); #undef K return 0; @@ -674,12 +672,6 @@ int bdi_init(struct backing_dev_info *bdi) bdi_wb_init(&bdi->wb, bdi); - /* - * Just one thread support for now, hard code mask and count - */ - bdi->wb_mask = 1; - bdi->wb_cnt = 1; - for (i = 0; i < 
NR_BDI_STAT_ITEMS; i++) { err = percpu_counter_init(&bdi->bdi_stat[i], 0); if (err) -- cgit v1.2.3 From 140236b4b1c749c9b795ea3d11558a0eb5a3a080 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 10 Jun 2010 13:56:33 +0300 Subject: VFS: introduce s_dirty accessors This patch introduces 3 VFS accessors: 'sb_mark_dirty()', 'sb_mark_clean()', and 'sb_is_dirty()'. They simply set 'sb->s_dirt' or test 'sb->s_dirt'. The plan is to make every FS use these accessors later instead of manipulating the 'sb->s_dirt' flag directly. Ultimately, this change is a preparation for the periodic superblock synchronization optimization which is about preventing the "sync_supers" kernel thread from waking up even if there is nothing to synchronize. This patch does not do any functional change, just adds accessor functions. Signed-off-by: Artem Bityutskiy Signed-off-by: Linus Torvalds --- include/linux/fs.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 471e1ff5079a..68ca1b0491af 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1783,6 +1783,19 @@ extern int get_sb_pseudo(struct file_system_type *, char *, struct vfsmount *mnt); extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); +static inline void sb_mark_dirty(struct super_block *sb) +{ + sb->s_dirt = 1; +} +static inline void sb_mark_clean(struct super_block *sb) +{ + sb->s_dirt = 0; +} +static inline int sb_is_dirty(struct super_block *sb) +{ + return sb->s_dirt; +} + /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ #define fops_get(fops) \ (((fops) && try_module_get((fops)->owner) ? (fops) : NULL)) -- cgit v1.2.3 From 5870a4d97da136908ca477e3a21bc9f4c2705161 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 4 Jul 2010 04:03:07 +0200 Subject: drm/ttm: Allocate the page pool manager in the heap. Repeated ttm_page_alloc_init/fini fails noisily because the pool manager kobj isn't zeroed out between uses (we could do just that but statically allocated kobjects are generally considered a bad thing). Move it to kzalloc'ed memory. Note that this patch drops the refcounting behavior of the pool allocator init/fini functions: it would have led to a race condition in its current form, and anyway it was never exploited. This fixes a regression with reloading kms modules at runtime, since page allocator was introduced. 
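(A rough, self-contained sketch of the lifetime pattern this change switches to; the names below are invented for illustration and this is not the ttm code itself. The manager is allocated zeroed on every init, and the final kfree() happens in the kobject release callback, so repeated init/fini cycles always start from a clean object.)

#include <linux/kobject.h>
#include <linux/slab.h>

/* Hypothetical object with an embedded kobject, standing in for the pool manager. */
struct demo_manager {
	struct kobject kobj;
	int options;
};

static struct demo_manager *demo_mgr;	/* heap pointer instead of a static struct */

static void demo_mgr_release(struct kobject *kobj)
{
	/* Called by the kobject core when the last reference is dropped. */
	kfree(container_of(kobj, struct demo_manager, kobj));
}

static struct kobj_type demo_mgr_ktype = {
	.release = demo_mgr_release,
};

static int demo_init(struct kobject *parent)
{
	int ret;

	demo_mgr = kzalloc(sizeof(*demo_mgr), GFP_KERNEL);	/* zeroed on every init */
	if (!demo_mgr)
		return -ENOMEM;

	ret = kobject_init_and_add(&demo_mgr->kobj, &demo_mgr_ktype, parent, "demo");
	if (ret) {
		kobject_put(&demo_mgr->kobj);	/* release() performs the kfree() */
		demo_mgr = NULL;
	}
	return ret;
}

static void demo_fini(void)
{
	kobject_put(&demo_mgr->kobj);
	demo_mgr = NULL;
}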
Signed-off-by: Francisco Jerez Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_page_alloc.c | 68 +++++++++++++++++------------------- include/drm/ttm/ttm_page_alloc.h | 4 --- 2 files changed, 33 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 2f047577b1e3..b1d67dc973dc 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -104,7 +104,6 @@ struct ttm_pool_opts { struct ttm_pool_manager { struct kobject kobj; struct shrinker mm_shrink; - atomic_t page_alloc_inited; struct ttm_pool_opts options; union { @@ -142,7 +141,7 @@ static void ttm_pool_kobj_release(struct kobject *kobj) { struct ttm_pool_manager *m = container_of(kobj, struct ttm_pool_manager, kobj); - (void)m; + kfree(m); } static ssize_t ttm_pool_store(struct kobject *kobj, @@ -214,9 +213,7 @@ static struct kobj_type ttm_pool_kobj_type = { .default_attrs = ttm_pool_attrs, }; -static struct ttm_pool_manager _manager = { - .page_alloc_inited = ATOMIC_INIT(0) -}; +static struct ttm_pool_manager *_manager; #ifndef CONFIG_X86 static int set_pages_array_wb(struct page **pages, int addrinarray) @@ -271,7 +268,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags, if (flags & TTM_PAGE_FLAG_DMA32) pool_index |= 0x2; - return &_manager.pools[pool_index]; + return &_manager->pools[pool_index]; } /* set memory back to wb and free the pages. */ @@ -387,7 +384,7 @@ static int ttm_pool_get_num_unused_pages(void) unsigned i; int total = 0; for (i = 0; i < NUM_POOLS; ++i) - total += _manager.pools[i].npages; + total += _manager->pools[i].npages; return total; } @@ -408,7 +405,7 @@ static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask) unsigned nr_free = shrink_pages; if (shrink_pages == 0) break; - pool = &_manager.pools[(i + pool_offset)%NUM_POOLS]; + pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; shrink_pages = ttm_page_pool_free(pool, nr_free); } /* return estimated number of unused pages in pool */ @@ -576,10 +573,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, /* If allocation request is small and there is not enough * pages in pool we fill the pool first */ - if (count < _manager.options.small + if (count < _manager->options.small && count > pool->npages) { struct list_head new_pages; - unsigned alloc_size = _manager.options.alloc_size; + unsigned alloc_size = _manager->options.alloc_size; /** * Can't change page caching if in irqsave context. 
We have to @@ -759,8 +756,8 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, pool->npages += page_count; /* Check that we don't go over the pool limit */ page_count = 0; - if (pool->npages > _manager.options.max_size) { - page_count = pool->npages - _manager.options.max_size; + if (pool->npages > _manager->options.max_size) { + page_count = pool->npages - _manager->options.max_size; /* free at least NUM_PAGES_TO_ALLOC number of pages * to reduce calls to set_memory_wb */ if (page_count < NUM_PAGES_TO_ALLOC) @@ -785,33 +782,36 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) { int ret; - if (atomic_add_return(1, &_manager.page_alloc_inited) > 1) - return 0; + + WARN_ON(_manager); printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n"); - ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc"); + _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); - ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc"); + ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); - ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32, - "wc dma"); + ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc"); - ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32, - "uc dma"); + ttm_page_pool_init_locked(&_manager->wc_pool_dma32, + GFP_USER | GFP_DMA32, "wc dma"); - _manager.options.max_size = max_pages; - _manager.options.small = SMALL_ALLOCATION; - _manager.options.alloc_size = NUM_PAGES_TO_ALLOC; + ttm_page_pool_init_locked(&_manager->uc_pool_dma32, + GFP_USER | GFP_DMA32, "uc dma"); - kobject_init(&_manager.kobj, &ttm_pool_kobj_type); - ret = kobject_add(&_manager.kobj, &glob->kobj, "pool"); + _manager->options.max_size = max_pages; + _manager->options.small = SMALL_ALLOCATION; + _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; + + ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, + &glob->kobj, "pool"); if (unlikely(ret != 0)) { - kobject_put(&_manager.kobj); + kobject_put(&_manager->kobj); + _manager = NULL; return ret; } - ttm_pool_mm_shrink_init(&_manager); + ttm_pool_mm_shrink_init(_manager); return 0; } @@ -820,16 +820,14 @@ void ttm_page_alloc_fini() { int i; - if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0) - return; - printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n"); - ttm_pool_mm_shrink_fini(&_manager); + ttm_pool_mm_shrink_fini(_manager); for (i = 0; i < NUM_POOLS; ++i) - ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES); + ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES); - kobject_put(&_manager.kobj); + kobject_put(&_manager->kobj); + _manager = NULL; } int ttm_page_alloc_debugfs(struct seq_file *m, void *data) @@ -837,14 +835,14 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data) struct ttm_page_pool *p; unsigned i; char *h[] = {"pool", "refills", "pages freed", "size"}; - if (atomic_read(&_manager.page_alloc_inited) == 0) { + if (!_manager) { seq_printf(m, "No pool allocator running.\n"); return 0; } seq_printf(m, "%6s %12s %13s %8s\n", h[0], h[1], h[2], h[3]); for (i = 0; i < NUM_POOLS; ++i) { - p = &_manager.pools[i]; + p = &_manager->pools[i]; seq_printf(m, "%6s %12ld %13ld %8d\n", p->name, p->nrefills, diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index 8bb4de567b2c..116821448c38 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -56,10 +56,6 @@ void 
ttm_put_pages(struct list_head *pages, enum ttm_caching_state cstate); /** * Initialize pool allocator. - * - * Pool allocator is internaly reference counted so it can be initialized - * multiple times but ttm_page_alloc_fini has to be called same number of - * times. */ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages); /** -- cgit v1.2.3 From 095c24710aa508a303edff86709637007113fbbf Mon Sep 17 00:00:00 2001 From: Andy Walls Date: Sat, 12 Jun 2010 20:20:36 -0300 Subject: V4L/DVB: tuner: Add a definition for the Philips FQ1236 MK5 NTSC tuner Signed-off-by: Andy Walls Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/tuners/tuner-simple.c | 1 + drivers/media/common/tuners/tuner-types.c | 16 ++++++++++++++++ include/media/tuner.h | 1 + 3 files changed, 18 insertions(+) (limited to 'include') diff --git a/drivers/media/common/tuners/tuner-simple.c b/drivers/media/common/tuners/tuner-simple.c index 8abbcc5fcf95..8cf2ab609d5e 100644 --- a/drivers/media/common/tuners/tuner-simple.c +++ b/drivers/media/common/tuners/tuner-simple.c @@ -524,6 +524,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer) buffer[3] = 0x39; break; case TUNER_PHILIPS_FQ1216LME_MK3: + case TUNER_PHILIPS_FQ1236_MK5: tuner_err("This tuner doesn't have FM\n"); /* Set the low band for sanity, since it covers 88-108 MHz */ buffer[3] = 0x01; diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c index d9aaaca620c9..58a513bcd747 100644 --- a/drivers/media/common/tuners/tuner-types.c +++ b/drivers/media/common/tuners/tuner-types.c @@ -1353,6 +1353,17 @@ static struct tuner_params tuner_sony_btf_pxn01z_params[] = { }, }; +/* ------------ TUNER_PHILIPS_FQ1236_MK5 - Philips NTSC ------------ */ + +static struct tuner_params tuner_philips_fq1236_mk5_params[] = { + { + .type = TUNER_PARAM_TYPE_NTSC, + .ranges = tuner_fm1236_mk3_ntsc_ranges, + .count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges), + .has_tda9887 = 1, /* TDA9885, no FM radio */ + }, +}; + /* --------------------------------------------------------------------- */ struct tunertype tuners[] = { @@ -1826,6 +1837,11 @@ struct tunertype tuners[] = { .params = tuner_sony_btf_pxn01z_params, .count = ARRAY_SIZE(tuner_sony_btf_pxn01z_params), }, + [TUNER_PHILIPS_FQ1236_MK5] = { /* NTSC, TDA9885, no FM radio */ + .name = "Philips FQ1236 MK5", + .params = tuner_philips_fq1236_mk5_params, + .count = ARRAY_SIZE(tuner_philips_fq1236_mk5_params), + }, }; EXPORT_SYMBOL(tuners); diff --git a/include/media/tuner.h b/include/media/tuner.h index 5505c5360ca3..51811eac46f1 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -130,6 +130,7 @@ #define TUNER_PHILIPS_CU1216L 82 #define TUNER_NXP_TDA18271 83 #define TUNER_SONY_BTF_PXN01Z 84 +#define TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */ /* tv card specific */ #define TDA9887_PRESENT (1<<0) -- cgit v1.2.3 From 44a54f787c0abcf75a2ed49b8ec8b2b512468f73 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 9 Jul 2010 15:41:44 -0400 Subject: tracing: Add alignment to syscall metadata declarations For some reason if we declare a static variable and then assign it later, and the assignment contains a __attribute__((__aligned__(#))), some versions of gcc will ignore it. This caused the syscall meta data to not be compact in its section and caused a kernel oops when the section was being read. The fix for these versions of gcc seems to be to add the aligned attribute to the declaration as well. 
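(A minimal stand-alone illustration of the compiler behaviour described above; the names are invented and this is not the tracing code itself. When a file-scope static object is first declared without the attribute and only the later definition requests the alignment, some gcc versions appear to keep the default layout, so the attribute is repeated on the declaration.)

struct demo_meta {
	const char *name;
	int nb_args;
};

/* Problematic pattern: the tentative declaration carries no alignment... */
static struct demo_meta demo_entry;

/* ...so the alignment requested only at the definition may be ignored. */
static struct demo_meta __attribute__((__aligned__(32)))
demo_entry = { .name = "demo", .nb_args = 1 };

/* Fixed pattern: repeat the attribute on the declaration as well. */
static struct demo_meta __attribute__((__aligned__(32))) demo_entry_fixed;

static struct demo_meta __attribute__((__aligned__(32)))
demo_entry_fixed = { .name = "demo_fixed", .nb_args = 1 };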
This fixes the BZ regression: https://bugzilla.kernel.org/show_bug.cgi?id=16353 Reported-by: Zeev Tarantov Tested-by: Zeev Tarantov Acked-by: Frederic Weisbecker LKML-Reference: Signed-off-by: Steven Rostedt --- include/linux/syscalls.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7f614ce274a9..13ebb5413a79 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -124,7 +124,8 @@ extern struct trace_event_functions enter_syscall_print_funcs; extern struct trace_event_functions exit_syscall_print_funcs; #define SYSCALL_TRACE_ENTER_EVENT(sname) \ - static struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata \ + __attribute__((__aligned__(4))) __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_enter_##sname; \ static struct ftrace_event_call __used \ @@ -138,7 +139,8 @@ extern struct trace_event_functions exit_syscall_print_funcs; } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ - static struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata \ + __attribute__((__aligned__(4))) __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_exit_##sname; \ static struct ftrace_event_call __used \ -- cgit v1.2.3 From 69c8f52b3897f2faf8510ea7ede8fffabe26c531 Mon Sep 17 00:00:00 2001 From: "Justin P. Mattock" Date: Thu, 1 Jul 2010 14:28:27 -0700 Subject: fix #warning about using kernel headers in userpsace Move the preprocessor #warning message: warning: #warning Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders from kernel.h to types.h. And also fixe the #warning message due to the preprocessor not being able to read the web address due to it thinking it was the start of a comment. also remove the extra #ifndef _KERNEL_ since it's already there. Signed-off-by: Justin P. 
Mattock Cc: Arnd Bergmann Signed-off-by: Jiri Kosina --- include/linux/kernel.h | 6 ------ include/linux/types.h | 5 ++++- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 8317ec4b9f3b..bd8501a8ca1c 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -728,12 +728,6 @@ extern int do_sysinfo(struct sysinfo *info); #endif /* __KERNEL__ */ -#ifndef __EXPORTED_HEADERS__ -#ifndef __KERNEL__ -#warning Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders -#endif /* __KERNEL__ */ -#endif /* __EXPORTED_HEADERS__ */ - #define SI_LOAD_SHIFT 16 struct sysinfo { long uptime; /* Seconds since boot */ diff --git a/include/linux/types.h b/include/linux/types.h index 23d237a075e2..331d8baabcf2 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -8,7 +8,10 @@ #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] - +#else +#ifndef __EXPORTED_HEADERS__ +#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" +#endif /* __EXPORTED_HEADERS__ */ #endif #include -- cgit v1.2.3 From b27d63d8f8d34af57805f56005e217c150187531 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Thu, 1 Jul 2010 20:48:44 +0200 Subject: fix comment typos concerning "sequential" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Uwe Kleine-König Signed-off-by: Jiri Kosina --- drivers/media/video/gspca/sunplus.c | 4 ++-- include/linux/jffs2.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c index 0c786e00ebcf..d0a133abe767 100644 --- a/drivers/media/video/gspca/sunplus.c +++ b/drivers/media/video/gspca/sunplus.c @@ -805,7 +805,7 @@ static int sd_init(struct gspca_dev *gspca_dev) /* Set AE AWB Banding Type 3-> 50Hz 2-> 60Hz */ spca504A_acknowledged_command(gspca_dev, 0x24, 8, 3, 0x9e, 1); - /* Twice sequencial need status 0xff->0x9e->0x9d */ + /* Twice sequential need status 0xff->0x9e->0x9d */ spca504A_acknowledged_command(gspca_dev, 0x24, 8, 3, 0x9e, 0); @@ -880,7 +880,7 @@ static int sd_start(struct gspca_dev *gspca_dev) /* Set AE AWB Banding Type 3-> 50Hz 2-> 60Hz */ spca504A_acknowledged_command(gspca_dev, 0x24, 8, 3, 0x9e, 1); - /* Twice sequencial need status 0xff->0x9e->0x9d */ + /* Twice sequential need status 0xff->0x9e->0x9d */ spca504A_acknowledged_command(gspca_dev, 0x24, 8, 3, 0x9e, 0); spca504A_acknowledged_command(gspca_dev, 0x24, diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h index 0874ab59ffef..edb9231f1898 100644 --- a/include/linux/jffs2.h +++ b/include/linux/jffs2.h @@ -185,7 +185,7 @@ struct jffs2_raw_xref jint32_t hdr_crc; jint32_t ino; /* inode number */ jint32_t xid; /* XATTR identifier number */ - jint32_t xseqno; /* xref sequencial number */ + jint32_t xseqno; /* xref sequential number */ jint32_t node_crc; } __attribute__((packed)); -- cgit v1.2.3 From ab0cfb928a3839e21942a28a86ad88e56ea3b136 Mon Sep 17 00:00:00 2001 From: Suresh Jayaraman Date: Tue, 6 Jul 2010 18:12:39 +0530 Subject: fscache: fix a trivial typo in the comment Signed-off-by: Suresh Jayaraman Acked-by: David Howells Signed-off-by: Jiri Kosina --- include/linux/fscache.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 595ce49288b7..1c0fc3e62f41 100644 
--- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -85,7 +85,7 @@ struct fscache_cookie_def { /* get an index key * - should store the key data in the buffer - * - should return the amount of amount stored + * - should return the amount of data stored * - not permitted to return an error * - the netfs data from the cookie being used as the source is * presented -- cgit v1.2.3 From 49a3df804bec09b8ee8196f79b81757e95cc6de4 Mon Sep 17 00:00:00 2001 From: Suresh Jayaraman Date: Tue, 6 Jul 2010 18:29:45 +0530 Subject: fscache: fix missing kerneldoc annotation .. and make kerneldoc scripts happy. Signed-off-by: Suresh Jayaraman Acked-by: David Howells Signed-off-by: Jiri Kosina --- include/linux/fscache.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 1c0fc3e62f41..ec0dad5ab90f 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -454,6 +454,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie, * @cookie: The cookie representing the cache object * @mapping: The netfs inode mapping to which the pages will be attached * @pages: A list of potential netfs pages to be filled + * @nr_pages: Number of pages to be read and/or allocated * @end_io_func: The callback to invoke when and if each page is filled * @context: An arbitrary piece of data to pass on to end_io_func() * @gfp: The conditions under which memory allocation should be made -- cgit v1.2.3 From 95f72d1ed41a66f1c1c29c24d479de81a0bea36f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 12 Jul 2010 14:36:09 +1000 Subject: lmb: rename to memblock via following scripts FILES=$(find * -type f | grep -vE 'oprofile|[^K]config') sed -i \ -e 's/lmb/memblock/g' \ -e 's/LMB/MEMBLOCK/g' \ $FILES for N in $(find . -name lmb.[ch]); do M=$(echo $N | sed 's/lmb/memblock/g') mv $N $M done and remove some wrong change like lmbench and dlmb etc. also move memblock.c from lib/ to mm/ Suggested-by: Ingo Molnar Acked-by: "H. 
Peter Anvin" Acked-by: Benjamin Herrenschmidt Acked-by: Linus Torvalds Signed-off-by: Yinghai Lu Signed-off-by: Benjamin Herrenschmidt --- Documentation/kernel-parameters.txt | 2 +- arch/microblaze/Kconfig | 2 +- arch/microblaze/include/asm/lmb.h | 17 - arch/microblaze/include/asm/memblock.h | 17 + arch/microblaze/kernel/prom.c | 14 +- arch/microblaze/mm/init.c | 40 +- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/abs_addr.h | 2 +- arch/powerpc/include/asm/lmb.h | 15 - arch/powerpc/include/asm/memblock.h | 15 + arch/powerpc/kernel/btext.c | 2 +- arch/powerpc/kernel/crash.c | 2 +- arch/powerpc/kernel/crash_dump.c | 4 +- arch/powerpc/kernel/dma-swiotlb.c | 2 +- arch/powerpc/kernel/dma.c | 4 +- arch/powerpc/kernel/machine_kexec.c | 12 +- arch/powerpc/kernel/paca.c | 8 +- arch/powerpc/kernel/prom.c | 62 +-- arch/powerpc/kernel/rtas.c | 6 +- arch/powerpc/kernel/setup-common.c | 2 +- arch/powerpc/kernel/setup_32.c | 16 +- arch/powerpc/kernel/setup_64.c | 20 +- arch/powerpc/kernel/vdso.c | 4 +- arch/powerpc/mm/40x_mmu.c | 2 +- arch/powerpc/mm/hash_utils_64.c | 26 +- arch/powerpc/mm/init_32.c | 16 +- arch/powerpc/mm/init_64.c | 2 +- arch/powerpc/mm/mem.c | 78 ++-- arch/powerpc/mm/numa.c | 84 ++-- arch/powerpc/mm/pgtable_32.c | 6 +- arch/powerpc/mm/pgtable_64.c | 4 +- arch/powerpc/mm/ppc_mmu_32.c | 4 +- arch/powerpc/mm/stab.c | 4 +- arch/powerpc/mm/tlb_nohash.c | 4 +- arch/powerpc/platforms/85xx/corenet_ds.c | 4 +- arch/powerpc/platforms/85xx/mpc8536_ds.c | 4 +- arch/powerpc/platforms/85xx/mpc85xx_ds.c | 4 +- arch/powerpc/platforms/85xx/mpc85xx_mds.c | 4 +- arch/powerpc/platforms/86xx/mpc86xx_hpcn.c | 4 +- arch/powerpc/platforms/cell/iommu.c | 10 +- arch/powerpc/platforms/embedded6xx/wii.c | 12 +- arch/powerpc/platforms/maple/setup.c | 2 +- arch/powerpc/platforms/pasemi/iommu.c | 4 +- arch/powerpc/platforms/powermac/setup.c | 4 +- arch/powerpc/platforms/ps3/htab.c | 2 +- arch/powerpc/platforms/ps3/mm.c | 6 +- arch/powerpc/platforms/ps3/os-area.c | 4 +- arch/powerpc/platforms/pseries/hotplug-memory.c | 38 +- arch/powerpc/platforms/pseries/iommu.c | 2 +- arch/powerpc/platforms/pseries/phyp_dump.c | 4 +- arch/powerpc/sysdev/dart_iommu.c | 8 +- arch/powerpc/sysdev/fsl_pci.c | 4 +- arch/sh/Kconfig | 2 +- arch/sh/include/asm/lmb.h | 6 - arch/sh/include/asm/memblock.h | 6 + arch/sh/kernel/machine_kexec.c | 18 +- arch/sh/kernel/setup.c | 8 +- arch/sh/mm/init.c | 40 +- arch/sh/mm/numa.c | 8 +- arch/sparc/Kconfig | 2 +- arch/sparc/include/asm/lmb.h | 10 - arch/sparc/include/asm/memblock.h | 10 + arch/sparc/kernel/mdesc.c | 16 +- arch/sparc/kernel/prom_64.c | 4 +- arch/sparc/mm/init_64.c | 54 +-- include/linux/lmb.h | 89 ---- include/linux/memblock.h | 89 ++++ lib/Kconfig | 3 - lib/Makefile | 2 - lib/lmb.c | 541 ------------------------ mm/Kconfig | 3 + mm/Makefile | 2 + mm/memblock.c | 541 ++++++++++++++++++++++++ 73 files changed, 1037 insertions(+), 1037 deletions(-) delete mode 100644 arch/microblaze/include/asm/lmb.h create mode 100644 arch/microblaze/include/asm/memblock.h delete mode 100644 arch/powerpc/include/asm/lmb.h create mode 100644 arch/powerpc/include/asm/memblock.h delete mode 100644 arch/sh/include/asm/lmb.h create mode 100644 arch/sh/include/asm/memblock.h delete mode 100644 arch/sparc/include/asm/lmb.h create mode 100644 arch/sparc/include/asm/memblock.h delete mode 100644 include/linux/lmb.h create mode 100644 include/linux/memblock.h delete mode 100644 lib/lmb.c create mode 100644 mm/memblock.c (limited to 'include') diff --git a/Documentation/kernel-parameters.txt 
b/Documentation/kernel-parameters.txt index 82d6aeb5228f..4ddb58df081e 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1265,7 +1265,7 @@ and is between 256 and 4096 characters. It is defined in the file If there are multiple matching configurations changing the same attribute, the last one is used. - lmb=debug [KNL] Enable lmb debug messages. + memblock=debug [KNL] Enable memblock debug messages. load_ramdisk= [RAM] List of ramdisks to load from floppy See Documentation/blockdev/ramdisk.txt. diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 76818f926539..505a08592423 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -5,7 +5,7 @@ mainmenu "Linux/Microblaze Kernel Configuration" config MICROBLAZE def_bool y - select HAVE_LMB + select HAVE_MEMBLOCK select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FUNCTION_GRAPH_TRACER diff --git a/arch/microblaze/include/asm/lmb.h b/arch/microblaze/include/asm/lmb.h deleted file mode 100644 index a0a0a929c293..000000000000 --- a/arch/microblaze/include/asm/lmb.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (C) 2008 Michal Simek - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#ifndef _ASM_MICROBLAZE_LMB_H -#define _ASM_MICROBLAZE_LMB_H - -/* LMB limit is OFF */ -#define LMB_REAL_LIMIT 0xFFFFFFFF - -#endif /* _ASM_MICROBLAZE_LMB_H */ - - diff --git a/arch/microblaze/include/asm/memblock.h b/arch/microblaze/include/asm/memblock.h new file mode 100644 index 000000000000..f9c2fa331d2a --- /dev/null +++ b/arch/microblaze/include/asm/memblock.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2008 Michal Simek + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_MEMBLOCK_H +#define _ASM_MICROBLAZE_MEMBLOCK_H + +/* MEMBLOCK limit is OFF */ +#define MEMBLOCK_REAL_LIMIT 0xFFFFFFFF + +#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */ + + diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index a15ef6d67ca9..427b13b4740f 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include @@ -49,12 +49,12 @@ void __init early_init_dt_scan_chosen_arch(unsigned long node) void __init early_init_dt_add_memory_arch(u64 base, u64 size) { - lmb_add(base, size); + memblock_add(base, size); } u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) { - return lmb_alloc(size, align); + return memblock_alloc(size, align); } #ifdef CONFIG_EARLY_PRINTK @@ -104,8 +104,8 @@ void __init early_init_devtree(void *params) */ of_scan_flat_dt(early_init_dt_scan_chosen, NULL); - /* Scan memory nodes and rebuild LMBs */ - lmb_init(); + /* Scan memory nodes and rebuild MEMBLOCKs */ + memblock_init(); of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory, NULL); @@ -113,9 +113,9 @@ void __init early_init_devtree(void *params) strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); parse_early_param(); - lmb_analyze(); + memblock_analyze(); - pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size()); + pr_debug("Phys. 
mem: %lx\n", (unsigned long) memblock_phys_mem_size()); pr_debug(" <- early_init_devtree()\n"); } diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index cca3579d4268..db5934989926 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include /* mem_init */ #include #include @@ -76,10 +76,10 @@ void __init setup_memory(void) u32 kernel_align_start, kernel_align_size; /* Find main memory where is the kernel */ - for (i = 0; i < lmb.memory.cnt; i++) { - memory_start = (u32) lmb.memory.region[i].base; - memory_end = (u32) lmb.memory.region[i].base - + (u32) lmb.memory.region[i].size; + for (i = 0; i < memblock.memory.cnt; i++) { + memory_start = (u32) memblock.memory.region[i].base; + memory_end = (u32) memblock.memory.region[i].base + + (u32) memblock.memory.region[i].size; if ((memory_start <= (u32)_text) && ((u32)_text <= memory_end)) { memory_size = memory_end - memory_start; @@ -100,7 +100,7 @@ void __init setup_memory(void) kernel_align_start = PAGE_DOWN((u32)_text); /* ALIGN can be remove because _end in vmlinux.lds.S is align */ kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; - lmb_reserve(kernel_align_start, kernel_align_size); + memblock_reserve(kernel_align_start, kernel_align_size); printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n", __func__, kernel_align_start, kernel_align_start + kernel_align_size, kernel_align_size); @@ -141,18 +141,18 @@ void __init setup_memory(void) map_size = init_bootmem_node(&contig_page_data, PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); #endif - lmb_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); + memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); /* free bootmem is whole main memory */ free_bootmem(memory_start, memory_size); /* reserve allocate blocks */ - for (i = 0; i < lmb.reserved.cnt; i++) { + for (i = 0; i < memblock.reserved.cnt; i++) { pr_debug("reserved %d - 0x%08x-0x%08x\n", i, - (u32) lmb.reserved.region[i].base, - (u32) lmb_size_bytes(&lmb.reserved, i)); - reserve_bootmem(lmb.reserved.region[i].base, - lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT); + (u32) memblock.reserved.region[i].base, + (u32) memblock_size_bytes(&memblock.reserved, i)); + reserve_bootmem(memblock.reserved.region[i].base, + memblock_size_bytes(&memblock.reserved, i) - 1, BOOTMEM_DEFAULT); } #ifdef CONFIG_MMU init_bootmem_done = 1; @@ -235,7 +235,7 @@ static void mm_cmdline_setup(void) if (maxmem && memory_size > maxmem) { memory_size = maxmem; memory_end = memory_start + memory_size; - lmb.memory.region[0].size = memory_size; + memblock.memory.region[0].size = memory_size; } } } @@ -273,19 +273,19 @@ asmlinkage void __init mmu_init(void) { unsigned int kstart, ksize; - if (!lmb.reserved.cnt) { + if (!memblock.reserved.cnt) { printk(KERN_EMERG "Error memory count\n"); machine_restart(NULL); } - if ((u32) lmb.memory.region[0].size < 0x1000000) { + if ((u32) memblock.memory.region[0].size < 0x1000000) { printk(KERN_EMERG "Memory must be greater than 16MB\n"); machine_restart(NULL); } /* Find main memory where the kernel is */ - memory_start = (u32) lmb.memory.region[0].base; - memory_end = (u32) lmb.memory.region[0].base + - (u32) lmb.memory.region[0].size; + memory_start = (u32) memblock.memory.region[0].base; + memory_end = (u32) memblock.memory.region[0].base + + (u32) memblock.memory.region[0].size; memory_size = memory_end - memory_start; mm_cmdline_setup(); /* FIXME parse args from 
command line - not used */ @@ -297,7 +297,7 @@ asmlinkage void __init mmu_init(void) kstart = __pa(CONFIG_KERNEL_START); /* kernel start */ /* kernel size */ ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START)); - lmb_reserve(kstart, ksize); + memblock_reserve(kstart, ksize); #if defined(CONFIG_BLK_DEV_INITRD) /* Remove the init RAM disk from the available memory. */ @@ -335,7 +335,7 @@ void __init *early_get_page(void) * Mem start + 32MB -> here is limit * because of mem mapping from head.S */ - p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, + p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, memory_start + 0x2000000)); } return p; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 6506bf4fbff1..2031a2846865 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -132,7 +132,7 @@ config PPC select HAVE_ARCH_KGDB select HAVE_KRETPROBES select HAVE_ARCH_TRACEHOOK - select HAVE_LMB + select HAVE_MEMBLOCK select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG select USE_GENERIC_SMP_HELPERS if SMP diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h index 98324c5a8286..9a846efe6382 100644 --- a/arch/powerpc/include/asm/abs_addr.h +++ b/arch/powerpc/include/asm/abs_addr.h @@ -12,7 +12,7 @@ * 2 of the License, or (at your option) any later version. */ -#include +#include #include #include diff --git a/arch/powerpc/include/asm/lmb.h b/arch/powerpc/include/asm/lmb.h deleted file mode 100644 index 6f5fdf0a19ae..000000000000 --- a/arch/powerpc/include/asm/lmb.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _ASM_POWERPC_LMB_H -#define _ASM_POWERPC_LMB_H - -#include - -#define LMB_DBG(fmt...) udbg_printf(fmt) - -#ifdef CONFIG_PPC32 -extern phys_addr_t lowmem_end_addr; -#define LMB_REAL_LIMIT lowmem_end_addr -#else -#define LMB_REAL_LIMIT 0 -#endif - -#endif /* _ASM_POWERPC_LMB_H */ diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h new file mode 100644 index 000000000000..3c29728b56b1 --- /dev/null +++ b/arch/powerpc/include/asm/memblock.h @@ -0,0 +1,15 @@ +#ifndef _ASM_POWERPC_MEMBLOCK_H +#define _ASM_POWERPC_MEMBLOCK_H + +#include + +#define MEMBLOCK_DBG(fmt...) 
udbg_printf(fmt) + +#ifdef CONFIG_PPC32 +extern phys_addr_t lowmem_end_addr; +#define MEMBLOCK_REAL_LIMIT lowmem_end_addr +#else +#define MEMBLOCK_REAL_LIMIT 0 +#endif + +#endif /* _ASM_POWERPC_MEMBLOCK_H */ diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 26e58630ed7b..625942ae5585 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 29df48f2b61a..417f7b05a9ce 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 5fb667a60894..40f524643ba6 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include #include @@ -33,7 +33,7 @@ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; #ifndef CONFIG_RELOCATABLE void __init reserve_kdump_trampoline(void) { - lmb_reserve(0, KDUMP_RESERVE_LIMIT); + memblock_reserve(0, KDUMP_RESERVE_LIMIT); } static void __init create_trampoline(unsigned long addr) diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index e7fe218b8697..02f724f36753 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -71,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb, sd->max_direct_dma_addr = 0; /* May need to bounce if the device can't address all of DRAM */ - if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM()) + if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM()) set_dma_ops(dev, &swiotlb_dma_ops); return NOTIFY_DONE; diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 8d1de6f31d5a..84d6367ec003 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include @@ -89,7 +89,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask) /* Could be improved so platforms can set the limit in case * they have limited DMA windows */ - return mask >= (lmb_end_of_DRAM() - 1); + return mask >= (memblock_end_of_DRAM() - 1); #else return 1; #endif diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index bb3d893a8353..89f005116aac 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include @@ -66,11 +66,11 @@ void __init reserve_crashkernel(void) unsigned long long crash_size, crash_base; int ret; - /* this is necessary because of lmb_phys_mem_size() */ - lmb_analyze(); + /* this is necessary because of memblock_phys_mem_size() */ + memblock_analyze(); /* use common parsing */ - ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(), + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base); if (ret == 0 && crash_size > 0) { crashk_res.start = crash_base; @@ -133,9 +133,9 @@ void __init reserve_crashkernel(void) "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crashk_res.start >> 20), - (unsigned long)(lmb_phys_mem_size() >> 20)); + (unsigned long)(memblock_phys_mem_size() >> 20)); - lmb_reserve(crashk_res.start, crash_size); + 
memblock_reserve(crashk_res.start, crash_size); } int overlaps_crashkernel(unsigned long start, unsigned long size) diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index f88acf0218db..139a773853f4 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include @@ -117,7 +117,7 @@ void __init allocate_pacas(void) * the first segment. On iSeries they must be within the area mapped * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. */ - limit = min(0x10000000ULL, lmb.rmo_size); + limit = min(0x10000000ULL, memblock.rmo_size); if (firmware_has_feature(FW_FEATURE_ISERIES)) limit = min(limit, HvPagesToMap * HVPAGESIZE); @@ -128,7 +128,7 @@ void __init allocate_pacas(void) paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus); - paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit)); + paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); memset(paca, 0, paca_size); printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", @@ -148,7 +148,7 @@ void __init free_unused_pacas(void) if (new_size >= paca_size) return; - lmb_free(__pa(paca) + new_size, paca_size - new_size); + memblock_free(__pa(paca) + new_size, paca_size - new_size); printk(KERN_DEBUG "Freed %u bytes for unused pacas\n", paca_size - new_size); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 05131d634e73..9d3953983fb7 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include @@ -98,7 +98,7 @@ static void __init move_device_tree(void) if ((memory_limit && (start + size) > memory_limit) || overlaps_crashkernel(start, size)) { - p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size)); + p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size)); memcpy(p, initial_boot_params, size); initial_boot_params = (struct boot_param_header *)p; DBG("Moved device tree to 0x%p\n", p); @@ -411,13 +411,13 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node) { __be32 *dm, *ls, *usm; unsigned long l, n, flags; - u64 base, size, lmb_size; + u64 base, size, memblock_size; unsigned int is_kexec_kdump = 0, rngs; - ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l); + ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l); if (ls == NULL || l < dt_root_size_cells * sizeof(__be32)) return 0; - lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls); + memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls); dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l); if (dm == NULL || l < sizeof(__be32)) @@ -442,11 +442,11 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node) or if the block is not assigned to this partition (0x8) */ if ((flags & 0x80) || !(flags & 0x8)) continue; - size = lmb_size; + size = memblock_size; rngs = 1; if (is_kexec_kdump) { /* - * For each lmb in ibm,dynamic-memory, a corresponding + * For each memblock in ibm,dynamic-memory, a corresponding * entry in linux,drconf-usable-memory property contains * a counter 'p' followed by 'p' (base, size) duple. 
* Now read the counter from @@ -469,10 +469,10 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node) if ((base + size) > 0x80000000ul) size = 0x80000000ul - base; } - lmb_add(base, size); + memblock_add(base, size); } while (--rngs); } - lmb_dump_all(); + memblock_dump_all(); return 0; } #else @@ -501,14 +501,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) } #endif - lmb_add(base, size); + memblock_add(base, size); memstart_addr = min((u64)memstart_addr, base); } u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) { - return lmb_alloc(size, align); + return memblock_alloc(size, align); } #ifdef CONFIG_BLK_DEV_INITRD @@ -534,12 +534,12 @@ static void __init early_reserve_mem(void) /* before we do anything, lets reserve the dt blob */ self_base = __pa((unsigned long)initial_boot_params); self_size = initial_boot_params->totalsize; - lmb_reserve(self_base, self_size); + memblock_reserve(self_base, self_size); #ifdef CONFIG_BLK_DEV_INITRD /* then reserve the initrd, if any */ if (initrd_start && (initrd_end > initrd_start)) - lmb_reserve(__pa(initrd_start), initrd_end - initrd_start); + memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); #endif /* CONFIG_BLK_DEV_INITRD */ #ifdef CONFIG_PPC32 @@ -560,7 +560,7 @@ static void __init early_reserve_mem(void) if (base_32 == self_base && size_32 == self_size) continue; DBG("reserving: %x -> %x\n", base_32, size_32); - lmb_reserve(base_32, size_32); + memblock_reserve(base_32, size_32); } return; } @@ -571,7 +571,7 @@ static void __init early_reserve_mem(void) if (size == 0) break; DBG("reserving: %llx -> %llx\n", base, size); - lmb_reserve(base, size); + memblock_reserve(base, size); } } @@ -594,7 +594,7 @@ static inline unsigned long phyp_dump_calculate_reserve_size(void) return phyp_dump_info->reserve_bootvar; /* divide by 20 to get 5% of value */ - tmp = lmb_end_of_DRAM(); + tmp = memblock_end_of_DRAM(); do_div(tmp, 20); /* round it down in multiples of 256 */ @@ -633,11 +633,11 @@ static void __init phyp_dump_reserve_mem(void) if (phyp_dump_info->phyp_dump_is_active) { /* Reserve *everything* above RMR.Area freed by userland tools*/ base = variable_reserve_size; - size = lmb_end_of_DRAM() - base; + size = memblock_end_of_DRAM() - base; /* XXX crashed_ram_end is wrong, since it may be beyond * the memory_limit, it will need to be adjusted. */ - lmb_reserve(base, size); + memblock_reserve(base, size); phyp_dump_info->init_reserve_start = base; phyp_dump_info->init_reserve_size = size; @@ -645,8 +645,8 @@ static void __init phyp_dump_reserve_mem(void) size = phyp_dump_info->cpu_state_size + phyp_dump_info->hpte_region_size + variable_reserve_size; - base = lmb_end_of_DRAM() - size; - lmb_reserve(base, size); + base = memblock_end_of_DRAM() - size; + memblock_reserve(base, size); phyp_dump_info->init_reserve_start = base; phyp_dump_info->init_reserve_size = size; } @@ -681,8 +681,8 @@ void __init early_init_devtree(void *params) */ of_scan_flat_dt(early_init_dt_scan_chosen, NULL); - /* Scan memory nodes and rebuild LMBs */ - lmb_init(); + /* Scan memory nodes and rebuild MEMBLOCKs */ + memblock_init(); of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); @@ -690,11 +690,11 @@ void __init early_init_devtree(void *params) strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); parse_early_param(); - /* Reserve LMB regions used by kernel, initrd, dt, etc... 
*/ - lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); + /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */ + memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); /* If relocatable, reserve first 32k for interrupt vectors etc. */ if (PHYSICAL_START > MEMORY_START) - lmb_reserve(MEMORY_START, 0x8000); + memblock_reserve(MEMORY_START, 0x8000); reserve_kdump_trampoline(); reserve_crashkernel(); early_reserve_mem(); @@ -706,17 +706,17 @@ void __init early_init_devtree(void *params) /* Ensure that total memory size is page-aligned, because * otherwise mark_bootmem() gets upset. */ - lmb_analyze(); - memsize = lmb_phys_mem_size(); + memblock_analyze(); + memsize = memblock_phys_mem_size(); if ((memsize & PAGE_MASK) != memsize) limit = memsize & PAGE_MASK; } - lmb_enforce_memory_limit(limit); + memblock_enforce_memory_limit(limit); - lmb_analyze(); - lmb_dump_all(); + memblock_analyze(); + memblock_dump_all(); - DBG("Phys. mem: %llx\n", lmb_phys_mem_size()); + DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); /* We may need to relocate the flat tree, do it now. * FIXME .. and the initrd too? */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 0e1ec6f746f6..d0516dbee762 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include @@ -934,11 +934,11 @@ void __init rtas_initialize(void) */ #ifdef CONFIG_PPC64 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { - rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX); + rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX); ibm_suspend_me_token = rtas_token("ibm,suspend-me"); } #endif - rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region); + rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region); #ifdef CONFIG_RTAS_ERROR_LOGGING rtas_last_error_token = rtas_token("rtas-last-error"); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 5e4d852f640c..b7e6c7e193ae 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 7d84b210f168..a10ffc85ada7 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include @@ -246,12 +246,12 @@ static void __init irqstack_early_init(void) unsigned int i; /* interrupt stacks must be in lowmem, we get that for free on ppc32 - * as the lmb is limited to lowmem by LMB_REAL_LIMIT */ + * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ for_each_possible_cpu(i) { softirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); hardirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); } } @@ -261,15 +261,15 @@ static void __init exc_lvl_early_init(void) unsigned int i; /* interrupt stacks must be in lowmem, we get that for free on ppc32 - * as the lmb is limited to lowmem by LMB_REAL_LIMIT */ + * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ for_each_possible_cpu(i) { critirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + __va(memblock_alloc(THREAD_SIZE, 
THREAD_SIZE)); #ifdef CONFIG_BOOKE dbgirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); mcheckirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); #endif } } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 643dcac40fcb..d135f93cb0f6 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include #include @@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca) * the CPU that ignores the top 2 bits of the address in real * mode so we can access kernel globals normally provided we * only toy with things in the RMO region. From here, we do - * some early parsing of the device-tree to setup out LMB + * some early parsing of the device-tree to setup out MEMBLOCK * data structures, and allocate & initialize the hash table * and segment tables so we can start running with translation * enabled. @@ -404,7 +404,7 @@ void __init setup_system(void) printk("-----------------------------------------------------\n"); printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); - printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size()); + printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size()); if (ppc64_caches.dline_size != 0x80) printk("ppc64_caches.dcache_line_size = 0x%x\n", ppc64_caches.dline_size); @@ -443,10 +443,10 @@ static void __init irqstack_early_init(void) */ for_each_possible_cpu(i) { softirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc_base(THREAD_SIZE, + __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); hardirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc_base(THREAD_SIZE, + __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); } } @@ -458,11 +458,11 @@ static void __init exc_lvl_early_init(void) for_each_possible_cpu(i) { critirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); dbgirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); mcheckirq_ctx[i] = (struct thread_info *) - __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); } } #else @@ -487,11 +487,11 @@ static void __init emergency_stack_init(void) * bringup, we need to get at them in real mode. This means they * must also be within the RMO region. */ - limit = min(slb0_limit(), lmb.rmo_size); + limit = min(slb0_limit(), memblock.rmo_size); for_each_possible_cpu(i) { unsigned long sp; - sp = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); + sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); sp += THREAD_SIZE; paca[i].emergency_sp = __va(sp); } diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index d84d19224a95..13002fe206e7 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include @@ -734,7 +734,7 @@ static int __init vdso_init(void) vdso_data->platform = machine_is(iseries) ? 
0x200 : 0x100; if (firmware_has_feature(FW_FEATURE_LPAR)) vdso_data->platform |= 1; - vdso_data->physicalMemorySize = lmb_phys_mem_size(); + vdso_data->physicalMemorySize = memblock_phys_mem_size(); vdso_data->dcache_size = ppc64_caches.dsize; vdso_data->dcache_line_size = ppc64_caches.dline_size; vdso_data->icache_size = ppc64_caches.isize; diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c index 65abfcfaaa9e..1dc2fa5ce1bd 100644 --- a/arch/powerpc/mm/40x_mmu.c +++ b/arch/powerpc/mm/40x_mmu.c @@ -135,7 +135,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top) /* If the size of RAM is not an exact power of two, we may not * have covered RAM in its entirety with 16 and 4 MiB * pages. Consequently, restrict the top end of RAM currently - * allocable so that calls to the LMB to allocate PTEs for "tail" + * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail" * coverage with normal-sized pages (or other reasons) do not * attempt to allocate outside the allowed range. */ diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 3ecdcec0a39e..98f262de5585 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include @@ -384,8 +384,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node, printk(KERN_INFO "Huge page(16GB) memory: " "addr = 0x%lX size = 0x%lX pages = %d\n", phys_addr, block_size, expected_pages); - if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) { - lmb_reserve(phys_addr, block_size * expected_pages); + if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) { + memblock_reserve(phys_addr, block_size * expected_pages); add_gpage(phys_addr, block_size, expected_pages); } return 0; @@ -458,7 +458,7 @@ static void __init htab_init_page_sizes(void) * and we have at least 1G of RAM at boot */ if (mmu_psize_defs[MMU_PAGE_16M].shift && - lmb_phys_mem_size() >= 0x40000000) + memblock_phys_mem_size() >= 0x40000000) mmu_vmemmap_psize = MMU_PAGE_16M; else if (mmu_psize_defs[MMU_PAGE_64K].shift) mmu_vmemmap_psize = MMU_PAGE_64K; @@ -520,7 +520,7 @@ static unsigned long __init htab_get_table_size(void) return 1UL << ppc64_pft_size; /* round mem_size up to next power of 2 */ - mem_size = lmb_phys_mem_size(); + mem_size = memblock_phys_mem_size(); rnd_mem_size = 1UL << __ilog2(mem_size); if (rnd_mem_size < mem_size) rnd_mem_size <<= 1; @@ -627,7 +627,7 @@ static void __init htab_initialize(void) else limit = 0; - table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit); + table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit); DBG("Hash table allocated at %lx, size: %lx\n", table, htab_size_bytes); @@ -647,9 +647,9 @@ static void __init htab_initialize(void) prot = pgprot_val(PAGE_KERNEL); #ifdef CONFIG_DEBUG_PAGEALLOC - linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT; - linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count, - 1, lmb.rmo_size)); + linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; + linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count, + 1, memblock.rmo_size)); memset(linear_map_hash_slots, 0, linear_map_hash_count); #endif /* CONFIG_DEBUG_PAGEALLOC */ @@ -659,16 +659,16 @@ static void __init htab_initialize(void) */ /* create bolted the linear mapping in the hash table */ - for (i=0; i < lmb.memory.cnt; i++) { - base = (unsigned long)__va(lmb.memory.region[i].base); - size = lmb.memory.region[i].size; + for (i=0; i < 
memblock.memory.cnt; i++) { + base = (unsigned long)__va(memblock.memory.region[i].base); + size = memblock.memory.region[i].size; DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", base, size, prot); #ifdef CONFIG_U3_DART /* Do not map the DART space. Fortunately, it will be aligned - * in such a way that it will not cross two lmb regions and + * in such a way that it will not cross two memblock regions and * will fit within a single 16Mb page. * The DART space is assumed to be a full 16Mb region even if * we only use 2Mb of that space. We will use more of it later diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 767333005eb4..6a6975dc2654 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include @@ -136,17 +136,17 @@ void __init MMU_init(void) /* parse args from command line */ MMU_setup(); - if (lmb.memory.cnt > 1) { + if (memblock.memory.cnt > 1) { #ifndef CONFIG_WII - lmb.memory.cnt = 1; - lmb_analyze(); + memblock.memory.cnt = 1; + memblock_analyze(); printk(KERN_WARNING "Only using first contiguous memory region"); #else wii_memory_fixups(); #endif } - total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr; + total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr; lowmem_end_addr = memstart_addr + total_lowmem; #ifdef CONFIG_FSL_BOOKE @@ -161,8 +161,8 @@ void __init MMU_init(void) lowmem_end_addr = memstart_addr + total_lowmem; #ifndef CONFIG_HIGHMEM total_memory = total_lowmem; - lmb_enforce_memory_limit(lowmem_end_addr); - lmb_analyze(); + memblock_enforce_memory_limit(lowmem_end_addr); + memblock_analyze(); #endif /* CONFIG_HIGHMEM */ } @@ -200,7 +200,7 @@ void __init *early_get_page(void) if (init_bootmem_done) { p = alloc_bootmem_pages(PAGE_SIZE); } else { - p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, + p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, __initial_memory_limit_addr)); } return p; diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index e267f223fdff..71f1415e2472 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 0f594d774bf7..1a84a8d00005 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include @@ -83,13 +83,13 @@ int page_is_ram(unsigned long pfn) #else unsigned long paddr = (pfn << PAGE_SHIFT); int i; - for (i=0; i < lmb.memory.cnt; i++) { + for (i=0; i < memblock.memory.cnt; i++) { unsigned long base; - base = lmb.memory.region[i].base; + base = memblock.memory.region[i].base; if ((paddr >= base) && - (paddr < (base + lmb.memory.region[i].size))) { + (paddr < (base + memblock.memory.region[i].size))) { return 1; } } @@ -142,14 +142,14 @@ int arch_add_memory(int nid, u64 start, u64 size) /* * walk_memory_resource() needs to make sure there is no holes in a given * memory range. PPC64 does not maintain the memory layout in /proc/iomem. - * Instead it maintains it in lmb.memory structures. Walk through the + * Instead it maintains it in memblock.memory structures. Walk through the * memory regions, find holes and callback for contiguous regions. 
*/ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)) { - struct lmb_property res; + struct memblock_property res; unsigned long pfn, len; u64 end; int ret = -1; @@ -158,7 +158,7 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, res.size = (u64) nr_pages << PAGE_SHIFT; end = res.base + res.size - 1; - while ((res.base < end) && (lmb_find(&res) >= 0)) { + while ((res.base < end) && (memblock_find(&res) >= 0)) { pfn = (unsigned long)(res.base >> PAGE_SHIFT); len = (unsigned long)(res.size >> PAGE_SHIFT); ret = (*func)(pfn, len, arg); @@ -184,8 +184,8 @@ void __init do_init_bootmem(void) unsigned long total_pages; int boot_mapsize; - max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; - total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT; + max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; + total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT; #ifdef CONFIG_HIGHMEM total_pages = total_lowmem >> PAGE_SHIFT; max_low_pfn = lowmem_end_addr >> PAGE_SHIFT; @@ -198,16 +198,16 @@ void __init do_init_bootmem(void) */ bootmap_pages = bootmem_bootmap_pages(total_pages); - start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); + start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); min_low_pfn = MEMORY_START >> PAGE_SHIFT; boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); /* Add active regions with valid PFNs */ - for (i = 0; i < lmb.memory.cnt; i++) { + for (i = 0; i < memblock.memory.cnt; i++) { unsigned long start_pfn, end_pfn; - start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); + start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; + end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); add_active_range(0, start_pfn, end_pfn); } @@ -218,17 +218,17 @@ void __init do_init_bootmem(void) free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT); /* reserve the sections we're already using */ - for (i = 0; i < lmb.reserved.cnt; i++) { - unsigned long addr = lmb.reserved.region[i].base + - lmb_size_bytes(&lmb.reserved, i) - 1; + for (i = 0; i < memblock.reserved.cnt; i++) { + unsigned long addr = memblock.reserved.region[i].base + + memblock_size_bytes(&memblock.reserved, i) - 1; if (addr < lowmem_end_addr) - reserve_bootmem(lmb.reserved.region[i].base, - lmb_size_bytes(&lmb.reserved, i), + reserve_bootmem(memblock.reserved.region[i].base, + memblock_size_bytes(&memblock.reserved, i), BOOTMEM_DEFAULT); - else if (lmb.reserved.region[i].base < lowmem_end_addr) { + else if (memblock.reserved.region[i].base < lowmem_end_addr) { unsigned long adjusted_size = lowmem_end_addr - - lmb.reserved.region[i].base; - reserve_bootmem(lmb.reserved.region[i].base, + memblock.reserved.region[i].base; + reserve_bootmem(memblock.reserved.region[i].base, adjusted_size, BOOTMEM_DEFAULT); } } @@ -236,9 +236,9 @@ void __init do_init_bootmem(void) free_bootmem_with_active_regions(0, max_pfn); /* reserve the sections we're already using */ - for (i = 0; i < lmb.reserved.cnt; i++) - reserve_bootmem(lmb.reserved.region[i].base, - lmb_size_bytes(&lmb.reserved, i), + for (i = 0; i < memblock.reserved.cnt; i++) + reserve_bootmem(memblock.reserved.region[i].base, + memblock_size_bytes(&memblock.reserved, i), BOOTMEM_DEFAULT); #endif @@ -251,20 +251,20 @@ void __init do_init_bootmem(void) /* mark pages that don't exist as nosave */ static int 
__init mark_nonram_nosave(void) { - unsigned long lmb_next_region_start_pfn, - lmb_region_max_pfn; + unsigned long memblock_next_region_start_pfn, + memblock_region_max_pfn; int i; - for (i = 0; i < lmb.memory.cnt - 1; i++) { - lmb_region_max_pfn = - (lmb.memory.region[i].base >> PAGE_SHIFT) + - (lmb.memory.region[i].size >> PAGE_SHIFT); - lmb_next_region_start_pfn = - lmb.memory.region[i+1].base >> PAGE_SHIFT; + for (i = 0; i < memblock.memory.cnt - 1; i++) { + memblock_region_max_pfn = + (memblock.memory.region[i].base >> PAGE_SHIFT) + + (memblock.memory.region[i].size >> PAGE_SHIFT); + memblock_next_region_start_pfn = + memblock.memory.region[i+1].base >> PAGE_SHIFT; - if (lmb_region_max_pfn < lmb_next_region_start_pfn) - register_nosave_region(lmb_region_max_pfn, - lmb_next_region_start_pfn); + if (memblock_region_max_pfn < memblock_next_region_start_pfn) + register_nosave_region(memblock_region_max_pfn, + memblock_next_region_start_pfn); } return 0; @@ -275,8 +275,8 @@ static int __init mark_nonram_nosave(void) */ void __init paging_init(void) { - unsigned long total_ram = lmb_phys_mem_size(); - phys_addr_t top_of_ram = lmb_end_of_DRAM(); + unsigned long total_ram = memblock_phys_mem_size(); + phys_addr_t top_of_ram = memblock_end_of_DRAM(); unsigned long max_zone_pfns[MAX_NR_ZONES]; #ifdef CONFIG_PPC32 @@ -327,7 +327,7 @@ void __init mem_init(void) swiotlb_init(1); #endif - num_physpages = lmb.memory.size >> PAGE_SHIFT; + num_physpages = memblock.memory.size >> PAGE_SHIFT; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); #ifdef CONFIG_NEED_MULTIPLE_NODES @@ -364,7 +364,7 @@ void __init mem_init(void) highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { struct page *page = pfn_to_page(pfn); - if (lmb_is_reserved(pfn << PAGE_SHIFT)) + if (memblock_is_reserved(pfn << PAGE_SHIFT)) continue; ClearPageReserved(page); init_page_count(page); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 80d110635d24..f47364585ecd 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -351,7 +351,7 @@ struct of_drconf_cell { #define DRCONF_MEM_RESERVED 0x00000080 /* - * Read the next lmb list entry from the ibm,dynamic-memory property + * Read the next memblock list entry from the ibm,dynamic-memory property * and return the information in the provided of_drconf_cell structure. */ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) @@ -372,8 +372,8 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) /* * Retreive and validate the ibm,dynamic-memory property of the device tree. * - * The layout of the ibm,dynamic-memory property is a number N of lmb - * list entries followed by N lmb list entries. Each lmb list entry + * The layout of the ibm,dynamic-memory property is a number N of memblock + * list entries followed by N memblock list entries. Each memblock list entry * contains information as layed out in the of_drconf_cell struct above. */ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) @@ -398,15 +398,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) } /* - * Retreive and validate the ibm,lmb-size property for drconf memory + * Retreive and validate the ibm,memblock-size property for drconf memory * from the device tree. 
*/ -static u64 of_get_lmb_size(struct device_node *memory) +static u64 of_get_memblock_size(struct device_node *memory) { const u32 *prop; u32 len; - prop = of_get_property(memory, "ibm,lmb-size", &len); + prop = of_get_property(memory, "ibm,memblock-size", &len); if (!prop || len < sizeof(unsigned int)) return 0; @@ -540,19 +540,19 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsigned long size) { /* - * We use lmb_end_of_DRAM() in here instead of memory_limit because + * We use memblock_end_of_DRAM() in here instead of memory_limit because * we've already adjusted it for the limit and it takes care of * having memory holes below the limit. Also, in the case of * iommu_is_off, memory_limit is not set but is implicitly enforced. */ - if (start + size <= lmb_end_of_DRAM()) + if (start + size <= memblock_end_of_DRAM()) return size; - if (start >= lmb_end_of_DRAM()) + if (start >= memblock_end_of_DRAM()) return 0; - return lmb_end_of_DRAM() - start; + return memblock_end_of_DRAM() - start; } /* @@ -562,7 +562,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start, static inline int __init read_usm_ranges(const u32 **usm) { /* - * For each lmb in ibm,dynamic-memory a corresponding + * For each memblock in ibm,dynamic-memory a corresponding * entry in linux,drconf-usable-memory property contains * a counter followed by that many (base, size) duple. * read the counter from linux,drconf-usable-memory @@ -578,7 +578,7 @@ static void __init parse_drconf_memory(struct device_node *memory) { const u32 *dm, *usm; unsigned int n, rc, ranges, is_kexec_kdump = 0; - unsigned long lmb_size, base, size, sz; + unsigned long memblock_size, base, size, sz; int nid; struct assoc_arrays aa; @@ -586,8 +586,8 @@ static void __init parse_drconf_memory(struct device_node *memory) if (!n) return; - lmb_size = of_get_lmb_size(memory); - if (!lmb_size) + memblock_size = of_get_memblock_size(memory); + if (!memblock_size) return; rc = of_get_assoc_arrays(memory, &aa); @@ -611,7 +611,7 @@ static void __init parse_drconf_memory(struct device_node *memory) continue; base = drmem.base_addr; - size = lmb_size; + size = memblock_size; ranges = 1; if (is_kexec_kdump) { @@ -731,7 +731,7 @@ new_range: } /* - * Now do the same thing for each LMB listed in the ibm,dynamic-memory + * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory * property in the ibm,dynamic-reconfiguration-memory node. 
*/ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); @@ -743,8 +743,8 @@ new_range: static void __init setup_nonnuma(void) { - unsigned long top_of_ram = lmb_end_of_DRAM(); - unsigned long total_ram = lmb_phys_mem_size(); + unsigned long top_of_ram = memblock_end_of_DRAM(); + unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; unsigned int i, nid = 0; @@ -753,9 +753,9 @@ static void __init setup_nonnuma(void) printk(KERN_DEBUG "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); - for (i = 0; i < lmb.memory.cnt; ++i) { - start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); + for (i = 0; i < memblock.memory.cnt; ++i) { + start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; + end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); fake_numa_create_new_node(end_pfn, &nid); add_active_range(nid, start_pfn, end_pfn); @@ -813,7 +813,7 @@ static void __init dump_numa_memory_topology(void) count = 0; - for (i = 0; i < lmb_end_of_DRAM(); + for (i = 0; i < memblock_end_of_DRAM(); i += (1 << SECTION_SIZE_BITS)) { if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) { if (count == 0) @@ -833,7 +833,7 @@ static void __init dump_numa_memory_topology(void) } /* - * Allocate some memory, satisfying the lmb or bootmem allocator where + * Allocate some memory, satisfying the memblock or bootmem allocator where * required. nid is the preferred node and end is the physical address of * the highest address in the node. * @@ -847,11 +847,11 @@ static void __init *careful_zallocation(int nid, unsigned long size, int new_nid; unsigned long ret_paddr; - ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT); + ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT); /* retry over all memory */ if (!ret_paddr) - ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM()); + ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM()); if (!ret_paddr) panic("numa.c: cannot allocate %lu bytes for node %d", @@ -861,14 +861,14 @@ static void __init *careful_zallocation(int nid, unsigned long size, /* * We initialize the nodes in numeric order: 0, 1, 2... - * and hand over control from the LMB allocator to the + * and hand over control from the MEMBLOCK allocator to the * bootmem allocator. If this function is called for * node 5, then we know that all nodes <5 are using the - * bootmem allocator instead of the LMB allocator. + * bootmem allocator instead of the MEMBLOCK allocator. * * So, check the nid from which this allocation came * and double check to see if we need to use bootmem - * instead of the LMB. We don't free the LMB memory + * instead of the MEMBLOCK. We don't free the MEMBLOCK memory * since it would be useless. 
*/ new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT); @@ -893,9 +893,9 @@ static void mark_reserved_regions_for_nid(int nid) struct pglist_data *node = NODE_DATA(nid); int i; - for (i = 0; i < lmb.reserved.cnt; i++) { - unsigned long physbase = lmb.reserved.region[i].base; - unsigned long size = lmb.reserved.region[i].size; + for (i = 0; i < memblock.reserved.cnt; i++) { + unsigned long physbase = memblock.reserved.region[i].base; + unsigned long size = memblock.reserved.region[i].size; unsigned long start_pfn = physbase >> PAGE_SHIFT; unsigned long end_pfn = PFN_UP(physbase + size); struct node_active_region node_ar; @@ -903,7 +903,7 @@ static void mark_reserved_regions_for_nid(int nid) node->node_spanned_pages; /* - * Check to make sure that this lmb.reserved area is + * Check to make sure that this memblock.reserved area is * within the bounds of the node that we care about. * Checking the nid of the start and end points is not * sufficient because the reserved area could span the @@ -961,7 +961,7 @@ void __init do_init_bootmem(void) int nid; min_low_pfn = 0; - max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; + max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; max_pfn = max_low_pfn; if (parse_numa_properties()) @@ -1038,7 +1038,7 @@ void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); - max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT; + max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT; free_area_init_nodes(max_zone_pfns); } @@ -1072,7 +1072,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory, { const u32 *dm; unsigned int drconf_cell_cnt, rc; - unsigned long lmb_size; + unsigned long memblock_size; struct assoc_arrays aa; int nid = -1; @@ -1080,8 +1080,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory, if (!drconf_cell_cnt) return -1; - lmb_size = of_get_lmb_size(memory); - if (!lmb_size) + memblock_size = of_get_memblock_size(memory); + if (!memblock_size) return -1; rc = of_get_assoc_arrays(memory, &aa); @@ -1100,7 +1100,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory, continue; if ((scn_addr < drmem.base_addr) - || (scn_addr >= (drmem.base_addr + lmb_size))) + || (scn_addr >= (drmem.base_addr + memblock_size))) continue; nid = of_drconf_to_nid_single(&drmem, &aa); @@ -1113,7 +1113,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory, /* * Find the node associated with a hot added memory section for memory * represented in the device tree as a node (i.e. memory@XXXX) for - * each lmb. + * each memblock. */ int hot_add_node_scn_to_nid(unsigned long scn_addr) { @@ -1154,8 +1154,8 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr) /* * Find the node associated with a hot added memory section. Section - * corresponds to a SPARSEMEM section, not an LMB. It is assumed that - * sections are fully contained within a single LMB. + * corresponds to a SPARSEMEM section, not an MEMBLOCK. It is assumed that + * sections are fully contained within a single MEMBLOCK. 
*/ int hot_add_scn_to_nid(unsigned long scn_addr) { diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 34347b2e7e31..a87ead0138b4 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include @@ -198,7 +198,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, * mem_init() sets high_memory so only do the check after that. */ if (mem_init_done && (p < virt_to_phys(high_memory)) && - !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) { + !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) { printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n", (unsigned long long)p, __builtin_return_address(0)); return NULL; @@ -331,7 +331,7 @@ void __init mapin_ram(void) s = mmu_mapin_ram(top); __mapin_ram_chunk(s, top); - top = lmb_end_of_DRAM(); + top = memblock_end_of_DRAM(); s = wii_mmu_mapin_mem2(top); __mapin_ram_chunk(s, top); } diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index d050fc8d9714..21d6dfab7942 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include @@ -67,7 +67,7 @@ static void *early_alloc_pgtable(unsigned long size) if (init_bootmem_done) pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS)); else - pt = __va(lmb_alloc_base(size, size, + pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS))); memset(pt, 0, size); diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index f11c2cdcb0fe..f8a01829d64f 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include @@ -223,7 +223,7 @@ void __init MMU_init_hw(void) * Find some memory for the hash table. */ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); - Hash = __va(lmb_alloc_base(Hash_size, Hash_size, + Hash = __va(memblock_alloc_base(Hash_size, Hash_size, __initial_memory_limit_addr)); cacheable_memzero(Hash, Hash_size); _SDR1 = __pa(Hash) | SDR1_LOW_BITS; diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 687fddaa24c5..446a01842a73 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c @@ -12,7 +12,7 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include +#include #include #include @@ -252,7 +252,7 @@ void __init stabs_alloc(void) if (cpu == 0) continue; /* stab for CPU 0 is statically allocated */ - newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE, + newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE, 1< #include #include -#include +#include #include #include @@ -426,7 +426,7 @@ static void __early_init_mmu(int boot_cpu) /* Set the global containing the top of the linear mapping * for use by the TLB miss code */ - linear_map_top = lmb_end_of_DRAM(); + linear_map_top = memblock_end_of_DRAM(); /* A sync won't hurt us after mucking around with * the MMU configuration diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c index 534c2ecc89d9..2ab338c9ac37 100644 --- a/arch/powerpc/platforms/85xx/corenet_ds.c +++ b/arch/powerpc/platforms/85xx/corenet_ds.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include @@ -100,7 +100,7 @@ void __init corenet_ds_setup_arch(void) #endif #ifdef CONFIG_SWIOTLB - if (lmb_end_of_DRAM() > max) { + if (memblock_end_of_DRAM() > max) { ppc_swiotlb_enable = 1; set_pci_dma_ops(&swiotlb_dma_ops); ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c index 004b7d36cdb7..f79f2f102141 100644 --- a/arch/powerpc/platforms/85xx/mpc8536_ds.c +++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include @@ -94,7 +94,7 @@ static void __init mpc8536_ds_setup_arch(void) #endif #ifdef CONFIG_SWIOTLB - if (lmb_end_of_DRAM() > max) { + if (memblock_end_of_DRAM() > max) { ppc_swiotlb_enable = 1; set_pci_dma_ops(&swiotlb_dma_ops); ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index 544011a562fb..8190bc25bf27 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -190,7 +190,7 @@ static void __init mpc85xx_ds_setup_arch(void) #endif #ifdef CONFIG_SWIOTLB - if (lmb_end_of_DRAM() > max) { + if (memblock_end_of_DRAM() > max) { ppc_swiotlb_enable = 1; set_pci_dma_ops(&swiotlb_dma_ops); ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index 8fe87fc61485..494513682d70 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include @@ -325,7 +325,7 @@ static void __init mpc85xx_mds_setup_arch(void) #endif /* CONFIG_QUICC_ENGINE */ #ifdef CONFIG_SWIOTLB - if (lmb_end_of_DRAM() > max) { + if (memblock_end_of_DRAM() > max) { ppc_swiotlb_enable = 1; set_pci_dma_ops(&swiotlb_dma_ops); ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c index 2aa69a69bcc8..b11c3535f350 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include @@ -103,7 +103,7 @@ mpc86xx_hpcn_setup_arch(void) #endif #ifdef CONFIG_SWIOTLB - if (lmb_end_of_DRAM() > max) { + if (memblock_end_of_DRAM() > max) { ppc_swiotlb_enable = 1; 
set_pci_dma_ops(&swiotlb_dma_ops); ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 4326b737d913..3712900471ba 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include @@ -845,10 +845,10 @@ static int __init cell_iommu_init_disabled(void) /* If we found a DMA window, we check if it's big enough to enclose * all of physical memory. If not, we force enable IOMMU */ - if (np && size < lmb_end_of_DRAM()) { + if (np && size < memblock_end_of_DRAM()) { printk(KERN_WARNING "iommu: force-enabled, dma window" " (%ldMB) smaller than total memory (%lldMB)\n", - size >> 20, lmb_end_of_DRAM() >> 20); + size >> 20, memblock_end_of_DRAM() >> 20); return -ENODEV; } @@ -1064,7 +1064,7 @@ static int __init cell_iommu_fixed_mapping_init(void) } fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT); - fsize = lmb_phys_mem_size(); + fsize = memblock_phys_mem_size(); if ((fbase + fsize) <= 0x800000000ul) hbase = 0; /* use the device tree window */ @@ -1169,7 +1169,7 @@ static int __init cell_iommu_init(void) * Note: should we make sure we have the IOMMU actually disabled ? */ if (iommu_is_off || - (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull)) + (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull)) if (cell_iommu_init_disabled() == 0) goto bail; diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 174a04ac4806..5cdcc7c8d973 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -65,7 +65,7 @@ static int __init page_aligned(unsigned long x) void __init wii_memory_fixups(void) { - struct lmb_property *p = lmb.memory.region; + struct memblock_property *p = memblock.memory.region; /* * This is part of a workaround to allow the use of two @@ -77,7 +77,7 @@ void __init wii_memory_fixups(void) * between both ranges. */ - BUG_ON(lmb.memory.cnt != 2); + BUG_ON(memblock.memory.cnt != 2); BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE); @@ -92,11 +92,11 @@ void __init wii_memory_fixups(void) p[0].size += wii_hole_size + p[1].size; - lmb.memory.cnt = 1; - lmb_analyze(); + memblock.memory.cnt = 1; + memblock_analyze(); /* reserve the hole */ - lmb_reserve(wii_hole_start, wii_hole_size); + memblock_reserve(wii_hole_start, wii_hole_size); /* allow ioremapping the address space in the hole */ __allow_ioremap_reserved = 1; diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 39df70529d29..3fff8d979b41 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 7b1d608ea3c8..1f9fb2c57761 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -204,7 +204,7 @@ int __init iob_init(struct device_node *dn) pr_debug(" -> %s\n", __func__); /* Allocate a spare page to map all invalid IOTLB pages. 
*/ - tmp = lmb_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE); + tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE); if (!tmp) panic("IOBMAP: Cannot allocate spare page!"); /* Empty l1 is marked invalid */ @@ -275,7 +275,7 @@ void __init alloc_iobmap_l2(void) return; #endif /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */ - iob_l2_base = (u32 *)abs_to_virt(lmb_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); + iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base); } diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index f1d0132ebcc7..9deb274841f1 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -51,7 +51,7 @@ #include #include #include -#include +#include #include #include @@ -619,7 +619,7 @@ static int __init pmac_probe(void) * driver needs that. We have to allocate it now. We allocate 4k * (1 small page) for now. */ - smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL); + smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL); #endif /* CONFIG_PMAC_SMU */ return 1; diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c index 1e8a1e39dfe8..2c0ed87f2024 100644 --- a/arch/powerpc/platforms/ps3/htab.c +++ b/arch/powerpc/platforms/ps3/htab.c @@ -19,7 +19,7 @@ */ #include -#include +#include #include #include diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 7925751e464a..c2045880e674 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -318,8 +318,8 @@ static int __init ps3_mm_add_memory(void) return result; } - lmb_add(start_addr, map.r1.size); - lmb_analyze(); + memblock_add(start_addr, map.r1.size); + memblock_analyze(); result = online_pages(start_pfn, nr_pages); diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index dd521a181f23..5b759b669598 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -723,7 +723,7 @@ static void os_area_queue_work(void) * flash to a high address in the boot memory region and then puts that RAM * address and the byte count into the repository for retrieval by the guest. * We copy the data we want into a static variable and allow the memory setup - * by the HV to be claimed by the lmb manager. + * by the HV to be claimed by the memblock manager. * * The os area mirror will not be available to a second stage kernel, and * the header verify will fail. 
In this case, the saved_params values will diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 01e7b5bb3c1d..deab5f946090 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -10,14 +10,14 @@ */ #include -#include +#include #include #include #include #include #include -static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size) +static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { unsigned long start, start_pfn; struct zone *zone; @@ -26,7 +26,7 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size) start_pfn = base >> PAGE_SHIFT; if (!pfn_valid(start_pfn)) { - lmb_remove(base, lmb_size); + memblock_remove(base, memblock_size); return 0; } @@ -41,20 +41,20 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size) * to sysfs "state" file and we can't remove sysfs entries * while writing to it. So we have to defer it to here. */ - ret = __remove_pages(zone, start_pfn, lmb_size >> PAGE_SHIFT); + ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT); if (ret) return ret; /* * Update memory regions for memory remove */ - lmb_remove(base, lmb_size); + memblock_remove(base, memblock_size); /* * Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(base); - ret = remove_section_mapping(start, start + lmb_size); + ret = remove_section_mapping(start, start + memblock_size); /* Ensure all vmalloc mappings are flushed in case they also * hit that section of memory @@ -69,7 +69,7 @@ static int pseries_remove_memory(struct device_node *np) const char *type; const unsigned int *regs; unsigned long base; - unsigned int lmb_size; + unsigned int memblock_size; int ret = -EINVAL; /* @@ -80,16 +80,16 @@ static int pseries_remove_memory(struct device_node *np) return 0; /* - * Find the bae address and size of the lmb + * Find the bae address and size of the memblock */ regs = of_get_property(np, "reg", NULL); if (!regs) return ret; base = *(unsigned long *)regs; - lmb_size = regs[3]; + memblock_size = regs[3]; - ret = pseries_remove_lmb(base, lmb_size); + ret = pseries_remove_memblock(base, memblock_size); return ret; } @@ -98,7 +98,7 @@ static int pseries_add_memory(struct device_node *np) const char *type; const unsigned int *regs; unsigned long base; - unsigned int lmb_size; + unsigned int memblock_size; int ret = -EINVAL; /* @@ -109,43 +109,43 @@ static int pseries_add_memory(struct device_node *np) return 0; /* - * Find the base and size of the lmb + * Find the base and size of the memblock */ regs = of_get_property(np, "reg", NULL); if (!regs) return ret; base = *(unsigned long *)regs; - lmb_size = regs[3]; + memblock_size = regs[3]; /* * Update memory region to represent the memory add */ - ret = lmb_add(base, lmb_size); + ret = memblock_add(base, memblock_size); return (ret < 0) ? 
-EINVAL : 0; } static int pseries_drconf_memory(unsigned long *base, unsigned int action) { struct device_node *np; - const unsigned long *lmb_size; + const unsigned long *memblock_size; int rc; np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (!np) return -EINVAL; - lmb_size = of_get_property(np, "ibm,lmb-size", NULL); - if (!lmb_size) { + memblock_size = of_get_property(np, "ibm,memblock-size", NULL); + if (!memblock_size) { of_node_put(np); return -EINVAL; } if (action == PSERIES_DRCONF_MEM_ADD) { - rc = lmb_add(*base, *lmb_size); + rc = memblock_add(*base, *memblock_size); rc = (rc < 0) ? -EINVAL : 0; } else if (action == PSERIES_DRCONF_MEM_REMOVE) { - rc = pseries_remove_lmb(*base, *lmb_size); + rc = pseries_remove_memblock(*base, *memblock_size); } else { rc = -EINVAL; } diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index d26182d42cbf..395848e30c52 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -66,7 +66,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, tcep = ((u64 *)tbl->it_base) + index; while (npages--) { - /* can't move this out since we might cross LMB boundary */ + /* can't move this out since we might cross MEMBLOCK boundary */ rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; diff --git a/arch/powerpc/platforms/pseries/phyp_dump.c b/arch/powerpc/platforms/pseries/phyp_dump.c index 7ebd9e88d369..6e7742da0072 100644 --- a/arch/powerpc/platforms/pseries/phyp_dump.c +++ b/arch/powerpc/platforms/pseries/phyp_dump.c @@ -255,12 +255,12 @@ void invalidate_last_dump(struct phyp_dump_header *ph, unsigned long addr) /* ------------------------------------------------- */ /** - * release_memory_range -- release memory previously lmb_reserved + * release_memory_range -- release memory previously memblock_reserved * @start_pfn: starting physical frame number * @nr_pages: number of pages to free. * * This routine will release memory that had been previously - * lmb_reserved in early boot. The released memory becomes + * memblock_reserved in early boot. The released memory becomes * available for genreal use. */ static void release_memory_range(unsigned long start_pfn, diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index c8b96ed7c015..559db2b846a9 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include #include @@ -232,7 +232,7 @@ static int __init dart_init(struct device_node *dart_node) * that to work around what looks like a problem with the HT bridge * prefetching into invalid pages and corrupting data */ - tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE); + tmp = memblock_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE); dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK); @@ -407,7 +407,7 @@ void __init alloc_dart_table(void) if (iommu_is_off) return; - if (!iommu_force_on && lmb_end_of_DRAM() <= 0x40000000ull) + if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull) return; /* 512 pages (2MB) is max DART tablesize. 
*/ @@ -416,7 +416,7 @@ void __init alloc_dart_table(void) * will blow up an entire large page anyway in the kernel mapping */ dart_tablebase = (unsigned long) - abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); + abs_to_virt(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase); } diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index a14760fe513a..356c6a0e1b23 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include @@ -190,7 +190,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose, pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar); /* Setup inbound mem window */ - mem = lmb_end_of_DRAM(); + mem = memblock_end_of_DRAM(); sz = min(mem, paddr_lo); mem_log = __ilog2_u64(sz); diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 573fca1fbd9b..82868fee21fd 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -10,7 +10,7 @@ config SUPERH select EMBEDDED select HAVE_CLK select HAVE_IDE if HAS_IOPORT - select HAVE_LMB + select HAVE_MEMBLOCK select HAVE_OPROFILE select HAVE_GENERIC_DMA_COHERENT select HAVE_ARCH_TRACEHOOK diff --git a/arch/sh/include/asm/lmb.h b/arch/sh/include/asm/lmb.h deleted file mode 100644 index 9b437f657ffa..000000000000 --- a/arch/sh/include/asm/lmb.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH_LMB_H -#define __ASM_SH_LMB_H - -#define LMB_REAL_LIMIT 0 - -#endif /* __ASM_SH_LMB_H */ diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h new file mode 100644 index 000000000000..dfe683b88075 --- /dev/null +++ b/arch/sh/include/asm/memblock.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH_MEMBLOCK_H +#define __ASM_SH_MEMBLOCK_H + +#define MEMBLOCK_REAL_LIMIT 0 + +#endif /* __ASM_SH_MEMBLOCK_H */ diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 5a559e666eb3..e2a3af31ff99 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -157,10 +157,10 @@ void __init reserve_crashkernel(void) unsigned long long crash_size, crash_base; int ret; - /* this is necessary because of lmb_phys_mem_size() */ - lmb_analyze(); + /* this is necessary because of memblock_phys_mem_size() */ + memblock_analyze(); - ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(), + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base); if (ret == 0 && crash_size > 0) { crashk_res.start = crash_base; @@ -172,14 +172,14 @@ void __init reserve_crashkernel(void) crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1); if (!crashk_res.start) { - unsigned long max = lmb_end_of_DRAM() - memory_limit; - crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max); + unsigned long max = memblock_end_of_DRAM() - memory_limit; + crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max); if (!crashk_res.start) { pr_err("crashkernel allocation failed\n"); goto disable; } } else { - ret = lmb_reserve(crashk_res.start, crash_size); + ret = memblock_reserve(crashk_res.start, crash_size); if (unlikely(ret < 0)) { pr_err("crashkernel reservation failed - " "memory is in use\n"); @@ -192,7 +192,7 @@ void __init reserve_crashkernel(void) /* * Crash kernel trumps memory limit */ - if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) { + if ((memblock_end_of_DRAM() - 
memory_limit) <= crashk_res.end) { memory_limit = 0; pr_info("Disabled memory limit for crashkernel\n"); } @@ -201,7 +201,7 @@ void __init reserve_crashkernel(void) "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crashk_res.start), - (unsigned long)(lmb_phys_mem_size() >> 20)); + (unsigned long)(memblock_phys_mem_size() >> 20)); return; diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 272734681d29..e769401a78ba 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include @@ -141,10 +141,10 @@ void __init check_for_initrd(void) goto disable; } - if (unlikely(end > lmb_end_of_DRAM())) { + if (unlikely(end > memblock_end_of_DRAM())) { pr_err("initrd extends beyond end of memory " "(0x%08lx > 0x%08lx)\ndisabling initrd\n", - end, (unsigned long)lmb_end_of_DRAM()); + end, (unsigned long)memblock_end_of_DRAM()); goto disable; } @@ -161,7 +161,7 @@ void __init check_for_initrd(void) initrd_start = (unsigned long)__va(__pa(start)); initrd_end = initrd_start + INITRD_SIZE; - lmb_reserve(__pa(initrd_start), INITRD_SIZE); + memblock_reserve(__pa(initrd_start), INITRD_SIZE); return; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 46f84de62469..d0e249100e98 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -33,7 +33,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD]; void __init generic_mem_init(void) { - lmb_add(__MEMORY_START, __MEMORY_SIZE); + memblock_add(__MEMORY_START, __MEMORY_SIZE); } void __init __weak plat_mem_setup(void) @@ -176,12 +176,12 @@ void __init allocate_pgdat(unsigned int nid) get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); #ifdef CONFIG_NEED_MULTIPLE_NODES - phys = __lmb_alloc_base(sizeof(struct pglist_data), + phys = __memblock_alloc_base(sizeof(struct pglist_data), SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT); /* Retry with all of system memory */ if (!phys) - phys = __lmb_alloc_base(sizeof(struct pglist_data), - SMP_CACHE_BYTES, lmb_end_of_DRAM()); + phys = __memblock_alloc_base(sizeof(struct pglist_data), + SMP_CACHE_BYTES, memblock_end_of_DRAM()); if (!phys) panic("Can't allocate pgdat for node %d\n", nid); @@ -212,7 +212,7 @@ static void __init bootmem_init_one_node(unsigned int nid) total_pages = bootmem_bootmap_pages(p->node_spanned_pages); - paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); + paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); if (!paddr) panic("Can't allocate bootmap for nid[%d]\n", nid); @@ -227,9 +227,9 @@ static void __init bootmem_init_one_node(unsigned int nid) */ if (nid == 0) { /* Reserve the sections we're already using. */ - for (i = 0; i < lmb.reserved.cnt; i++) - reserve_bootmem(lmb.reserved.region[i].base, - lmb_size_bytes(&lmb.reserved, i), + for (i = 0; i < memblock.reserved.cnt; i++) + reserve_bootmem(memblock.reserved.region[i].base, + memblock_size_bytes(&memblock.reserved, i), BOOTMEM_DEFAULT); } @@ -241,10 +241,10 @@ static void __init do_init_bootmem(void) int i; /* Add active regions with valid PFNs. 
*/ - for (i = 0; i < lmb.memory.cnt; i++) { + for (i = 0; i < memblock.memory.cnt; i++) { unsigned long start_pfn, end_pfn; - start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); + start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; + end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); __add_active_range(0, start_pfn, end_pfn); } @@ -276,7 +276,7 @@ static void __init early_reserve_mem(void) * this catches the (definitely buggy) case of us accidentally * initializing the bootmem allocator with an invalid RAM area. */ - lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, + memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); @@ -284,7 +284,7 @@ static void __init early_reserve_mem(void) * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET. */ if (CONFIG_ZERO_PAGE_OFFSET != 0) - lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET); + memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET); /* * Handle additional early reservations @@ -299,27 +299,27 @@ void __init paging_init(void) unsigned long vaddr, end; int nid; - lmb_init(); + memblock_init(); sh_mv.mv_mem_init(); early_reserve_mem(); - lmb_enforce_memory_limit(memory_limit); - lmb_analyze(); + memblock_enforce_memory_limit(memory_limit); + memblock_analyze(); - lmb_dump_all(); + memblock_dump_all(); /* * Determine low and high memory ranges: */ - max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; + max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; min_low_pfn = __MEMORY_START >> PAGE_SHIFT; nodes_clear(node_online_map); memory_start = (unsigned long)__va(__MEMORY_START); - memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size()); + memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size()); uncached_init(); pmb_init(); diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index a2e645f64a37..3d85225b9e95 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c @@ -9,7 +9,7 @@ */ #include #include -#include +#include #include #include #include @@ -39,12 +39,12 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) pmb_bolt_mapping((unsigned long)__va(start), start, end - start, PAGE_KERNEL); - lmb_add(start, end - start); + memblock_add(start, end - start); __add_active_range(nid, start_pfn, end_pfn); /* Node-local pgdat */ - NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data), + NODE_DATA(nid) = __va(memblock_alloc_base(sizeof(struct pglist_data), SMP_CACHE_BYTES, end)); memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); @@ -54,7 +54,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) /* Node-local bootmap */ bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); - bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT, + bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT, PAGE_SIZE, end); init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, start_pfn, end_pfn); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 6f1470baa314..c0015db247ba 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -42,7 +42,7 @@ config SPARC64 select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_KRETPROBES select HAVE_KPROBES - select HAVE_LMB + select HAVE_MEMBLOCK select HAVE_SYSCALL_WRAPPERS select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD diff --git a/arch/sparc/include/asm/lmb.h b/arch/sparc/include/asm/lmb.h deleted file 
mode 100644 index 6a352cbcf520..000000000000 --- a/arch/sparc/include/asm/lmb.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _SPARC64_LMB_H -#define _SPARC64_LMB_H - -#include - -#define LMB_DBG(fmt...) prom_printf(fmt) - -#define LMB_REAL_LIMIT 0 - -#endif /* !(_SPARC64_LMB_H) */ diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h new file mode 100644 index 000000000000..f12af880649b --- /dev/null +++ b/arch/sparc/include/asm/memblock.h @@ -0,0 +1,10 @@ +#ifndef _SPARC64_MEMBLOCK_H +#define _SPARC64_MEMBLOCK_H + +#include + +#define MEMBLOCK_DBG(fmt...) prom_printf(fmt) + +#define MEMBLOCK_REAL_LIMIT 0 + +#endif /* !(_SPARC64_MEMBLOCK_H) */ diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index cdc91d919e93..83e85c2e802a 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -4,7 +4,7 @@ */ #include #include -#include +#include #include #include #include @@ -86,7 +86,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp, hp->handle_size = handle_size; } -static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size) +static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size) { unsigned int handle_size, alloc_size; struct mdesc_handle *hp; @@ -97,7 +97,7 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size) mdesc_size); alloc_size = PAGE_ALIGN(handle_size); - paddr = lmb_alloc(alloc_size, PAGE_SIZE); + paddr = memblock_alloc(alloc_size, PAGE_SIZE); hp = NULL; if (paddr) { @@ -107,7 +107,7 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size) return hp; } -static void mdesc_lmb_free(struct mdesc_handle *hp) +static void mdesc_memblock_free(struct mdesc_handle *hp) { unsigned int alloc_size; unsigned long start; @@ -120,9 +120,9 @@ static void mdesc_lmb_free(struct mdesc_handle *hp) free_bootmem_late(start, alloc_size); } -static struct mdesc_mem_ops lmb_mdesc_ops = { - .alloc = mdesc_lmb_alloc, - .free = mdesc_lmb_free, +static struct mdesc_mem_ops memblock_mdesc_ops = { + .alloc = mdesc_memblock_alloc, + .free = mdesc_memblock_free, }; static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) @@ -914,7 +914,7 @@ void __init sun4v_mdesc_init(void) printk("MDESC: Size is %lu bytes.\n", len); - hp = mdesc_alloc(len, &lmb_mdesc_ops); + hp = mdesc_alloc(len, &memblock_mdesc_ops); if (hp == NULL) { prom_printf("MDESC: alloc of %lu bytes failed.\n", len); prom_halt(); diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c index fb06ac2bd38f..466a32763ea8 100644 --- a/arch/sparc/kernel/prom_64.c +++ b/arch/sparc/kernel/prom_64.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -34,7 +34,7 @@ void * __init prom_early_alloc(unsigned long size) { - unsigned long paddr = lmb_alloc(size, SMP_CACHE_BYTES); + unsigned long paddr = memblock_alloc(size, SMP_CACHE_BYTES); void *ret; if (!paddr) { diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index b2831dc3c121..f0434513df15 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include @@ -726,7 +726,7 @@ static void __init find_ramdisk(unsigned long phys_base) initrd_start = ramdisk_image; initrd_end = ramdisk_image + sparc_ramdisk_size; - lmb_reserve(initrd_start, sparc_ramdisk_size); + memblock_reserve(initrd_start, sparc_ramdisk_size); initrd_start += PAGE_OFFSET; initrd_end += PAGE_OFFSET; @@ -822,7 +822,7 @@ static void __init 
allocate_node_data(int nid) struct pglist_data *p; #ifdef CONFIG_NEED_MULTIPLE_NODES - paddr = lmb_alloc_nid(sizeof(struct pglist_data), + paddr = memblock_alloc_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid, nid_range); if (!paddr) { prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); @@ -843,7 +843,7 @@ static void __init allocate_node_data(int nid) if (p->node_spanned_pages) { num_pages = bootmem_bootmap_pages(p->node_spanned_pages); - paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid, + paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid, nid_range); if (!paddr) { prom_printf("Cannot allocate bootmap for nid[%d]\n", @@ -974,11 +974,11 @@ static void __init add_node_ranges(void) { int i; - for (i = 0; i < lmb.memory.cnt; i++) { - unsigned long size = lmb_size_bytes(&lmb.memory, i); + for (i = 0; i < memblock.memory.cnt; i++) { + unsigned long size = memblock_size_bytes(&memblock.memory, i); unsigned long start, end; - start = lmb.memory.region[i].base; + start = memblock.memory.region[i].base; end = start + size; while (start < end) { unsigned long this_end; @@ -1010,7 +1010,7 @@ static int __init grab_mlgroups(struct mdesc_handle *md) if (!count) return -ENOENT; - paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup), + paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup), SMP_CACHE_BYTES); if (!paddr) return -ENOMEM; @@ -1051,7 +1051,7 @@ static int __init grab_mblocks(struct mdesc_handle *md) if (!count) return -ENOENT; - paddr = lmb_alloc(count * sizeof(struct mdesc_mblock), + paddr = memblock_alloc(count * sizeof(struct mdesc_mblock), SMP_CACHE_BYTES); if (!paddr) return -ENOMEM; @@ -1279,8 +1279,8 @@ static int bootmem_init_numa(void) static void __init bootmem_init_nonnuma(void) { - unsigned long top_of_ram = lmb_end_of_DRAM(); - unsigned long total_ram = lmb_phys_mem_size(); + unsigned long top_of_ram = memblock_end_of_DRAM(); + unsigned long total_ram = memblock_phys_mem_size(); unsigned int i; numadbg("bootmem_init_nonnuma()\n"); @@ -1292,15 +1292,15 @@ static void __init bootmem_init_nonnuma(void) init_node_masks_nonnuma(); - for (i = 0; i < lmb.memory.cnt; i++) { - unsigned long size = lmb_size_bytes(&lmb.memory, i); + for (i = 0; i < memblock.memory.cnt; i++) { + unsigned long size = memblock_size_bytes(&memblock.memory, i); unsigned long start_pfn, end_pfn; if (!size) continue; - start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); + start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; + end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); add_active_range(0, start_pfn, end_pfn); } @@ -1338,9 +1338,9 @@ static void __init trim_reserved_in_node(int nid) numadbg(" trim_reserved_in_node(%d)\n", nid); - for (i = 0; i < lmb.reserved.cnt; i++) { - unsigned long start = lmb.reserved.region[i].base; - unsigned long size = lmb_size_bytes(&lmb.reserved, i); + for (i = 0; i < memblock.reserved.cnt; i++) { + unsigned long start = memblock.reserved.region[i].base; + unsigned long size = memblock_size_bytes(&memblock.reserved, i); unsigned long end = start + size; reserve_range_in_node(nid, start, end); @@ -1384,7 +1384,7 @@ static unsigned long __init bootmem_init(unsigned long phys_base) unsigned long end_pfn; int nid; - end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; + end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; max_pfn = max_low_pfn = end_pfn; min_low_pfn = (phys_base >> PAGE_SHIFT); @@ -1734,7 +1734,7 @@ void __init paging_init(void) sun4v_ktsb_init(); } - 
lmb_init(); + memblock_init(); /* Find available physical memory... * @@ -1752,17 +1752,17 @@ void __init paging_init(void) phys_base = 0xffffffffffffffffUL; for (i = 0; i < pavail_ents; i++) { phys_base = min(phys_base, pavail[i].phys_addr); - lmb_add(pavail[i].phys_addr, pavail[i].reg_size); + memblock_add(pavail[i].phys_addr, pavail[i].reg_size); } - lmb_reserve(kern_base, kern_size); + memblock_reserve(kern_base, kern_size); find_ramdisk(phys_base); - lmb_enforce_memory_limit(cmdline_memory_size); + memblock_enforce_memory_limit(cmdline_memory_size); - lmb_analyze(); - lmb_dump_all(); + memblock_analyze(); + memblock_dump_all(); set_bit(0, mmu_context_bmap); @@ -1816,8 +1816,8 @@ void __init paging_init(void) */ for_each_possible_cpu(i) { /* XXX Use node local allocations... XXX */ - softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); - hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); + softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); + hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); } /* Setup bootmem... */ diff --git a/include/linux/lmb.h b/include/linux/lmb.h deleted file mode 100644 index f3d14333ebed..000000000000 --- a/include/linux/lmb.h +++ /dev/null @@ -1,89 +0,0 @@ -#ifndef _LINUX_LMB_H -#define _LINUX_LMB_H -#ifdef __KERNEL__ - -/* - * Logical memory blocks. - * - * Copyright (C) 2001 Peter Bergner, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include - -#define MAX_LMB_REGIONS 128 - -struct lmb_property { - u64 base; - u64 size; -}; - -struct lmb_region { - unsigned long cnt; - u64 size; - struct lmb_property region[MAX_LMB_REGIONS+1]; -}; - -struct lmb { - unsigned long debug; - u64 rmo_size; - struct lmb_region memory; - struct lmb_region reserved; -}; - -extern struct lmb lmb; - -extern void __init lmb_init(void); -extern void __init lmb_analyze(void); -extern long lmb_add(u64 base, u64 size); -extern long lmb_remove(u64 base, u64 size); -extern long __init lmb_free(u64 base, u64 size); -extern long __init lmb_reserve(u64 base, u64 size); -extern u64 __init lmb_alloc_nid(u64 size, u64 align, int nid, - u64 (*nid_range)(u64, u64, int *)); -extern u64 __init lmb_alloc(u64 size, u64 align); -extern u64 __init lmb_alloc_base(u64 size, - u64, u64 max_addr); -extern u64 __init __lmb_alloc_base(u64 size, - u64 align, u64 max_addr); -extern u64 __init lmb_phys_mem_size(void); -extern u64 lmb_end_of_DRAM(void); -extern void __init lmb_enforce_memory_limit(u64 memory_limit); -extern int __init lmb_is_reserved(u64 addr); -extern int lmb_is_region_reserved(u64 base, u64 size); -extern int lmb_find(struct lmb_property *res); - -extern void lmb_dump_all(void); - -static inline u64 -lmb_size_bytes(struct lmb_region *type, unsigned long region_nr) -{ - return type->region[region_nr].size; -} -static inline u64 -lmb_size_pages(struct lmb_region *type, unsigned long region_nr) -{ - return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT; -} -static inline u64 -lmb_start_pfn(struct lmb_region *type, unsigned long region_nr) -{ - return type->region[region_nr].base >> PAGE_SHIFT; -} -static inline u64 -lmb_end_pfn(struct lmb_region *type, unsigned long region_nr) -{ - return lmb_start_pfn(type, region_nr) + - lmb_size_pages(type, region_nr); -} - -#include - -#endif /* __KERNEL__ */ - -#endif /* 
_LINUX_LMB_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h new file mode 100644 index 000000000000..a59faf2b5edd --- /dev/null +++ b/include/linux/memblock.h @@ -0,0 +1,89 @@ +#ifndef _LINUX_MEMBLOCK_H +#define _LINUX_MEMBLOCK_H +#ifdef __KERNEL__ + +/* + * Logical memory blocks. + * + * Copyright (C) 2001 Peter Bergner, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include + +#define MAX_MEMBLOCK_REGIONS 128 + +struct memblock_property { + u64 base; + u64 size; +}; + +struct memblock_region { + unsigned long cnt; + u64 size; + struct memblock_property region[MAX_MEMBLOCK_REGIONS+1]; +}; + +struct memblock { + unsigned long debug; + u64 rmo_size; + struct memblock_region memory; + struct memblock_region reserved; +}; + +extern struct memblock memblock; + +extern void __init memblock_init(void); +extern void __init memblock_analyze(void); +extern long memblock_add(u64 base, u64 size); +extern long memblock_remove(u64 base, u64 size); +extern long __init memblock_free(u64 base, u64 size); +extern long __init memblock_reserve(u64 base, u64 size); +extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, + u64 (*nid_range)(u64, u64, int *)); +extern u64 __init memblock_alloc(u64 size, u64 align); +extern u64 __init memblock_alloc_base(u64 size, + u64, u64 max_addr); +extern u64 __init __memblock_alloc_base(u64 size, + u64 align, u64 max_addr); +extern u64 __init memblock_phys_mem_size(void); +extern u64 memblock_end_of_DRAM(void); +extern void __init memblock_enforce_memory_limit(u64 memory_limit); +extern int __init memblock_is_reserved(u64 addr); +extern int memblock_is_region_reserved(u64 base, u64 size); +extern int memblock_find(struct memblock_property *res); + +extern void memblock_dump_all(void); + +static inline u64 +memblock_size_bytes(struct memblock_region *type, unsigned long region_nr) +{ + return type->region[region_nr].size; +} +static inline u64 +memblock_size_pages(struct memblock_region *type, unsigned long region_nr) +{ + return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT; +} +static inline u64 +memblock_start_pfn(struct memblock_region *type, unsigned long region_nr) +{ + return type->region[region_nr].base >> PAGE_SHIFT; +} +static inline u64 +memblock_end_pfn(struct memblock_region *type, unsigned long region_nr) +{ + return memblock_start_pfn(type, region_nr) + + memblock_size_pages(type, region_nr); +} + +#include + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_MEMBLOCK_H */ diff --git a/lib/Kconfig b/lib/Kconfig index 170d8ca901d8..5b916bc0fbae 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -181,9 +181,6 @@ config HAS_DMA config CHECK_SIGNATURE bool -config HAVE_LMB - boolean - config CPUMASK_OFFSTACK bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS help diff --git a/lib/Makefile b/lib/Makefile index 3f1062cbbff4..0bfabba1bb32 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -89,8 +89,6 @@ obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o lib-$(CONFIG_GENERIC_BUG) += bug.o -obj-$(CONFIG_HAVE_LMB) += lmb.o - obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o diff --git a/lib/lmb.c b/lib/lmb.c deleted file mode 100644 index b1fc52606524..000000000000 --- a/lib/lmb.c +++ /dev/null @@ -1,541 +0,0 @@ -/* - * Procedures for 
maintaining information about logical memory blocks. - * - * Peter Bergner, IBM Corp. June 2001. - * Copyright (C) 2001 Peter Bergner. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include - -#define LMB_ALLOC_ANYWHERE 0 - -struct lmb lmb; - -static int lmb_debug; - -static int __init early_lmb(char *p) -{ - if (p && strstr(p, "debug")) - lmb_debug = 1; - return 0; -} -early_param("lmb", early_lmb); - -static void lmb_dump(struct lmb_region *region, char *name) -{ - unsigned long long base, size; - int i; - - pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); - - for (i = 0; i < region->cnt; i++) { - base = region->region[i].base; - size = region->region[i].size; - - pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", - name, i, base, base + size - 1, size); - } -} - -void lmb_dump_all(void) -{ - if (!lmb_debug) - return; - - pr_info("LMB configuration:\n"); - pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size); - pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size); - - lmb_dump(&lmb.memory, "memory"); - lmb_dump(&lmb.reserved, "reserved"); -} - -static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, - u64 size2) -{ - return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); -} - -static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2) -{ - if (base2 == base1 + size1) - return 1; - else if (base1 == base2 + size2) - return -1; - - return 0; -} - -static long lmb_regions_adjacent(struct lmb_region *rgn, - unsigned long r1, unsigned long r2) -{ - u64 base1 = rgn->region[r1].base; - u64 size1 = rgn->region[r1].size; - u64 base2 = rgn->region[r2].base; - u64 size2 = rgn->region[r2].size; - - return lmb_addrs_adjacent(base1, size1, base2, size2); -} - -static void lmb_remove_region(struct lmb_region *rgn, unsigned long r) -{ - unsigned long i; - - for (i = r; i < rgn->cnt - 1; i++) { - rgn->region[i].base = rgn->region[i + 1].base; - rgn->region[i].size = rgn->region[i + 1].size; - } - rgn->cnt--; -} - -/* Assumption: base addr of region 1 < base addr of region 2 */ -static void lmb_coalesce_regions(struct lmb_region *rgn, - unsigned long r1, unsigned long r2) -{ - rgn->region[r1].size += rgn->region[r2].size; - lmb_remove_region(rgn, r2); -} - -void __init lmb_init(void) -{ - /* Create a dummy zero size LMB which will get coalesced away later. - * This simplifies the lmb_add() code below... - */ - lmb.memory.region[0].base = 0; - lmb.memory.region[0].size = 0; - lmb.memory.cnt = 1; - - /* Ditto. */ - lmb.reserved.region[0].base = 0; - lmb.reserved.region[0].size = 0; - lmb.reserved.cnt = 1; -} - -void __init lmb_analyze(void) -{ - int i; - - lmb.memory.size = 0; - - for (i = 0; i < lmb.memory.cnt; i++) - lmb.memory.size += lmb.memory.region[i].size; -} - -static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) -{ - unsigned long coalesced = 0; - long adjacent, i; - - if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { - rgn->region[0].base = base; - rgn->region[0].size = size; - return 0; - } - - /* First try and coalesce this LMB with another. 
*/ - for (i = 0; i < rgn->cnt; i++) { - u64 rgnbase = rgn->region[i].base; - u64 rgnsize = rgn->region[i].size; - - if ((rgnbase == base) && (rgnsize == size)) - /* Already have this region, so we're done */ - return 0; - - adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize); - if (adjacent > 0) { - rgn->region[i].base -= size; - rgn->region[i].size += size; - coalesced++; - break; - } else if (adjacent < 0) { - rgn->region[i].size += size; - coalesced++; - break; - } - } - - if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) { - lmb_coalesce_regions(rgn, i, i+1); - coalesced++; - } - - if (coalesced) - return coalesced; - if (rgn->cnt >= MAX_LMB_REGIONS) - return -1; - - /* Couldn't coalesce the LMB, so add it to the sorted table. */ - for (i = rgn->cnt - 1; i >= 0; i--) { - if (base < rgn->region[i].base) { - rgn->region[i+1].base = rgn->region[i].base; - rgn->region[i+1].size = rgn->region[i].size; - } else { - rgn->region[i+1].base = base; - rgn->region[i+1].size = size; - break; - } - } - - if (base < rgn->region[0].base) { - rgn->region[0].base = base; - rgn->region[0].size = size; - } - rgn->cnt++; - - return 0; -} - -long lmb_add(u64 base, u64 size) -{ - struct lmb_region *_rgn = &lmb.memory; - - /* On pSeries LPAR systems, the first LMB is our RMO region. */ - if (base == 0) - lmb.rmo_size = size; - - return lmb_add_region(_rgn, base, size); - -} - -static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size) -{ - u64 rgnbegin, rgnend; - u64 end = base + size; - int i; - - rgnbegin = rgnend = 0; /* supress gcc warnings */ - - /* Find the region where (base, size) belongs to */ - for (i=0; i < rgn->cnt; i++) { - rgnbegin = rgn->region[i].base; - rgnend = rgnbegin + rgn->region[i].size; - - if ((rgnbegin <= base) && (end <= rgnend)) - break; - } - - /* Didn't find the region */ - if (i == rgn->cnt) - return -1; - - /* Check to see if we are removing entire region */ - if ((rgnbegin == base) && (rgnend == end)) { - lmb_remove_region(rgn, i); - return 0; - } - - /* Check to see if region is matching at the front */ - if (rgnbegin == base) { - rgn->region[i].base = end; - rgn->region[i].size -= size; - return 0; - } - - /* Check to see if the region is matching at the end */ - if (rgnend == end) { - rgn->region[i].size -= size; - return 0; - } - - /* - * We need to split the entry - adjust the current one to the - * beginging of the hole and add the region after hole. - */ - rgn->region[i].size = base - rgn->region[i].base; - return lmb_add_region(rgn, end, rgnend - end); -} - -long lmb_remove(u64 base, u64 size) -{ - return __lmb_remove(&lmb.memory, base, size); -} - -long __init lmb_free(u64 base, u64 size) -{ - return __lmb_remove(&lmb.reserved, base, size); -} - -long __init lmb_reserve(u64 base, u64 size) -{ - struct lmb_region *_rgn = &lmb.reserved; - - BUG_ON(0 == size); - - return lmb_add_region(_rgn, base, size); -} - -long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) -{ - unsigned long i; - - for (i = 0; i < rgn->cnt; i++) { - u64 rgnbase = rgn->region[i].base; - u64 rgnsize = rgn->region[i].size; - if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) - break; - } - - return (i < rgn->cnt) ? 
i : -1; -} - -static u64 lmb_align_down(u64 addr, u64 size) -{ - return addr & ~(size - 1); -} - -static u64 lmb_align_up(u64 addr, u64 size) -{ - return (addr + (size - 1)) & ~(size - 1); -} - -static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end, - u64 size, u64 align) -{ - u64 base, res_base; - long j; - - base = lmb_align_down((end - size), align); - while (start <= base) { - j = lmb_overlaps_region(&lmb.reserved, base, size); - if (j < 0) { - /* this area isn't reserved, take it */ - if (lmb_add_region(&lmb.reserved, base, size) < 0) - base = ~(u64)0; - return base; - } - res_base = lmb.reserved.region[j].base; - if (res_base < size) - break; - base = lmb_align_down(res_base - size, align); - } - - return ~(u64)0; -} - -static u64 __init lmb_alloc_nid_region(struct lmb_property *mp, - u64 (*nid_range)(u64, u64, int *), - u64 size, u64 align, int nid) -{ - u64 start, end; - - start = mp->base; - end = start + mp->size; - - start = lmb_align_up(start, align); - while (start < end) { - u64 this_end; - int this_nid; - - this_end = nid_range(start, end, &this_nid); - if (this_nid == nid) { - u64 ret = lmb_alloc_nid_unreserved(start, this_end, - size, align); - if (ret != ~(u64)0) - return ret; - } - start = this_end; - } - - return ~(u64)0; -} - -u64 __init lmb_alloc_nid(u64 size, u64 align, int nid, - u64 (*nid_range)(u64 start, u64 end, int *nid)) -{ - struct lmb_region *mem = &lmb.memory; - int i; - - BUG_ON(0 == size); - - size = lmb_align_up(size, align); - - for (i = 0; i < mem->cnt; i++) { - u64 ret = lmb_alloc_nid_region(&mem->region[i], - nid_range, - size, align, nid); - if (ret != ~(u64)0) - return ret; - } - - return lmb_alloc(size, align); -} - -u64 __init lmb_alloc(u64 size, u64 align) -{ - return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); -} - -u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr) -{ - u64 alloc; - - alloc = __lmb_alloc_base(size, align, max_addr); - - if (alloc == 0) - panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", - (unsigned long long) size, (unsigned long long) max_addr); - - return alloc; -} - -u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr) -{ - long i, j; - u64 base = 0; - u64 res_base; - - BUG_ON(0 == size); - - size = lmb_align_up(size, align); - - /* On some platforms, make sure we allocate lowmem */ - /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */ - if (max_addr == LMB_ALLOC_ANYWHERE) - max_addr = LMB_REAL_LIMIT; - - for (i = lmb.memory.cnt - 1; i >= 0; i--) { - u64 lmbbase = lmb.memory.region[i].base; - u64 lmbsize = lmb.memory.region[i].size; - - if (lmbsize < size) - continue; - if (max_addr == LMB_ALLOC_ANYWHERE) - base = lmb_align_down(lmbbase + lmbsize - size, align); - else if (lmbbase < max_addr) { - base = min(lmbbase + lmbsize, max_addr); - base = lmb_align_down(base - size, align); - } else - continue; - - while (base && lmbbase <= base) { - j = lmb_overlaps_region(&lmb.reserved, base, size); - if (j < 0) { - /* this area isn't reserved, take it */ - if (lmb_add_region(&lmb.reserved, base, size) < 0) - return 0; - return base; - } - res_base = lmb.reserved.region[j].base; - if (res_base < size) - break; - base = lmb_align_down(res_base - size, align); - } - } - return 0; -} - -/* You must call lmb_analyze() before this. */ -u64 __init lmb_phys_mem_size(void) -{ - return lmb.memory.size; -} - -u64 lmb_end_of_DRAM(void) -{ - int idx = lmb.memory.cnt - 1; - - return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); -} - -/* You must call lmb_analyze() after this. 
*/ -void __init lmb_enforce_memory_limit(u64 memory_limit) -{ - unsigned long i; - u64 limit; - struct lmb_property *p; - - if (!memory_limit) - return; - - /* Truncate the lmb regions to satisfy the memory limit. */ - limit = memory_limit; - for (i = 0; i < lmb.memory.cnt; i++) { - if (limit > lmb.memory.region[i].size) { - limit -= lmb.memory.region[i].size; - continue; - } - - lmb.memory.region[i].size = limit; - lmb.memory.cnt = i + 1; - break; - } - - if (lmb.memory.region[0].size < lmb.rmo_size) - lmb.rmo_size = lmb.memory.region[0].size; - - memory_limit = lmb_end_of_DRAM(); - - /* And truncate any reserves above the limit also. */ - for (i = 0; i < lmb.reserved.cnt; i++) { - p = &lmb.reserved.region[i]; - - if (p->base > memory_limit) - p->size = 0; - else if ((p->base + p->size) > memory_limit) - p->size = memory_limit - p->base; - - if (p->size == 0) { - lmb_remove_region(&lmb.reserved, i); - i--; - } - } -} - -int __init lmb_is_reserved(u64 addr) -{ - int i; - - for (i = 0; i < lmb.reserved.cnt; i++) { - u64 upper = lmb.reserved.region[i].base + - lmb.reserved.region[i].size - 1; - if ((addr >= lmb.reserved.region[i].base) && (addr <= upper)) - return 1; - } - return 0; -} - -int lmb_is_region_reserved(u64 base, u64 size) -{ - return lmb_overlaps_region(&lmb.reserved, base, size); -} - -/* - * Given a , find which memory regions belong to this range. - * Adjust the request and return a contiguous chunk. - */ -int lmb_find(struct lmb_property *res) -{ - int i; - u64 rstart, rend; - - rstart = res->base; - rend = rstart + res->size - 1; - - for (i = 0; i < lmb.memory.cnt; i++) { - u64 start = lmb.memory.region[i].base; - u64 end = start + lmb.memory.region[i].size - 1; - - if (start > rend) - return -1; - - if ((end >= rstart) && (start < rend)) { - /* adjust the request */ - if (rstart < start) - rstart = start; - if (rend > end) - rend = end; - res->base = rstart; - res->size = rend - rstart + 1; - return 0; - } - } - return -1; -} diff --git a/mm/Kconfig b/mm/Kconfig index 527136b22384..f4e516e9c37c 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -128,6 +128,9 @@ config SPARSEMEM_VMEMMAP pfn_to_page and page_to_pfn operations. This is the most efficient option when sufficient kernel resources are available. +config HAVE_MEMBLOCK + boolean + # eventually, we can have this option just 'select SPARSEMEM' config MEMORY_HOTPLUG bool "Allow for memory hot-add" diff --git a/mm/Makefile b/mm/Makefile index 8982504bd03b..34b2546a9e37 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -15,6 +15,8 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ $(mmu-y) obj-y += init-mm.o +obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o + obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o obj-$(CONFIG_HAS_DMA) += dmapool.o diff --git a/mm/memblock.c b/mm/memblock.c new file mode 100644 index 000000000000..3024eb30fc27 --- /dev/null +++ b/mm/memblock.c @@ -0,0 +1,541 @@ +/* + * Procedures for maintaining information about logical memory blocks. + * + * Peter Bergner, IBM Corp. June 2001. + * Copyright (C) 2001 Peter Bergner. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include + +#define MEMBLOCK_ALLOC_ANYWHERE 0 + +struct memblock memblock; + +static int memblock_debug; + +static int __init early_memblock(char *p) +{ + if (p && strstr(p, "debug")) + memblock_debug = 1; + return 0; +} +early_param("memblock", early_memblock); + +static void memblock_dump(struct memblock_region *region, char *name) +{ + unsigned long long base, size; + int i; + + pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); + + for (i = 0; i < region->cnt; i++) { + base = region->region[i].base; + size = region->region[i].size; + + pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", + name, i, base, base + size - 1, size); + } +} + +void memblock_dump_all(void) +{ + if (!memblock_debug) + return; + + pr_info("MEMBLOCK configuration:\n"); + pr_info(" rmo_size = 0x%llx\n", (unsigned long long)memblock.rmo_size); + pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size); + + memblock_dump(&memblock.memory, "memory"); + memblock_dump(&memblock.reserved, "reserved"); +} + +static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2, + u64 size2) +{ + return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); +} + +static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2) +{ + if (base2 == base1 + size1) + return 1; + else if (base1 == base2 + size2) + return -1; + + return 0; +} + +static long memblock_regions_adjacent(struct memblock_region *rgn, + unsigned long r1, unsigned long r2) +{ + u64 base1 = rgn->region[r1].base; + u64 size1 = rgn->region[r1].size; + u64 base2 = rgn->region[r2].base; + u64 size2 = rgn->region[r2].size; + + return memblock_addrs_adjacent(base1, size1, base2, size2); +} + +static void memblock_remove_region(struct memblock_region *rgn, unsigned long r) +{ + unsigned long i; + + for (i = r; i < rgn->cnt - 1; i++) { + rgn->region[i].base = rgn->region[i + 1].base; + rgn->region[i].size = rgn->region[i + 1].size; + } + rgn->cnt--; +} + +/* Assumption: base addr of region 1 < base addr of region 2 */ +static void memblock_coalesce_regions(struct memblock_region *rgn, + unsigned long r1, unsigned long r2) +{ + rgn->region[r1].size += rgn->region[r2].size; + memblock_remove_region(rgn, r2); +} + +void __init memblock_init(void) +{ + /* Create a dummy zero size MEMBLOCK which will get coalesced away later. + * This simplifies the memblock_add() code below... + */ + memblock.memory.region[0].base = 0; + memblock.memory.region[0].size = 0; + memblock.memory.cnt = 1; + + /* Ditto. */ + memblock.reserved.region[0].base = 0; + memblock.reserved.region[0].size = 0; + memblock.reserved.cnt = 1; +} + +void __init memblock_analyze(void) +{ + int i; + + memblock.memory.size = 0; + + for (i = 0; i < memblock.memory.cnt; i++) + memblock.memory.size += memblock.memory.region[i].size; +} + +static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size) +{ + unsigned long coalesced = 0; + long adjacent, i; + + if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { + rgn->region[0].base = base; + rgn->region[0].size = size; + return 0; + } + + /* First try and coalesce this MEMBLOCK with another. 
*/ + for (i = 0; i < rgn->cnt; i++) { + u64 rgnbase = rgn->region[i].base; + u64 rgnsize = rgn->region[i].size; + + if ((rgnbase == base) && (rgnsize == size)) + /* Already have this region, so we're done */ + return 0; + + adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize); + if (adjacent > 0) { + rgn->region[i].base -= size; + rgn->region[i].size += size; + coalesced++; + break; + } else if (adjacent < 0) { + rgn->region[i].size += size; + coalesced++; + break; + } + } + + if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) { + memblock_coalesce_regions(rgn, i, i+1); + coalesced++; + } + + if (coalesced) + return coalesced; + if (rgn->cnt >= MAX_MEMBLOCK_REGIONS) + return -1; + + /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ + for (i = rgn->cnt - 1; i >= 0; i--) { + if (base < rgn->region[i].base) { + rgn->region[i+1].base = rgn->region[i].base; + rgn->region[i+1].size = rgn->region[i].size; + } else { + rgn->region[i+1].base = base; + rgn->region[i+1].size = size; + break; + } + } + + if (base < rgn->region[0].base) { + rgn->region[0].base = base; + rgn->region[0].size = size; + } + rgn->cnt++; + + return 0; +} + +long memblock_add(u64 base, u64 size) +{ + struct memblock_region *_rgn = &memblock.memory; + + /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */ + if (base == 0) + memblock.rmo_size = size; + + return memblock_add_region(_rgn, base, size); + +} + +static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size) +{ + u64 rgnbegin, rgnend; + u64 end = base + size; + int i; + + rgnbegin = rgnend = 0; /* supress gcc warnings */ + + /* Find the region where (base, size) belongs to */ + for (i=0; i < rgn->cnt; i++) { + rgnbegin = rgn->region[i].base; + rgnend = rgnbegin + rgn->region[i].size; + + if ((rgnbegin <= base) && (end <= rgnend)) + break; + } + + /* Didn't find the region */ + if (i == rgn->cnt) + return -1; + + /* Check to see if we are removing entire region */ + if ((rgnbegin == base) && (rgnend == end)) { + memblock_remove_region(rgn, i); + return 0; + } + + /* Check to see if region is matching at the front */ + if (rgnbegin == base) { + rgn->region[i].base = end; + rgn->region[i].size -= size; + return 0; + } + + /* Check to see if the region is matching at the end */ + if (rgnend == end) { + rgn->region[i].size -= size; + return 0; + } + + /* + * We need to split the entry - adjust the current one to the + * beginging of the hole and add the region after hole. + */ + rgn->region[i].size = base - rgn->region[i].base; + return memblock_add_region(rgn, end, rgnend - end); +} + +long memblock_remove(u64 base, u64 size) +{ + return __memblock_remove(&memblock.memory, base, size); +} + +long __init memblock_free(u64 base, u64 size) +{ + return __memblock_remove(&memblock.reserved, base, size); +} + +long __init memblock_reserve(u64 base, u64 size) +{ + struct memblock_region *_rgn = &memblock.reserved; + + BUG_ON(0 == size); + + return memblock_add_region(_rgn, base, size); +} + +long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size) +{ + unsigned long i; + + for (i = 0; i < rgn->cnt; i++) { + u64 rgnbase = rgn->region[i].base; + u64 rgnsize = rgn->region[i].size; + if (memblock_addrs_overlap(base, size, rgnbase, rgnsize)) + break; + } + + return (i < rgn->cnt) ? 
i : -1; +} + +static u64 memblock_align_down(u64 addr, u64 size) +{ + return addr & ~(size - 1); +} + +static u64 memblock_align_up(u64 addr, u64 size) +{ + return (addr + (size - 1)) & ~(size - 1); +} + +static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end, + u64 size, u64 align) +{ + u64 base, res_base; + long j; + + base = memblock_align_down((end - size), align); + while (start <= base) { + j = memblock_overlaps_region(&memblock.reserved, base, size); + if (j < 0) { + /* this area isn't reserved, take it */ + if (memblock_add_region(&memblock.reserved, base, size) < 0) + base = ~(u64)0; + return base; + } + res_base = memblock.reserved.region[j].base; + if (res_base < size) + break; + base = memblock_align_down(res_base - size, align); + } + + return ~(u64)0; +} + +static u64 __init memblock_alloc_nid_region(struct memblock_property *mp, + u64 (*nid_range)(u64, u64, int *), + u64 size, u64 align, int nid) +{ + u64 start, end; + + start = mp->base; + end = start + mp->size; + + start = memblock_align_up(start, align); + while (start < end) { + u64 this_end; + int this_nid; + + this_end = nid_range(start, end, &this_nid); + if (this_nid == nid) { + u64 ret = memblock_alloc_nid_unreserved(start, this_end, + size, align); + if (ret != ~(u64)0) + return ret; + } + start = this_end; + } + + return ~(u64)0; +} + +u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, + u64 (*nid_range)(u64 start, u64 end, int *nid)) +{ + struct memblock_region *mem = &memblock.memory; + int i; + + BUG_ON(0 == size); + + size = memblock_align_up(size, align); + + for (i = 0; i < mem->cnt; i++) { + u64 ret = memblock_alloc_nid_region(&mem->region[i], + nid_range, + size, align, nid); + if (ret != ~(u64)0) + return ret; + } + + return memblock_alloc(size, align); +} + +u64 __init memblock_alloc(u64 size, u64 align) +{ + return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); +} + +u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr) +{ + u64 alloc; + + alloc = __memblock_alloc_base(size, align, max_addr); + + if (alloc == 0) + panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", + (unsigned long long) size, (unsigned long long) max_addr); + + return alloc; +} + +u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr) +{ + long i, j; + u64 base = 0; + u64 res_base; + + BUG_ON(0 == size); + + size = memblock_align_up(size, align); + + /* On some platforms, make sure we allocate lowmem */ + /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */ + if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) + max_addr = MEMBLOCK_REAL_LIMIT; + + for (i = memblock.memory.cnt - 1; i >= 0; i--) { + u64 memblockbase = memblock.memory.region[i].base; + u64 memblocksize = memblock.memory.region[i].size; + + if (memblocksize < size) + continue; + if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) + base = memblock_align_down(memblockbase + memblocksize - size, align); + else if (memblockbase < max_addr) { + base = min(memblockbase + memblocksize, max_addr); + base = memblock_align_down(base - size, align); + } else + continue; + + while (base && memblockbase <= base) { + j = memblock_overlaps_region(&memblock.reserved, base, size); + if (j < 0) { + /* this area isn't reserved, take it */ + if (memblock_add_region(&memblock.reserved, base, size) < 0) + return 0; + return base; + } + res_base = memblock.reserved.region[j].base; + if (res_base < size) + break; + base = memblock_align_down(res_base - size, align); + } + } + return 0; +} + +/* You must call memblock_analyze() before 
this. */ +u64 __init memblock_phys_mem_size(void) +{ + return memblock.memory.size; +} + +u64 memblock_end_of_DRAM(void) +{ + int idx = memblock.memory.cnt - 1; + + return (memblock.memory.region[idx].base + memblock.memory.region[idx].size); +} + +/* You must call memblock_analyze() after this. */ +void __init memblock_enforce_memory_limit(u64 memory_limit) +{ + unsigned long i; + u64 limit; + struct memblock_property *p; + + if (!memory_limit) + return; + + /* Truncate the memblock regions to satisfy the memory limit. */ + limit = memory_limit; + for (i = 0; i < memblock.memory.cnt; i++) { + if (limit > memblock.memory.region[i].size) { + limit -= memblock.memory.region[i].size; + continue; + } + + memblock.memory.region[i].size = limit; + memblock.memory.cnt = i + 1; + break; + } + + if (memblock.memory.region[0].size < memblock.rmo_size) + memblock.rmo_size = memblock.memory.region[0].size; + + memory_limit = memblock_end_of_DRAM(); + + /* And truncate any reserves above the limit also. */ + for (i = 0; i < memblock.reserved.cnt; i++) { + p = &memblock.reserved.region[i]; + + if (p->base > memory_limit) + p->size = 0; + else if ((p->base + p->size) > memory_limit) + p->size = memory_limit - p->base; + + if (p->size == 0) { + memblock_remove_region(&memblock.reserved, i); + i--; + } + } +} + +int __init memblock_is_reserved(u64 addr) +{ + int i; + + for (i = 0; i < memblock.reserved.cnt; i++) { + u64 upper = memblock.reserved.region[i].base + + memblock.reserved.region[i].size - 1; + if ((addr >= memblock.reserved.region[i].base) && (addr <= upper)) + return 1; + } + return 0; +} + +int memblock_is_region_reserved(u64 base, u64 size) +{ + return memblock_overlaps_region(&memblock.reserved, base, size); +} + +/* + * Given a , find which memory regions belong to this range. + * Adjust the request and return a contiguous chunk. + */ +int memblock_find(struct memblock_property *res) +{ + int i; + u64 rstart, rend; + + rstart = res->base; + rend = rstart + res->size - 1; + + for (i = 0; i < memblock.memory.cnt; i++) { + u64 start = memblock.memory.region[i].base; + u64 end = start + memblock.memory.region[i].size - 1; + + if (start > rend) + return -1; + + if ((end >= rstart) && (start < rend)) { + /* adjust the request */ + if (rstart < start) + rstart = start; + if (rend > end) + rend = end; + res->base = rstart; + res->size = rend - rstart + 1; + return 0; + } + } + return -1; +} -- cgit v1.2.3 From 4c879170296174bde05cd1c643dac16594edee77 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:30:10 +0200 Subject: padata: Check for valid padata instance on start This patch introduces the PADATA_INVALID flag which is checked on padata start. This will be used to mark a padata instance as invalid, if the padata cpumask does not intersect with the active cpumask. we change padata_start to return an error if the PADATA_INVALID is set. Also we adapt the only padata user, pcrypt to this change. 
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 19 ++++++++++++++----- include/linux/padata.h | 3 ++- kernel/padata.c | 18 ++++++++++++++++-- 3 files changed, 32 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 247178cb98ec..71ae2b2ae33b 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -385,6 +385,7 @@ static struct crypto_template pcrypt_tmpl = { static int __init pcrypt_init(void) { + int err = -ENOMEM; encwq = create_workqueue("pencrypt"); if (!encwq) goto err; @@ -400,14 +401,22 @@ static int __init pcrypt_init(void) pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq); if (!pcrypt_dec_padata) - goto err_free_padata; + goto err_free_enc_padata; - padata_start(pcrypt_enc_padata); - padata_start(pcrypt_dec_padata); + err = padata_start(pcrypt_enc_padata); + if (err) + goto err_free_dec_padata; + + err = padata_start(pcrypt_dec_padata); + if (err) + goto err_free_dec_padata; return crypto_register_template(&pcrypt_tmpl); -err_free_padata: +err_free_dec_padata: + padata_free(pcrypt_dec_padata); + +err_free_enc_padata: padata_free(pcrypt_enc_padata); err_destroy_decwq: @@ -417,7 +426,7 @@ err_destroy_encwq: destroy_workqueue(encwq); err: - return -ENOMEM; + return err; } static void __exit pcrypt_exit(void) diff --git a/include/linux/padata.h b/include/linux/padata.h index 8d8406246eef..e4c17f9b7c9e 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -126,6 +126,7 @@ struct padata_instance { u8 flags; #define PADATA_INIT 1 #define PADATA_RESET 2 +#define PADATA_INVALID 4 }; extern struct padata_instance *padata_alloc(const struct cpumask *cpumask, @@ -138,6 +139,6 @@ extern int padata_set_cpumask(struct padata_instance *pinst, cpumask_var_t cpumask); extern int padata_add_cpu(struct padata_instance *pinst, int cpu); extern int padata_remove_cpu(struct padata_instance *pinst, int cpu); -extern void padata_start(struct padata_instance *pinst); +extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); #endif diff --git a/kernel/padata.c b/kernel/padata.c index ff8de1b71e4e..e7d723a3e31d 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -485,6 +485,11 @@ static void padata_flush_queues(struct parallel_data *pd) BUG_ON(atomic_read(&pd->refcnt) != 0); } +static void __padata_start(struct padata_instance *pinst) +{ + pinst->flags |= PADATA_INIT; +} + /* Replace the internal control stucture with a new one. */ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) @@ -619,11 +624,20 @@ EXPORT_SYMBOL(padata_remove_cpu); * * @pinst: padata instance to start */ -void padata_start(struct padata_instance *pinst) +int padata_start(struct padata_instance *pinst) { + int err = 0; + mutex_lock(&pinst->lock); - pinst->flags |= PADATA_INIT; + + if (pinst->flags & PADATA_INVALID) + err =-EINVAL; + + __padata_start(pinst); + mutex_unlock(&pinst->lock); + + return err; } EXPORT_SYMBOL(padata_start); -- cgit v1.2.3 From 5f1a8c1bc724498ff32acbd59ed5263275676b9d Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 7 Jul 2010 15:32:39 +0200 Subject: padata: simplify serialization mechanism We count the number of processed objects on a percpu basis, so we need to go through all the percpu reorder queues to calculate the sequence number of the next object that needs serialization. This patch changes this to count the number of processed objects global. 
So we can calculate the sequence number and the percpu reorder queue of the next object that needs serialization without searching through the percpu reorder queues. This avoids some accesses to memory of foreign cpus. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- include/linux/padata.h | 6 ++--- kernel/padata.c | 71 ++++++++++++++------------------------------------ 2 files changed, 22 insertions(+), 55 deletions(-) (limited to 'include') diff --git a/include/linux/padata.h b/include/linux/padata.h index e4c17f9b7c9e..8844b851191e 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -67,7 +67,6 @@ struct padata_list { * @pwork: work struct for parallelization. * @swork: work struct for serialization. * @pd: Backpointer to the internal control structure. - * @num_obj: Number of objects that are processed by this cpu. * @cpu_index: Index of the cpu. */ struct padata_queue { @@ -77,7 +76,6 @@ struct padata_queue { struct work_struct pwork; struct work_struct swork; struct parallel_data *pd; - atomic_t num_obj; int cpu_index; }; @@ -93,6 +91,7 @@ struct padata_queue { * @max_seq_nr: Maximal used sequence number. * @cpumask: cpumask in use. * @lock: Reorder lock. + * @processed: Number of already processed objects. * @timer: Reorder timer. */ struct parallel_data { @@ -103,7 +102,8 @@ struct parallel_data { atomic_t refcnt; unsigned int max_seq_nr; cpumask_var_t cpumask; - spinlock_t lock; + spinlock_t lock ____cacheline_aligned; + unsigned int processed; struct timer_list timer; }; diff --git a/kernel/padata.c b/kernel/padata.c index ae8defcf0622..450d67d394b0 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -170,79 +170,47 @@ EXPORT_SYMBOL(padata_do_parallel); */ static struct padata_priv *padata_get_next(struct parallel_data *pd) { - int cpu, num_cpus, empty, calc_seq_nr; - int seq_nr, next_nr, overrun, next_overrun; + int cpu, num_cpus; + int next_nr, next_index; struct padata_queue *queue, *next_queue; struct padata_priv *padata; struct padata_list *reorder; - empty = 0; - next_nr = -1; - next_overrun = 0; - next_queue = NULL; - num_cpus = cpumask_weight(pd->cpumask); - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - reorder = &queue->reorder; - - /* - * Calculate the seq_nr of the object that should be - * next in this reorder queue. - */ - overrun = 0; - calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus) - + queue->cpu_index; - - if (unlikely(calc_seq_nr > pd->max_seq_nr)) { - calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1; - overrun = 1; - } - - if (!list_empty(&reorder->list)) { - padata = list_entry(reorder->list.next, - struct padata_priv, list); - - seq_nr = padata->seq_nr; - BUG_ON(calc_seq_nr != seq_nr); - } else { - seq_nr = calc_seq_nr; - empty++; - } - - if (next_nr < 0 || seq_nr < next_nr - || (next_overrun && !overrun)) { - next_nr = seq_nr; - next_overrun = overrun; - next_queue = queue; - } + /* + * Calculate the percpu reorder queue and the sequence + * number of the next object. 
+ */ + next_nr = pd->processed; + next_index = next_nr % num_cpus; + cpu = padata_index_to_cpu(pd, next_index); + next_queue = per_cpu_ptr(pd->queue, cpu); + + if (unlikely(next_nr > pd->max_seq_nr)) { + next_nr = next_nr - pd->max_seq_nr - 1; + next_index = next_nr % num_cpus; + cpu = padata_index_to_cpu(pd, next_index); + next_queue = per_cpu_ptr(pd->queue, cpu); + pd->processed = 0; } padata = NULL; - if (empty == num_cpus) - goto out; - reorder = &next_queue->reorder; if (!list_empty(&reorder->list)) { padata = list_entry(reorder->list.next, struct padata_priv, list); - if (unlikely(next_overrun)) { - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - atomic_set(&queue->num_obj, 0); - } - } + BUG_ON(next_nr != padata->seq_nr); spin_lock(&reorder->lock); list_del_init(&padata->list); atomic_dec(&pd->reorder_objects); spin_unlock(&reorder->lock); - atomic_inc(&next_queue->num_obj); + pd->processed++; goto out; } @@ -430,7 +398,6 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, INIT_WORK(&queue->pwork, padata_parallel_worker); INIT_WORK(&queue->swork, padata_serial_worker); - atomic_set(&queue->num_obj, 0); } num_cpus = cpumask_weight(pd->cpumask); -- cgit v1.2.3 From 4cf51c383d7a8d472a6090a0d19c371d40e823c9 Mon Sep 17 00:00:00 2001 From: Joonyoung Shim Date: Wed, 14 Jul 2010 21:55:30 -0700 Subject: Input: Add ATMEL QT602240 touchscreen driver The chip's full name is AT42QT602240 or ATMXT224. This is a capacitive touchscreen supporting 10-contact multitouch and using I2C interface. Signed-off-by: Joonyoung Shim Acked-by: Henrik Rydberg Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/Kconfig | 12 + drivers/input/touchscreen/Makefile | 1 + drivers/input/touchscreen/qt602240_ts.c | 1401 +++++++++++++++++++++++++++++++ include/linux/i2c/qt602240_ts.h | 38 + 4 files changed, 1452 insertions(+) create mode 100644 drivers/input/touchscreen/qt602240_ts.c create mode 100644 include/linux/i2c/qt602240_ts.h (limited to 'include') diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index ff18d896ea6d..7bfcfdff6cf8 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -291,6 +291,18 @@ config TOUCHSCREEN_PENMOUNT To compile this driver as a module, choose M here: the module will be called penmount. +config TOUCHSCREEN_QT602240 + tristate "QT602240 I2C Touchscreen" + depends on I2C + help + Say Y here if you have the AT42QT602240/ATMXT224 I2C touchscreen + connected to your system. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called qt602240_ts. 
+ config TOUCHSCREEN_MIGOR tristate "Renesas MIGO-R touchscreen" depends on SH_MIGOR && I2C diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 9efdd4424757..779de0d9d413 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o +obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o diff --git a/drivers/input/touchscreen/qt602240_ts.c b/drivers/input/touchscreen/qt602240_ts.c new file mode 100644 index 000000000000..66b26ad3032a --- /dev/null +++ b/drivers/input/touchscreen/qt602240_ts.c @@ -0,0 +1,1401 @@ +/* + * AT42QT602240/ATMXT224 Touchscreen driver + * + * Copyright (C) 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Version */ +#define QT602240_VER_20 20 +#define QT602240_VER_21 21 +#define QT602240_VER_22 22 + +/* Slave addresses */ +#define QT602240_APP_LOW 0x4a +#define QT602240_APP_HIGH 0x4b +#define QT602240_BOOT_LOW 0x24 +#define QT602240_BOOT_HIGH 0x25 + +/* Firmware */ +#define QT602240_FW_NAME "qt602240.fw" + +/* Registers */ +#define QT602240_FAMILY_ID 0x00 +#define QT602240_VARIANT_ID 0x01 +#define QT602240_VERSION 0x02 +#define QT602240_BUILD 0x03 +#define QT602240_MATRIX_X_SIZE 0x04 +#define QT602240_MATRIX_Y_SIZE 0x05 +#define QT602240_OBJECT_NUM 0x06 +#define QT602240_OBJECT_START 0x07 + +#define QT602240_OBJECT_SIZE 6 + +/* Object types */ +#define QT602240_DEBUG_DIAGNOSTIC 37 +#define QT602240_GEN_MESSAGE 5 +#define QT602240_GEN_COMMAND 6 +#define QT602240_GEN_POWER 7 +#define QT602240_GEN_ACQUIRE 8 +#define QT602240_TOUCH_MULTI 9 +#define QT602240_TOUCH_KEYARRAY 15 +#define QT602240_TOUCH_PROXIMITY 23 +#define QT602240_PROCI_GRIPFACE 20 +#define QT602240_PROCG_NOISE 22 +#define QT602240_PROCI_ONETOUCH 24 +#define QT602240_PROCI_TWOTOUCH 27 +#define QT602240_SPT_COMMSCONFIG 18 /* firmware ver 21 over */ +#define QT602240_SPT_GPIOPWM 19 +#define QT602240_SPT_SELFTEST 25 +#define QT602240_SPT_CTECONFIG 28 +#define QT602240_SPT_USERDATA 38 /* firmware ver 21 over */ + +/* QT602240_GEN_COMMAND field */ +#define QT602240_COMMAND_RESET 0 +#define QT602240_COMMAND_BACKUPNV 1 +#define QT602240_COMMAND_CALIBRATE 2 +#define QT602240_COMMAND_REPORTALL 3 +#define QT602240_COMMAND_DIAGNOSTIC 5 + +/* QT602240_GEN_POWER field */ +#define QT602240_POWER_IDLEACQINT 0 +#define QT602240_POWER_ACTVACQINT 1 +#define QT602240_POWER_ACTV2IDLETO 2 + +/* QT602240_GEN_ACQUIRE field */ +#define QT602240_ACQUIRE_CHRGTIME 0 +#define QT602240_ACQUIRE_TCHDRIFT 2 +#define QT602240_ACQUIRE_DRIFTST 3 +#define QT602240_ACQUIRE_TCHAUTOCAL 4 +#define QT602240_ACQUIRE_SYNC 5 +#define QT602240_ACQUIRE_ATCHCALST 6 +#define QT602240_ACQUIRE_ATCHCALSTHR 7 + +/* QT602240_TOUCH_MULTI field */ +#define QT602240_TOUCH_CTRL 0 +#define QT602240_TOUCH_XORIGIN 1 +#define QT602240_TOUCH_YORIGIN 2 +#define 
QT602240_TOUCH_XSIZE 3 +#define QT602240_TOUCH_YSIZE 4 +#define QT602240_TOUCH_BLEN 6 +#define QT602240_TOUCH_TCHTHR 7 +#define QT602240_TOUCH_TCHDI 8 +#define QT602240_TOUCH_ORIENT 9 +#define QT602240_TOUCH_MOVHYSTI 11 +#define QT602240_TOUCH_MOVHYSTN 12 +#define QT602240_TOUCH_NUMTOUCH 14 +#define QT602240_TOUCH_MRGHYST 15 +#define QT602240_TOUCH_MRGTHR 16 +#define QT602240_TOUCH_AMPHYST 17 +#define QT602240_TOUCH_XRANGE_LSB 18 +#define QT602240_TOUCH_XRANGE_MSB 19 +#define QT602240_TOUCH_YRANGE_LSB 20 +#define QT602240_TOUCH_YRANGE_MSB 21 +#define QT602240_TOUCH_XLOCLIP 22 +#define QT602240_TOUCH_XHICLIP 23 +#define QT602240_TOUCH_YLOCLIP 24 +#define QT602240_TOUCH_YHICLIP 25 +#define QT602240_TOUCH_XEDGECTRL 26 +#define QT602240_TOUCH_XEDGEDIST 27 +#define QT602240_TOUCH_YEDGECTRL 28 +#define QT602240_TOUCH_YEDGEDIST 29 +#define QT602240_TOUCH_JUMPLIMIT 30 /* firmware ver 22 over */ + +/* QT602240_PROCI_GRIPFACE field */ +#define QT602240_GRIPFACE_CTRL 0 +#define QT602240_GRIPFACE_XLOGRIP 1 +#define QT602240_GRIPFACE_XHIGRIP 2 +#define QT602240_GRIPFACE_YLOGRIP 3 +#define QT602240_GRIPFACE_YHIGRIP 4 +#define QT602240_GRIPFACE_MAXTCHS 5 +#define QT602240_GRIPFACE_SZTHR1 7 +#define QT602240_GRIPFACE_SZTHR2 8 +#define QT602240_GRIPFACE_SHPTHR1 9 +#define QT602240_GRIPFACE_SHPTHR2 10 +#define QT602240_GRIPFACE_SUPEXTTO 11 + +/* QT602240_PROCI_NOISE field */ +#define QT602240_NOISE_CTRL 0 +#define QT602240_NOISE_OUTFLEN 1 +#define QT602240_NOISE_GCAFUL_LSB 3 +#define QT602240_NOISE_GCAFUL_MSB 4 +#define QT602240_NOISE_GCAFLL_LSB 5 +#define QT602240_NOISE_GCAFLL_MSB 6 +#define QT602240_NOISE_ACTVGCAFVALID 7 +#define QT602240_NOISE_NOISETHR 8 +#define QT602240_NOISE_FREQHOPSCALE 10 +#define QT602240_NOISE_FREQ0 11 +#define QT602240_NOISE_FREQ1 12 +#define QT602240_NOISE_FREQ2 13 +#define QT602240_NOISE_FREQ3 14 +#define QT602240_NOISE_FREQ4 15 +#define QT602240_NOISE_IDLEGCAFVALID 16 + +/* QT602240_SPT_COMMSCONFIG */ +#define QT602240_COMMS_CTRL 0 +#define QT602240_COMMS_CMD 1 + +/* QT602240_SPT_CTECONFIG field */ +#define QT602240_CTE_CTRL 0 +#define QT602240_CTE_CMD 1 +#define QT602240_CTE_MODE 2 +#define QT602240_CTE_IDLEGCAFDEPTH 3 +#define QT602240_CTE_ACTVGCAFDEPTH 4 +#define QT602240_CTE_VOLTAGE 5 /* firmware ver 21 over */ + +#define QT602240_VOLTAGE_DEFAULT 2700000 +#define QT602240_VOLTAGE_STEP 10000 + +/* Define for QT602240_GEN_COMMAND */ +#define QT602240_BOOT_VALUE 0xa5 +#define QT602240_BACKUP_VALUE 0x55 +#define QT602240_BACKUP_TIME 25 /* msec */ +#define QT602240_RESET_TIME 65 /* msec */ + +#define QT602240_FWRESET_TIME 175 /* msec */ + +/* Command to unlock bootloader */ +#define QT602240_UNLOCK_CMD_MSB 0xaa +#define QT602240_UNLOCK_CMD_LSB 0xdc + +/* Bootloader mode status */ +#define QT602240_WAITING_BOOTLOAD_CMD 0xc0 /* valid 7 6 bit only */ +#define QT602240_WAITING_FRAME_DATA 0x80 /* valid 7 6 bit only */ +#define QT602240_FRAME_CRC_CHECK 0x02 +#define QT602240_FRAME_CRC_FAIL 0x03 +#define QT602240_FRAME_CRC_PASS 0x04 +#define QT602240_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */ +#define QT602240_BOOT_STATUS_MASK 0x3f + +/* Touch status */ +#define QT602240_SUPPRESS (1 << 1) +#define QT602240_AMP (1 << 2) +#define QT602240_VECTOR (1 << 3) +#define QT602240_MOVE (1 << 4) +#define QT602240_RELEASE (1 << 5) +#define QT602240_PRESS (1 << 6) +#define QT602240_DETECT (1 << 7) + +/* Touchscreen absolute values */ +#define QT602240_MAX_XC 0x3ff +#define QT602240_MAX_YC 0x3ff +#define QT602240_MAX_AREA 0xff + +#define QT602240_MAX_FINGER 10 + +/* Initial register values 
recommended from chip vendor */ +static const u8 init_vals_ver_20[] = { + /* QT602240_GEN_COMMAND(6) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_GEN_POWER(7) */ + 0x20, 0xff, 0x32, + /* QT602240_GEN_ACQUIRE(8) */ + 0x08, 0x05, 0x05, 0x00, 0x00, 0x00, 0x05, 0x14, + /* QT602240_TOUCH_MULTI(9) */ + 0x00, 0x00, 0x00, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0x64, + /* QT602240_TOUCH_KEYARRAY(15) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, + /* QT602240_SPT_GPIOPWM(19) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, + /* QT602240_PROCI_GRIPFACE(20) */ + 0x00, 0x64, 0x64, 0x64, 0x64, 0x00, 0x00, 0x1e, 0x14, 0x04, + 0x1e, 0x00, + /* QT602240_PROCG_NOISE(22) */ + 0x05, 0x00, 0x00, 0x19, 0x00, 0xe7, 0xff, 0x04, 0x32, 0x00, + 0x01, 0x0a, 0x0f, 0x14, 0x00, 0x00, 0xe8, + /* QT602240_TOUCH_PROXIMITY(23) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, + /* QT602240_PROCI_ONETOUCH(24) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_SPT_SELFTEST(25) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + /* QT602240_PROCI_TWOTOUCH(27) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_SPT_CTECONFIG(28) */ + 0x00, 0x00, 0x00, 0x04, 0x08, +}; + +static const u8 init_vals_ver_21[] = { + /* QT602240_GEN_COMMAND(6) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_GEN_POWER(7) */ + 0x20, 0xff, 0x32, + /* QT602240_GEN_ACQUIRE(8) */ + 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23, + /* QT602240_TOUCH_MULTI(9) */ + 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_TOUCH_KEYARRAY(15) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, + /* QT602240_SPT_GPIOPWM(19) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_PROCI_GRIPFACE(20) */ + 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04, + 0x0f, 0x0a, + /* QT602240_PROCG_NOISE(22) */ + 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00, + 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03, + /* QT602240_TOUCH_PROXIMITY(23) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, + /* QT602240_PROCI_ONETOUCH(24) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_SPT_SELFTEST(25) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + /* QT602240_PROCI_TWOTOUCH(27) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_SPT_CTECONFIG(28) */ + 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, +}; + +static const u8 init_vals_ver_22[] = { + /* QT602240_GEN_COMMAND(6) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_GEN_POWER(7) */ + 0x20, 0xff, 0x32, + /* QT602240_GEN_ACQUIRE(8) */ + 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23, + /* QT602240_TOUCH_MULTI(9) */ + 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, + /* QT602240_TOUCH_KEYARRAY(15) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, + /* QT602240_SPT_GPIOPWM(19) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_PROCI_GRIPFACE(20) */ + 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04, + 0x0f, 0x0a, + /* QT602240_PROCG_NOISE(22) */ + 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00, + 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03, + /* QT602240_TOUCH_PROXIMITY(23) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_PROCI_ONETOUCH(24) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_SPT_SELFTEST(25) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + /* QT602240_PROCI_TWOTOUCH(27) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* QT602240_SPT_CTECONFIG(28) */ + 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, +}; + +struct qt602240_info { + u8 family_id; + u8 variant_id; + u8 version; + u8 build; + u8 matrix_xsize; + u8 matrix_ysize; + u8 object_num; +}; + +struct qt602240_object { + u8 type; + u16 start_address; + u8 size; + u8 instances; + u8 num_report_ids; + + /* to map object and message */ + u8 max_reportid; +}; + +struct qt602240_message { + u8 reportid; + u8 message[7]; + u8 checksum; +}; + +struct qt602240_finger { + int status; + int x; + int y; + int area; +}; + +/* Each client has this additional data */ +struct qt602240_data { + struct i2c_client *client; + struct input_dev *input_dev; + const struct qt602240_platform_data *pdata; + struct qt602240_object *object_table; + struct qt602240_info info; + struct qt602240_finger finger[QT602240_MAX_FINGER]; + unsigned int irq; +}; + +static bool qt602240_object_readable(unsigned int type) +{ + switch (type) { + case QT602240_GEN_MESSAGE: + case QT602240_GEN_COMMAND: + case QT602240_GEN_POWER: + case QT602240_GEN_ACQUIRE: + case QT602240_TOUCH_MULTI: + case QT602240_TOUCH_KEYARRAY: + case QT602240_TOUCH_PROXIMITY: + case QT602240_PROCI_GRIPFACE: + case QT602240_PROCG_NOISE: + case QT602240_PROCI_ONETOUCH: + case QT602240_PROCI_TWOTOUCH: + case QT602240_SPT_COMMSCONFIG: + case QT602240_SPT_GPIOPWM: + case QT602240_SPT_SELFTEST: + case QT602240_SPT_CTECONFIG: + case QT602240_SPT_USERDATA: + return true; + default: + return false; + } +} + +static bool qt602240_object_writable(unsigned int type) +{ + switch (type) { + case QT602240_GEN_COMMAND: + case QT602240_GEN_POWER: + case QT602240_GEN_ACQUIRE: + case QT602240_TOUCH_MULTI: + case QT602240_TOUCH_KEYARRAY: + case QT602240_TOUCH_PROXIMITY: + case QT602240_PROCI_GRIPFACE: + case QT602240_PROCG_NOISE: + case QT602240_PROCI_ONETOUCH: + case QT602240_PROCI_TWOTOUCH: + case QT602240_SPT_GPIOPWM: + case QT602240_SPT_SELFTEST: + case QT602240_SPT_CTECONFIG: + return true; + default: + return false; + } +} + +static void qt602240_dump_message(struct device *dev, + struct qt602240_message *message) +{ + dev_dbg(dev, "reportid:\t0x%x\n", message->reportid); + dev_dbg(dev, "message1:\t0x%x\n", message->message[0]); + dev_dbg(dev, "message2:\t0x%x\n", message->message[1]); + dev_dbg(dev, "message3:\t0x%x\n", message->message[2]); + dev_dbg(dev, "message4:\t0x%x\n", message->message[3]); + dev_dbg(dev, "message5:\t0x%x\n", message->message[4]); + dev_dbg(dev, "message6:\t0x%x\n", message->message[5]); + dev_dbg(dev, "message7:\t0x%x\n", message->message[6]); + dev_dbg(dev, "checksum:\t0x%x\n", message->checksum); +} + +static int 
qt602240_check_bootloader(struct i2c_client *client, + unsigned int state) +{ + u8 val; + +recheck: + if (i2c_master_recv(client, &val, 1) != 1) { + dev_err(&client->dev, "%s: i2c recv failed\n", __func__); + return -EIO; + } + + switch (state) { + case QT602240_WAITING_BOOTLOAD_CMD: + case QT602240_WAITING_FRAME_DATA: + val &= ~QT602240_BOOT_STATUS_MASK; + break; + case QT602240_FRAME_CRC_PASS: + if (val == QT602240_FRAME_CRC_CHECK) + goto recheck; + break; + default: + return -EINVAL; + } + + if (val != state) { + dev_err(&client->dev, "Unvalid bootloader mode state\n"); + return -EINVAL; + } + + return 0; +} + +static int qt602240_unlock_bootloader(struct i2c_client *client) +{ + u8 buf[2]; + + buf[0] = QT602240_UNLOCK_CMD_LSB; + buf[1] = QT602240_UNLOCK_CMD_MSB; + + if (i2c_master_send(client, buf, 2) != 2) { + dev_err(&client->dev, "%s: i2c send failed\n", __func__); + return -EIO; + } + + return 0; +} + +static int qt602240_fw_write(struct i2c_client *client, + const u8 *data, unsigned int frame_size) +{ + if (i2c_master_send(client, data, frame_size) != frame_size) { + dev_err(&client->dev, "%s: i2c send failed\n", __func__); + return -EIO; + } + + return 0; +} + +static int __qt602240_read_reg(struct i2c_client *client, + u16 reg, u16 len, void *val) +{ + struct i2c_msg xfer[2]; + u8 buf[2]; + + buf[0] = reg & 0xff; + buf[1] = (reg >> 8) & 0xff; + + /* Write register */ + xfer[0].addr = client->addr; + xfer[0].flags = 0; + xfer[0].len = 2; + xfer[0].buf = buf; + + /* Read data */ + xfer[1].addr = client->addr; + xfer[1].flags = I2C_M_RD; + xfer[1].len = len; + xfer[1].buf = val; + + if (i2c_transfer(client->adapter, xfer, 2) != 2) { + dev_err(&client->dev, "%s: i2c transfer failed\n", __func__); + return -EIO; + } + + return 0; +} + +static int qt602240_read_reg(struct i2c_client *client, u16 reg, u8 *val) +{ + return __qt602240_read_reg(client, reg, 1, val); +} + +static int qt602240_write_reg(struct i2c_client *client, u16 reg, u8 val) +{ + u8 buf[3]; + + buf[0] = reg & 0xff; + buf[1] = (reg >> 8) & 0xff; + buf[2] = val; + + if (i2c_master_send(client, buf, 3) != 3) { + dev_err(&client->dev, "%s: i2c send failed\n", __func__); + return -EIO; + } + + return 0; +} + +static int qt602240_read_object_table(struct i2c_client *client, + u16 reg, u8 *object_buf) +{ + return __qt602240_read_reg(client, reg, QT602240_OBJECT_SIZE, + object_buf); +} + +static struct qt602240_object * +qt602240_get_object(struct qt602240_data *data, u8 type) +{ + struct qt602240_object *object; + int i; + + for (i = 0; i < data->info.object_num; i++) { + object = data->object_table + i; + if (object->type == type) + return object; + } + + dev_err(&data->client->dev, "Invalid object type\n"); + return NULL; +} + +static int qt602240_read_message(struct qt602240_data *data, + struct qt602240_message *message) +{ + struct qt602240_object *object; + u16 reg; + + object = qt602240_get_object(data, QT602240_GEN_MESSAGE); + if (!object) + return -EINVAL; + + reg = object->start_address; + return __qt602240_read_reg(data->client, reg, + sizeof(struct qt602240_message), message); +} + +static int qt602240_read_object(struct qt602240_data *data, + u8 type, u8 offset, u8 *val) +{ + struct qt602240_object *object; + u16 reg; + + object = qt602240_get_object(data, type); + if (!object) + return -EINVAL; + + reg = object->start_address; + return __qt602240_read_reg(data->client, reg + offset, 1, val); +} + +static int qt602240_write_object(struct qt602240_data *data, + u8 type, u8 offset, u8 val) +{ + struct 
qt602240_object *object; + u16 reg; + + object = qt602240_get_object(data, type); + if (!object) + return -EINVAL; + + reg = object->start_address; + return qt602240_write_reg(data->client, reg + offset, val); +} + +static void qt602240_input_report(struct qt602240_data *data, int single_id) +{ + struct qt602240_finger *finger = data->finger; + struct input_dev *input_dev = data->input_dev; + int status = finger[single_id].status; + int finger_num = 0; + int id; + + for (id = 0; id < QT602240_MAX_FINGER; id++) { + if (!finger[id].status) + continue; + + input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, + finger[id].status != QT602240_RELEASE ? + finger[id].area : 0); + input_report_abs(input_dev, ABS_MT_POSITION_X, + finger[id].x); + input_report_abs(input_dev, ABS_MT_POSITION_Y, + finger[id].y); + input_mt_sync(input_dev); + + if (finger[id].status == QT602240_RELEASE) + finger[id].status = 0; + else + finger_num++; + } + + input_report_key(input_dev, BTN_TOUCH, finger_num > 0); + + if (status != QT602240_RELEASE) { + input_report_abs(input_dev, ABS_X, finger[single_id].x); + input_report_abs(input_dev, ABS_Y, finger[single_id].y); + } + + input_sync(input_dev); +} + +static void qt602240_input_touchevent(struct qt602240_data *data, + struct qt602240_message *message, int id) +{ + struct qt602240_finger *finger = data->finger; + struct device *dev = &data->client->dev; + u8 status = message->message[0]; + int x; + int y; + int area; + + /* Check the touch is present on the screen */ + if (!(status & QT602240_DETECT)) { + if (status & QT602240_RELEASE) { + dev_dbg(dev, "[%d] released\n", id); + + finger[id].status = QT602240_RELEASE; + qt602240_input_report(data, id); + } + return; + } + + /* Check only AMP detection */ + if (!(status & (QT602240_PRESS | QT602240_MOVE))) + return; + + x = (message->message[1] << 2) | ((message->message[3] & ~0x3f) >> 6); + y = (message->message[2] << 2) | ((message->message[3] & ~0xf3) >> 2); + area = message->message[4]; + + dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, + status & QT602240_MOVE ? "moved" : "pressed", + x, y, area); + + finger[id].status = status & QT602240_MOVE ? 
+ QT602240_MOVE : QT602240_PRESS; + finger[id].x = x; + finger[id].y = y; + finger[id].area = area; + + qt602240_input_report(data, id); +} + +static irqreturn_t qt602240_interrupt(int irq, void *dev_id) +{ + struct qt602240_data *data = dev_id; + struct qt602240_message message; + struct qt602240_object *object; + struct device *dev = &data->client->dev; + int id; + u8 reportid; + u8 max_reportid; + u8 min_reportid; + + do { + if (qt602240_read_message(data, &message)) { + dev_err(dev, "Failed to read message\n"); + goto end; + } + + reportid = message.reportid; + + /* whether reportid is thing of QT602240_TOUCH_MULTI */ + object = qt602240_get_object(data, QT602240_TOUCH_MULTI); + if (!object) + goto end; + + max_reportid = object->max_reportid; + min_reportid = max_reportid - object->num_report_ids + 1; + id = reportid - min_reportid; + + if (reportid >= min_reportid && reportid <= max_reportid) + qt602240_input_touchevent(data, &message, id); + else + qt602240_dump_message(dev, &message); + } while (reportid != 0xff); + +end: + return IRQ_HANDLED; +} + +static int qt602240_check_reg_init(struct qt602240_data *data) +{ + struct qt602240_object *object; + struct device *dev = &data->client->dev; + int index = 0; + int i, j; + u8 version = data->info.version; + u8 *init_vals; + + switch (version) { + case QT602240_VER_20: + init_vals = (u8 *)init_vals_ver_20; + break; + case QT602240_VER_21: + init_vals = (u8 *)init_vals_ver_21; + break; + case QT602240_VER_22: + init_vals = (u8 *)init_vals_ver_22; + break; + default: + dev_err(dev, "Firmware version %d doesn't support\n", version); + return -EINVAL; + } + + for (i = 0; i < data->info.object_num; i++) { + object = data->object_table + i; + + if (!qt602240_object_writable(object->type)) + continue; + + for (j = 0; j < object->size + 1; j++) + qt602240_write_object(data, object->type, j, + init_vals[index + j]); + + index += object->size + 1; + } + + return 0; +} + +static int qt602240_check_matrix_size(struct qt602240_data *data) +{ + const struct qt602240_platform_data *pdata = data->pdata; + struct device *dev = &data->client->dev; + int mode = -1; + int error; + u8 val; + + dev_dbg(dev, "Number of X lines: %d\n", pdata->x_line); + dev_dbg(dev, "Number of Y lines: %d\n", pdata->y_line); + + switch (pdata->x_line) { + case 0 ... 
15: + if (pdata->y_line <= 14) + mode = 0; + break; + case 16: + if (pdata->y_line <= 12) + mode = 1; + if (pdata->y_line == 13 || pdata->y_line == 14) + mode = 0; + break; + case 17: + if (pdata->y_line <= 11) + mode = 2; + if (pdata->y_line == 12 || pdata->y_line == 13) + mode = 1; + break; + case 18: + if (pdata->y_line <= 10) + mode = 3; + if (pdata->y_line == 11 || pdata->y_line == 12) + mode = 2; + break; + case 19: + if (pdata->y_line <= 9) + mode = 4; + if (pdata->y_line == 10 || pdata->y_line == 11) + mode = 3; + break; + case 20: + mode = 4; + } + + if (mode < 0) { + dev_err(dev, "Invalid X/Y lines\n"); + return -EINVAL; + } + + error = qt602240_read_object(data, QT602240_SPT_CTECONFIG, + QT602240_CTE_MODE, &val); + if (error) + return error; + + if (mode == val) + return 0; + + /* Change the CTE configuration */ + qt602240_write_object(data, QT602240_SPT_CTECONFIG, + QT602240_CTE_CTRL, 1); + qt602240_write_object(data, QT602240_SPT_CTECONFIG, + QT602240_CTE_MODE, mode); + qt602240_write_object(data, QT602240_SPT_CTECONFIG, + QT602240_CTE_CTRL, 0); + + return 0; +} + +static int qt602240_make_highchg(struct qt602240_data *data) +{ + struct device *dev = &data->client->dev; + int count = 10; + int error; + u8 val; + + /* Read dummy message to make high CHG pin */ + do { + error = qt602240_read_object(data, QT602240_GEN_MESSAGE, 0, &val); + if (error) + return error; + } while ((val != 0xff) && --count); + + if (!count) { + dev_err(dev, "CHG pin isn't cleared\n"); + return -EBUSY; + } + + return 0; +} + +static void qt602240_handle_pdata(struct qt602240_data *data) +{ + const struct qt602240_platform_data *pdata = data->pdata; + u8 voltage; + + /* Set touchscreen lines */ + qt602240_write_object(data, QT602240_TOUCH_MULTI, QT602240_TOUCH_XSIZE, + pdata->x_line); + qt602240_write_object(data, QT602240_TOUCH_MULTI, QT602240_TOUCH_YSIZE, + pdata->y_line); + + /* Set touchscreen orient */ + qt602240_write_object(data, QT602240_TOUCH_MULTI, QT602240_TOUCH_ORIENT, + pdata->orient); + + /* Set touchscreen burst length */ + qt602240_write_object(data, QT602240_TOUCH_MULTI, + QT602240_TOUCH_BLEN, pdata->blen); + + /* Set touchscreen threshold */ + qt602240_write_object(data, QT602240_TOUCH_MULTI, + QT602240_TOUCH_TCHTHR, pdata->threshold); + + /* Set touchscreen resolution */ + qt602240_write_object(data, QT602240_TOUCH_MULTI, + QT602240_TOUCH_XRANGE_LSB, (pdata->x_size - 1) & 0xff); + qt602240_write_object(data, QT602240_TOUCH_MULTI, + QT602240_TOUCH_XRANGE_MSB, (pdata->x_size - 1) >> 8); + qt602240_write_object(data, QT602240_TOUCH_MULTI, + QT602240_TOUCH_YRANGE_LSB, (pdata->y_size - 1) & 0xff); + qt602240_write_object(data, QT602240_TOUCH_MULTI, + QT602240_TOUCH_YRANGE_MSB, (pdata->y_size - 1) >> 8); + + /* Set touchscreen voltage */ + if (data->info.version >= QT602240_VER_21 && pdata->voltage) { + if (pdata->voltage < QT602240_VOLTAGE_DEFAULT) { + voltage = (QT602240_VOLTAGE_DEFAULT - pdata->voltage) / + QT602240_VOLTAGE_STEP; + voltage = 0xff - voltage + 1; + } else + voltage = (pdata->voltage - QT602240_VOLTAGE_DEFAULT) / + QT602240_VOLTAGE_STEP; + + qt602240_write_object(data, QT602240_SPT_CTECONFIG, + QT602240_CTE_VOLTAGE, voltage); + } +} + +static int qt602240_get_info(struct qt602240_data *data) +{ + struct i2c_client *client = data->client; + struct qt602240_info *info = &data->info; + int error; + u8 val; + + error = qt602240_read_reg(client, QT602240_FAMILY_ID, &val); + if (error) + return error; + info->family_id = val; + + error = qt602240_read_reg(client, 
QT602240_VARIANT_ID, &val); + if (error) + return error; + info->variant_id = val; + + error = qt602240_read_reg(client, QT602240_VERSION, &val); + if (error) + return error; + info->version = val; + + error = qt602240_read_reg(client, QT602240_BUILD, &val); + if (error) + return error; + info->build = val; + + error = qt602240_read_reg(client, QT602240_OBJECT_NUM, &val); + if (error) + return error; + info->object_num = val; + + return 0; +} + +static int qt602240_get_object_table(struct qt602240_data *data) +{ + int error; + int i; + u16 reg; + u8 reportid = 0; + u8 buf[QT602240_OBJECT_SIZE]; + + for (i = 0; i < data->info.object_num; i++) { + struct qt602240_object *object = data->object_table + i; + + reg = QT602240_OBJECT_START + QT602240_OBJECT_SIZE * i; + error = qt602240_read_object_table(data->client, reg, buf); + if (error) + return error; + + object->type = buf[0]; + object->start_address = (buf[2] << 8) | buf[1]; + object->size = buf[3]; + object->instances = buf[4]; + object->num_report_ids = buf[5]; + + if (object->num_report_ids) { + reportid += object->num_report_ids * + (object->instances + 1); + object->max_reportid = reportid; + } + } + + return 0; +} + +static int qt602240_initialize(struct qt602240_data *data) +{ + struct i2c_client *client = data->client; + struct qt602240_info *info = &data->info; + int error; + u8 val; + + error = qt602240_get_info(data); + if (error) + return error; + + data->object_table = kcalloc(info->object_num, + sizeof(struct qt602240_data), + GFP_KERNEL); + if (!data->object_table) { + dev_err(&client->dev, "Failed to allocate memory\n"); + return -ENOMEM; + } + + /* Get object table information */ + error = qt602240_get_object_table(data); + if (error) + return error; + + /* Check register init values */ + error = qt602240_check_reg_init(data); + if (error) + return error; + + /* Check X/Y matrix size */ + error = qt602240_check_matrix_size(data); + if (error) + return error; + + error = qt602240_make_highchg(data); + if (error) + return error; + + qt602240_handle_pdata(data); + + /* Backup to memory */ + qt602240_write_object(data, QT602240_GEN_COMMAND, + QT602240_COMMAND_BACKUPNV, + QT602240_BACKUP_VALUE); + msleep(QT602240_BACKUP_TIME); + + /* Soft reset */ + qt602240_write_object(data, QT602240_GEN_COMMAND, + QT602240_COMMAND_RESET, 1); + msleep(QT602240_RESET_TIME); + + /* Update matrix size at info struct */ + error = qt602240_read_reg(client, QT602240_MATRIX_X_SIZE, &val); + if (error) + return error; + info->matrix_xsize = val; + + error = qt602240_read_reg(client, QT602240_MATRIX_Y_SIZE, &val); + if (error) + return error; + info->matrix_ysize = val; + + dev_info(&client->dev, + "Family ID: %d Variant ID: %d Version: %d Build: %d\n", + info->family_id, info->variant_id, info->version, + info->build); + + dev_info(&client->dev, + "Matrix X Size: %d Matrix Y Size: %d Object Num: %d\n", + info->matrix_xsize, info->matrix_ysize, + info->object_num); + + return 0; +} + +static ssize_t qt602240_object_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qt602240_data *data = dev_get_drvdata(dev); + struct qt602240_object *object; + int count = 0; + int i, j; + int error; + u8 val; + + for (i = 0; i < data->info.object_num; i++) { + object = data->object_table + i; + + count += sprintf(buf + count, + "Object Table Element %d(Type %d)\n", + i + 1, object->type); + + if (!qt602240_object_readable(object->type)) { + count += sprintf(buf + count, "\n"); + continue; + } + + for (j = 0; j < object->size + 1; j++) { + 
error = qt602240_read_object(data, + object->type, j, &val); + if (error) + return error; + + count += sprintf(buf + count, + " Byte %d: 0x%x (%d)\n", j, val, val); + } + + count += sprintf(buf + count, "\n"); + } + + return count; +} + +static int qt602240_load_fw(struct device *dev, const char *fn) +{ + struct qt602240_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; + const struct firmware *fw = NULL; + unsigned int frame_size; + unsigned int pos = 0; + int ret; + + ret = request_firmware(&fw, fn, dev); + if (ret) { + dev_err(dev, "Unable to open firmware %s\n", fn); + return ret; + } + + /* Change to the bootloader mode */ + qt602240_write_object(data, QT602240_GEN_COMMAND, + QT602240_COMMAND_RESET, QT602240_BOOT_VALUE); + msleep(QT602240_RESET_TIME); + + /* Change to slave address of bootloader */ + if (client->addr == QT602240_APP_LOW) + client->addr = QT602240_BOOT_LOW; + else + client->addr = QT602240_BOOT_HIGH; + + ret = qt602240_check_bootloader(client, QT602240_WAITING_BOOTLOAD_CMD); + if (ret) + goto out; + + /* Unlock bootloader */ + qt602240_unlock_bootloader(client); + + while (pos < fw->size) { + ret = qt602240_check_bootloader(client, + QT602240_WAITING_FRAME_DATA); + if (ret) + goto out; + + frame_size = ((*(fw->data + pos) << 8) | *(fw->data + pos + 1)); + + /* We should add 2 at frame size as the the firmware data is not + * included the CRC bytes. + */ + frame_size += 2; + + /* Write one frame to device */ + qt602240_fw_write(client, fw->data + pos, frame_size); + + ret = qt602240_check_bootloader(client, + QT602240_FRAME_CRC_PASS); + if (ret) + goto out; + + pos += frame_size; + + dev_dbg(dev, "Updated %d bytes / %zd bytes\n", pos, fw->size); + } + +out: + release_firmware(fw); + + /* Change to slave address of application */ + if (client->addr == QT602240_BOOT_LOW) + client->addr = QT602240_APP_LOW; + else + client->addr = QT602240_APP_HIGH; + + return ret; +} + +static ssize_t qt602240_update_fw_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct qt602240_data *data = dev_get_drvdata(dev); + unsigned int version; + int error; + + if (sscanf(buf, "%u", &version) != 1) { + dev_err(dev, "Invalid values\n"); + return -EINVAL; + } + + if (data->info.version < QT602240_VER_21 || version < QT602240_VER_21) { + dev_err(dev, "FW update supported starting with version 21\n"); + return -EINVAL; + } + + disable_irq(data->irq); + + error = qt602240_load_fw(dev, QT602240_FW_NAME); + if (error) { + dev_err(dev, "The firmware update failed(%d)\n", error); + count = error; + } else { + dev_dbg(dev, "The firmware update succeeded\n"); + + /* Wait for reset */ + msleep(QT602240_FWRESET_TIME); + + kfree(data->object_table); + data->object_table = NULL; + + qt602240_initialize(data); + } + + enable_irq(data->irq); + + return count; +} + +static DEVICE_ATTR(object, 0444, qt602240_object_show, NULL); +static DEVICE_ATTR(update_fw, 0664, NULL, qt602240_update_fw_store); + +static struct attribute *qt602240_attrs[] = { + &dev_attr_object.attr, + &dev_attr_update_fw.attr, + NULL +}; + +static const struct attribute_group qt602240_attr_group = { + .attrs = qt602240_attrs, +}; + +static void qt602240_start(struct qt602240_data *data) +{ + /* Touch enable */ + qt602240_write_object(data, + QT602240_TOUCH_MULTI, QT602240_TOUCH_CTRL, 0x83); +} + +static void qt602240_stop(struct qt602240_data *data) +{ + /* Touch disable */ + qt602240_write_object(data, + QT602240_TOUCH_MULTI, QT602240_TOUCH_CTRL, 0); +} + +static 
int qt602240_input_open(struct input_dev *dev) +{ + struct qt602240_data *data = input_get_drvdata(dev); + + qt602240_start(data); + + return 0; +} + +static void qt602240_input_close(struct input_dev *dev) +{ + struct qt602240_data *data = input_get_drvdata(dev); + + qt602240_stop(data); +} + +static int __devinit qt602240_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct qt602240_data *data; + struct input_dev *input_dev; + int error; + + if (!client->dev.platform_data) + return -EINVAL; + + data = kzalloc(sizeof(struct qt602240_data), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!data || !input_dev) { + dev_err(&client->dev, "Failed to allocate memory\n"); + error = -ENOMEM; + goto err_free_mem; + } + + input_dev->name = "AT42QT602240/ATMXT224 Touchscreen"; + input_dev->id.bustype = BUS_I2C; + input_dev->dev.parent = &client->dev; + input_dev->open = qt602240_input_open; + input_dev->close = qt602240_input_close; + + __set_bit(EV_ABS, input_dev->evbit); + __set_bit(EV_KEY, input_dev->evbit); + __set_bit(BTN_TOUCH, input_dev->keybit); + + /* For single touch */ + input_set_abs_params(input_dev, ABS_X, + 0, QT602240_MAX_XC, 0, 0); + input_set_abs_params(input_dev, ABS_Y, + 0, QT602240_MAX_YC, 0, 0); + + /* For multi touch */ + input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, + 0, QT602240_MAX_AREA, 0, 0); + input_set_abs_params(input_dev, ABS_MT_POSITION_X, + 0, QT602240_MAX_XC, 0, 0); + input_set_abs_params(input_dev, ABS_MT_POSITION_Y, + 0, QT602240_MAX_YC, 0, 0); + + input_set_drvdata(input_dev, data); + + data->client = client; + data->input_dev = input_dev; + data->pdata = client->dev.platform_data; + data->irq = client->irq; + + i2c_set_clientdata(client, data); + + error = qt602240_initialize(data); + if (error) + goto err_free_object; + + error = request_threaded_irq(client->irq, NULL, qt602240_interrupt, + IRQF_TRIGGER_FALLING, client->dev.driver->name, data); + if (error) { + dev_err(&client->dev, "Failed to register interrupt\n"); + goto err_free_object; + } + + error = input_register_device(input_dev); + if (error) + goto err_free_irq; + + error = sysfs_create_group(&client->dev.kobj, &qt602240_attr_group); + if (error) + goto err_unregister_device; + + return 0; + +err_unregister_device: + input_unregister_device(input_dev); + input_dev = NULL; +err_free_irq: + free_irq(client->irq, data); +err_free_object: + kfree(data->object_table); +err_free_mem: + input_free_device(input_dev); + kfree(data); + return error; +} + +static int __devexit qt602240_remove(struct i2c_client *client) +{ + struct qt602240_data *data = i2c_get_clientdata(client); + + sysfs_remove_group(&client->dev.kobj, &qt602240_attr_group); + free_irq(data->irq, data); + input_unregister_device(data->input_dev); + kfree(data->object_table); + kfree(data); + + return 0; +} + +#ifdef CONFIG_PM +static int qt602240_suspend(struct i2c_client *client, pm_message_t mesg) +{ + struct qt602240_data *data = i2c_get_clientdata(client); + struct input_dev *input_dev = data->input_dev; + + mutex_lock(&input_dev->mutex); + + if (input_dev->users) + qt602240_stop(data); + + mutex_unlock(&input_dev->mutex); + + return 0; +} + +static int qt602240_resume(struct i2c_client *client) +{ + struct qt602240_data *data = i2c_get_clientdata(client); + struct input_dev *input_dev = data->input_dev; + + /* Soft reset */ + qt602240_write_object(data, QT602240_GEN_COMMAND, + QT602240_COMMAND_RESET, 1); + + msleep(QT602240_RESET_TIME); + + mutex_lock(&input_dev->mutex); + + if (input_dev->users) 
+ qt602240_start(data); + + mutex_unlock(&input_dev->mutex); + + return 0; +} +#else +#define qt602240_suspend NULL +#define qt602240_resume NULL +#endif + +static const struct i2c_device_id qt602240_id[] = { + { "qt602240_ts", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, qt602240_id); + +static struct i2c_driver qt602240_driver = { + .driver = { + .name = "qt602240_ts", + .owner = THIS_MODULE, + }, + .probe = qt602240_probe, + .remove = __devexit_p(qt602240_remove), + .suspend = qt602240_suspend, + .resume = qt602240_resume, + .id_table = qt602240_id, +}; + +static int __init qt602240_init(void) +{ + return i2c_add_driver(&qt602240_driver); +} + +static void __exit qt602240_exit(void) +{ + i2c_del_driver(&qt602240_driver); +} + +module_init(qt602240_init); +module_exit(qt602240_exit); + +/* Module information */ +MODULE_AUTHOR("Joonyoung Shim "); +MODULE_DESCRIPTION("AT42QT602240/ATMXT224 Touchscreen driver"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/i2c/qt602240_ts.h b/include/linux/i2c/qt602240_ts.h new file mode 100644 index 000000000000..c5033e101094 --- /dev/null +++ b/include/linux/i2c/qt602240_ts.h @@ -0,0 +1,38 @@ +/* + * AT42QT602240/ATMXT224 Touchscreen driver + * + * Copyright (C) 2010 Samsung Electronics Co.Ltd + * Author: Joonyoung Shim + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __LINUX_QT602240_TS_H +#define __LINUX_QT602240_TS_H + +/* Orient */ +#define QT602240_NORMAL 0x0 +#define QT602240_DIAGONAL 0x1 +#define QT602240_HORIZONTAL_FLIP 0x2 +#define QT602240_ROTATED_90_COUNTER 0x3 +#define QT602240_VERTICAL_FLIP 0x4 +#define QT602240_ROTATED_90 0x5 +#define QT602240_ROTATED_180 0x6 +#define QT602240_DIAGONAL_COUNTER 0x7 + +/* The platform data for the AT42QT602240/ATMXT224 touchscreen driver */ +struct qt602240_platform_data { + unsigned int x_line; + unsigned int y_line; + unsigned int x_size; + unsigned int y_size; + unsigned int blen; + unsigned int threshold; + unsigned int voltage; + unsigned char orient; +}; + +#endif /* __LINUX_QT602240_TS_H */ -- cgit v1.2.3 From 13ceef099edd2b70c5a6f3a9ef5d6d97cda2e096 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 14 Jul 2010 07:56:33 +0200 Subject: jbd2/ocfs2: Fix block checksumming when a buffer is used in several transactions OCFS2 uses t_commit trigger to compute and store checksum of the just committed blocks. When a buffer has b_frozen_data, checksum is computed for it instead of b_data but this can result in an old checksum being written to the filesystem in the following scenario: 1) transaction1 is opened 2) handle1 is opened 3) journal_access(handle1, bh) - This sets jh->b_transaction to transaction1 4) modify(bh) 5) journal_dirty(handle1, bh) 6) handle1 is closed 7) start committing transaction1, opening transaction2 8) handle2 is opened 9) journal_access(handle2, bh) - This copies off b_frozen_data to make it safe for transaction1 to commit. jh->b_next_transaction is set to transaction2. 10) jbd2_journal_write_metadata() checksums b_frozen_data 11) the journal correctly writes b_frozen_data to the disk journal 12) handle2 is closed - There was no dirty call for the bh on handle2, so it is never queued for any more journal operation 13) Checkpointing finally happens, and it just spools the bh via normal buffer writeback. 
This will write b_data, which was never triggered on and thus contains a wrong (old) checksum. This patch fixes the problem by calling the trigger at the moment data is frozen for journal commit - i.e., either when b_frozen_data is created by do_get_write_access or just before we write a buffer to the log if b_frozen_data does not exist. We also rename the trigger to t_frozen as that better describes when it is called. Signed-off-by: Jan Kara Signed-off-by: Mark Fasheh Signed-off-by: Joel Becker --- fs/jbd2/journal.c | 15 +++++++-------- fs/jbd2/transaction.c | 9 ++++++--- fs/ocfs2/journal.c | 24 ++++++++++++------------ include/linux/jbd2.h | 11 ++++++----- 4 files changed, 31 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index bc2ff5932769..036880895bfc 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -297,7 +297,6 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, struct page *new_page; unsigned int new_offset; struct buffer_head *bh_in = jh2bh(jh_in); - struct jbd2_buffer_trigger_type *triggers; journal_t *journal = transaction->t_journal; /* @@ -328,21 +327,21 @@ repeat: done_copy_out = 1; new_page = virt_to_page(jh_in->b_frozen_data); new_offset = offset_in_page(jh_in->b_frozen_data); - triggers = jh_in->b_frozen_triggers; } else { new_page = jh2bh(jh_in)->b_page; new_offset = offset_in_page(jh2bh(jh_in)->b_data); - triggers = jh_in->b_triggers; } mapped_data = kmap_atomic(new_page, KM_USER0); /* - * Fire any commit trigger. Do this before checking for escaping, - * as the trigger may modify the magic offset. If a copy-out - * happens afterwards, it will have the correct data in the buffer. + * Fire data frozen trigger if data already wasn't frozen. Do this + * before checking for escaping, as the trigger may modify the magic + * offset. If a copy-out happens afterwards, it will have the correct + * data in the buffer. 
*/ - jbd2_buffer_commit_trigger(jh_in, mapped_data + new_offset, - triggers); + if (!done_copy_out) + jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset, + jh_in->b_triggers); /* * Check for escaping diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index e214d68620ac..b8e0806681bb 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -725,6 +725,9 @@ done: page = jh2bh(jh)->b_page; offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK; source = kmap_atomic(page, KM_USER0); + /* Fire data frozen trigger just before we copy the data */ + jbd2_buffer_frozen_trigger(jh, source + offset, + jh->b_triggers); memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); kunmap_atomic(source, KM_USER0); @@ -963,15 +966,15 @@ void jbd2_journal_set_triggers(struct buffer_head *bh, jh->b_triggers = type; } -void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data, +void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, struct jbd2_buffer_trigger_type *triggers) { struct buffer_head *bh = jh2bh(jh); - if (!triggers || !triggers->t_commit) + if (!triggers || !triggers->t_frozen) return; - triggers->t_commit(triggers, bh, mapped_data, bh->b_size); + triggers->t_frozen(triggers, bh, mapped_data, bh->b_size); } void jbd2_buffer_abort_trigger(struct journal_head *jh, diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 39113b5e79e7..625de9d7088c 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -472,7 +472,7 @@ static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger return container_of(triggers, struct ocfs2_triggers, ot_triggers); } -static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers, +static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers, struct buffer_head *bh, void *data, size_t size) { @@ -491,7 +491,7 @@ static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers, * Quota blocks have their own trigger because the struct ocfs2_block_check * offset depends on the blocksize. */ -static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers, +static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers, struct buffer_head *bh, void *data, size_t size) { @@ -511,7 +511,7 @@ static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers, * Directory blocks also have their own trigger because the * struct ocfs2_block_check offset depends on the blocksize. 
*/ -static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers, +static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers, struct buffer_head *bh, void *data, size_t size) { @@ -544,7 +544,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers, static struct ocfs2_triggers di_triggers = { .ot_triggers = { - .t_commit = ocfs2_commit_trigger, + .t_frozen = ocfs2_frozen_trigger, .t_abort = ocfs2_abort_trigger, }, .ot_offset = offsetof(struct ocfs2_dinode, i_check), @@ -552,7 +552,7 @@ static struct ocfs2_triggers di_triggers = { static struct ocfs2_triggers eb_triggers = { .ot_triggers = { - .t_commit = ocfs2_commit_trigger, + .t_frozen = ocfs2_frozen_trigger, .t_abort = ocfs2_abort_trigger, }, .ot_offset = offsetof(struct ocfs2_extent_block, h_check), @@ -560,7 +560,7 @@ static struct ocfs2_triggers eb_triggers = { static struct ocfs2_triggers rb_triggers = { .ot_triggers = { - .t_commit = ocfs2_commit_trigger, + .t_frozen = ocfs2_frozen_trigger, .t_abort = ocfs2_abort_trigger, }, .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check), @@ -568,7 +568,7 @@ static struct ocfs2_triggers rb_triggers = { static struct ocfs2_triggers gd_triggers = { .ot_triggers = { - .t_commit = ocfs2_commit_trigger, + .t_frozen = ocfs2_frozen_trigger, .t_abort = ocfs2_abort_trigger, }, .ot_offset = offsetof(struct ocfs2_group_desc, bg_check), @@ -576,14 +576,14 @@ static struct ocfs2_triggers gd_triggers = { static struct ocfs2_triggers db_triggers = { .ot_triggers = { - .t_commit = ocfs2_db_commit_trigger, + .t_frozen = ocfs2_db_frozen_trigger, .t_abort = ocfs2_abort_trigger, }, }; static struct ocfs2_triggers xb_triggers = { .ot_triggers = { - .t_commit = ocfs2_commit_trigger, + .t_frozen = ocfs2_frozen_trigger, .t_abort = ocfs2_abort_trigger, }, .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check), @@ -591,14 +591,14 @@ static struct ocfs2_triggers xb_triggers = { static struct ocfs2_triggers dq_triggers = { .ot_triggers = { - .t_commit = ocfs2_dq_commit_trigger, + .t_frozen = ocfs2_dq_frozen_trigger, .t_abort = ocfs2_abort_trigger, }, }; static struct ocfs2_triggers dr_triggers = { .ot_triggers = { - .t_commit = ocfs2_commit_trigger, + .t_frozen = ocfs2_frozen_trigger, .t_abort = ocfs2_abort_trigger, }, .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check), @@ -606,7 +606,7 @@ static struct ocfs2_triggers dr_triggers = { static struct ocfs2_triggers dl_triggers = { .ot_triggers = { - .t_commit = ocfs2_commit_trigger, + .t_frozen = ocfs2_frozen_trigger, .t_abort = ocfs2_abort_trigger, }, .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check), diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index a4d2e9f7088a..adf832dec3f3 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1026,11 +1026,12 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); struct jbd2_buffer_trigger_type { /* - * Fired just before a buffer is written to the journal. - * mapped_data is a mapped buffer that is the frozen data for - * commit. + * Fired a the moment data to write to the journal are known to be + * stable - so either at the moment b_frozen_data is created or just + * before a buffer is written to the journal. mapped_data is a mapped + * buffer that is the frozen data for commit. 
*/ - void (*t_commit)(struct jbd2_buffer_trigger_type *type, + void (*t_frozen)(struct jbd2_buffer_trigger_type *type, struct buffer_head *bh, void *mapped_data, size_t size); @@ -1042,7 +1043,7 @@ struct jbd2_buffer_trigger_type { struct buffer_head *bh); }; -extern void jbd2_buffer_commit_trigger(struct journal_head *jh, +extern void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, struct jbd2_buffer_trigger_type *triggers); extern void jbd2_buffer_abort_trigger(struct journal_head *jh, -- cgit v1.2.3 From 40d007e7df1dab17bf1ecf91e718218354d963d7 Mon Sep 17 00:00:00 2001 From: Henrik Rydberg Date: Thu, 15 Jul 2010 23:10:10 -0700 Subject: Input: introduce MT event slots With the rapidly increasing number of intelligent multi-contact and multi-user devices, the need to send digested, filtered information from a set of different sources within the same device is imminent. This patch adds the concept of slots to the MT protocol. The slots enumerate a set of identified sources, such that all MT events can be passed independently and selectively per identified source. The protocol works like this: Instead of sending a SYN_MT_REPORT event immediately after the contact data, one sends an ABS_MT_SLOT event immediately before the contact data. The input core will only emit events for slots with modified MT events. It is assumed that the same slot is used for the duration of an initiated contact. Acked-by: Ping Cheng Acked-by: Chase Douglas Acked-by: Rafi Rubin Signed-off-by: Henrik Rydberg Signed-off-by: Dmitry Torokhov --- drivers/input/evdev.c | 4 ++ drivers/input/input.c | 135 ++++++++++++++++++++++++++++++++++---------------- include/linux/input.h | 33 ++++++++++++ 3 files changed, 129 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index cd323254ca6f..fc5afbd78625 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -686,6 +686,10 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, sizeof(struct input_absinfo)))) return -EFAULT; + /* We can't change number of reserved MT slots */ + if (t == ABS_MT_SLOT) + return -EINVAL; + /* * Take event lock to ensure that we are not * changing device parameters in the middle diff --git a/drivers/input/input.c b/drivers/input/input.c index a3d5485154e7..54109c33e36c 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -33,25 +33,6 @@ MODULE_LICENSE("GPL"); #define INPUT_DEVICES 256 -/* - * EV_ABS events which should not be cached are listed here. - */ -static unsigned int input_abs_bypass_init_data[] __initdata = { - ABS_MT_TOUCH_MAJOR, - ABS_MT_TOUCH_MINOR, - ABS_MT_WIDTH_MAJOR, - ABS_MT_WIDTH_MINOR, - ABS_MT_ORIENTATION, - ABS_MT_POSITION_X, - ABS_MT_POSITION_Y, - ABS_MT_TOOL_TYPE, - ABS_MT_BLOB_ID, - ABS_MT_TRACKING_ID, - ABS_MT_PRESSURE, - 0 -}; -static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)]; - static LIST_HEAD(input_dev_list); static LIST_HEAD(input_handler_list); @@ -181,6 +162,56 @@ static void input_stop_autorepeat(struct input_dev *dev) #define INPUT_PASS_TO_DEVICE 2 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) +static int input_handle_abs_event(struct input_dev *dev, + unsigned int code, int *pval) +{ + bool is_mt_event; + int *pold; + + if (code == ABS_MT_SLOT) { + /* + * "Stage" the event; we'll flush it later, when we + * get actiual touch data. 
+ */ + if (*pval >= 0 && *pval < dev->mtsize) + dev->slot = *pval; + + return INPUT_IGNORE_EVENT; + } + + is_mt_event = code >= ABS_MT_FIRST && code <= ABS_MT_LAST; + + if (!is_mt_event) { + pold = &dev->abs[code]; + } else if (dev->mt) { + struct input_mt_slot *mtslot = &dev->mt[dev->slot]; + pold = &mtslot->abs[code - ABS_MT_FIRST]; + } else { + /* + * Bypass filtering for multitouch events when + * not employing slots. + */ + pold = NULL; + } + + if (pold) { + *pval = input_defuzz_abs_event(*pval, *pold, + dev->absfuzz[code]); + if (*pold == *pval) + return INPUT_IGNORE_EVENT; + + *pold = *pval; + } + + /* Flush pending "slot" event */ + if (is_mt_event && dev->slot != dev->abs[ABS_MT_SLOT]) { + dev->abs[ABS_MT_SLOT] = dev->slot; + input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); + } + + return INPUT_PASS_TO_HANDLERS; +} + static void input_handle_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { @@ -233,21 +264,9 @@ static void input_handle_event(struct input_dev *dev, break; case EV_ABS: - if (is_event_supported(code, dev->absbit, ABS_MAX)) { - - if (test_bit(code, input_abs_bypass)) { - disposition = INPUT_PASS_TO_HANDLERS; - break; - } + if (is_event_supported(code, dev->absbit, ABS_MAX)) + disposition = input_handle_abs_event(dev, code, &value); - value = input_defuzz_abs_event(value, - dev->abs[code], dev->absfuzz[code]); - - if (dev->abs[code] != value) { - dev->abs[code] = value; - disposition = INPUT_PASS_TO_HANDLERS; - } - } break; case EV_REL: @@ -1288,6 +1307,7 @@ static void input_dev_release(struct device *device) struct input_dev *dev = to_input_dev(device); input_ff_destroy(dev); + input_mt_destroy_slots(dev); kfree(dev); module_put(THIS_MODULE); @@ -1536,6 +1556,45 @@ void input_free_device(struct input_dev *dev) } EXPORT_SYMBOL(input_free_device); +/** + * input_mt_create_slots() - create MT input slots + * @dev: input device supporting MT events and finger tracking + * @num_slots: number of slots used by the device + * + * This function allocates all necessary memory for MT slot handling + * in the input device, and adds ABS_MT_SLOT to the device capabilities. + */ +int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) +{ + if (!num_slots) + return 0; + + dev->mt = kcalloc(num_slots, sizeof(struct input_mt_slot), GFP_KERNEL); + if (!dev->mt) + return -ENOMEM; + + dev->mtsize = num_slots; + input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); + + return 0; +} +EXPORT_SYMBOL(input_mt_create_slots); + +/** + * input_mt_destroy_slots() - frees the MT slots of the input device + * @dev: input device with allocated MT slots + * + * This function is only needed in error path as the input core will + * automatically free the MT slots when the device is destroyed. 
+ */ +void input_mt_destroy_slots(struct input_dev *dev) +{ + kfree(dev->mt); + dev->mt = NULL; + dev->mtsize = 0; +} +EXPORT_SYMBOL(input_mt_destroy_slots); + /** * input_set_capability - mark device as capable of a certain event * @dev: device that is capable of emitting or accepting event @@ -1945,20 +2004,10 @@ static const struct file_operations input_fops = { .open = input_open_file, }; -static void __init input_init_abs_bypass(void) -{ - const unsigned int *p; - - for (p = input_abs_bypass_init_data; *p; p++) - input_abs_bypass[BIT_WORD(*p)] |= BIT_MASK(*p); -} - static int __init input_init(void) { int err; - input_init_abs_bypass(); - err = class_register(&input_class); if (err) { printk(KERN_ERR "input: unable to register input_dev class\n"); diff --git a/include/linux/input.h b/include/linux/input.h index cc524c8b6703..a14de64ed16a 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -691,9 +691,12 @@ struct input_absinfo { #define ABS_TILT_X 0x1a #define ABS_TILT_Y 0x1b #define ABS_TOOL_WIDTH 0x1c + #define ABS_VOLUME 0x20 + #define ABS_MISC 0x28 +#define ABS_MT_SLOT 0x2f /* MT slot being modified */ #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ #define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ #define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */ @@ -706,6 +709,12 @@ struct input_absinfo { #define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */ #define ABS_MT_PRESSURE 0x3a /* Pressure on contact area */ +#ifdef __KERNEL__ +/* Implementation details, userspace should not care about these */ +#define ABS_MT_FIRST ABS_MT_TOUCH_MAJOR +#define ABS_MT_LAST ABS_MT_PRESSURE +#endif + #define ABS_MAX 0x3f #define ABS_CNT (ABS_MAX+1) @@ -1047,6 +1056,14 @@ struct ff_effect { #include #include +/** + * struct input_mt_slot - represents the state of an input MT slot + * @abs: holds current values of ABS_MT axes for this slot + */ +struct input_mt_slot { + int abs[ABS_MT_LAST - ABS_MT_FIRST + 1]; +}; + /** * struct input_dev - represents an input device * @name: name of the device @@ -1085,6 +1102,10 @@ struct ff_effect { * @sync: set to 1 when there were no new events since last EV_SYNC * @abs: current values for reports from absolute axes * @rep: current values for autorepeat parameters (delay, rate) + * @mt: pointer to array of struct input_mt_slot holding current values + * of tracked contacts + * @mtsize: number of MT slots the device uses + * @slot: MT slot currently being transmitted * @key: reflects current state of device's keys/buttons * @led: reflects current state of device's LEDs * @snd: reflects current state of sound effects @@ -1164,6 +1185,10 @@ struct input_dev { int abs[ABS_CNT]; int rep[REP_MAX + 1]; + struct input_mt_slot *mt; + int mtsize; + int slot; + unsigned long key[BITS_TO_LONGS(KEY_CNT)]; unsigned long led[BITS_TO_LONGS(LED_CNT)]; unsigned long snd[BITS_TO_LONGS(SND_CNT)]; @@ -1412,6 +1437,11 @@ static inline void input_mt_sync(struct input_dev *dev) input_event(dev, EV_SYN, SYN_MT_REPORT, 0); } +static inline void input_mt_slot(struct input_dev *dev, int slot) +{ + input_event(dev, EV_ABS, ABS_MT_SLOT, slot); +} + void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); /** @@ -1506,5 +1536,8 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); int input_ff_create_memless(struct input_dev *dev, void *data, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); +int input_mt_create_slots(struct input_dev 
*dev, unsigned int num_slots); +void input_mt_destroy_slots(struct input_dev *dev); + #endif #endif -- cgit v1.2.3 From 20da92de8ec3c1d4ba7e5aca322d38b6ce634932 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Thu, 15 Jul 2010 23:27:36 -0700 Subject: Input: change input handlers to use bool when possible Signed-off-by: Dmitry Torokhov --- drivers/input/evdev.c | 6 +++--- drivers/input/input.c | 6 +++--- drivers/input/joydev.c | 7 +++---- drivers/input/mousedev.c | 6 +++--- include/linux/input.h | 6 +++--- 5 files changed, 15 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index fc5afbd78625..70c0eb52ca96 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -24,7 +24,6 @@ #include "input-compat.h" struct evdev { - int exist; int open; int minor; struct input_handle handle; @@ -34,6 +33,7 @@ struct evdev { spinlock_t client_lock; /* protects client_list */ struct mutex mutex; struct device dev; + bool exist; }; struct evdev_client { @@ -793,7 +793,7 @@ static void evdev_remove_chrdev(struct evdev *evdev) static void evdev_mark_dead(struct evdev *evdev) { mutex_lock(&evdev->mutex); - evdev->exist = 0; + evdev->exist = false; mutex_unlock(&evdev->mutex); } @@ -842,7 +842,7 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev, init_waitqueue_head(&evdev->wait); dev_set_name(&evdev->dev, "event%d", minor); - evdev->exist = 1; + evdev->exist = true; evdev->minor = minor; evdev->handle.dev = input_get_device(dev); diff --git a/drivers/input/input.c b/drivers/input/input.c index 54109c33e36c..e1243b4b32a5 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -227,12 +227,12 @@ static void input_handle_event(struct input_dev *dev, case SYN_REPORT: if (!dev->sync) { - dev->sync = 1; + dev->sync = true; disposition = INPUT_PASS_TO_HANDLERS; } break; case SYN_MT_REPORT: - dev->sync = 0; + dev->sync = false; disposition = INPUT_PASS_TO_HANDLERS; break; } @@ -317,7 +317,7 @@ static void input_handle_event(struct input_dev *dev, } if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN) - dev->sync = 0; + dev->sync = false; if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) dev->event(dev, type, code, value); diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 34157bb97ed6..63834585c283 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -37,7 +37,6 @@ MODULE_LICENSE("GPL"); #define JOYDEV_BUFFER_SIZE 64 struct joydev { - int exist; int open; int minor; struct input_handle handle; @@ -46,6 +45,7 @@ struct joydev { spinlock_t client_lock; /* protects client_list */ struct mutex mutex; struct device dev; + bool exist; struct js_corr corr[ABS_CNT]; struct JS_DATA_SAVE_TYPE glue; @@ -760,7 +760,7 @@ static void joydev_remove_chrdev(struct joydev *joydev) static void joydev_mark_dead(struct joydev *joydev) { mutex_lock(&joydev->mutex); - joydev->exist = 0; + joydev->exist = false; mutex_unlock(&joydev->mutex); } @@ -817,10 +817,9 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, init_waitqueue_head(&joydev->wait); dev_set_name(&joydev->dev, "js%d", minor); - joydev->exist = 1; + joydev->exist = true; joydev->minor = minor; - joydev->exist = 1; joydev->handle.dev = input_get_device(dev); joydev->handle.name = dev_name(&joydev->dev); joydev->handle.handler = handler; diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index f34b22bce4ff..d7a7a2fce745 100644 --- a/drivers/input/mousedev.c +++ 
b/drivers/input/mousedev.c @@ -57,7 +57,6 @@ struct mousedev_hw_data { }; struct mousedev { - int exist; int open; int minor; struct input_handle handle; @@ -66,6 +65,7 @@ struct mousedev { spinlock_t client_lock; /* protects client_list */ struct mutex mutex; struct device dev; + bool exist; struct list_head mixdev_node; int mixdev_open; @@ -802,7 +802,7 @@ static void mousedev_remove_chrdev(struct mousedev *mousedev) static void mousedev_mark_dead(struct mousedev *mousedev) { mutex_lock(&mousedev->mutex); - mousedev->exist = 0; + mousedev->exist = false; mutex_unlock(&mousedev->mutex); } @@ -862,7 +862,7 @@ static struct mousedev *mousedev_create(struct input_dev *dev, dev_set_name(&mousedev->dev, "mouse%d", minor); mousedev->minor = minor; - mousedev->exist = 1; + mousedev->exist = true; mousedev->handle.dev = input_get_device(dev); mousedev->handle.name = dev_name(&mousedev->dev); mousedev->handle.handler = handler; diff --git a/include/linux/input.h b/include/linux/input.h index a14de64ed16a..339d043ccb53 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1099,7 +1099,6 @@ struct input_mt_slot { * @repeat_key: stores key code of the last key pressed; used to implement * software autorepeat * @timer: timer for software autorepeat - * @sync: set to 1 when there were no new events since last EV_SYNC * @abs: current values for reports from absolute axes * @rep: current values for autorepeat parameters (delay, rate) * @mt: pointer to array of struct input_mt_slot holding current values @@ -1144,6 +1143,7 @@ struct input_mt_slot { * last user closes the device * @going_away: marks devices that are in a middle of unregistering and * causes input_open_device*() fail with -ENODEV. + * @sync: set to %true when there were no new events since last EV_SYN * @dev: driver model's view of this device * @h_list: list of input handles associated with the device. When * accessing the list dev->mutex must be held @@ -1180,8 +1180,6 @@ struct input_dev { unsigned int repeat_key; struct timer_list timer; - int sync; - int abs[ABS_CNT]; int rep[REP_MAX + 1]; @@ -1213,6 +1211,8 @@ struct input_dev { unsigned int users; bool going_away; + bool sync; + struct device dev; struct list_head h_list; -- cgit v1.2.3 From 58c84eda07560a6b75b03e8d3b26d6eddfc14011 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Thu, 15 Jul 2010 09:41:42 -0600 Subject: PCI: fall back to original BIOS BAR addresses If we fail to assign resources to a PCI BAR, this patch makes us try the original address from BIOS rather than leaving it disabled. Linux tries to make sure all PCI device BARs are inside the upstream PCI host bridge or P2P bridge apertures, reassigning BARs if necessary. Windows does similar reassignment. Before this patch, if we could not move a BAR into an aperture, we left the resource unassigned, i.e., at address zero. Windows leaves such BARs at the original BIOS addresses, and this patch makes Linux do the same. This is a bit ugly because we disable the resource long before we try to reassign it, so we have to keep track of the BIOS BAR address somewhere. For lack of a better place, I put it in the struct pci_dev. I think it would be cleaner to attempt the assignment immediately when the claim fails, so we could easily remember the original address. But we currently claim motherboard resources in the middle, after attempting to claim PCI resources and before assigning new PCI resources, and changing that is a fairly big job. 
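Before the full diff, a condensed sketch of the approach may help; the helper names below are invented for illustration, and the real code additionally restores the original range and logs a message when the firmware address conflicts:

#include <linux/pci.h>
#include <linux/ioport.h>

/*
 * Illustration only: remember the firmware-assigned BAR address when the
 * initial claim fails, and fall back to it if a fresh assignment cannot
 * be found later.
 */
static void example_remember_fw_addr(struct pci_dev *dev, int idx)
{
	struct resource *r = &dev->resource[idx];

	if (pci_claim_resource(dev, idx) < 0) {
		dev->fw_addr[idx] = r->start;	/* keep the BIOS address */
		r->end -= r->start;		/* zero-base the range... */
		r->start = 0;			/* ...so it gets reassigned later */
	}
}

static int example_try_fw_addr(struct pci_dev *dev, int idx,
			       resource_size_t size)
{
	struct resource *res = &dev->resource[idx];
	struct resource *root = (res->flags & IORESOURCE_IO) ?
					&ioport_resource : &iomem_resource;

	if (!dev->fw_addr[idx])
		return -ENODEV;		/* nothing was recorded at claim time */

	res->start = dev->fw_addr[idx];
	res->end = res->start + size - 1;
	return request_resource_conflict(root, res) ? -EBUSY : 0;
}
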
Addresses https://bugzilla.kernel.org/show_bug.cgi?id=16263 Reported-by: Andrew Tested-by: Andrew Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/i386.c | 1 + drivers/pci/setup-res.c | 32 ++++++++++++++++++++++++++++++++ include/linux/pci.h | 1 + 3 files changed, 34 insertions(+) (limited to 'include') diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 6fdb3ec30c31..55253095be84 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -184,6 +184,7 @@ static void __init pcibios_allocate_resources(int pass) idx, r, disabled, pass); if (pci_claim_resource(dev, idx) < 0) { /* We'll assign a new address later */ + dev->fw_addr[idx] = r->start; r->end -= r->start; r->start = 0; } diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 92379e2d37e7..2aaa13150de3 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -156,6 +156,38 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, pcibios_align_resource, dev); } + if (ret < 0 && dev->fw_addr[resno]) { + struct resource *root, *conflict; + resource_size_t start, end; + + /* + * If we failed to assign anything, let's try the address + * where firmware left it. That at least has a chance of + * working, which is better than just leaving it disabled. + */ + + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + else + root = &iomem_resource; + + start = res->start; + end = res->end; + res->start = dev->fw_addr[resno]; + res->end = res->start + size - 1; + dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", + resno, res); + conflict = request_resource_conflict(root, res); + if (conflict) { + dev_info(&dev->dev, + "BAR %d: %pR conflicts with %s %pR\n", resno, + res, conflict->name, conflict); + res->start = start; + res->end = end; + } else + ret = 0; + } + if (!ret) { res->flags &= ~IORESOURCE_STARTALIGN; dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); diff --git a/include/linux/pci.h b/include/linux/pci.h index 7cb00845f150..f26fda76b87f 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -288,6 +288,7 @@ struct pci_dev { */ unsigned int irq; struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ + resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */ /* These fields are used by common fixups */ unsigned int transparent:1; /* Transparent PCI bridge */ -- cgit v1.2.3 From 2f495c398edca50ac251c134f1995a2fb3c06cb7 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Mon, 21 Jun 2010 13:20:46 +1000 Subject: net/phy/marvell: Expose IDs and flags in a .h and add dns323 LEDs setup flag This moves the various known Marvell PHY IDs to include/linux/marvell_phy.h along with dev_flags definitions for use by the driver. I then added a flag that changes the PHY init code to setup the LEDs config to the values needed to operate a dns323 rev C1 NAS. I moved the existing "resistance" flag to the .h as well, though I've been unable to find whoever sets this to convert it to use that constant. Signed-off-by: Benjamin Herrenschmidt Reviewed-by: Wolfram Sang Acked-by: David S. 
Miller Signed-off-by: Nicolas Pitre --- drivers/net/phy/marvell.c | 38 ++++++++++++++++++++------------------ include/linux/marvell_phy.h | 20 ++++++++++++++++++++ 2 files changed, 40 insertions(+), 18 deletions(-) create mode 100644 include/linux/marvell_phy.h (limited to 'include') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 78b74e83ce5d..5a1bd5db2a93 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -48,8 +49,6 @@ #define MII_M1145_RGMII_RX_DELAY 0x0080 #define MII_M1145_RGMII_TX_DELAY 0x0002 -#define M1145_DEV_FLAGS_RESISTANCE 0x00000001 - #define MII_M1111_PHY_LED_CONTROL 0x18 #define MII_M1111_PHY_LED_DIRECT 0x4100 #define MII_M1111_PHY_LED_COMBINE 0x411c @@ -350,7 +349,10 @@ static int m88e1118_config_init(struct phy_device *phydev) return err; /* Adjust LED Control */ - err = phy_write(phydev, 0x10, 0x021e); + if (phydev->dev_flags & MARVELL_PHY_M1118_DNS323_LEDS) + err = phy_write(phydev, 0x10, 0x1100); + else + err = phy_write(phydev, 0x10, 0x021e); if (err < 0) return err; @@ -398,7 +400,7 @@ static int m88e1145_config_init(struct phy_device *phydev) if (err < 0) return err; - if (phydev->dev_flags & M1145_DEV_FLAGS_RESISTANCE) { + if (phydev->dev_flags & MARVELL_PHY_M1145_FLAGS_RESISTANCE) { err = phy_write(phydev, 0x1d, 0x0012); if (err < 0) return err; @@ -529,8 +531,8 @@ static int m88e1121_did_interrupt(struct phy_device *phydev) static struct phy_driver marvell_drivers[] = { { - .phy_id = 0x01410c60, - .phy_id_mask = 0xfffffff0, + .phy_id = MARVELL_PHY_ID_88E1101, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1101", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, @@ -541,8 +543,8 @@ static struct phy_driver marvell_drivers[] = { .driver = { .owner = THIS_MODULE }, }, { - .phy_id = 0x01410c90, - .phy_id_mask = 0xfffffff0, + .phy_id = MARVELL_PHY_ID_88E1112, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1112", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, @@ -554,8 +556,8 @@ static struct phy_driver marvell_drivers[] = { .driver = { .owner = THIS_MODULE }, }, { - .phy_id = 0x01410cc0, - .phy_id_mask = 0xfffffff0, + .phy_id = MARVELL_PHY_ID_88E1111, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1111", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, @@ -567,8 +569,8 @@ static struct phy_driver marvell_drivers[] = { .driver = { .owner = THIS_MODULE }, }, { - .phy_id = 0x01410e10, - .phy_id_mask = 0xfffffff0, + .phy_id = MARVELL_PHY_ID_88E1118, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1118", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, @@ -580,8 +582,8 @@ static struct phy_driver marvell_drivers[] = { .driver = {.owner = THIS_MODULE,}, }, { - .phy_id = 0x01410cb0, - .phy_id_mask = 0xfffffff0, + .phy_id = MARVELL_PHY_ID_88E1121R, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1121R", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, @@ -593,8 +595,8 @@ static struct phy_driver marvell_drivers[] = { .driver = { .owner = THIS_MODULE }, }, { - .phy_id = 0x01410cd0, - .phy_id_mask = 0xfffffff0, + .phy_id = MARVELL_PHY_ID_88E1145, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1145", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, @@ -606,8 +608,8 @@ static struct phy_driver marvell_drivers[] = { .driver = { .owner = THIS_MODULE }, }, { - .phy_id = 0x01410e30, - .phy_id_mask = 0xfffffff0, + .phy_id = 
MARVELL_PHY_ID_88E1240, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1240", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h new file mode 100644 index 000000000000..2ed4fb8bbd51 --- /dev/null +++ b/include/linux/marvell_phy.h @@ -0,0 +1,20 @@ +#ifndef _MARVELL_PHY_H +#define _MARVELL_PHY_H + +/* Mask used for ID comparisons */ +#define MARVELL_PHY_ID_MASK 0xfffffff0 + +/* Known PHY IDs */ +#define MARVELL_PHY_ID_88E1101 0x01410c60 +#define MARVELL_PHY_ID_88E1112 0x01410c90 +#define MARVELL_PHY_ID_88E1111 0x01410cc0 +#define MARVELL_PHY_ID_88E1118 0x01410e10 +#define MARVELL_PHY_ID_88E1121R 0x01410cb0 +#define MARVELL_PHY_ID_88E1145 0x01410cd0 +#define MARVELL_PHY_ID_88E1240 0x01410e30 + +/* struct phy_device dev_flags definitions */ +#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 +#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 + +#endif /* _MARVELL_PHY_H */ -- cgit v1.2.3 From 2430d12c94ff2bafcfe4f65edf7ee5f300d2d9c6 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Sun, 13 Jun 2010 00:36:52 +0200 Subject: PM: describe kernel policy regarding wakeup defaults (v. 2) This patch (as1381b) updates a comment describing the kernel's policy toward enabling wakeup by default. It also makes device_set_wakeup_capable() actually do something when CONFIG_PM isn't enabled. It's not clear this is necessary; however if it isn't then device_init_wakeup() and device_can_wakeup() should also be do-nothing routines. Furthermore, I don't expect this change to have any noticeable effect -- but if it does then clearly the old behavior was wrong. Signed-off-by: Alan Stern Signed-off-by: Rafael J. Wysocki --- include/linux/pm_wakeup.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 22d64c18056c..76aca48722ae 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -29,8 +29,11 @@ #ifdef CONFIG_PM -/* changes to device_may_wakeup take effect on the next pm state change. - * by default, devices should wakeup if they can. +/* Changes to device_may_wakeup take effect on the next pm state change. + * + * By default, most devices should leave wakeup disabled. The exceptions + * are devices that everyone expects to be wakeup sources: keyboards, + * power buttons, possibly network interfaces, etc. */ static inline void device_init_wakeup(struct device *dev, bool val) { @@ -59,7 +62,7 @@ static inline bool device_may_wakeup(struct device *dev) #else /* !CONFIG_PM */ -/* For some reason the next two routines work even without CONFIG_PM */ +/* For some reason the following routines work even without CONFIG_PM */ static inline void device_init_wakeup(struct device *dev, bool val) { dev->power.can_wakeup = val; @@ -67,6 +70,7 @@ static inline void device_init_wakeup(struct device *dev, bool val) static inline void device_set_wakeup_capable(struct device *dev, bool capable) { + dev->power.can_wakeup = capable; } static inline bool device_can_wakeup(struct device *dev) -- cgit v1.2.3 From b14e033e17d0ea0ba12668d0d2f371cd31586994 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 29 Jun 2010 22:49:24 +0200 Subject: PNPACPI: Add support for remote wakeup This patch (as1354) adds remote-wakeup support to the pnpacpi driver. The new can_wakeup method also allows other PNP protocol drivers (pnpbios or iaspnp) to add wakeup support, but I don't know enough about how they work to actually do it. 
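As a sketch of what such support might look like for another protocol, the hypothetical driver below only fills in the new hook (every name prefixed example_ is made up; the usual .get/.set/.disable callbacks are omitted):

#include <linux/pnp.h>

/* Hypothetical protocol whose devices never support remote wakeup. */
static bool example_pnp_can_wakeup(struct pnp_dev *dev)
{
	return false;
}

static struct pnp_protocol example_pnp_protocol = {
	.name		= "Example protocol",
	.can_wakeup	= example_pnp_can_wakeup,
};

When such a device is added, the PNP core consults the hook once and marks the device wakeup-capable (or not) via device_set_wakeup_capable(), as the change to __pnp_add_device() below shows.
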
Signed-off-by: Alan Stern Reviewed-by: Bjorn Helgaas Signed-off-by: Rafael J. Wysocki --- drivers/pnp/core.c | 3 +++ drivers/pnp/pnpacpi/core.c | 23 +++++++++++++++++++++++ include/linux/pnp.h | 1 + 3 files changed, 27 insertions(+) (limited to 'include') diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c index 5dba90995d9e..88b3cde52596 100644 --- a/drivers/pnp/core.c +++ b/drivers/pnp/core.c @@ -164,6 +164,9 @@ int __pnp_add_device(struct pnp_dev *dev) list_add_tail(&dev->global_list, &pnp_global); list_add_tail(&dev->protocol_list, &dev->protocol->devices); spin_unlock(&pnp_lock); + if (dev->protocol->can_wakeup) + device_set_wakeup_capable(&dev->dev, + dev->protocol->can_wakeup(dev)); return device_register(&dev->dev); } diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index f7ff628b7d94..dc4e32e031e9 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -122,17 +122,37 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) } #ifdef CONFIG_ACPI_SLEEP +static bool pnpacpi_can_wakeup(struct pnp_dev *dev) +{ + struct acpi_device *acpi_dev = dev->data; + acpi_handle handle = acpi_dev->handle; + + return acpi_bus_can_wakeup(handle); +} + static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) { struct acpi_device *acpi_dev = dev->data; acpi_handle handle = acpi_dev->handle; int power_state; + if (device_can_wakeup(&dev->dev)) { + int rc = acpi_pm_device_sleep_wake(&dev->dev, + device_may_wakeup(&dev->dev)); + + if (rc) + return rc; + } power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); if (power_state < 0) power_state = (state.event == PM_EVENT_ON) ? ACPI_STATE_D0 : ACPI_STATE_D3; + /* acpi_bus_set_power() often fails (keyboard port can't be + * powered-down?), and in any case, our return value is ignored + * by pnp_bus_suspend(). Hence we don't revert the wakeup + * setting if the set_power fails. + */ return acpi_bus_set_power(handle, power_state); } @@ -141,6 +161,8 @@ static int pnpacpi_resume(struct pnp_dev *dev) struct acpi_device *acpi_dev = dev->data; acpi_handle handle = acpi_dev->handle; + if (device_may_wakeup(&dev->dev)) + acpi_pm_device_sleep_wake(&dev->dev, false); return acpi_bus_set_power(handle, ACPI_STATE_D0); } #endif @@ -151,6 +173,7 @@ struct pnp_protocol pnpacpi_protocol = { .set = pnpacpi_set_resources, .disable = pnpacpi_disable_resources, #ifdef CONFIG_ACPI_SLEEP + .can_wakeup = pnpacpi_can_wakeup, .suspend = pnpacpi_suspend, .resume = pnpacpi_resume, #endif diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 7c4193eb0072..1bc1338b817b 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -414,6 +414,7 @@ struct pnp_protocol { int (*disable) (struct pnp_dev *dev); /* protocol specific suspend/resume */ + bool (*can_wakeup) (struct pnp_dev *dev); int (*suspend) (struct pnp_dev * dev, pm_message_t state); int (*resume) (struct pnp_dev * dev); -- cgit v1.2.3 From c125e96f044427f38d106fab7bc5e4a5e6a18262 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 5 Jul 2010 22:43:53 +0200 Subject: PM: Make it possible to avoid races between wakeup and system sleep One of the arguments during the suspend blockers discussion was that the mainline kernel didn't contain any mechanisms making it possible to avoid races between wakeup and system suspend. Generally, there are two problems in that area. 
First, if a wakeup event occurs exactly when /sys/power/state is being written to, it may be delivered to user space right before the freezer kicks in, so the user space consumer of the event may not be able to process it before the system is suspended. Second, if a wakeup event occurs after user space has been frozen, it is not generally guaranteed that the ongoing transition of the system into a sleep state will be aborted. To address these issues introduce a new global sysfs attribute, /sys/power/wakeup_count, associated with a running counter of wakeup events and three helper functions, pm_stay_awake(), pm_relax(), and pm_wakeup_event(), that may be used by kernel subsystems to control the behavior of this attribute and to request the PM core to abort system transitions into a sleep state already in progress. The /sys/power/wakeup_count file may be read from or written to by user space. Reads will always succeed (unless interrupted by a signal) and return the current value of the wakeup events counter. Writes, however, will only succeed if the written number is equal to the current value of the wakeup events counter. If a write is successful, it will cause the kernel to save the current value of the wakeup events counter and to abort the subsequent system transition into a sleep state if any wakeup events are reported after the write has returned. [The assumption is that before writing to /sys/power/state user space will first read from /sys/power/wakeup_count. Next, user space consumers of wakeup events will have a chance to acknowledge or veto the upcoming system transition to a sleep state. Finally, if the transition is allowed to proceed, /sys/power/wakeup_count will be written to and if that succeeds, /sys/power/state will be written to as well. Still, if any wakeup events are reported to the PM core by kernel subsystems after that point, the transition will be aborted.] Additionally, put a wakeup events counter into struct dev_pm_info and make these per-device wakeup event counters available via sysfs, so that it's possible to check the activity of various wakeup event sources within the kernel. To illustrate how subsystems can use pm_wakeup_event(), make the low-level PCI runtime PM wakeup-handling code use it. Signed-off-by: Rafael J. Wysocki Acked-by: Jesse Barnes Acked-by: Greg Kroah-Hartman Acked-by: markgross Reviewed-by: Alan Stern --- Documentation/ABI/testing/sysfs-power | 15 +++ drivers/base/power/Makefile | 2 +- drivers/base/power/main.c | 1 + drivers/base/power/sysfs.c | 15 +++ drivers/base/power/wakeup.c | 229 ++++++++++++++++++++++++++++++++++ drivers/pci/pci-acpi.c | 1 + drivers/pci/pci.c | 20 ++- drivers/pci/pci.h | 1 + drivers/pci/pcie/pme/pcie_pme.c | 5 +- include/linux/pm.h | 10 ++ include/linux/suspend.h | 7 ++ kernel/power/hibernate.c | 20 ++- kernel/power/main.c | 55 ++++++++ kernel/power/suspend.c | 4 +- 14 files changed, 375 insertions(+), 10 deletions(-) create mode 100644 drivers/base/power/wakeup.c (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power index d6a801f45b48..2875f1f74a07 100644 --- a/Documentation/ABI/testing/sysfs-power +++ b/Documentation/ABI/testing/sysfs-power @@ -114,3 +114,18 @@ Description: if this file contains "1", which is the default. It may be disabled by writing "0" to this file, in which case all devices will be suspended and resumed synchronously. + +What: /sys/power/wakeup_count +Date: July 2010 +Contact: Rafael J. 
Wysocki +Description: + The /sys/power/wakeup_count file allows user space to put the + system into a sleep state while taking into account the + concurrent arrival of wakeup events. Reading from it returns + the current number of registered wakeup events and it blocks if + some wakeup events are being processed at the time the file is + read from. Writing to it will only succeed if the current + number of wakeup events is equal to the written value and, if + successful, will make the kernel abort a subsequent transition + to a sleep state if any wakeup events are reported after the + write has returned. diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 89de75325cea..cbccf9a3cee4 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_PM) += sysfs.o -obj-$(CONFIG_PM_SLEEP) += main.o +obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_OPS) += generic_ops.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 941fcb87e52a..5419a49ff135 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -59,6 +59,7 @@ void device_pm_init(struct device *dev) { dev->power.status = DPM_ON; init_completion(&dev->power.completion); + dev->power.wakeup_count = 0; pm_runtime_init(dev); } diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a4c33bc51257..81d344e0e95d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -73,6 +73,8 @@ * device are known to the PM core. However, for some devices this * attribute is set to "enabled" by bus type code or device drivers and in * that cases it should be safe to leave the default value. + * + * wakeup_count - Report the number of wakeup events related to the device */ static const char enabled[] = "enabled"; @@ -144,6 +146,16 @@ wake_store(struct device * dev, struct device_attribute *attr, static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); +#ifdef CONFIG_PM_SLEEP +static ssize_t wakeup_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", dev->power.wakeup_count); +} + +static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); +#endif + #ifdef CONFIG_PM_ADVANCED_DEBUG #ifdef CONFIG_PM_RUNTIME @@ -230,6 +242,9 @@ static struct attribute * power_attrs[] = { &dev_attr_control.attr, #endif &dev_attr_wakeup.attr, +#ifdef CONFIG_PM_SLEEP + &dev_attr_wakeup_count.attr, +#endif #ifdef CONFIG_PM_ADVANCED_DEBUG &dev_attr_async.attr, #ifdef CONFIG_PM_RUNTIME diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c new file mode 100644 index 000000000000..25599077c39c --- /dev/null +++ b/drivers/base/power/wakeup.c @@ -0,0 +1,229 @@ +/* + * drivers/base/power/wakeup.c - System wakeup events framework + * + * Copyright (c) 2010 Rafael J. Wysocki , Novell Inc. + * + * This file is released under the GPLv2. + */ + +#include +#include +#include +#include +#include +#include + +/* + * If set, the suspend/hibernate code will abort transitions to a sleep state + * if wakeup events are registered during or immediately before the transition. + */ +bool events_check_enabled; + +/* The counter of registered wakeup events. */ +static unsigned long event_count; +/* A preserved old value of event_count. */ +static unsigned long saved_event_count; +/* The counter of wakeup events being processed. 
*/ +static unsigned long events_in_progress; + +static DEFINE_SPINLOCK(events_lock); + +/* + * The functions below use the observation that each wakeup event starts a + * period in which the system should not be suspended. The moment this period + * will end depends on how the wakeup event is going to be processed after being + * detected and all of the possible cases can be divided into two distinct + * groups. + * + * First, a wakeup event may be detected by the same functional unit that will + * carry out the entire processing of it and possibly will pass it to user space + * for further processing. In that case the functional unit that has detected + * the event may later "close" the "no suspend" period associated with it + * directly as soon as it has been dealt with. The pair of pm_stay_awake() and + * pm_relax(), balanced with each other, is supposed to be used in such + * situations. + * + * Second, a wakeup event may be detected by one functional unit and processed + * by another one. In that case the unit that has detected it cannot really + * "close" the "no suspend" period associated with it, unless it knows in + * advance what's going to happen to the event during processing. This + * knowledge, however, may not be available to it, so it can simply specify time + * to wait before the system can be suspended and pass it as the second + * argument of pm_wakeup_event(). + */ + +/** + * pm_stay_awake - Notify the PM core that a wakeup event is being processed. + * @dev: Device the wakeup event is related to. + * + * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the + * counter of wakeup events being processed. If @dev is not NULL, the counter + * of wakeup events related to @dev is incremented too. + * + * Call this function after detecting of a wakeup event if pm_relax() is going + * to be called directly after processing the event (and possibly passing it to + * user space for further processing). + * + * It is safe to call this function from interrupt context. + */ +void pm_stay_awake(struct device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&events_lock, flags); + if (dev) + dev->power.wakeup_count++; + + events_in_progress++; + spin_unlock_irqrestore(&events_lock, flags); +} + +/** + * pm_relax - Notify the PM core that processing of a wakeup event has ended. + * + * Notify the PM core that a wakeup event has been processed by decrementing + * the counter of wakeup events being processed and incrementing the counter + * of registered wakeup events. + * + * Call this function for wakeup events whose processing started with calling + * pm_stay_awake(). + * + * It is safe to call it from interrupt context. + */ +void pm_relax(void) +{ + unsigned long flags; + + spin_lock_irqsave(&events_lock, flags); + if (events_in_progress) { + events_in_progress--; + event_count++; + } + spin_unlock_irqrestore(&events_lock, flags); +} + +/** + * pm_wakeup_work_fn - Deferred closing of a wakeup event. + * + * Execute pm_relax() for a wakeup event detected in the past and free the + * work item object used for queuing up the work. + */ +static void pm_wakeup_work_fn(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + + pm_relax(); + kfree(dwork); +} + +/** + * pm_wakeup_event - Notify the PM core of a wakeup event. + * @dev: Device the wakeup event is related to. + * @msec: Anticipated event processing time (in milliseconds). 
+ * + * Notify the PM core of a wakeup event (signaled by @dev) that will take + * approximately @msec milliseconds to be processed by the kernel. Increment + * the counter of wakeup events being processed and queue up a work item + * that will execute pm_relax() for the event after @msec milliseconds. If @dev + * is not NULL, the counter of wakeup events related to @dev is incremented too. + * + * It is safe to call this function from interrupt context. + */ +void pm_wakeup_event(struct device *dev, unsigned int msec) +{ + unsigned long flags; + struct delayed_work *dwork; + + dwork = msec ? kzalloc(sizeof(*dwork), GFP_ATOMIC) : NULL; + + spin_lock_irqsave(&events_lock, flags); + if (dev) + dev->power.wakeup_count++; + + if (dwork) { + INIT_DELAYED_WORK(dwork, pm_wakeup_work_fn); + schedule_delayed_work(dwork, msecs_to_jiffies(msec)); + + events_in_progress++; + } else { + event_count++; + } + spin_unlock_irqrestore(&events_lock, flags); +} + +/** + * pm_check_wakeup_events - Check for new wakeup events. + * + * Compare the current number of registered wakeup events with its preserved + * value from the past to check if new wakeup events have been registered since + * the old value was stored. Check if the current number of wakeup events being + * processed is zero. + */ +bool pm_check_wakeup_events(void) +{ + unsigned long flags; + bool ret = true; + + spin_lock_irqsave(&events_lock, flags); + if (events_check_enabled) { + ret = (event_count == saved_event_count) && !events_in_progress; + events_check_enabled = ret; + } + spin_unlock_irqrestore(&events_lock, flags); + return ret; +} + +/** + * pm_get_wakeup_count - Read the number of registered wakeup events. + * @count: Address to store the value at. + * + * Store the number of registered wakeup events at the address in @count. Block + * if the current number of wakeup events being processed is nonzero. + * + * Return false if the wait for the number of wakeup events being processed to + * drop down to zero has been interrupted by a signal (and the current number + * of wakeup events being processed is still nonzero). Otherwise return true. + */ +bool pm_get_wakeup_count(unsigned long *count) +{ + bool ret; + + spin_lock_irq(&events_lock); + if (capable(CAP_SYS_ADMIN)) + events_check_enabled = false; + + while (events_in_progress && !signal_pending(current)) { + spin_unlock_irq(&events_lock); + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + spin_lock_irq(&events_lock); + } + *count = event_count; + ret = !events_in_progress; + spin_unlock_irq(&events_lock); + return ret; +} + +/** + * pm_save_wakeup_count - Save the current number of registered wakeup events. + * @count: Value to compare with the current number of registered wakeup events. + * + * If @count is equal to the current number of registered wakeup events and the + * current number of wakeup events being processed is zero, store @count as the + * old number of registered wakeup events to be used by pm_check_wakeup_events() + * and return true. Otherwise return false. 
+ */ +bool pm_save_wakeup_count(unsigned long count) +{ + bool ret = false; + + spin_lock_irq(&events_lock); + if (count == event_count && !events_in_progress) { + saved_event_count = count; + events_check_enabled = true; + ret = true; + } + spin_unlock_irq(&events_lock); + return ret; +} diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 2e7a3bf13824..1ab98bbe58dd 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -48,6 +48,7 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { pci_check_pme_status(pci_dev); pm_runtime_resume(&pci_dev->dev); + pci_wakeup_event(pci_dev); if (pci_dev->subordinate) pci_pme_wakeup_bus(pci_dev->subordinate); } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 740fb4ea9669..130ed1daf0f8 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1275,6 +1275,22 @@ bool pci_check_pme_status(struct pci_dev *dev) return ret; } +/* + * Time to wait before the system can be put into a sleep state after reporting + * a wakeup event signaled by a PCI device. + */ +#define PCI_WAKEUP_COOLDOWN 100 + +/** + * pci_wakeup_event - Report a wakeup event related to a given PCI device. + * @dev: Device to report the wakeup event for. + */ +void pci_wakeup_event(struct pci_dev *dev) +{ + if (device_may_wakeup(&dev->dev)) + pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN); +} + /** * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. * @dev: Device to handle. @@ -1285,8 +1301,10 @@ bool pci_check_pme_status(struct pci_dev *dev) */ static int pci_pme_wakeup(struct pci_dev *dev, void *ign) { - if (pci_check_pme_status(dev)) + if (pci_check_pme_status(dev)) { pm_request_resume(&dev->dev); + pci_wakeup_event(dev); + } return 0; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f8077b3c8c8c..c8b7fd056ccd 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -56,6 +56,7 @@ extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); extern void pci_disable_enabled_device(struct pci_dev *dev); extern bool pci_check_pme_status(struct pci_dev *dev); extern int pci_finish_runtime_suspend(struct pci_dev *dev); +extern void pci_wakeup_event(struct pci_dev *dev); extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); extern void pci_pme_wakeup_bus(struct pci_bus *bus); extern void pci_pm_init(struct pci_dev *dev); diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme/pcie_pme.c index d672a0a63816..bbdea18693d9 100644 --- a/drivers/pci/pcie/pme/pcie_pme.c +++ b/drivers/pci/pcie/pme/pcie_pme.c @@ -154,6 +154,7 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus) /* Skip PCIe devices in case we started from a root port. */ if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { pm_request_resume(&dev->dev); + pci_wakeup_event(dev); ret = true; } @@ -254,8 +255,10 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) if (found) { /* The device is there, but we have to check its PME status. 
*/ found = pci_check_pme_status(dev); - if (found) + if (found) { pm_request_resume(&dev->dev); + pci_wakeup_event(dev); + } pci_dev_put(dev); } else if (devfn) { /* diff --git a/include/linux/pm.h b/include/linux/pm.h index 8e258c727971..b417fc46f3fc 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -457,6 +457,7 @@ struct dev_pm_info { #ifdef CONFIG_PM_SLEEP struct list_head entry; struct completion completion; + unsigned long wakeup_count; #endif #ifdef CONFIG_PM_RUNTIME struct timer_list suspend_timer; @@ -552,6 +553,11 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); } while (0) extern void device_pm_wait_for_dev(struct device *sub, struct device *dev); + +/* drivers/base/power/wakeup.c */ +extern void pm_wakeup_event(struct device *dev, unsigned int msec); +extern void pm_stay_awake(struct device *dev); +extern void pm_relax(void); #else /* !CONFIG_PM_SLEEP */ #define device_pm_lock() do {} while (0) @@ -565,6 +571,10 @@ static inline int dpm_suspend_start(pm_message_t state) #define suspend_report_result(fn, ret) do {} while (0) static inline void device_pm_wait_for_dev(struct device *a, struct device *b) {} + +static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} +static inline void pm_stay_awake(struct device *dev) {} +static inline void pm_relax(void) {} #endif /* !CONFIG_PM_SLEEP */ /* How to reorder dpm_list after device_move() */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index bc7d6bb4cd8e..bf1bab7b059c 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -286,6 +286,13 @@ extern int unregister_pm_notifier(struct notifier_block *nb); { .notifier_call = fn, .priority = pri }; \ register_pm_notifier(&fn##_nb); \ } + +/* drivers/base/power/wakeup.c */ +extern bool events_check_enabled; + +extern bool pm_check_wakeup_events(void); +extern bool pm_get_wakeup_count(unsigned long *count); +extern bool pm_save_wakeup_count(unsigned long count); #else /* !CONFIG_PM_SLEEP */ static inline int register_pm_notifier(struct notifier_block *nb) diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index aa9e916da4d5..f61202916631 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -277,7 +277,7 @@ static int create_image(int platform_mode) goto Enable_irqs; } - if (hibernation_test(TEST_CORE)) + if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events()) goto Power_up; in_suspend = 1; @@ -288,8 +288,10 @@ static int create_image(int platform_mode) error); /* Restore control flow magically appears here */ restore_processor_state(); - if (!in_suspend) + if (!in_suspend) { + events_check_enabled = false; platform_leave(platform_mode); + } Power_up: sysdev_resume(); @@ -511,14 +513,20 @@ int hibernation_platform_enter(void) local_irq_disable(); sysdev_suspend(PMSG_HIBERNATE); + if (!pm_check_wakeup_events()) { + error = -EAGAIN; + goto Power_up; + } + hibernation_ops->enter(); /* We should never get here */ while (1); - /* - * We don't need to reenable the nonboot CPUs or resume consoles, since - * the system is going to be halted anyway. 
- */ + Power_up: + sysdev_resume(); + local_irq_enable(); + enable_nonboot_cpus(); + Platform_finish: hibernation_ops->finish(); diff --git a/kernel/power/main.c b/kernel/power/main.c index b58800b21fc0..62b0bc6e4983 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -204,6 +204,60 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, power_attr(state); +#ifdef CONFIG_PM_SLEEP +/* + * The 'wakeup_count' attribute, along with the functions defined in + * drivers/base/power/wakeup.c, provides a means by which wakeup events can be + * handled in a non-racy way. + * + * If a wakeup event occurs when the system is in a sleep state, it simply is + * woken up. In turn, if an event that would wake the system up from a sleep + * state occurs when it is undergoing a transition to that sleep state, the + * transition should be aborted. Moreover, if such an event occurs when the + * system is in the working state, an attempt to start a transition to the + * given sleep state should fail during certain period after the detection of + * the event. Using the 'state' attribute alone is not sufficient to satisfy + * these requirements, because a wakeup event may occur exactly when 'state' + * is being written to and may be delivered to user space right before it is + * frozen, so the event will remain only partially processed until the system is + * woken up by another event. In particular, it won't cause the transition to + * a sleep state to be aborted. + * + * This difficulty may be overcome if user space uses 'wakeup_count' before + * writing to 'state'. It first should read from 'wakeup_count' and store + * the read value. Then, after carrying out its own preparations for the system + * transition to a sleep state, it should write the stored value to + * 'wakeup_count'. If that fails, at least one wakeup event has occured since + * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it + * is allowed to write to 'state', but the transition will be aborted if there + * are any wakeup events detected after 'wakeup_count' was written to. + */ + +static ssize_t wakeup_count_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + unsigned long val; + + return pm_get_wakeup_count(&val) ? 
sprintf(buf, "%lu\n", val) : -EINTR; +} + +static ssize_t wakeup_count_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t n) +{ + unsigned long val; + + if (sscanf(buf, "%lu", &val) == 1) { + if (pm_save_wakeup_count(val)) + return n; + } + return -EINVAL; +} + +power_attr(wakeup_count); +#endif /* CONFIG_PM_SLEEP */ + #ifdef CONFIG_PM_TRACE int pm_trace_enabled; @@ -236,6 +290,7 @@ static struct attribute * g[] = { #endif #ifdef CONFIG_PM_SLEEP &pm_async_attr.attr, + &wakeup_count_attr.attr, #ifdef CONFIG_PM_DEBUG &pm_test_attr.attr, #endif diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index f37cb7dd4402..5f8d09f94325 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -163,8 +163,10 @@ static int suspend_enter(suspend_state_t state) error = sysdev_suspend(PMSG_SUSPEND); if (!error) { - if (!suspend_test(TEST_CORE)) + if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) { error = suspend_ops->enter(state); + events_check_enabled = false; + } sysdev_resume(); } -- cgit v1.2.3 From 12e4d0cc2e0a776a526c93bb2fcb9267abc6e0b1 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Thu, 1 Jul 2010 21:46:36 +0200 Subject: plist: Add plist_last plist is currently used by the scheduler, which only needs to know the highest item in the list. This adds plist_last which allows you to find the lowest. This is necessary for using plists to implement a fast search of dynamic ranges in pm_qos which can have both highest and lowest criteria. Signed-off-by: James Bottomley Signed-off-by: Rafael J. Wysocki --- include/linux/plist.h | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'include') diff --git a/include/linux/plist.h b/include/linux/plist.h index 6898985e7b38..7254eda078e5 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -259,6 +259,23 @@ static inline int plist_node_empty(const struct plist_node *node) container_of(plist_first(head), type, member) #endif +/** + * plist_last_entry - get the struct for the last entry + * @head: the &struct plist_head pointer + * @type: the type of the struct this is embedded in + * @member: the name of the list_struct within the struct + */ +#ifdef CONFIG_DEBUG_PI_LIST +# define plist_last_entry(head, type, member) \ +({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_last(head), type, member); \ +}) +#else +# define plist_last_entry(head, type, member) \ + container_of(plist_last(head), type, member) +#endif + /** * plist_first - return the first node (and thus, highest priority) * @head: the &struct plist_head pointer @@ -271,4 +288,16 @@ static inline struct plist_node *plist_first(const struct plist_head *head) struct plist_node, plist.node_list); } +/** + * plist_last - return the last node (and thus, lowest priority) + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. + */ +static inline struct plist_node *plist_last(const struct plist_head *head) +{ + return list_entry(head->node_list.prev, + struct plist_node, plist.node_list); +} + #endif -- cgit v1.2.3 From 82f682514a5df89ffb3890627eebf0897b7a84ec Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Mon, 5 Jul 2010 22:53:06 +0200 Subject: pm_qos: Get rid of the allocation in pm_qos_add_request() All current users of pm_qos_add_request() have the ability to supply the memory required by the pm_qos routines, so make them do this and eliminate the kmalloc() with pm_qos_add_request(). 
This has the double benefit of making the call never fail and allowing it to be called from atomic context. Signed-off-by: James Bottomley Signed-off-by: mark gross Signed-off-by: Rafael J. Wysocki --- drivers/net/e1000e/netdev.c | 17 ++++----- drivers/net/igbvf/netdev.c | 9 ++--- drivers/net/wireless/ipw2x00/ipw2100.c | 12 +++--- include/linux/netdevice.h | 2 +- include/linux/pm_qos_params.h | 13 +++++-- include/sound/pcm.h | 2 +- kernel/pm_qos_params.c | 67 ++++++++++++++++++++-------------- sound/core/pcm_native.c | 13 +++---- 8 files changed, 74 insertions(+), 61 deletions(-) (limited to 'include') diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 57a7e41da69e..9f13b660b801 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -2901,10 +2901,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) * dropped transactions. */ pm_qos_update_request( - adapter->netdev->pm_qos_req, 55); + &adapter->netdev->pm_qos_req, 55); } else { pm_qos_update_request( - adapter->netdev->pm_qos_req, + &adapter->netdev->pm_qos_req, PM_QOS_DEFAULT_VALUE); } } @@ -3196,9 +3196,9 @@ int e1000e_up(struct e1000_adapter *adapter) /* DMA latency requirement to workaround early-receive/jumbo issue */ if (adapter->flags & FLAG_HAS_ERT) - adapter->netdev->pm_qos_req = - pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + pm_qos_add_request(&adapter->netdev->pm_qos_req, + PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); /* hardware has been reset, we need to reload some things */ e1000_configure(adapter); @@ -3263,11 +3263,8 @@ void e1000e_down(struct e1000_adapter *adapter) e1000_clean_tx_ring(adapter); e1000_clean_rx_ring(adapter); - if (adapter->flags & FLAG_HAS_ERT) { - pm_qos_remove_request( - adapter->netdev->pm_qos_req); - adapter->netdev->pm_qos_req = NULL; - } + if (adapter->flags & FLAG_HAS_ERT) + pm_qos_remove_request(&adapter->netdev->pm_qos_req); /* * TODO: for power management, we could drop the link and diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 5e2b2a8c56c6..add6197d3bcb 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -48,7 +48,7 @@ #define DRV_VERSION "1.0.0-k0" char igbvf_driver_name[] = "igbvf"; const char igbvf_driver_version[] = DRV_VERSION; -struct pm_qos_request_list *igbvf_driver_pm_qos_req; +static struct pm_qos_request_list igbvf_driver_pm_qos_req; static const char igbvf_driver_string[] = "Intel(R) Virtual Function Network Driver"; static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; @@ -2902,8 +2902,8 @@ static int __init igbvf_init_module(void) printk(KERN_INFO "%s\n", igbvf_copyright); ret = pci_register_driver(&igbvf_driver); - igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + pm_qos_add_request(&igbvf_driver_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); return ret; } @@ -2918,8 +2918,7 @@ module_init(igbvf_init_module); static void __exit igbvf_exit_module(void) { pci_unregister_driver(&igbvf_driver); - pm_qos_remove_request(igbvf_driver_pm_qos_req); - igbvf_driver_pm_qos_req = NULL; + pm_qos_remove_request(&igbvf_driver_pm_qos_req); } module_exit(igbvf_exit_module); diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 0bd4dfa59a8a..7f0d98b885bc 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -174,7 +174,7 @@ that only one external action is invoked at a time. 
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" -struct pm_qos_request_list *ipw2100_pm_qos_req; +struct pm_qos_request_list ipw2100_pm_qos_req; /* Debugging stuff */ #ifdef CONFIG_IPW2100_DEBUG @@ -1741,7 +1741,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) /* the ipw2100 hardware really doesn't want power management delays * longer than 175usec */ - pm_qos_update_request(ipw2100_pm_qos_req, 175); + pm_qos_update_request(&ipw2100_pm_qos_req, 175); /* If the interrupt is enabled, turn it off... */ spin_lock_irqsave(&priv->low_lock, flags); @@ -1889,7 +1889,7 @@ static void ipw2100_down(struct ipw2100_priv *priv) ipw2100_disable_interrupts(priv); spin_unlock_irqrestore(&priv->low_lock, flags); - pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE); + pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE); /* We have to signal any supplicant if we are disassociating */ if (associated) @@ -6669,8 +6669,8 @@ static int __init ipw2100_init(void) if (ret) goto out; - ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); #ifdef CONFIG_IPW2100_DEBUG ipw2100_debug_level = debug; ret = driver_create_file(&ipw2100_pci_driver.driver, @@ -6692,7 +6692,7 @@ static void __exit ipw2100_exit(void) &driver_attr_debug_level); #endif pci_unregister_driver(&ipw2100_pci_driver); - pm_qos_remove_request(ipw2100_pm_qos_req); + pm_qos_remove_request(&ipw2100_pm_qos_req); } module_init(ipw2100_init); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b21e4054c12c..2f22119b4b08 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -779,7 +779,7 @@ struct net_device { */ char name[IFNAMSIZ]; - struct pm_qos_request_list *pm_qos_req; + struct pm_qos_request_list pm_qos_req; /* device name hash chain */ struct hlist_node name_hlist; diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h index 8ba440e5eb7f..77cbddb3784c 100644 --- a/include/linux/pm_qos_params.h +++ b/include/linux/pm_qos_params.h @@ -1,8 +1,10 @@ +#ifndef _LINUX_PM_QOS_PARAMS_H +#define _LINUX_PM_QOS_PARAMS_H /* interface for the pm_qos_power infrastructure of the linux kernel. 
* * Mark Gross */ -#include +#include #include #include @@ -14,9 +16,12 @@ #define PM_QOS_NUM_CLASSES 4 #define PM_QOS_DEFAULT_VALUE -1 -struct pm_qos_request_list; +struct pm_qos_request_list { + struct plist_node list; + int pm_qos_class; +}; -struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value); +void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value); void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, s32 new_value); void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req); @@ -24,4 +29,6 @@ void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req); int pm_qos_request(int pm_qos_class); int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); +int pm_qos_request_active(struct pm_qos_request_list *req); +#endif diff --git a/include/sound/pcm.h b/include/sound/pcm.h index dd76cdede64d..6e3a29732dc4 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -366,7 +366,7 @@ struct snd_pcm_substream { int number; char name[32]; /* substream name */ int stream; /* stream (direction) */ - struct pm_qos_request_list *latency_pm_qos_req; /* pm_qos request */ + struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */ size_t buffer_bytes_max; /* limit ring buffer size */ struct snd_dma_buffer dma_buffer; unsigned int dma_buf_id; diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index db8e51d7f392..996a4dec5f96 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c @@ -30,7 +30,6 @@ /*#define DEBUG*/ #include -#include #include #include #include @@ -49,11 +48,6 @@ * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock * held, taken with _irqsave. One lock to rule them all */ -struct pm_qos_request_list { - struct plist_node list; - int pm_qos_class; -}; - enum pm_qos_type { PM_QOS_MAX, /* return the largest value */ PM_QOS_MIN /* return the smallest value */ @@ -210,6 +204,12 @@ int pm_qos_request(int pm_qos_class) } EXPORT_SYMBOL_GPL(pm_qos_request); +int pm_qos_request_active(struct pm_qos_request_list *req) +{ + return req->pm_qos_class != 0; +} +EXPORT_SYMBOL_GPL(pm_qos_request_active); + /** * pm_qos_add_request - inserts new qos request into the list * @pm_qos_class: identifies which list of qos request to us @@ -221,25 +221,23 @@ EXPORT_SYMBOL_GPL(pm_qos_request); * element as a handle for use in updating and removal. Call needs to save * this handle for later use. 
*/ -struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value) +void pm_qos_add_request(struct pm_qos_request_list *dep, + int pm_qos_class, s32 value) { - struct pm_qos_request_list *dep; - - dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL); - if (dep) { - struct pm_qos_object *o = pm_qos_array[pm_qos_class]; - int new_value; - - if (value == PM_QOS_DEFAULT_VALUE) - new_value = o->default_value; - else - new_value = value; - plist_node_init(&dep->list, new_value); - dep->pm_qos_class = pm_qos_class; - update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); - } + struct pm_qos_object *o = pm_qos_array[pm_qos_class]; + int new_value; - return dep; + if (pm_qos_request_active(dep)) { + WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n"); + return; + } + if (value == PM_QOS_DEFAULT_VALUE) + new_value = o->default_value; + else + new_value = value; + plist_node_init(&dep->list, new_value); + dep->pm_qos_class = pm_qos_class; + update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); } EXPORT_SYMBOL_GPL(pm_qos_add_request); @@ -262,6 +260,11 @@ void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, if (!pm_qos_req) /*guard against callers passing in null */ return; + if (!pm_qos_request_active(pm_qos_req)) { + WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n"); + return; + } + o = pm_qos_array[pm_qos_req->pm_qos_class]; if (new_value == PM_QOS_DEFAULT_VALUE) @@ -290,9 +293,14 @@ void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) return; /* silent return to keep pcm code cleaner */ + if (!pm_qos_request_active(pm_qos_req)) { + WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n"); + return; + } + o = pm_qos_array[pm_qos_req->pm_qos_class]; update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE); - kfree(pm_qos_req); + memset(pm_qos_req, 0, sizeof(*pm_qos_req)); } EXPORT_SYMBOL_GPL(pm_qos_remove_request); @@ -340,8 +348,12 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp) pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); if (pm_qos_class >= 0) { - filp->private_data = (void *) pm_qos_add_request(pm_qos_class, - PM_QOS_DEFAULT_VALUE); + struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req)); + if (!req) + return -ENOMEM; + + pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE); + filp->private_data = req; if (filp->private_data) return 0; @@ -353,8 +365,9 @@ static int pm_qos_power_release(struct inode *inode, struct file *filp) { struct pm_qos_request_list *req; - req = (struct pm_qos_request_list *)filp->private_data; + req = filp->private_data; pm_qos_remove_request(req); + kfree(req); return 0; } diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 303ac04ff6e4..a3b2a6479246 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -451,13 +451,11 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, snd_pcm_timer_resolution_change(substream); runtime->status->state = SNDRV_PCM_STATE_SETUP; - if (substream->latency_pm_qos_req) { - pm_qos_remove_request(substream->latency_pm_qos_req); - substream->latency_pm_qos_req = NULL; - } + if (pm_qos_request_active(&substream->latency_pm_qos_req)) + pm_qos_remove_request(&substream->latency_pm_qos_req); if ((usecs = period_to_usecs(runtime)) >= 0) - substream->latency_pm_qos_req = pm_qos_add_request( - PM_QOS_CPU_DMA_LATENCY, usecs); + pm_qos_add_request(&substream->latency_pm_qos_req, + PM_QOS_CPU_DMA_LATENCY, usecs); return 0; _error: /* 
hardware might be unuseable from this time, @@ -512,8 +510,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream) if (substream->ops->hw_free) result = substream->ops->hw_free(substream); runtime->status->state = SNDRV_PCM_STATE_OPEN; - pm_qos_remove_request(substream->latency_pm_qos_req); - substream->latency_pm_qos_req = NULL; + pm_qos_remove_request(&substream->latency_pm_qos_req); return result; } -- cgit v1.2.3 From ce4410116c5debfb0e049f5db4b5cd6211e05b80 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 7 Jul 2010 23:43:45 +0200 Subject: PM / Suspend: Fix ordering of calls in suspend error paths The ACPI suspend code calls suspend_nvs_free() at a wrong place, which may lead to a memory leak if there's an error executing acpi_pm_prepare(), because acpi_pm_finish() will not be called in that case. However, the root cause of this problem is the apparently confusing ordering of calls in suspend error paths that needs to be fixed. In addition to that, fix a typo in a label name in suspend.c. Signed-off-by: Rafael J. Wysocki Acked-by: Len Brown --- include/linux/suspend.h | 10 ++++++---- kernel/power/suspend.c | 9 ++++----- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index bf1bab7b059c..4af270ec2204 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -61,14 +61,15 @@ typedef int __bitwise suspend_state_t; * before device drivers' late suspend callbacks are executed. It returns * 0 on success or a negative error code otherwise, in which case the * system cannot enter the desired sleep state (@prepare_late(), @enter(), - * @wake(), and @finish() will not be called in that case). + * and @wake() will not be called in that case). * * @prepare_late: Finish preparing the platform for entering the system sleep * state indicated by @begin(). * @prepare_late is called before disabling nonboot CPUs and after * device drivers' late suspend callbacks have been executed. It returns * 0 on success or a negative error code otherwise, in which case the - * system cannot enter the desired sleep state (@enter() and @wake()). + * system cannot enter the desired sleep state (@enter() will not be + * executed). * * @enter: Enter the system sleep state indicated by @begin() or represented by * the argument if @begin() is not implemented. @@ -81,14 +82,15 @@ typedef int __bitwise suspend_state_t; * resume callbacks are executed. * This callback is optional, but should be implemented by the platforms * that implement @prepare_late(). If implemented, it is always called - * after @enter(), even if @enter() fails. + * after @prepare_late and @enter(), even if one of them fails. * * @finish: Finish wake-up of the platform. * @finish is called right prior to calling device drivers' regular suspend * callbacks. * This callback is optional, but should be implemented by the platforms * that implement @prepare(). If implemented, it is always called after - * @enter() and @wake(), if implemented, even if any of them fails. + * @enter() and @wake(), even if any of them fails. It is executed after + * a failing @prepare. 
* * @end: Called by the PM core right after resuming devices, to indicate to * the platform that the system has returned to the working state or diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 5f8d09f94325..7335952ee473 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -136,19 +136,19 @@ static int suspend_enter(suspend_state_t state) if (suspend_ops->prepare) { error = suspend_ops->prepare(); if (error) - return error; + goto Platform_finish; } error = dpm_suspend_noirq(PMSG_SUSPEND); if (error) { printk(KERN_ERR "PM: Some devices failed to power down\n"); - goto Platfrom_finish; + goto Platform_finish; } if (suspend_ops->prepare_late) { error = suspend_ops->prepare_late(); if (error) - goto Power_up_devices; + goto Platform_wake; } if (suspend_test(TEST_PLATFORM)) @@ -180,10 +180,9 @@ static int suspend_enter(suspend_state_t state) if (suspend_ops->wake) suspend_ops->wake(); - Power_up_devices: dpm_resume_noirq(PMSG_RESUME); - Platfrom_finish: + Platform_finish: if (suspend_ops->finish) suspend_ops->finish(); -- cgit v1.2.3 From 8d4b9d1bfef117862a2889dec4dac227068544c9 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 19 Jul 2010 02:01:06 +0200 Subject: PM / Runtime: Add runtime PM statistics (v3) In order for PowerTOP to be able to report how well the new runtime PM is working for the various drivers, the kernel needs to export some basic statistics in sysfs. This patch adds two sysfs files in the runtime PM domain that expose the total time a device has been active, and the time a device has been suspended. With this PowerTOP can compute the activity percentage Active %age = 100 * (delta active) / (delta active + delta suspended) and present the information to the user. I've written the PowerTOP code (slated for version 1.12) already, and the output looks like this: Runtime Device Power Management statistics Active Device name 10.0% 06:00.0 Ethernet controller: Realtek Semiconductor Co., Ltd. RTL8101E/RTL8102E PCI Express Fast Ethernet controller [version 2: fix stat update bugs noticed by Alan Stern] [version 3: rebase to -next and move the sysfs declaration] Signed-off-by: Arjan van de Ven Signed-off-by: Rafael J. Wysocki --- drivers/base/power/runtime.c | 54 ++++++++++++++++++++++++++++++++++++++------ drivers/base/power/sysfs.c | 30 ++++++++++++++++++++++++ include/linux/pm.h | 6 +++++ 3 files changed, 83 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index b0ec0e9f27e9..b78c401ffa73 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -123,6 +123,45 @@ int pm_runtime_idle(struct device *dev) } EXPORT_SYMBOL_GPL(pm_runtime_idle); + +/** + * update_pm_runtime_accounting - Update the time accounting of power states + * @dev: Device to update the accounting for + * + * In order to be able to have time accounting of the various power states + * (as used by programs such as PowerTOP to show the effectiveness of runtime + * PM), we need to track the time spent in each state. + * update_pm_runtime_accounting must be called each time before the + * runtime_status field is updated, to account the time in the old state + * correctly. 
+ */ +void update_pm_runtime_accounting(struct device *dev) +{ + unsigned long now = jiffies; + int delta; + + delta = now - dev->power.accounting_timestamp; + + if (delta < 0) + delta = 0; + + dev->power.accounting_timestamp = now; + + if (dev->power.disable_depth > 0) + return; + + if (dev->power.runtime_status == RPM_SUSPENDED) + dev->power.suspended_jiffies += delta; + else + dev->power.active_jiffies += delta; +} + +static void __update_runtime_status(struct device *dev, enum rpm_status status) +{ + update_pm_runtime_accounting(dev); + dev->power.runtime_status = status; +} + /** * __pm_runtime_suspend - Carry out run-time suspend of given device. * @dev: Device to suspend. @@ -197,7 +236,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) goto repeat; } - dev->power.runtime_status = RPM_SUSPENDING; + __update_runtime_status(dev, RPM_SUSPENDING); dev->power.deferred_resume = false; if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { @@ -228,7 +267,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) } if (retval) { - dev->power.runtime_status = RPM_ACTIVE; + __update_runtime_status(dev, RPM_ACTIVE); if (retval == -EAGAIN || retval == -EBUSY) { if (dev->power.timer_expires == 0) notify = true; @@ -237,7 +276,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) pm_runtime_cancel_pending(dev); } } else { - dev->power.runtime_status = RPM_SUSPENDED; + __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); if (dev->parent) { @@ -381,7 +420,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) goto repeat; } - dev->power.runtime_status = RPM_RESUMING; + __update_runtime_status(dev, RPM_RESUMING); if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) { spin_unlock_irq(&dev->power.lock); @@ -411,10 +450,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) } if (retval) { - dev->power.runtime_status = RPM_SUSPENDED; + __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_cancel_pending(dev); } else { - dev->power.runtime_status = RPM_ACTIVE; + __update_runtime_status(dev, RPM_ACTIVE); if (parent) atomic_inc(&parent->power.child_count); } @@ -848,7 +887,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) } out_set: - dev->power.runtime_status = status; + __update_runtime_status(dev, status); dev->power.runtime_error = 0; out: spin_unlock_irqrestore(&dev->power.lock, flags); @@ -1077,6 +1116,7 @@ void pm_runtime_init(struct device *dev) dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; dev->power.deferred_resume = false; + dev->power.accounting_timestamp = jiffies; INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 1eca50c8e7ca..e56b4388fe61 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "power.h" /* @@ -111,6 +112,33 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr, static DEVICE_ATTR(control, 0644, control_show, control_store); +static ssize_t rtpm_active_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + spin_lock_irq(&dev->power.lock); + update_pm_runtime_accounting(dev); + ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies)); + spin_unlock_irq(&dev->power.lock); + return ret; +} + +static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL); + +static ssize_t 
rtpm_suspended_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + spin_lock_irq(&dev->power.lock); + update_pm_runtime_accounting(dev); + ret = sprintf(buf, "%i\n", + jiffies_to_msecs(dev->power.suspended_jiffies)); + spin_unlock_irq(&dev->power.lock); + return ret; +} + +static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL); + static ssize_t rtpm_status_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -254,6 +282,8 @@ static struct attribute * power_attrs[] = { #ifdef CONFIG_PM_RUNTIME &dev_attr_control.attr, &dev_attr_runtime_status.attr, + &dev_attr_runtime_suspended_time.attr, + &dev_attr_runtime_active_time.attr, #endif &dev_attr_wakeup.attr, #ifdef CONFIG_PM_SLEEP diff --git a/include/linux/pm.h b/include/linux/pm.h index b417fc46f3fc..52e8c55ff314 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -477,9 +477,15 @@ struct dev_pm_info { enum rpm_request request; enum rpm_status runtime_status; int runtime_error; + unsigned long active_jiffies; + unsigned long suspended_jiffies; + unsigned long accounting_timestamp; #endif }; +extern void update_pm_runtime_accounting(struct device *dev); + + /* * The PM_EVENT_ messages are also used by drivers implementing the legacy * suspend framework, based on the ->suspend() and ->resume() callbacks common -- cgit v1.2.3 From 7f8275d0d660c146de6ee3017e1e2e594c49e820 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 19 Jul 2010 14:56:17 +1000 Subject: mm: add context argument to shrinker callback The current shrinker implementation requires the registered callback to have global state to work from. This makes it difficult to shrink caches that are not global (e.g. per-filesystem caches). Pass the shrinker structure to the callback so that users can embed the shrinker structure in the context the shrinker needs to operate on and get back to it in the callback via container_of(). 
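For illustration only (not part of this patch), a per-filesystem cache could embed the shrinker and recover its own context in the callback roughly as below; the myfs_* names and helpers are hypothetical, only the shrinker API shown matches this change:

struct myfs_cache {
	struct list_head	lru;
	spinlock_t		lock;
	struct shrinker		shrinker;	/* embedded per instance */
};

static int myfs_cache_shrink(struct shrinker *shrink, int nr_to_scan,
			     gfp_t gfp_mask)
{
	/* Get back to the per-filesystem cache via container_of(). */
	struct myfs_cache *cache = container_of(shrink, struct myfs_cache,
						shrinker);

	if (nr_to_scan)
		myfs_cache_prune(cache, nr_to_scan);	/* hypothetical helper */

	return myfs_cache_count(cache);			/* hypothetical helper */
}

/* Registration, e.g. at mount time (sketch): */
	cache->shrinker.shrink = myfs_cache_shrink;
	cache->shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&cache->shrinker);
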
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- arch/x86/kvm/mmu.c | 2 +- drivers/gpu/drm/i915/i915_gem.c | 2 +- fs/dcache.c | 2 +- fs/gfs2/glock.c | 2 +- fs/gfs2/quota.c | 2 +- fs/gfs2/quota.h | 2 +- fs/inode.c | 2 +- fs/mbcache.c | 5 +++-- fs/nfs/dir.c | 2 +- fs/nfs/internal.h | 3 ++- fs/quota/dquot.c | 2 +- fs/ubifs/shrinker.c | 2 +- fs/ubifs/ubifs.h | 2 +- fs/xfs/linux-2.6/xfs_buf.c | 5 +++-- fs/xfs/linux-2.6/xfs_sync.c | 1 + fs/xfs/quota/xfs_qm.c | 7 +++++-- include/linux/mm.h | 2 +- mm/vmscan.c | 8 +++++--- 18 files changed, 31 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 3699613e8830..b1ed0a1a5913 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2926,7 +2926,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm) return kvm_mmu_zap_page(kvm, page) + 1; } -static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask) +static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) { struct kvm *kvm; struct kvm *kvm_freed = NULL; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8757ecf6e96b..e7018708cc31 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4978,7 +4978,7 @@ i915_gpu_is_active(struct drm_device *dev) } static int -i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask) +i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) { drm_i915_private_t *dev_priv, *next_dev; struct drm_i915_gem_object *obj_priv, *next_obj; diff --git a/fs/dcache.c b/fs/dcache.c index c8c78ba07827..86d4db15473e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -896,7 +896,7 @@ EXPORT_SYMBOL(shrink_dcache_parent); * * In this case we return -1 to tell the caller that we baled. 
*/ -static int shrink_dcache_memory(int nr, gfp_t gfp_mask) +static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) { if (nr) { if (!(gfp_mask & __GFP_FS)) diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index dbab3fdc2582..0898f3ec8212 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1358,7 +1358,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) } -static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) +static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) { struct gfs2_glock *gl; int may_demote; diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index b256d6f24288..8f02d3db8f42 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list); static atomic_t qd_lru_count = ATOMIC_INIT(0); static DEFINE_SPINLOCK(qd_lru_lock); -int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask) +int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) { struct gfs2_quota_data *qd; struct gfs2_sbd *sdp; diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index 195f60c8bd14..e7d236ca48bd 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) return ret; } -extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); +extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask); extern const struct quotactl_ops gfs2_quotactl_ops; #endif /* __QUOTA_DOT_H__ */ diff --git a/fs/inode.c b/fs/inode.c index 2bee20ae3d65..722860b323a9 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -512,7 +512,7 @@ static void prune_icache(int nr_to_scan) * This function is passed the number of inodes to scan, and it returns the * total number of remaining possibly-reclaimable inodes. */ -static int shrink_icache_memory(int nr, gfp_t gfp_mask) +static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) { if (nr) { /* diff --git a/fs/mbcache.c b/fs/mbcache.c index ec88ff3d04a9..e28f21b95344 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -115,7 +115,7 @@ mb_cache_indexes(struct mb_cache *cache) * What the mbcache registers as to get shrunk dynamically. */ -static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask); +static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); static struct shrinker mb_cache_shrinker = { .shrink = mb_cache_shrink_fn, @@ -191,13 +191,14 @@ forget: * This function is called by the kernel memory management when memory * gets low. * + * @shrink: (ignored) * @nr_to_scan: Number of objects to scan * @gfp_mask: (ignored) * * Returns the number of objects which are present in the cache. 
*/ static int -mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask) +mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) { LIST_HEAD(free_list); struct list_head *l, *ltmp; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 782b431ef91c..e60416d3f818 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1710,7 +1710,7 @@ static void nfs_access_free_list(struct list_head *head) } } -int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) +int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) { LIST_HEAD(head); struct nfs_inode *nfsi; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index d8bd619e386c..e70f44b9b3f4 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[]; void nfs_close_context(struct nfs_open_context *ctx, int is_sync); /* dir.c */ -extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask); +extern int nfs_access_cache_shrinker(struct shrinker *shrink, + int nr_to_scan, gfp_t gfp_mask); /* inode.c */ extern struct workqueue_struct *nfsiod_workqueue; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 12c233da1b6b..437d2ca2de97 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -676,7 +676,7 @@ static void prune_dqcache(int count) * This is called from kswapd when we think we need some * more memory */ -static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) +static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) { if (nr) { spin_lock(&dq_list_lock); diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c index 02feb59cefca..0b201114a5ad 100644 --- a/fs/ubifs/shrinker.c +++ b/fs/ubifs/shrinker.c @@ -277,7 +277,7 @@ static int kick_a_thread(void) return 0; } -int ubifs_shrinker(int nr, gfp_t gfp_mask) +int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask) { int freed, contention = 0; long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 2eef553d50c8..04310878f449 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot); int ubifs_tnc_end_commit(struct ubifs_info *c); /* shrinker.c */ -int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask); +int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); /* commit.c */ int ubifs_bg_thread(void *info); diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 649ade8ef598..2ee3f7a60163 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -45,7 +45,7 @@ static kmem_zone_t *xfs_buf_zone; STATIC int xfsbufd(void *); -STATIC int xfsbufd_wakeup(int, gfp_t); +STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t); STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); static struct shrinker xfs_buf_shake = { .shrink = xfsbufd_wakeup, @@ -340,7 +340,7 @@ _xfs_buf_lookup_pages( __func__, gfp_mask); XFS_STATS_INC(xb_page_retries); - xfsbufd_wakeup(0, gfp_mask); + xfsbufd_wakeup(NULL, 0, gfp_mask); congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; } @@ -1762,6 +1762,7 @@ xfs_buf_runall_queues( STATIC int xfsbufd_wakeup( + struct shrinker *shrink, int priority, gfp_t mask) { diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index ef7f0218bccb..be375827af98 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -838,6 +838,7 @@ static struct rw_semaphore xfs_mount_list_lock; static int xfs_reclaim_inode_shrink( + struct 
shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) { diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 8c117ff2e3ab..67c018392d62 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -69,7 +69,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); -STATIC int xfs_qm_shake(int, gfp_t); +STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t); static struct shrinker xfs_qm_shaker = { .shrink = xfs_qm_shake, @@ -2117,7 +2117,10 @@ xfs_qm_shake_freelist( */ /* ARGSUSED */ STATIC int -xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask) +xfs_qm_shake( + struct shrinker *shrink, + int nr_to_scan, + gfp_t gfp_mask) { int ndqused, nfree, n; diff --git a/include/linux/mm.h b/include/linux/mm.h index b969efb03787..a2b48041b910 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -999,7 +999,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) * querying the cache size, so a fastpath for that case is appropriate. */ struct shrinker { - int (*shrink)(int nr_to_scan, gfp_t gfp_mask); + int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask); int seeks; /* seeks to recreate an obj */ /* These are for internal use */ diff --git a/mm/vmscan.c b/mm/vmscan.c index 9c7e57cc63a3..199fa436c0dd 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -213,8 +213,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, list_for_each_entry(shrinker, &shrinker_list, list) { unsigned long long delta; unsigned long total_scan; - unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask); + unsigned long max_pass; + max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask); delta = (4 * scanned) / shrinker->seeks; delta *= max_pass; do_div(delta, lru_pages + 1); @@ -242,8 +243,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, int shrink_ret; int nr_before; - nr_before = (*shrinker->shrink)(0, gfp_mask); - shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask); + nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask); + shrink_ret = (*shrinker->shrink)(shrinker, this_scan, + gfp_mask); if (shrink_ret == -1) break; if (shrink_ret < nr_before) -- cgit v1.2.3 From e15bacbebb9dcc95f148f28dfc83a6d5e48b60b8 Mon Sep 17 00:00:00 2001 From: Dan Kruchinin Date: Wed, 14 Jul 2010 14:31:57 +0400 Subject: padata: Make two separate cpumasks The aim of this patch is to make two separate cpumasks for padata parallel and serial workers respectively. It allows user to make more thin and sophisticated configurations of padata framework. For example user may bind parallel and serial workers to non-intersecting CPU groups to gain better performance. Also each padata instance has notifiers chain for its cpumasks now. If either parallel or serial or both masks were changed all interested subsystems will get notification about that. It's especially useful if padata user uses algorithm for callback CPU selection according to serial cpumask. 
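A minimal sketch of how a padata user might consume such a notification, using the interfaces introduced by this patch (the my_* names are hypothetical; pcrypt below follows the same pattern):

static int my_cpumask_change_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct padata_instance *pinst = data;
	cpumask_var_t new_serial;

	/* Only the serial (callback) cpumask matters to this user. */
	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	if (!alloc_cpumask_var(&new_serial, GFP_KERNEL))
		return -ENOMEM;

	padata_get_cpumask(pinst, PADATA_CPU_SERIAL, new_serial);
	/* ... update the user's private copy of the callback cpumask ... */
	free_cpumask_var(new_serial);

	return 0;
}

static struct notifier_block my_nblock = {
	.notifier_call = my_cpumask_change_notify,
};

/* After allocating the instance (sketch): */
	padata_register_cpumask_notifier(pinst, &my_nblock);
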
Signed-off-by: Dan Kruchinin Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 191 ++++++++++++++------ include/linux/padata.h | 116 ++++++++---- kernel/padata.c | 471 ++++++++++++++++++++++++++++++++++++------------- 3 files changed, 564 insertions(+), 214 deletions(-) (limited to 'include') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 6036b6de9079..c9662e25595e 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -24,12 +24,38 @@ #include #include #include +#include #include -static struct padata_instance *pcrypt_enc_padata; -static struct padata_instance *pcrypt_dec_padata; -static struct workqueue_struct *encwq; -static struct workqueue_struct *decwq; +struct pcrypt_instance { + struct padata_instance *pinst; + struct workqueue_struct *wq; + + /* + * Cpumask for callback CPUs. It should be + * equal to serial cpumask of corresponding padata instance, + * so it is updated when padata notifies us about serial + * cpumask change. + * + * cb_cpumask is protected by RCU. This fact prevents us from + * using cpumask_var_t directly because the actual type of + * cpumsak_var_t depends on kernel configuration(particularly on + * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration + * cpumask_var_t may be either a pointer to the struct cpumask + * or a variable allocated on the stack. Thus we can not safely use + * cpumask_var_t with RCU operations such as rcu_assign_pointer or + * rcu_dereference. So cpumask_var_t is wrapped with struct + * pcrypt_cpumask which makes possible to use it with RCU. + */ + struct pcrypt_cpumask { + cpumask_var_t mask; + } *cb_cpumask; + struct notifier_block nblock; +}; + +static struct pcrypt_instance pencrypt; +static struct pcrypt_instance pdecrypt; + struct pcrypt_instance_ctx { struct crypto_spawn spawn; @@ -42,25 +68,29 @@ struct pcrypt_aead_ctx { }; static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, - struct padata_instance *pinst) + struct pcrypt_instance *pcrypt) { unsigned int cpu_index, cpu, i; + struct pcrypt_cpumask *cpumask; cpu = *cb_cpu; - if (cpumask_test_cpu(cpu, cpu_active_mask)) + rcu_read_lock_bh(); + cpumask = rcu_dereference(pcrypt->cb_cpumask); + if (cpumask_test_cpu(cpu, cpumask->mask)) goto out; - cpu_index = cpu % cpumask_weight(cpu_active_mask); + cpu_index = cpu % cpumask_weight(cpumask->mask); - cpu = cpumask_first(cpu_active_mask); + cpu = cpumask_first(cpumask->mask); for (i = 0; i < cpu_index; i++) - cpu = cpumask_next(cpu, cpu_active_mask); + cpu = cpumask_next(cpu, cpumask->mask); *cb_cpu = cpu; out: - return padata_do_parallel(pinst, padata, cpu); + rcu_read_unlock_bh(); + return padata_do_parallel(pcrypt->pinst, padata, cpu); } static int pcrypt_aead_setkey(struct crypto_aead *parent, @@ -142,7 +172,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_assoc(creq, req->assoc, req->assoclen); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata); + err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); if (!err) return -EINPROGRESS; @@ -184,7 +214,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req) req->cryptlen, req->iv); aead_request_set_assoc(creq, req->assoc, req->assoclen); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata); + err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); if (!err) return -EINPROGRESS; @@ -228,7 +258,7 @@ static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen); 
aead_givcrypt_set_giv(creq, req->giv, req->seq); - err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata); + err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); if (!err) return -EINPROGRESS; @@ -370,6 +400,88 @@ static void pcrypt_free(struct crypto_instance *inst) kfree(inst); } +static int pcrypt_cpumask_change_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct pcrypt_instance *pcrypt; + struct pcrypt_cpumask *new_mask, *old_mask; + + if (!(val & PADATA_CPU_SERIAL)) + return 0; + + pcrypt = container_of(self, struct pcrypt_instance, nblock); + new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL); + if (!new_mask) + return -ENOMEM; + if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) { + kfree(new_mask); + return -ENOMEM; + } + + old_mask = pcrypt->cb_cpumask; + + padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, new_mask->mask); + rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); + synchronize_rcu_bh(); + + free_cpumask_var(old_mask->mask); + kfree(old_mask); + return 0; +} + +static int __pcrypt_init_instance(struct pcrypt_instance *pcrypt, + const char *name) +{ + int ret = -ENOMEM; + struct pcrypt_cpumask *mask; + + pcrypt->wq = create_workqueue(name); + if (!pcrypt->wq) + goto err; + + pcrypt->pinst = padata_alloc(pcrypt->wq); + if (!pcrypt->pinst) + goto err_destroy_workqueue; + + mask = kmalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) + goto err_free_padata; + if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) { + kfree(mask); + goto err_free_padata; + } + + padata_get_cpumask(pcrypt->pinst, PADATA_CPU_SERIAL, mask->mask); + rcu_assign_pointer(pcrypt->cb_cpumask, mask); + + pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify; + ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); + if (ret) + goto err_free_cpumask; + + return ret; +err_free_cpumask: + free_cpumask_var(mask->mask); + kfree(mask); +err_free_padata: + padata_free(pcrypt->pinst); +err_destroy_workqueue: + destroy_workqueue(pcrypt->wq); +err: + return ret; +} + +static void __pcrypt_deinit_instance(struct pcrypt_instance *pcrypt) +{ + free_cpumask_var(pcrypt->cb_cpumask->mask); + kfree(pcrypt->cb_cpumask); + + padata_stop(pcrypt->pinst); + padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); + destroy_workqueue(pcrypt->wq); + padata_free(pcrypt->pinst); +} + static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .alloc = pcrypt_alloc, @@ -379,60 +491,31 @@ static struct crypto_template pcrypt_tmpl = { static int __init pcrypt_init(void) { - int err = -ENOMEM; - encwq = create_workqueue("pencrypt"); - if (!encwq) - goto err; - - decwq = create_workqueue("pdecrypt"); - if (!decwq) - goto err_destroy_encwq; - - - pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq); - if (!pcrypt_enc_padata) - goto err_destroy_decwq; - - pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq); - if (!pcrypt_dec_padata) - goto err_free_enc_padata; + int err; - err = padata_start(pcrypt_enc_padata); + err = __pcrypt_init_instance(&pencrypt, "pencrypt"); if (err) - goto err_free_dec_padata; + goto err; - err = padata_start(pcrypt_dec_padata); + err = __pcrypt_init_instance(&pdecrypt, "pdecrypt"); if (err) - goto err_free_dec_padata; - - return crypto_register_template(&pcrypt_tmpl); - -err_free_dec_padata: - padata_free(pcrypt_dec_padata); + goto err_deinit_pencrypt; -err_free_enc_padata: - padata_free(pcrypt_enc_padata); + padata_start(pencrypt.pinst); + padata_start(pdecrypt.pinst); -err_destroy_decwq: - destroy_workqueue(decwq); - 
-err_destroy_encwq: - destroy_workqueue(encwq); + return crypto_register_template(&pcrypt_tmpl); +err_deinit_pencrypt: + __pcrypt_deinit_instance(&pencrypt); err: return err; } static void __exit pcrypt_exit(void) { - padata_stop(pcrypt_enc_padata); - padata_stop(pcrypt_dec_padata); - - destroy_workqueue(encwq); - destroy_workqueue(decwq); - - padata_free(pcrypt_enc_padata); - padata_free(pcrypt_dec_padata); + __pcrypt_deinit_instance(&pencrypt); + __pcrypt_deinit_instance(&pdecrypt); crypto_unregister_template(&pcrypt_tmpl); } diff --git a/include/linux/padata.h b/include/linux/padata.h index 8844b851191e..621e7736690c 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -25,6 +25,10 @@ #include #include #include +#include + +#define PADATA_CPU_SERIAL 0x01 +#define PADATA_CPU_PARALLEL 0x02 /** * struct padata_priv - Embedded to the users data structure. @@ -59,7 +63,20 @@ struct padata_list { }; /** - * struct padata_queue - The percpu padata queues. +* struct padata_serial_queue - The percpu padata serial queue +* +* @serial: List to wait for serialization after reordering. +* @work: work struct for serialization. +* @pd: Backpointer to the internal control structure. +*/ +struct padata_serial_queue { + struct padata_list serial; + struct work_struct work; + struct parallel_data *pd; +}; + +/** + * struct padata_parallel_queue - The percpu padata parallel queue * * @parallel: List to wait for parallelization. * @reorder: List to wait for reordering after parallel processing. @@ -67,44 +84,52 @@ struct padata_list { * @pwork: work struct for parallelization. * @swork: work struct for serialization. * @pd: Backpointer to the internal control structure. + * @work: work struct for parallelization. + * @num_obj: Number of objects that are processed by this cpu. * @cpu_index: Index of the cpu. */ -struct padata_queue { - struct padata_list parallel; - struct padata_list reorder; - struct padata_list serial; - struct work_struct pwork; - struct work_struct swork; - struct parallel_data *pd; - int cpu_index; +struct padata_parallel_queue { + struct padata_list parallel; + struct padata_list reorder; + struct parallel_data *pd; + struct work_struct work; + atomic_t num_obj; + int cpu_index; }; + /** * struct parallel_data - Internal control structure, covers everything * that depends on the cpumask in use. * * @pinst: padata instance. - * @queue: percpu padata queues. + * @pqueue: percpu padata queues used for parallelization. + * @squeue: percpu padata queues used for serialuzation. * @seq_nr: The sequence number that will be attached to the next object. * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. * @max_seq_nr: Maximal used sequence number. - * @cpumask: cpumask in use. + * @cpumask: Contains two cpumasks: pcpu and cbcpu for + * parallel and serial workers respectively. * @lock: Reorder lock. * @processed: Number of already processed objects. * @timer: Reorder timer. 
*/ struct parallel_data { - struct padata_instance *pinst; - struct padata_queue *queue; - atomic_t seq_nr; - atomic_t reorder_objects; - atomic_t refcnt; - unsigned int max_seq_nr; - cpumask_var_t cpumask; - spinlock_t lock ____cacheline_aligned; - unsigned int processed; - struct timer_list timer; + struct padata_instance *pinst; + struct padata_parallel_queue *pqueue; + struct padata_serial_queue *squeue; + atomic_t seq_nr; + atomic_t reorder_objects; + atomic_t refcnt; + unsigned int max_seq_nr; + struct { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; + } cpumask; + spinlock_t lock ____cacheline_aligned; + unsigned int processed; + struct timer_list timer; }; /** @@ -113,32 +138,51 @@ struct parallel_data { * @cpu_notifier: cpu hotplug notifier. * @wq: The workqueue in use. * @pd: The internal control structure. - * @cpumask: User supplied cpumask. + * @cpumask: User supplied cpumask. Contains two cpumasks: pcpu and + * cbcpu for parallel and serial works respectivly. + * @cpumask_change_notifier: Notifiers chain for user-defined notify + * callbacks that will be called when either @pcpu or @cbcpu + * or both cpumasks change. * @lock: padata instance lock. * @flags: padata flags. */ struct padata_instance { - struct notifier_block cpu_notifier; - struct workqueue_struct *wq; - struct parallel_data *pd; - cpumask_var_t cpumask; - struct mutex lock; - u8 flags; -#define PADATA_INIT 1 -#define PADATA_RESET 2 -#define PADATA_INVALID 4 + struct notifier_block cpu_notifier; + struct workqueue_struct *wq; + struct parallel_data *pd; + struct { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; + } cpumask; + struct blocking_notifier_head cpumask_change_notifier; + struct mutex lock; + u8 flags; +#define PADATA_INIT 1 +#define PADATA_RESET 2 +#define PADATA_INVALID 4 }; -extern struct padata_instance *padata_alloc(const struct cpumask *cpumask, - struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc(struct workqueue_struct *wq); +extern struct padata_instance *__padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask); extern void padata_free(struct padata_instance *pinst); extern int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu); extern void padata_do_serial(struct padata_priv *padata); -extern int padata_set_cpumask(struct padata_instance *pinst, +extern int padata_get_cpumask(struct padata_instance *pinst, + int cpumask_type, struct cpumask *out_mask); +extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_add_cpu(struct padata_instance *pinst, int cpu); -extern int padata_remove_cpu(struct padata_instance *pinst, int cpu); +extern int __padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, + cpumask_var_t cbcpumask); +extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); +extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); extern int padata_start(struct padata_instance *pinst); extern void padata_stop(struct padata_instance *pinst); +extern int padata_register_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); +extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock); #endif diff --git a/kernel/padata.c b/kernel/padata.c index 450d67d394b0..84d0ca9dac9c 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -35,9 +35,9 @@ static int padata_index_to_cpu(struct 
parallel_data *pd, int cpu_index) { int cpu, target_cpu; - target_cpu = cpumask_first(pd->cpumask); + target_cpu = cpumask_first(pd->cpumask.pcpu); for (cpu = 0; cpu < cpu_index; cpu++) - target_cpu = cpumask_next(target_cpu, pd->cpumask); + target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); return target_cpu; } @@ -53,26 +53,27 @@ static int padata_cpu_hash(struct padata_priv *padata) * Hash the sequence numbers to the cpus by taking * seq_nr mod. number of cpus in use. */ - cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask); + cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu); return padata_index_to_cpu(pd, cpu_index); } -static void padata_parallel_worker(struct work_struct *work) +static void padata_parallel_worker(struct work_struct *parallel_work) { - struct padata_queue *queue; + struct padata_parallel_queue *pqueue; struct parallel_data *pd; struct padata_instance *pinst; LIST_HEAD(local_list); local_bh_disable(); - queue = container_of(work, struct padata_queue, pwork); - pd = queue->pd; + pqueue = container_of(parallel_work, + struct padata_parallel_queue, work); + pd = pqueue->pd; pinst = pd->pinst; - spin_lock(&queue->parallel.lock); - list_replace_init(&queue->parallel.list, &local_list); - spin_unlock(&queue->parallel.lock); + spin_lock(&pqueue->parallel.lock); + list_replace_init(&pqueue->parallel.list, &local_list); + spin_unlock(&pqueue->parallel.lock); while (!list_empty(&local_list)) { struct padata_priv *padata; @@ -94,7 +95,7 @@ static void padata_parallel_worker(struct work_struct *work) * @pinst: padata instance * @padata: object to be parallelized * @cb_cpu: cpu the serialization callback function will run on, - * must be in the cpumask of padata. + * must be in the serial cpumask of padata(i.e. cpumask.cbcpu). * * The parallelization callback function will run with BHs off. 
* Note: Every object which is parallelized by padata_do_parallel @@ -104,7 +105,7 @@ int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu) { int target_cpu, err; - struct padata_queue *queue; + struct padata_parallel_queue *queue; struct parallel_data *pd; rcu_read_lock_bh(); @@ -115,7 +116,7 @@ int padata_do_parallel(struct padata_instance *pinst, if (!(pinst->flags & PADATA_INIT)) goto out; - if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) + if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) goto out; err = -EBUSY; @@ -136,13 +137,13 @@ int padata_do_parallel(struct padata_instance *pinst, padata->seq_nr = atomic_inc_return(&pd->seq_nr); target_cpu = padata_cpu_hash(padata); - queue = per_cpu_ptr(pd->queue, target_cpu); + queue = per_cpu_ptr(pd->pqueue, target_cpu); spin_lock(&queue->parallel.lock); list_add_tail(&padata->list, &queue->parallel.list); spin_unlock(&queue->parallel.lock); - queue_work_on(target_cpu, pinst->wq, &queue->pwork); + queue_work_on(target_cpu, pinst->wq, &queue->work); out: rcu_read_unlock_bh(); @@ -172,11 +173,11 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) { int cpu, num_cpus; int next_nr, next_index; - struct padata_queue *queue, *next_queue; + struct padata_parallel_queue *queue, *next_queue; struct padata_priv *padata; struct padata_list *reorder; - num_cpus = cpumask_weight(pd->cpumask); + num_cpus = cpumask_weight(pd->cpumask.pcpu); /* * Calculate the percpu reorder queue and the sequence @@ -185,13 +186,13 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) next_nr = pd->processed; next_index = next_nr % num_cpus; cpu = padata_index_to_cpu(pd, next_index); - next_queue = per_cpu_ptr(pd->queue, cpu); + next_queue = per_cpu_ptr(pd->pqueue, cpu); if (unlikely(next_nr > pd->max_seq_nr)) { next_nr = next_nr - pd->max_seq_nr - 1; next_index = next_nr % num_cpus; cpu = padata_index_to_cpu(pd, next_index); - next_queue = per_cpu_ptr(pd->queue, cpu); + next_queue = per_cpu_ptr(pd->pqueue, cpu); pd->processed = 0; } @@ -215,7 +216,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) goto out; } - queue = per_cpu_ptr(pd->queue, smp_processor_id()); + queue = per_cpu_ptr(pd->pqueue, smp_processor_id()); if (queue->cpu_index == next_queue->cpu_index) { padata = ERR_PTR(-ENODATA); goto out; @@ -229,7 +230,7 @@ out: static void padata_reorder(struct parallel_data *pd) { struct padata_priv *padata; - struct padata_queue *queue; + struct padata_serial_queue *squeue; struct padata_instance *pinst = pd->pinst; /* @@ -268,13 +269,13 @@ static void padata_reorder(struct parallel_data *pd) return; } - queue = per_cpu_ptr(pd->queue, padata->cb_cpu); + squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu); - spin_lock(&queue->serial.lock); - list_add_tail(&padata->list, &queue->serial.list); - spin_unlock(&queue->serial.lock); + spin_lock(&squeue->serial.lock); + list_add_tail(&padata->list, &squeue->serial.list); + spin_unlock(&squeue->serial.lock); - queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork); + queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work); } spin_unlock_bh(&pd->lock); @@ -300,19 +301,19 @@ static void padata_reorder_timer(unsigned long arg) padata_reorder(pd); } -static void padata_serial_worker(struct work_struct *work) +static void padata_serial_worker(struct work_struct *serial_work) { - struct padata_queue *queue; + struct padata_serial_queue *squeue; struct parallel_data *pd; LIST_HEAD(local_list); local_bh_disable(); - queue = container_of(work, 
struct padata_queue, swork); - pd = queue->pd; + squeue = container_of(serial_work, struct padata_serial_queue, work); + pd = squeue->pd; - spin_lock(&queue->serial.lock); - list_replace_init(&queue->serial.list, &local_list); - spin_unlock(&queue->serial.lock); + spin_lock(&squeue->serial.lock); + list_replace_init(&squeue->serial.list, &local_list); + spin_unlock(&squeue->serial.lock); while (!list_empty(&local_list)) { struct padata_priv *padata; @@ -339,18 +340,18 @@ static void padata_serial_worker(struct work_struct *work) void padata_do_serial(struct padata_priv *padata) { int cpu; - struct padata_queue *queue; + struct padata_parallel_queue *pqueue; struct parallel_data *pd; pd = padata->pd; cpu = get_cpu(); - queue = per_cpu_ptr(pd->queue, cpu); + pqueue = per_cpu_ptr(pd->pqueue, cpu); - spin_lock(&queue->reorder.lock); + spin_lock(&pqueue->reorder.lock); atomic_inc(&pd->reorder_objects); - list_add_tail(&padata->list, &queue->reorder.list); - spin_unlock(&queue->reorder.lock); + list_add_tail(&padata->list, &pqueue->reorder.list); + spin_unlock(&pqueue->reorder.lock); put_cpu(); @@ -358,51 +359,88 @@ void padata_do_serial(struct padata_priv *padata) } EXPORT_SYMBOL(padata_do_serial); -/* Allocate and initialize the internal cpumask dependend resources. */ -static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, - const struct cpumask *cpumask) +static int padata_setup_cpumasks(struct parallel_data *pd, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) { - int cpu, cpu_index, num_cpus; - struct padata_queue *queue; - struct parallel_data *pd; + if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) + return -ENOMEM; - cpu_index = 0; + cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask); + if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { + free_cpumask_var(pd->cpumask.cbcpu); + return -ENOMEM; + } - pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); - if (!pd) - goto err; + cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask); + return 0; +} - pd->queue = alloc_percpu(struct padata_queue); - if (!pd->queue) - goto err_free_pd; +static void __padata_list_init(struct padata_list *pd_list) +{ + INIT_LIST_HEAD(&pd_list->list); + spin_lock_init(&pd_list->lock); +} - if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL)) - goto err_free_queue; +/* Initialize all percpu queues used by serial workers */ +static void padata_init_squeues(struct parallel_data *pd) +{ + int cpu; + struct padata_serial_queue *squeue; - cpumask_and(pd->cpumask, cpumask, cpu_active_mask); + for_each_cpu(cpu, pd->cpumask.cbcpu) { + squeue = per_cpu_ptr(pd->squeue, cpu); + squeue->pd = pd; + __padata_list_init(&squeue->serial); + INIT_WORK(&squeue->work, padata_serial_worker); + } +} - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); +/* Initialize all percpu queues used by parallel workers */ +static void padata_init_pqueues(struct parallel_data *pd) +{ + int cpu_index, num_cpus, cpu; + struct padata_parallel_queue *pqueue; - queue->pd = pd; + cpu_index = 0; + for_each_cpu(cpu, pd->cpumask.pcpu) { + pqueue = per_cpu_ptr(pd->pqueue, cpu); + pqueue->pd = pd; + pqueue->cpu_index = cpu_index; + + __padata_list_init(&pqueue->reorder); + __padata_list_init(&pqueue->parallel); + INIT_WORK(&pqueue->work, padata_parallel_worker); + atomic_set(&pqueue->num_obj, 0); + } - queue->cpu_index = cpu_index; - cpu_index++; + num_cpus = cpumask_weight(pd->cpumask.pcpu); + pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; +} - 
INIT_LIST_HEAD(&queue->reorder.list); - INIT_LIST_HEAD(&queue->parallel.list); - INIT_LIST_HEAD(&queue->serial.list); - spin_lock_init(&queue->reorder.lock); - spin_lock_init(&queue->parallel.lock); - spin_lock_init(&queue->serial.lock); +/* Allocate and initialize the internal cpumask dependend resources. */ +static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) +{ + struct parallel_data *pd; - INIT_WORK(&queue->pwork, padata_parallel_worker); - INIT_WORK(&queue->swork, padata_serial_worker); - } + pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); + if (!pd) + goto err; - num_cpus = cpumask_weight(pd->cpumask); - pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; + pd->pqueue = alloc_percpu(struct padata_parallel_queue); + if (!pd->pqueue) + goto err_free_pd; + + pd->squeue = alloc_percpu(struct padata_serial_queue); + if (!pd->squeue) + goto err_free_pqueue; + if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) + goto err_free_squeue; + padata_init_pqueues(pd); + padata_init_squeues(pd); setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); atomic_set(&pd->seq_nr, -1); atomic_set(&pd->reorder_objects, 0); @@ -412,8 +450,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, return pd; -err_free_queue: - free_percpu(pd->queue); +err_free_squeue: + free_percpu(pd->squeue); +err_free_pqueue: + free_percpu(pd->pqueue); err_free_pd: kfree(pd); err: @@ -422,8 +462,10 @@ err: static void padata_free_pd(struct parallel_data *pd) { - free_cpumask_var(pd->cpumask); - free_percpu(pd->queue); + free_cpumask_var(pd->cpumask.pcpu); + free_cpumask_var(pd->cpumask.cbcpu); + free_percpu(pd->pqueue); + free_percpu(pd->squeue); kfree(pd); } @@ -431,11 +473,12 @@ static void padata_free_pd(struct parallel_data *pd) static void padata_flush_queues(struct parallel_data *pd) { int cpu; - struct padata_queue *queue; + struct padata_parallel_queue *pqueue; + struct padata_serial_queue *squeue; - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - flush_work(&queue->pwork); + for_each_cpu(cpu, pd->cpumask.pcpu) { + pqueue = per_cpu_ptr(pd->pqueue, cpu); + flush_work(&pqueue->work); } del_timer_sync(&pd->timer); @@ -443,9 +486,9 @@ static void padata_flush_queues(struct parallel_data *pd) if (atomic_read(&pd->reorder_objects)) padata_reorder(pd); - for_each_cpu(cpu, pd->cpumask) { - queue = per_cpu_ptr(pd->queue, cpu); - flush_work(&queue->swork); + for_each_cpu(cpu, pd->cpumask.cbcpu) { + squeue = per_cpu_ptr(pd->squeue, cpu); + flush_work(&squeue->work); } BUG_ON(atomic_read(&pd->refcnt) != 0); @@ -475,21 +518,63 @@ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) { struct parallel_data *pd_old = pinst->pd; + int notification_mask = 0; pinst->flags |= PADATA_RESET; rcu_assign_pointer(pinst->pd, pd_new); synchronize_rcu(); + if (!pd_old) + goto out; - if (pd_old) { - padata_flush_queues(pd_old); - padata_free_pd(pd_old); - } + padata_flush_queues(pd_old); + if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) + notification_mask |= PADATA_CPU_PARALLEL; + if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) + notification_mask |= PADATA_CPU_SERIAL; + + padata_free_pd(pd_old); + if (notification_mask) + blocking_notifier_call_chain(&pinst->cpumask_change_notifier, + notification_mask, pinst); +out: pinst->flags &= ~PADATA_RESET; } +/** + * padata_register_cpumask_notifier - Registers a 
notifier that will be called + * if either pcpu or cbcpu or both cpumasks change. + * + * @pinst: A poineter to padata instance + * @nblock: A pointer to notifier block. + */ +int padata_register_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock) +{ + return blocking_notifier_chain_register(&pinst->cpumask_change_notifier, + nblock); +} +EXPORT_SYMBOL(padata_register_cpumask_notifier); + +/** + * padata_unregister_cpumask_notifier - Unregisters cpumask notifier + * registered earlier using padata_register_cpumask_notifier + * + * @pinst: A pointer to data instance. + * @nlock: A pointer to notifier block. + */ +int padata_unregister_cpumask_notifier(struct padata_instance *pinst, + struct notifier_block *nblock) +{ + return blocking_notifier_chain_unregister( + &pinst->cpumask_change_notifier, + nblock); +} +EXPORT_SYMBOL(padata_unregister_cpumask_notifier); + + /* If cpumask contains no active cpu, we mark the instance as invalid. */ static bool padata_validate_cpumask(struct padata_instance *pinst, const struct cpumask *cpumask) @@ -504,13 +589,82 @@ static bool padata_validate_cpumask(struct padata_instance *pinst, } /** - * padata_set_cpumask - set the cpumask that padata should use + * padata_get_cpumask: Fetch serial or parallel cpumask from the + * given padata instance and copy it to @out_mask + * + * @pinst: A pointer to padata instance + * @cpumask_type: Specifies which cpumask will be copied. + * Possible values are PADATA_CPU_SERIAL *or* PADATA_CPU_PARALLEL + * corresponding to serial and parallel cpumask respectively. + * @out_mask: A pointer to cpumask structure where selected + * cpumask will be copied. + */ +int padata_get_cpumask(struct padata_instance *pinst, + int cpumask_type, struct cpumask *out_mask) +{ + struct parallel_data *pd; + int ret = 0; + + rcu_read_lock_bh(); + pd = rcu_dereference(pinst->pd); + switch (cpumask_type) { + case PADATA_CPU_SERIAL: + cpumask_copy(out_mask, pd->cpumask.cbcpu); + break; + case PADATA_CPU_PARALLEL: + cpumask_copy(out_mask, pd->cpumask.pcpu); + break; + default: + ret = -EINVAL; + } + + rcu_read_unlock_bh(); + return ret; +} +EXPORT_SYMBOL(padata_get_cpumask); + +/** + * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value + * equivalent to @cpumask. * * @pinst: padata instance + * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding + * to parallel and serial cpumasks respectively. * @cpumask: the cpumask to use */ -int padata_set_cpumask(struct padata_instance *pinst, - cpumask_var_t cpumask) +int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, + cpumask_var_t cpumask) +{ + struct cpumask *serial_mask, *parallel_mask; + + switch (cpumask_type) { + case PADATA_CPU_PARALLEL: + serial_mask = pinst->cpumask.cbcpu; + parallel_mask = cpumask; + break; + case PADATA_CPU_SERIAL: + parallel_mask = pinst->cpumask.pcpu; + serial_mask = cpumask; + break; + default: + return -EINVAL; + } + + return __padata_set_cpumasks(pinst, parallel_mask, serial_mask); +} +EXPORT_SYMBOL(padata_set_cpumask); + +/** + * __padata_set_cpumasks - Set both parallel and serial cpumasks. The first + * one is used by parallel workers and the second one + * by the wokers doing serialization. 
+ * + * @pinst: padata instance + * @pcpumask: the cpumask to use for parallel workers + * @cbcpumask: the cpumsak to use for serial workers + */ +int __padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, cpumask_var_t cbcpumask) { int valid; int err = 0; @@ -518,7 +672,13 @@ int padata_set_cpumask(struct padata_instance *pinst, mutex_lock(&pinst->lock); - valid = padata_validate_cpumask(pinst, cpumask); + valid = padata_validate_cpumask(pinst, pcpumask); + if (!valid) { + __padata_stop(pinst); + goto out_replace; + } + + valid = padata_validate_cpumask(pinst, cbcpumask); if (!valid) { __padata_stop(pinst); goto out_replace; @@ -526,14 +686,15 @@ int padata_set_cpumask(struct padata_instance *pinst, get_online_cpus(); - pd = padata_alloc_pd(pinst, cpumask); + pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); if (!pd) { err = -ENOMEM; goto out; } out_replace: - cpumask_copy(pinst->cpumask, cpumask); + cpumask_copy(pinst->cpumask.pcpu, pcpumask); + cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); padata_replace(pinst, pd); @@ -546,41 +707,57 @@ out: mutex_unlock(&pinst->lock); return err; + } -EXPORT_SYMBOL(padata_set_cpumask); +EXPORT_SYMBOL(__padata_set_cpumasks); static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { struct parallel_data *pd; if (cpumask_test_cpu(cpu, cpu_active_mask)) { - pd = padata_alloc_pd(pinst, pinst->cpumask); + pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, + pinst->cpumask.cbcpu); if (!pd) return -ENOMEM; padata_replace(pinst, pd); - if (padata_validate_cpumask(pinst, pinst->cpumask)) + if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && + padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) __padata_start(pinst); } return 0; } -/** - * padata_add_cpu - add a cpu to the padata cpumask + /** + * padata_add_cpu - add a cpu to one or both(parallel and serial) + * padata cpumasks. * * @pinst: padata instance * @cpu: cpu to add + * @mask: bitmask of flags specifying to which cpumask @cpu shuld be added. + * The @mask may be any combination of the following flags: + * PADATA_CPU_SERIAL - serial cpumask + * PADATA_CPU_PARALLEL - parallel cpumask */ -int padata_add_cpu(struct padata_instance *pinst, int cpu) + +int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask) { int err; + if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) + return -EINVAL; + mutex_lock(&pinst->lock); get_online_cpus(); - cpumask_set_cpu(cpu, pinst->cpumask); + if (mask & PADATA_CPU_SERIAL) + cpumask_set_cpu(cpu, pinst->cpumask.cbcpu); + if (mask & PADATA_CPU_PARALLEL) + cpumask_set_cpu(cpu, pinst->cpumask.pcpu); + err = __padata_add_cpu(pinst, cpu); put_online_cpus(); @@ -596,13 +773,15 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) if (cpumask_test_cpu(cpu, cpu_online_mask)) { - if (!padata_validate_cpumask(pinst, pinst->cpumask)) { + if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || + !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) { __padata_stop(pinst); padata_replace(pinst, pd); goto out; } - pd = padata_alloc_pd(pinst, pinst->cpumask); + pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, + pinst->cpumask.cbcpu); if (!pd) return -ENOMEM; @@ -613,20 +792,32 @@ out: return 0; } -/** - * padata_remove_cpu - remove a cpu from the padata cpumask + /** + * padata_remove_cpu - remove a cpu from the one or both(serial and paralell) + * padata cpumasks. 
* * @pinst: padata instance * @cpu: cpu to remove + * @mask: bitmask specifying from which cpumask @cpu should be removed + * The @mask may be any combination of the following flags: + * PADATA_CPU_SERIAL - serial cpumask + * PADATA_CPU_PARALLEL - parallel cpumask */ -int padata_remove_cpu(struct padata_instance *pinst, int cpu) +int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask) { int err; + if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) + return -EINVAL; + mutex_lock(&pinst->lock); get_online_cpus(); - cpumask_clear_cpu(cpu, pinst->cpumask); + if (mask & PADATA_CPU_SERIAL) + cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu); + if (mask & PADATA_CPU_PARALLEL) + cpumask_clear_cpu(cpu, pinst->cpumask.pcpu); + err = __padata_remove_cpu(pinst, cpu); put_online_cpus(); @@ -672,6 +863,14 @@ void padata_stop(struct padata_instance *pinst) EXPORT_SYMBOL(padata_stop); #ifdef CONFIG_HOTPLUG_CPU + +static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu) +{ + return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) || + cpumask_test_cpu(cpu, pinst->cpumask.cbcpu); +} + + static int padata_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -684,7 +883,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); err = __padata_add_cpu(pinst, cpu); @@ -695,7 +894,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); err = __padata_remove_cpu(pinst, cpu); @@ -706,7 +905,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); __padata_remove_cpu(pinst, cpu); @@ -714,7 +913,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: - if (!cpumask_test_cpu(cpu, pinst->cpumask)) + if (!pinst_has_cpu(pinst, cpu)) break; mutex_lock(&pinst->lock); __padata_add_cpu(pinst, cpu); @@ -726,13 +925,29 @@ static int padata_cpu_callback(struct notifier_block *nfb, #endif /** - * padata_alloc - allocate and initialize a padata instance + * padata_alloc - Allocate and initialize padata instance. + * Use default cpumask(cpu_possible_mask) + * for serial and parallel workes. + * + * @wq: workqueue to use for the allocated padata instance + */ +struct padata_instance *padata_alloc(struct workqueue_struct *wq) +{ + return __padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); +} +EXPORT_SYMBOL(padata_alloc); + +/** + * __padata_alloc - allocate and initialize a padata instance + * and specify cpumasks for serial and parallel workers. 
* - * @cpumask: cpumask that padata uses for parallelization * @wq: workqueue to use for the allocated padata instance + * @pcpumask: cpumask that will be used for padata parallelization + * @cbcpumask: cpumask that will be used for padata serialization */ -struct padata_instance *padata_alloc(const struct cpumask *cpumask, - struct workqueue_struct *wq) +struct padata_instance *__padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) { struct padata_instance *pinst; struct parallel_data *pd = NULL; @@ -742,21 +957,26 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, goto err; get_online_cpus(); - - if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) + if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) + goto err_free_inst; + if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { + free_cpumask_var(pinst->cpumask.pcpu); goto err_free_inst; - - if (padata_validate_cpumask(pinst, cpumask)) { - pd = padata_alloc_pd(pinst, cpumask); - if (!pd) - goto err_free_mask; } + if (!padata_validate_cpumask(pinst, pcpumask) || + !padata_validate_cpumask(pinst, cbcpumask)) + goto err_free_masks; + + pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); + if (!pd) + goto err_free_masks; rcu_assign_pointer(pinst->pd, pd); pinst->wq = wq; - cpumask_copy(pinst->cpumask, cpumask); + cpumask_copy(pinst->cpumask.pcpu, pcpumask); + cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); pinst->flags = 0; @@ -768,19 +988,21 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, put_online_cpus(); + BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); mutex_init(&pinst->lock); return pinst; -err_free_mask: - free_cpumask_var(pinst->cpumask); +err_free_masks: + free_cpumask_var(pinst->cpumask.pcpu); + free_cpumask_var(pinst->cpumask.cbcpu); err_free_inst: kfree(pinst); put_online_cpus(); err: return NULL; } -EXPORT_SYMBOL(padata_alloc); +EXPORT_SYMBOL(__padata_alloc); /** * padata_free - free a padata instance @@ -795,7 +1017,8 @@ void padata_free(struct padata_instance *pinst) padata_stop(pinst); padata_free_pd(pinst->pd); - free_cpumask_var(pinst->cpumask); + free_cpumask_var(pinst->cpumask.pcpu); + free_cpumask_var(pinst->cpumask.cbcpu); kfree(pinst); } EXPORT_SYMBOL(padata_free); -- cgit v1.2.3 From 5e017dc3f8bc9e4a28983666e6bc00114a2018bb Mon Sep 17 00:00:00 2001 From: Dan Kruchinin Date: Wed, 14 Jul 2010 14:33:08 +0400 Subject: padata: Added sysfs primitives to padata subsystem Added sysfs primitives to padata subsystem. Now API user may embedded kobject each padata instance contains into any sysfs hierarchy. For now padata sysfs interface provides only two objects: serial_cpumask [RW] - cpumask for serial workers parallel_cpumask [RW] - cpumask for parallel workers Signed-off-by: Dan Kruchinin Signed-off-by: Herbert Xu --- include/linux/padata.h | 5 +- kernel/padata.c | 155 ++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 150 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/linux/padata.h b/include/linux/padata.h index 621e7736690c..293ad46ffced 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -26,6 +26,7 @@ #include #include #include +#include #define PADATA_CPU_SERIAL 0x01 #define PADATA_CPU_PARALLEL 0x02 @@ -142,7 +143,8 @@ struct parallel_data { * cbcpu for parallel and serial works respectivly. 
* @cpumask_change_notifier: Notifiers chain for user-defined notify * callbacks that will be called when either @pcpu or @cbcpu - * or both cpumasks change. + * or both cpumasks change. + * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. */ @@ -155,6 +157,7 @@ struct padata_instance { cpumask_var_t cbcpu; } cpumask; struct blocking_notifier_head cpumask_change_notifier; + struct kobject kobj; struct mutex lock; u8 flags; #define PADATA_INIT 1 diff --git a/kernel/padata.c b/kernel/padata.c index 84d0ca9dac9c..526f9ea2fcc8 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #define MAX_SEQ_NR (INT_MAX - NR_CPUS) @@ -924,6 +925,149 @@ static int padata_cpu_callback(struct notifier_block *nfb, } #endif +static void __padata_free(struct padata_instance *pinst) +{ +#ifdef CONFIG_HOTPLUG_CPU + unregister_hotcpu_notifier(&pinst->cpu_notifier); +#endif + + padata_stop(pinst); + padata_free_pd(pinst->pd); + free_cpumask_var(pinst->cpumask.pcpu); + free_cpumask_var(pinst->cpumask.cbcpu); + kfree(pinst); +} + +#define kobj2pinst(_kobj) \ + container_of(_kobj, struct padata_instance, kobj) +#define attr2pentry(_attr) \ + container_of(_attr, struct padata_sysfs_entry, attr) + +static void padata_sysfs_release(struct kobject *kobj) +{ + struct padata_instance *pinst = kobj2pinst(kobj); + __padata_free(pinst); +} + +struct padata_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct padata_instance *, struct attribute *, char *); + ssize_t (*store)(struct padata_instance *, struct attribute *, + const char *, size_t); +}; + +static ssize_t show_cpumask(struct padata_instance *pinst, + struct attribute *attr, char *buf) +{ + struct cpumask *cpumask; + ssize_t len; + + mutex_lock(&pinst->lock); + if (!strcmp(attr->name, "serial_cpumask")) + cpumask = pinst->cpumask.cbcpu; + else + cpumask = pinst->cpumask.pcpu; + + len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask), + nr_cpu_ids); + if (PAGE_SIZE - len < 2) + len = -EINVAL; + else + len += sprintf(buf + len, "\n"); + + mutex_unlock(&pinst->lock); + return len; +} + +static ssize_t store_cpumask(struct padata_instance *pinst, + struct attribute *attr, + const char *buf, size_t count) +{ + cpumask_var_t new_cpumask; + ssize_t ret; + int mask_type; + + if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL)) + return -ENOMEM; + + ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask), + nr_cpumask_bits); + if (ret < 0) + goto out; + + mask_type = !strcmp(attr->name, "serial_cpumask") ? 
+ PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL; + ret = padata_set_cpumask(pinst, mask_type, new_cpumask); + if (!ret) + ret = count; + +out: + free_cpumask_var(new_cpumask); + return ret; +} + +#define PADATA_ATTR_RW(_name, _show_name, _store_name) \ + static struct padata_sysfs_entry _name##_attr = \ + __ATTR(_name, 0644, _show_name, _store_name) +#define PADATA_ATTR_RO(_name, _show_name) \ + static struct padata_sysfs_entry _name##_attr = \ + __ATTR(_name, 0400, _show_name, NULL) + +PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask); +PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask); + +/* + * Padata sysfs provides the following objects: + * serial_cpumask [RW] - cpumask for serial workers + * parallel_cpumask [RW] - cpumask for parallel workers + */ +static struct attribute *padata_default_attrs[] = { + &serial_cpumask_attr.attr, + ¶llel_cpumask_attr.attr, + NULL, +}; + +static ssize_t padata_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct padata_instance *pinst; + struct padata_sysfs_entry *pentry; + ssize_t ret = -EIO; + + pinst = kobj2pinst(kobj); + pentry = attr2pentry(attr); + if (pentry->show) + ret = pentry->show(pinst, attr, buf); + + return ret; +} + +static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct padata_instance *pinst; + struct padata_sysfs_entry *pentry; + ssize_t ret = -EIO; + + pinst = kobj2pinst(kobj); + pentry = attr2pentry(attr); + if (pentry->show) + ret = pentry->store(pinst, attr, buf, count); + + return ret; +} + +static const struct sysfs_ops padata_sysfs_ops = { + .show = padata_sysfs_show, + .store = padata_sysfs_store, +}; + +static struct kobj_type padata_attr_type = { + .sysfs_ops = &padata_sysfs_ops, + .default_attrs = padata_default_attrs, + .release = padata_sysfs_release, +}; + /** * padata_alloc - Allocate and initialize padata instance. * Use default cpumask(cpu_possible_mask) @@ -989,6 +1133,7 @@ struct padata_instance *__padata_alloc(struct workqueue_struct *wq, put_online_cpus(); BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); + kobject_init(&pinst->kobj, &padata_attr_type); mutex_init(&pinst->lock); return pinst; @@ -1011,14 +1156,6 @@ EXPORT_SYMBOL(__padata_alloc); */ void padata_free(struct padata_instance *pinst) { -#ifdef CONFIG_HOTPLUG_CPU - unregister_hotcpu_notifier(&pinst->cpu_notifier); -#endif - - padata_stop(pinst); - padata_free_pd(pinst->pd); - free_cpumask_var(pinst->cpumask.pcpu); - free_cpumask_var(pinst->cpumask.cbcpu); - kfree(pinst); + kobject_put(&pinst->kobj); } EXPORT_SYMBOL(padata_free); -- cgit v1.2.3 From 7a2e3659b6ffbee4e742cd6f6a60359ad9148720 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Tue, 13 Jul 2010 05:53:44 -0400 Subject: reiserfs: typo comment fix Fix trivial typo in code comment (change adn for and), also change comment style for proper coding style. 
Signed-off-by: Davidlohr Bueso Signed-off-by: Jiri Kosina --- include/linux/reiserfs_fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 3b603f474186..ba394163dea1 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -360,7 +360,7 @@ int is_reiserfs_jr(struct reiserfs_super_block *rs); /* the spot for the super in versions 3.5 - 3.5.10 (inclusive) */ #define REISERFS_OLD_DISK_OFFSET_IN_BYTES (8 * 1024) -// reiserfs internal error code (used by search_by_key adn fix_nodes)) +/* reiserfs internal error code (used by search_by_key and fix_nodes)) */ #define CARRY_ON 0 #define REPEAT_SEARCH -1 #define IO_ERROR -2 -- cgit v1.2.3 From 3a343ee4509c982552b35fbc99d3213f3bb1acde Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 12 Jul 2010 19:28:27 +0200 Subject: HID: add HID_QUIRK_HIDINPUT_FORCE For devices with exotic HID report descriptors, it might be necessary to make the HID core force the registration of an input device. Make that possible by introducing a new quirk type. Signed-off-by: Daniel Mack Cc: Jiri Kosina Cc: Dmitry Torokhov Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 2 ++ include/linux/hid.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 866e54ec5fb2..7ccee899b59e 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1157,6 +1157,8 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); + if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE) + connect_mask |= HID_CONNECT_HIDINPUT_FORCE; if (hdev->bus != BUS_USB) connect_mask &= ~HID_CONNECT_HIDDEV; if (hid_hiddev(hdev)) diff --git a/include/linux/hid.h b/include/linux/hid.h index 895001f7f4b2..42a0f1d11365 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -311,6 +311,7 @@ struct hid_item { #define HID_QUIRK_HIDDEV_FORCE 0x00000010 #define HID_QUIRK_BADPAD 0x00000020 #define HID_QUIRK_MULTI_INPUT 0x00000040 +#define HID_QUIRK_HIDINPUT_FORCE 0x00000080 #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 -- cgit v1.2.3 From 772a2f9b488f4d27c314da5eeabde750b9ead41b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 15 Jul 2010 10:39:47 +0200 Subject: fb: handle allocation failure in alloc_apertures() If the kzalloc() fails we should return NULL. All the places that call alloc_apertures() check for this already. 
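For illustration only (not part of the patch; the pdev/info names and the PCI probe context are assumptions), this is the calling convention the fix relies on; every caller treats a NULL return as an allocation failure:

	info->apertures = alloc_apertures(1);
	if (!info->apertures)
		return -ENOMEM;		/* the kzalloc() inside alloc_apertures() failed */
	info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
	info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
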
Signed-off-by: Dan Carpenter Acked-by: James Simmons Acked-by: Marcin Slusarz Signed-off-by: Dave Airlie --- include/linux/fb.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/fb.h b/include/linux/fb.h index 8e5a9dfb76bf..e7445df44d6c 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -873,6 +873,8 @@ struct fb_info { static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct) + max_num * sizeof(struct aperture), GFP_KERNEL); + if (!a) + return NULL; a->count = max_num; return a; } -- cgit v1.2.3 From 07fca0e57fca925032526349f4370f97ed580cc9 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sat, 10 Jul 2010 08:35:00 +0200 Subject: tracing: Properly align linker defined symbols We define a number of symbols in the linker scipt like this: __start_syscalls_metadata = .; *(__syscalls_metadata) But we do not know the alignment of "." when we assign the __start_syscalls_metadata symbol. gcc started to uses bigger alignment for structs (32 bytes), so we saw situations where the linker due to alignment constraints increased the value of "." after the symbol assignment. This resulted in boot fails. Fix this by forcing a 32 byte alignment of "." before the assignment. This patch introduces the forced alignment for ftrace_events and syscalls_metadata. It may be required in more places. Reported-by: Zeev Tarantov Signed-off-by: Sam Ravnborg LKML-Reference: <20100710063459.GA14596@merkur.ravnborg.org> Cc: Frederic Weisbecker Signed-off-by: Steven Rostedt --- include/asm-generic/vmlinux.lds.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 48c5299cbf26..4b5902ad0d5d 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -63,6 +63,12 @@ /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . = ALIGN(8) +/* + * Align to a 32 byte boundary equal to the + * alignment gcc 4.5 uses for a struct + */ +#define STRUCT_ALIGN() . = ALIGN(32) + /* The actual configuration determine if the init/exit sections * are handled as text/data or they can be discarded (which * often happens at runtime) @@ -166,7 +172,11 @@ LIKELY_PROFILE() \ BRANCH_PROFILE() \ TRACE_PRINTKS() \ + \ + STRUCT_ALIGN(); \ FTRACE_EVENTS() \ + \ + STRUCT_ALIGN(); \ TRACE_SYSCALLS() /* -- cgit v1.2.3 From 844b9a8707f1fcf0482e0c52f44a555e799ccda6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 20 Jul 2010 13:24:34 -0700 Subject: vfs: fix RCU-lockdep false positive due to /proc If a single-threaded process does a file-descriptor operation, and some other process accesses that same file descriptor via /proc, the current rcu_dereference_check_fdtable() can give a false-positive RCU-lockdep splat due to the reference count being increased by the /proc access after the reference-count check in fget_light() but before the check in rcu_dereference_check_fdtable(). This commit prevents this false positive by checking for a single-threaded process. To avoid #include hell, this commit uses the wrapper for thread_group_empty(current) defined by rcu_my_thread_group_empty() provided in a separate commit. Located-by: Miles Lane Located-by: Eric Dumazet Signed-off-by: Paul E. 
McKenney Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fdtable.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 013dc529e95f..d147461bc271 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -61,7 +61,8 @@ struct files_struct { (rcu_dereference_check((fdtfd), \ rcu_read_lock_held() || \ lockdep_is_held(&(files)->file_lock) || \ - atomic_read(&(files)->count) == 1)) + atomic_read(&(files)->count) == 1 || \ + rcu_my_thread_group_empty())) #define files_fdtable(files) \ (rcu_dereference_check_fdtable((files), (files)->fdt)) -- cgit v1.2.3 From a6a1a095ec8ace2912fc280d371eee8ff5da5736 Mon Sep 17 00:00:00 2001 From: Doug Goldstein Date: Tue, 20 Jul 2010 15:22:25 -0700 Subject: include/linux/vgaarb.h: add missing part of include guard vgaarb.h was missing the #define of the #ifndef at the top for the guard to prevent multiple #include's from causing re-define errors Signed-off-by: Doug Goldstein Cc: Dave Airlie Cc: Jesse Barnes Signed-off-by: Andrew Morton Signed-off-by: Dave Airlie --- include/linux/vgaarb.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index c9a975976995..814f294d4cd0 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -29,6 +29,7 @@ */ #ifndef LINUX_VGA_H +#define LINUX_VGA_H #include -- cgit v1.2.3 From f8324e20f8289dffc646d64366332e05eaacab25 Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Tue, 20 Jul 2010 18:45:14 -0700 Subject: math-emu: correct test for downshifting fraction in _FP_FROM_INT() The kernel's math-emu code contains a macro _FP_FROM_INT() which is used to convert an integer to a raw normalized floating-point value. It does this basically in three steps: 1. Compute the exponent from the number of leading zero bits. 2. Downshift large fractions to put the MSB in the right position for normalized fractions. 3. Upshift small fractions to put the MSB in the right position. There is an boundary error in step 2, causing a fraction with its MSB exactly one bit above the normalized MSB position to not be downshifted. This results in a non-normalized raw float, which when packed becomes a massively inaccurate representation for that input. The impact of this depends on a number of arch-specific factors, but it is known to have broken emulation of FXTOD instructions on UltraSPARC III, which was originally reported as GCC bug 44631 . Any arch which uses math-emu to emulate conversions from integers to same-size floats may be affected. The fix is simple: the exponent comparison used to determine if the fraction should be downshifted must be "<=" not "<". I'm sending a kernel module to test this as a reply to this message. There are also SPARC user-space test cases in the GCC bug entry. Signed-off-by: Mikael Pettersson Signed-off-by: David S. 
Miller --- include/math-emu/op-common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h index fd882261225e..9696a5e2c437 100644 --- a/include/math-emu/op-common.h +++ b/include/math-emu/op-common.h @@ -799,7 +799,7 @@ do { \ X##_e -= (_FP_W_TYPE_SIZE - rsize); \ X##_e = rsize - X##_e - 1; \ \ - if (_FP_FRACBITS_##fs < rsize && _FP_WFRACBITS_##fs < X##_e) \ + if (_FP_FRACBITS_##fs < rsize && _FP_WFRACBITS_##fs <= X##_e) \ __FP_FRAC_SRS_1(ur_, (X##_e - _FP_WFRACBITS_##fs + 1), rsize);\ _FP_FRAC_DISASSEMBLE_##wc(X, ur_, rsize); \ if ((_FP_WFRACBITS_##fs - X##_e - 1) > 0) \ -- cgit v1.2.3 From edd63cb6b91024332d6983fc51058ac1ef0c081e Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Wed, 21 Jul 2010 19:27:07 -0500 Subject: sysrq,kdb: Use __handle_sysrq() for kdb's sysrq function The kdb code should not toggle the sysrq state in case an end user wants to try and resume the normal kernel execution. Signed-off-by: Jason Wessel Acked-by: Dmitry Torokhov --- drivers/char/sysrq.c | 2 +- include/linux/sysrq.h | 1 + kernel/debug/kdb/kdb_main.c | 3 +-- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 5d64e3acb000..878ac0c2cc68 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -493,7 +493,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p) sysrq_key_table[i] = op_p; } -static void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) +void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) { struct sysrq_key_op *op_p; int orig_log_level; diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h index 4496322e28dd..609e8ca5f534 100644 --- a/include/linux/sysrq.h +++ b/include/linux/sysrq.h @@ -45,6 +45,7 @@ struct sysrq_key_op { */ void handle_sysrq(int key, struct tty_struct *tty); +void __handle_sysrq(int key, struct tty_struct *tty, int check_mask); int register_sysrq_key(int key, struct sysrq_key_op *op); int unregister_sysrq_key(int key, struct sysrq_key_op *op); struct sysrq_key_op *__sysrq_get_key_op(int key); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 7e9bfd54a0db..ebe4a287419e 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1820,9 +1820,8 @@ static int kdb_sr(int argc, const char **argv) { if (argc != 1) return KDB_ARGCOUNT; - sysrq_toggle_support(1); kdb_trap_printk++; - handle_sysrq(*argv[1], NULL); + __handle_sysrq(*argv[1], NULL, 0); kdb_trap_printk--; return 0; -- cgit v1.2.3 From 718be4aaf3613cf7c2d097f925abc3d3553c0605 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Thu, 22 Jul 2010 16:54:27 -0400 Subject: ACPI: skip checking BM_STS if the BIOS doesn't ask for it It turns out that there is a bit in the _CST for Intel FFH C3 that tells the OS if we should be checking BM_STS or not. Linux has been unconditionally checking BM_STS. 
If the chip-set is configured to enable BM_STS, it can retard or completely prevent entry into deep C-states -- as illustrated by turbostat: http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/ ref: Intel Processor Vendor-Specific ACPI Interface Specification table 4 "_CST FFH GAS Field Encoding" Bit 1: Set to 1 if OSPM should use Bus Master avoidance for this C-state https://bugzilla.kernel.org/show_bug.cgi?id=15886 Signed-off-by: Len Brown --- arch/x86/kernel/acpi/cstate.c | 9 +++++++++ drivers/acpi/processor_idle.c | 2 +- include/acpi/processor.h | 3 ++- 3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 2e837f5080fe..fb7a5f052e2b 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -145,6 +145,15 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, percpu_entry->states[cx->index].eax = cx->address; percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; } + + /* + * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared, + * then we should skip checking BM_STS for this C-state. + * ref: "Intel Processor Vendor-Specific ACPI Interface Specification" + */ + if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2)) + cx->bm_sts_skip = 1; + return retval; } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index b1b385692f46..b351342f1faf 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -947,7 +947,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, if (acpi_idle_suspend) return(acpi_idle_enter_c1(dev, state)); - if (acpi_idle_bm_check()) { + if (!cx->bm_sts_skip && acpi_idle_bm_check()) { if (dev->safe_state) { dev->last_state = dev->safe_state; return dev->safe_state->enter(dev, dev->safe_state); diff --git a/include/acpi/processor.h b/include/acpi/processor.h index da565a48240e..a68ca8a11a53 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -48,7 +48,7 @@ struct acpi_power_register { u8 space_id; u8 bit_width; u8 bit_offset; - u8 reserved; + u8 access_size; u64 address; } __attribute__ ((packed)); @@ -63,6 +63,7 @@ struct acpi_processor_cx { u32 power; u32 usage; u64 time; + u8 bm_sts_skip; char desc[ACPI_CX_DESC_LEN]; }; -- cgit v1.2.3 From da5e37efe8704fc2b354626467f80f73c5e3c020 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Tue, 13 Jul 2010 11:39:42 +0200 Subject: vmlinux.lds: fix .data..init_task output section (fix popwerpc boot) The .data..init_task output section was missing a load offset causing a popwerpc target to fail to boot. Sean MacLennan tracked it down to the definition of INIT_TASK_DATA_SECTION(). There are only two users of INIT_TASK_DATA_SECTION() in the kernel today: cris and popwerpc. cris do not support relocatable kernels and is thus not impacted by this change. Fix INIT_TASK_DATA_SECTION() to specify load offset like all other output sections. Reported-by: Sean MacLennan Signed-off-by: Sam Ravnborg Signed-off-by: Benjamin Herrenschmidt --- include/asm-generic/vmlinux.lds.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 48c5299cbf26..cdfff74e9739 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -435,7 +435,7 @@ */ #define INIT_TASK_DATA_SECTION(align) \ . 
= ALIGN(align); \ - .data..init_task : { \ + .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \ INIT_TASK_DATA(align) \ } -- cgit v1.2.3 From 72ad5d77fb981963edae15eee8196c80238f5ed0 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 23 Jul 2010 22:59:09 +0200 Subject: ACPI / Sleep: Allow the NVS saving to be skipped during suspend to RAM Commit 2a6b69765ad794389f2fc3e14a0afa1a995221c2 (ACPI: Store NVS state even when entering suspend to RAM) caused the ACPI suspend code save the NVS area during suspend and restore it during resume unconditionally, although it is known that some systems need to use acpi_sleep=s4_nonvs for hibernation to work. To allow the affected systems to avoid saving and restoring the NVS area during suspend to RAM and resume, introduce kernel command line option acpi_sleep=nonvs and make acpi_sleep=s4_nonvs work as its alias temporarily (add acpi_sleep=s4_nonvs to the feature removal file). Addresses https://bugzilla.kernel.org/show_bug.cgi?id=16396 . Signed-off-by: Rafael J. Wysocki Reported-and-tested-by: tomas m Signed-off-by: Len Brown --- Documentation/feature-removal-schedule.txt | 7 ++++++ Documentation/kernel-parameters.txt | 4 ++-- arch/x86/kernel/acpi/sleep.c | 9 ++++++-- drivers/acpi/sleep.c | 35 +++++++++++++++--------------- include/linux/acpi.h | 2 +- 5 files changed, 34 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index c268783bc4e7..1571c0c83dba 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -647,3 +647,10 @@ Who: Stefan Richter ---------------------------- +What: The acpi_sleep=s4_nonvs command line option +When: 2.6.37 +Files: arch/x86/kernel/acpi/sleep.c +Why: superseded by acpi_sleep=nonvs +Who: Rafael J. Wysocki + +---------------------------- diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 4ddb58df081e..2b2407d9a6d0 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -254,8 +254,8 @@ and is between 256 and 4096 characters. It is defined in the file control method, with respect to putting devices into low power states, to be enforced (the ACPI 2.0 ordering of _PTS is used by default). - s4_nonvs prevents the kernel from saving/restoring the - ACPI NVS memory during hibernation. + nonvs prevents the kernel from saving/restoring the + ACPI NVS memory during suspend/hibernation and resume. sci_force_enable causes the kernel to set SCI_EN directly on resume from S1/S3 (which is against the ACPI spec, but some broken systems don't work without it). 
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 82e508677b91..fcc3c61fdecc 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -157,9 +157,14 @@ static int __init acpi_sleep_setup(char *str) #ifdef CONFIG_HIBERNATION if (strncmp(str, "s4_nohwsig", 10) == 0) acpi_no_s4_hw_signature(); - if (strncmp(str, "s4_nonvs", 8) == 0) - acpi_s4_no_nvs(); + if (strncmp(str, "s4_nonvs", 8) == 0) { + pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, " + "please use acpi_sleep=nonvs instead"); + acpi_nvs_nosave(); + } #endif + if (strncmp(str, "nonvs", 5) == 0) + acpi_nvs_nosave(); if (strncmp(str, "old_ordering", 12) == 0) acpi_old_suspend_ordering(); str = strchr(str, ','); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 5b7c52e4a00f..2862c781b372 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -81,6 +81,20 @@ static int acpi_sleep_prepare(u32 acpi_state) #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; +/* + * The ACPI specification wants us to save NVS memory regions during hibernation + * and to restore them during the subsequent resume. Windows does that also for + * suspend to RAM. However, it is known that this mechanism does not work on + * all machines, so we allow the user to disable it with the help of the + * 'acpi_sleep=nonvs' kernel command line option. + */ +static bool nvs_nosave; + +void __init acpi_nvs_nosave(void) +{ + nvs_nosave = true; +} + /* * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the * user to request that behavior by using the 'acpi_old_suspend_ordering' @@ -197,8 +211,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state) u32 acpi_state = acpi_suspend_states[pm_state]; int error = 0; - error = suspend_nvs_alloc(); - + error = nvs_nosave ? 0 : suspend_nvs_alloc(); if (error) return error; @@ -388,20 +401,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATION -/* - * The ACPI specification wants us to save NVS memory regions during hibernation - * and to restore them during the subsequent resume. However, it is not certain - * if this mechanism is going to work on all machines, so we allow the user to - * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line - * option. - */ -static bool s4_no_nvs; - -void __init acpi_s4_no_nvs(void) -{ - s4_no_nvs = true; -} - static unsigned long s4_hardware_signature; static struct acpi_table_facs *facs; static bool nosigcheck; @@ -415,7 +414,7 @@ static int acpi_hibernation_begin(void) { int error; - error = s4_no_nvs ? 0 : suspend_nvs_alloc(); + error = nvs_nosave ? 
0 : suspend_nvs_alloc(); if (!error) { acpi_target_sleep_state = ACPI_STATE_S4; acpi_sleep_tts_switch(acpi_target_sleep_state); @@ -510,7 +509,7 @@ static int acpi_hibernation_begin_old(void) error = acpi_sleep_prepare(ACPI_STATE_S4); if (!error) { - if (!s4_no_nvs) + if (!nvs_nosave) error = suspend_nvs_alloc(); if (!error) acpi_target_sleep_state = ACPI_STATE_S4; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 224a38c960d4..ccf94dc5acdf 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -253,7 +253,7 @@ int acpi_resources_are_enforced(void); #ifdef CONFIG_PM_SLEEP void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); -void __init acpi_s4_no_nvs(void); +void __init acpi_nvs_nosave(void); #endif /* CONFIG_PM_SLEEP */ struct acpi_osc_context { -- cgit v1.2.3 From ba9f507a1bea5ca2fc4a19e227c56b60fd5faca3 Mon Sep 17 00:00:00 2001 From: Xiaolong Chen Date: Mon, 26 Jul 2010 01:01:11 -0700 Subject: Input: adp5588-keys - export unused GPIO pins This patch allows exporting GPIO pins not used by the keypad itself to be accessible from elsewhere. Signed-off-by: Xiaolong Chen Signed-off-by: Yuanbo Ye Signed-off-by: Tao Hu Acked-by: Michael Hennerich Signed-off-by: Dmitry Torokhov --- drivers/input/keyboard/adp5588-keys.c | 209 +++++++++++++++++++++++++++++++++- include/linux/i2c/adp5588.h | 1 + 2 files changed, 208 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index 9096db73c3c8..c39ec93c0c58 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -54,6 +55,10 @@ #define KEYP_MAX_EVENT 10 +#define MAXGPIO 18 +#define ADP_BANK(offs) ((offs) >> 3) +#define ADP_BIT(offs) (1u << ((offs) & 0x7)) + /* * Early pre 4.0 Silicon required to delay readout by at least 25ms, * since the Event Counter Register updated 25ms after the interrupt @@ -69,6 +74,14 @@ struct adp5588_kpad { unsigned short keycode[ADP5588_KEYMAPSIZE]; const struct adp5588_gpi_map *gpimap; unsigned short gpimapsize; +#ifdef CONFIG_GPIOLIB + unsigned char gpiomap[MAXGPIO]; + bool export_gpio; + struct gpio_chip gc; + struct mutex gpio_lock; /* Protect cached dir, dat_out */ + u8 dat_out[3]; + u8 dir[3]; +#endif }; static int adp5588_read(struct i2c_client *client, u8 reg) @@ -86,6 +99,183 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val) return i2c_smbus_write_byte_data(client, reg, val); } +#ifdef CONFIG_GPIOLIB +static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off) +{ + struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); + unsigned int bank = ADP_BANK(kpad->gpiomap[off]); + unsigned int bit = ADP_BIT(kpad->gpiomap[off]); + + return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit); +} + +static void adp5588_gpio_set_value(struct gpio_chip *chip, + unsigned off, int val) +{ + struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); + unsigned int bank = ADP_BANK(kpad->gpiomap[off]); + unsigned int bit = ADP_BIT(kpad->gpiomap[off]); + + mutex_lock(&kpad->gpio_lock); + + if (val) + kpad->dat_out[bank] |= bit; + else + kpad->dat_out[bank] &= ~bit; + + adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, + kpad->dat_out[bank]); + + mutex_unlock(&kpad->gpio_lock); +} + +static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off) +{ + struct adp5588_kpad *kpad = 
container_of(chip, struct adp5588_kpad, gc); + unsigned int bank = ADP_BANK(kpad->gpiomap[off]); + unsigned int bit = ADP_BIT(kpad->gpiomap[off]); + int ret; + + mutex_lock(&kpad->gpio_lock); + + kpad->dir[bank] &= ~bit; + ret = adp5588_write(kpad->client, GPIO_DIR1 + bank, kpad->dir[bank]); + + mutex_unlock(&kpad->gpio_lock); + + return ret; +} + +static int adp5588_gpio_direction_output(struct gpio_chip *chip, + unsigned off, int val) +{ + struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); + unsigned int bank = ADP_BANK(kpad->gpiomap[off]); + unsigned int bit = ADP_BIT(kpad->gpiomap[off]); + int ret; + + mutex_lock(&kpad->gpio_lock); + + kpad->dir[bank] |= bit; + + if (val) + kpad->dat_out[bank] |= bit; + else + kpad->dat_out[bank] &= ~bit; + + ret = adp5588_write(kpad->client, GPIO_DAT_OUT1 + bank, + kpad->dat_out[bank]); + ret |= adp5588_write(kpad->client, GPIO_DIR1 + bank, + kpad->dir[bank]); + + mutex_unlock(&kpad->gpio_lock); + + return ret; +} + +static int __devinit adp5588_gpio_add(struct device *dev) +{ + struct adp5588_kpad *kpad = dev_get_drvdata(dev); + const struct adp5588_kpad_platform_data *pdata = dev->platform_data; + const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data; + int i, error; + + if (gpio_data) { + int j = 0; + bool pin_used[MAXGPIO]; + + for (i = 0; i < pdata->rows; i++) + pin_used[i] = true; + + for (i = 0; i < pdata->cols; i++) + pin_used[i + GPI_PIN_COL_BASE - GPI_PIN_BASE] = true; + + for (i = 0; i < kpad->gpimapsize; i++) + pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true; + + for (i = 0; i < MAXGPIO; i++) { + if (!pin_used[i]) + kpad->gpiomap[j++] = i; + } + kpad->gc.ngpio = j; + + if (kpad->gc.ngpio) + kpad->export_gpio = true; + } + + if (!kpad->export_gpio) { + dev_info(dev, "No unused gpios left to export\n"); + return 0; + } + + kpad->gc.direction_input = adp5588_gpio_direction_input; + kpad->gc.direction_output = adp5588_gpio_direction_output; + kpad->gc.get = adp5588_gpio_get_value; + kpad->gc.set = adp5588_gpio_set_value; + kpad->gc.can_sleep = 1; + + kpad->gc.base = gpio_data->gpio_start; + kpad->gc.label = kpad->client->name; + kpad->gc.owner = THIS_MODULE; + + mutex_init(&kpad->gpio_lock); + + error = gpiochip_add(&kpad->gc); + if (error) { + dev_err(dev, "gpiochip_add failed, err: %d\n", error); + return error; + } + + for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { + kpad->dat_out[i] = adp5588_read(kpad->client, + GPIO_DAT_OUT1 + i); + kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i); + } + + if (gpio_data->setup) { + error = gpio_data->setup(kpad->client, + kpad->gc.base, kpad->gc.ngpio, + gpio_data->context); + if (error) + dev_warn(dev, "setup failed, %d\n", error); + } + + return 0; +} + +static void __devexit adp5588_gpio_remove(struct device *dev) +{ + struct adp5588_kpad *kpad = dev_get_drvdata(dev); + const struct adp5588_kpad_platform_data *pdata = dev->platform_data; + const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data; + int error; + + if (!kpad->export_gpio) + return; + + if (gpio_data->teardown) { + error = gpio_data->teardown(kpad->client, + kpad->gc.base, kpad->gc.ngpio, + gpio_data->context); + if (error) + dev_warn(dev, "teardown failed %d\n", error); + } + + error = gpiochip_remove(&kpad->gc); + if (error) + dev_warn(dev, "gpiochip_remove failed %d\n", error); +} +#else +static inline int adp5588_gpio_add(struct device *dev) +{ + return 0; +} + +static inline void adp5588_gpio_remove(struct device *dev) +{ +} +#endif + static void adp5588_report_events(struct 
adp5588_kpad *kpad, int ev_cnt) { int i, j; @@ -150,7 +340,8 @@ static irqreturn_t adp5588_irq(int irq, void *handle) static int __devinit adp5588_setup(struct i2c_client *client) { - struct adp5588_kpad_platform_data *pdata = client->dev.platform_data; + const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data; + const struct adp5588_gpio_platform_data *gpio_data = pdata->gpio_data; int i, ret; unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0; @@ -184,6 +375,15 @@ static int __devinit adp5588_setup(struct i2c_client *client) ret |= adp5588_write(client, GPI_EM3, evt_mode3); } + if (gpio_data) { + for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { + int pull_mask = gpio_data->pullup_dis_mask; + + ret |= adp5588_write(client, GPIO_PULL1 + i, + (pull_mask >> (8 * i)) & 0xFF); + } + } + ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT | OVR_FLOW_INT | K_LCK_INT | GPI_INT | KE_INT); /* Status is W1C */ @@ -240,7 +440,7 @@ static int __devinit adp5588_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adp5588_kpad *kpad; - struct adp5588_kpad_platform_data *pdata = client->dev.platform_data; + const struct adp5588_kpad_platform_data *pdata = client->dev.platform_data; struct input_dev *input; unsigned int revid; int ret, i; @@ -381,6 +581,10 @@ static int __devinit adp5588_probe(struct i2c_client *client, if (kpad->gpimapsize) adp5588_report_switch_state(kpad); + error = adp5588_gpio_add(&client->dev); + if (error) + goto err_free_irq; + device_init_wakeup(&client->dev, 1); i2c_set_clientdata(client, kpad); @@ -407,6 +611,7 @@ static int __devexit adp5588_remove(struct i2c_client *client) free_irq(client->irq, kpad); cancel_delayed_work_sync(&kpad->work); input_unregister_device(kpad->input); + adp5588_gpio_remove(&client->dev); kfree(kpad); return 0; diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h index b5f57c498e24..269181b8f623 100644 --- a/include/linux/i2c/adp5588.h +++ b/include/linux/i2c/adp5588.h @@ -123,6 +123,7 @@ struct adp5588_kpad_platform_data { unsigned short unlock_key2; /* Unlock Key 2 */ const struct adp5588_gpi_map *gpimap; unsigned short gpimapsize; + const struct adp5588_gpio_platform_data *gpio_data; }; struct adp5588_gpio_platform_data { -- cgit v1.2.3 From 40e2e97316af6e62affab7a392e792494b8d9dde Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 18 Jul 2010 21:17:09 +0000 Subject: direct-io: move aio_complete into ->end_io Filesystems with unwritten extent support must not complete an AIO request until the transaction to convert the extent has been commited. That means the aio_complete calls needs to be moved into the ->end_io callback so that the filesystem can control when to call it exactly. This makes a bit of a mess out of dio_complete and the ->end_io callback prototype even more complicated. Signed-off-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Alex Elder --- fs/direct-io.c | 26 ++++++++++++++------------ fs/ext4/inode.c | 10 +++++++--- fs/ocfs2/aops.c | 7 ++++++- fs/xfs/linux-2.6/xfs_aops.c | 7 ++++++- fs/xfs/linux-2.6/xfs_aops.h | 2 ++ include/linux/fs.h | 3 ++- 6 files changed, 37 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/fs/direct-io.c b/fs/direct-io.c index 7600aacf531d..a10cb91cadea 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio) * filesystems can use it to hold additional state between get_block calls and * dio_complete. 
*/ -static int dio_complete(struct dio *dio, loff_t offset, int ret) +static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async) { ssize_t transferred = 0; @@ -239,14 +239,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) transferred = dio->i_size - offset; } - if (dio->end_io && dio->result) - dio->end_io(dio->iocb, offset, transferred, - dio->map_bh.b_private); - - if (dio->flags & DIO_LOCKING) - /* lockdep: non-owner release */ - up_read_non_owner(&dio->inode->i_alloc_sem); - if (ret == 0) ret = dio->page_errors; if (ret == 0) @@ -254,6 +246,17 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) if (ret == 0) ret = transferred; + if (dio->end_io && dio->result) { + dio->end_io(dio->iocb, offset, transferred, + dio->map_bh.b_private, ret, is_async); + } else if (is_async) { + aio_complete(dio->iocb, ret, 0); + } + + if (dio->flags & DIO_LOCKING) + /* lockdep: non-owner release */ + up_read_non_owner(&dio->inode->i_alloc_sem); + return ret; } @@ -277,8 +280,7 @@ static void dio_bio_end_aio(struct bio *bio, int error) spin_unlock_irqrestore(&dio->bio_lock, flags); if (remaining == 0) { - int ret = dio_complete(dio, dio->iocb->ki_pos, 0); - aio_complete(dio->iocb, ret, 0); + dio_complete(dio, dio->iocb->ki_pos, 0, true); kfree(dio); } } @@ -1126,7 +1128,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, spin_unlock_irqrestore(&dio->bio_lock, flags); if (ret2 == 0) { - ret = dio_complete(dio, offset, ret); + ret = dio_complete(dio, offset, ret, false); kfree(dio); } else BUG_ON(ret != -EIOCBQUEUED); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 42272d67955a..0afc8c1d8cf3 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3775,7 +3775,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags) } static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, - ssize_t size, void *private) + ssize_t size, void *private, int ret, + bool is_async) { ext4_io_end_t *io_end = iocb->private; struct workqueue_struct *wq; @@ -3784,7 +3785,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, /* if not async direct IO or dio with 0 bytes write, just return */ if (!io_end || !size) - return; + goto out; ext_debug("ext4_end_io_dio(): io_end 0x%p" "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", @@ -3795,7 +3796,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, if (io_end->flag != EXT4_IO_UNWRITTEN){ ext4_free_io_end(io_end); iocb->private = NULL; - return; + goto out; } io_end->offset = offset; @@ -3812,6 +3813,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, list_add_tail(&io_end->list, &ei->i_completed_io_list); spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); iocb->private = NULL; +out: + if (is_async) + aio_complete(iocb, ret, 0); } static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 356e976772bf..96337a4fbbdf 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -578,7 +578,9 @@ bail: static void ocfs2_dio_end_io(struct kiocb *iocb, loff_t offset, ssize_t bytes, - void *private) + void *private, + int ret, + bool is_async) { struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; int level; @@ -592,6 +594,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, if (!level) up_read(&inode->i_alloc_sem); ocfs2_rw_unlock(inode, level); + + if (is_async) + aio_complete(iocb, ret, 0); } /* diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c 
index 8abbf0532ea1..95d1e2695c3a 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -1406,7 +1406,9 @@ xfs_end_io_direct( struct kiocb *iocb, loff_t offset, ssize_t size, - void *private) + void *private, + int ret, + bool is_async) { xfs_ioend_t *ioend = iocb->private; @@ -1452,6 +1454,9 @@ xfs_end_io_direct( * against double-freeing. */ iocb->private = NULL; + + if (is_async) + aio_complete(iocb, ret, 0); } STATIC ssize_t diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h index 319da173cc1a..c5057fb6237a 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/linux-2.6/xfs_aops.h @@ -37,6 +37,8 @@ typedef struct xfs_ioend { size_t io_size; /* size of the extent */ xfs_off_t io_offset; /* offset in the file */ struct work_struct io_work; /* xfsdatad work queue */ + struct kiocb *io_iocb; + int io_result; } xfs_ioend_t; extern const struct address_space_operations xfs_address_space_operations; diff --git a/include/linux/fs.h b/include/linux/fs.h index 68ca1b0491af..f91affb7d530 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -415,7 +415,8 @@ struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, - ssize_t bytes, void *private); + ssize_t bytes, void *private, int ret, + bool is_async); /* * Attribute flags. These should be or-ed together to figure out what -- cgit v1.2.3 From 98864ff58dd2b8ef9e72b0d2c70f34e7ff24a2ee Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 22 May 2010 23:59:11 +0100 Subject: ARM: OMAP: Convert OMAPFB and VRAM SDRAM reservation to LMB Signed-off-by: Russell King --- arch/arm/plat-omap/common.c | 4 ++-- arch/arm/plat-omap/fb.c | 30 +++++++++++++++++++----------- arch/arm/plat-omap/include/plat/vram.h | 4 ++-- drivers/video/omap2/vram.c | 33 +++++++++++++++------------------ include/linux/omapfb.h | 2 +- 5 files changed, 39 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index 9f6bbc178a74..ebed82699eb2 100644 --- a/arch/arm/plat-omap/common.c +++ b/arch/arm/plat-omap/common.c @@ -85,8 +85,8 @@ EXPORT_SYMBOL(omap_get_var_config); void __init omap_reserve(void) { - omapfb_reserve_sdram(); - omap_vram_reserve_sdram(); + omapfb_reserve_sdram_memblock(); + omap_vram_reserve_sdram_memblock(); } /* diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c index 97db493904fa..0054b9501a53 100644 --- a/arch/arm/plat-omap/fb.c +++ b/arch/arm/plat-omap/fb.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include @@ -173,25 +173,27 @@ static int check_fbmem_region(int region_idx, struct omapfb_mem_region *rg, static int valid_sdram(unsigned long addr, unsigned long size) { - struct bootmem_data *bdata = NODE_DATA(0)->bdata; - unsigned long sdram_start, sdram_end; + struct memblock_property res; - sdram_start = bdata->node_min_pfn << PAGE_SHIFT; - sdram_end = bdata->node_low_pfn << PAGE_SHIFT; - - return addr >= sdram_start && sdram_end - addr >= size; + res.base = addr; + res.size = size; + return !memblock_find(&res) && res.base == addr && res.size == size; } static int reserve_sdram(unsigned long addr, unsigned long size) { - return reserve_bootmem(addr, size, BOOTMEM_EXCLUSIVE); + if (memblock_is_region_reserved(addr, size)) + return -EBUSY; + if (memblock_reserve(addr, size)) + return -ENOMEM; + return 0; } /* * Called from map_io. 
We need to call to this early enough so that we * can reserve the fixed SDRAM regions before VM could get hold of them. */ -void __init omapfb_reserve_sdram(void) +void __init omapfb_reserve_sdram_memblock(void) { unsigned long reserved = 0; int i; @@ -386,7 +388,10 @@ static inline int omap_init_fb(void) arch_initcall(omap_init_fb); -void omapfb_reserve_sdram(void) {} +void omapfb_reserve_sdram_memblock(void) +{ +} + unsigned long omapfb_reserve_sram(unsigned long sram_pstart, unsigned long sram_vstart, unsigned long sram_size, @@ -402,7 +407,10 @@ void omapfb_set_platform_data(struct omapfb_platform_data *data) { } -void omapfb_reserve_sdram(void) {} +void omapfb_reserve_sdram_memblock(void) +{ +} + unsigned long omapfb_reserve_sram(unsigned long sram_pstart, unsigned long sram_vstart, unsigned long sram_size, diff --git a/arch/arm/plat-omap/include/plat/vram.h b/arch/arm/plat-omap/include/plat/vram.h index edd4987758a6..0aa4ecd12c7d 100644 --- a/arch/arm/plat-omap/include/plat/vram.h +++ b/arch/arm/plat-omap/include/plat/vram.h @@ -38,7 +38,7 @@ extern void omap_vram_get_info(unsigned long *vram, unsigned long *free_vram, extern void omap_vram_set_sdram_vram(u32 size, u32 start); extern void omap_vram_set_sram_vram(u32 size, u32 start); -extern void omap_vram_reserve_sdram(void); +extern void omap_vram_reserve_sdram_memblock(void); extern unsigned long omap_vram_reserve_sram(unsigned long sram_pstart, unsigned long sram_vstart, unsigned long sram_size, @@ -48,7 +48,7 @@ extern unsigned long omap_vram_reserve_sram(unsigned long sram_pstart, static inline void omap_vram_set_sdram_vram(u32 size, u32 start) { } static inline void omap_vram_set_sram_vram(u32 size, u32 start) { } -static inline void omap_vram_reserve_sdram(void) { } +static inline void omap_vram_reserve_sdram_memblock(void) { } static inline unsigned long omap_vram_reserve_sram(unsigned long sram_pstart, unsigned long sram_vstart, unsigned long sram_size, diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c index 3b1237ad85ed..f6fdc2085f3e 100644 --- a/drivers/video/omap2/vram.c +++ b/drivers/video/omap2/vram.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include @@ -525,10 +525,8 @@ early_param("vram", omap_vram_early_vram); * Called from map_io. We need to call to this early enough so that we * can reserve the fixed SDRAM regions before VM could get hold of them. 
*/ -void __init omap_vram_reserve_sdram(void) +void __init omap_vram_reserve_sdram_memblock(void) { - struct bootmem_data *bdata; - unsigned long sdram_start, sdram_size; u32 paddr; u32 size = 0; @@ -555,29 +553,28 @@ void __init omap_vram_reserve_sdram(void) size = PAGE_ALIGN(size); - bdata = NODE_DATA(0)->bdata; - sdram_start = bdata->node_min_pfn << PAGE_SHIFT; - sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start; - if (paddr) { - if ((paddr & ~PAGE_MASK) || paddr < sdram_start || - paddr + size > sdram_start + sdram_size) { + struct memblock_property res; + + res.base = paddr; + res.size = size; + if ((paddr & ~PAGE_MASK) || memblock_find(&res) || + res.base != paddr || res.size != size) { pr_err("Illegal SDRAM region for VRAM\n"); return; } - if (reserve_bootmem(paddr, size, BOOTMEM_EXCLUSIVE) < 0) { - pr_err("FB: failed to reserve VRAM\n"); + if (memblock_is_region_reserved(paddr, size)) { + pr_err("FB: failed to reserve VRAM - busy\n"); return; } - } else { - if (size > sdram_size) { - pr_err("Illegal SDRAM size for VRAM\n"); + + if (memblock_reserve(paddr, size) < 0) { + pr_err("FB: failed to reserve VRAM - no memory\n"); return; } - - paddr = virt_to_phys(alloc_bootmem_pages(size)); - BUG_ON(paddr & ~PAGE_MASK); + } else { + paddr = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_REAL_LIMIT); } omap_vram_add_region(paddr, size); diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h index 9bdd91486b49..7e4cd616bcb5 100644 --- a/include/linux/omapfb.h +++ b/include/linux/omapfb.h @@ -253,7 +253,7 @@ struct omapfb_platform_data { /* in arch/arm/plat-omap/fb.c */ extern void omapfb_set_platform_data(struct omapfb_platform_data *data); extern void omapfb_set_ctrl_platform_data(void *pdata); -extern void omapfb_reserve_sdram(void); +extern void omapfb_reserve_sdram_memblock(void); #endif -- cgit v1.2.3 From ec489aa8f993f8d2ec962ce113071faac482aa27 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 2 Jun 2010 08:13:52 +0100 Subject: ARM: 6157/2: PL011 TX/RX split of LCR for ST-Ericssons derivative In the ST-Ericsson version of the PL011 the TX and RX have different control registers. 
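As a sketch only (the helper name is hypothetical, not part of the patch): the register split boils down to a pattern repeated in the hunks below, which programs the RX line control register and, where the TX register is separate, waits 10 PCLKs (ten writes to a read-only register) before programming it:

static void pl011_write_lcr(struct uart_amba_port *uap, unsigned int lcr_h)
{
	int i;

	writew(lcr_h, uap->port.membase + uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx) {
		/* wait 10 PCLKs: write a read-only register 10 times */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	}
}
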
Cc: Alessandro Rubini Signed-off-by: Marcin Mielczarczyk Signed-off-by: Linus Walleij Signed-off-by: Russell King --- drivers/serial/amba-pl011.c | 61 +++++++++++++++++++++++++++++++++++++-------- include/linux/amba/serial.h | 2 ++ 2 files changed, 52 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c index eb4cb480b93e..5644cf2385bb 100644 --- a/drivers/serial/amba-pl011.c +++ b/drivers/serial/amba-pl011.c @@ -69,9 +69,11 @@ struct uart_amba_port { struct uart_port port; struct clk *clk; - unsigned int im; /* interrupt mask */ + unsigned int im; /* interrupt mask */ unsigned int old_status; - unsigned int ifls; /* vendor-specific */ + unsigned int ifls; /* vendor-specific */ + unsigned int lcrh_tx; /* vendor-specific */ + unsigned int lcrh_rx; /* vendor-specific */ bool autorts; }; @@ -79,16 +81,22 @@ struct uart_amba_port { struct vendor_data { unsigned int ifls; unsigned int fifosize; + unsigned int lcrh_tx; + unsigned int lcrh_rx; }; static struct vendor_data vendor_arm = { .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8, .fifosize = 16, + .lcrh_tx = UART011_LCRH, + .lcrh_rx = UART011_LCRH, }; static struct vendor_data vendor_st = { .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF, .fifosize = 64, + .lcrh_tx = ST_UART011_LCRH_TX, + .lcrh_rx = ST_UART011_LCRH_RX, }; static void pl011_stop_tx(struct uart_port *port) @@ -327,12 +335,12 @@ static void pl011_break_ctl(struct uart_port *port, int break_state) unsigned int lcr_h; spin_lock_irqsave(&uap->port.lock, flags); - lcr_h = readw(uap->port.membase + UART011_LCRH); + lcr_h = readw(uap->port.membase + uap->lcrh_tx); if (break_state == -1) lcr_h |= UART01x_LCRH_BRK; else lcr_h &= ~UART01x_LCRH_BRK; - writew(lcr_h, uap->port.membase + UART011_LCRH); + writew(lcr_h, uap->port.membase + uap->lcrh_tx); spin_unlock_irqrestore(&uap->port.lock, flags); } @@ -393,7 +401,17 @@ static int pl011_startup(struct uart_port *port) writew(cr, uap->port.membase + UART011_CR); writew(0, uap->port.membase + UART011_FBRD); writew(1, uap->port.membase + UART011_IBRD); - writew(0, uap->port.membase + UART011_LCRH); + writew(0, uap->port.membase + uap->lcrh_rx); + if (uap->lcrh_tx != uap->lcrh_rx) { + int i; + /* + * Wait 10 PCLKs before writing LCRH_TX register, + * to get this delay write read only register 10 times + */ + for (i = 0; i < 10; ++i) + writew(0xff, uap->port.membase + UART011_MIS); + writew(0, uap->port.membase + uap->lcrh_tx); + } writew(0, uap->port.membase + UART01x_DR); while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) barrier(); @@ -422,10 +440,19 @@ static int pl011_startup(struct uart_port *port) return retval; } +static void pl011_shutdown_channel(struct uart_amba_port *uap, + unsigned int lcrh) +{ + unsigned long val; + + val = readw(uap->port.membase + lcrh); + val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); + writew(val, uap->port.membase + lcrh); +} + static void pl011_shutdown(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; - unsigned long val; /* * disable all interrupts @@ -450,9 +477,9 @@ static void pl011_shutdown(struct uart_port *port) /* * disable break condition and fifos */ - val = readw(uap->port.membase + UART011_LCRH); - val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); - writew(val, uap->port.membase + UART011_LCRH); + pl011_shutdown_channel(uap, uap->lcrh_rx); + if (uap->lcrh_rx != uap->lcrh_tx) + pl011_shutdown_channel(uap, uap->lcrh_tx); /* * Shut down the clock producer @@ -561,7 +588,17 @@ 
pl011_set_termios(struct uart_port *port, struct ktermios *termios, * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L * ----------^----------^----------^----------^----- */ - writew(lcr_h, port->membase + UART011_LCRH); + writew(lcr_h, port->membase + uap->lcrh_rx); + if (uap->lcrh_rx != uap->lcrh_tx) { + int i; + /* + * Wait 10 PCLKs before writing LCRH_TX register, + * to get this delay write read only register 10 times + */ + for (i = 0; i < 10; ++i) + writew(0xff, uap->port.membase + UART011_MIS); + writew(lcr_h, port->membase + uap->lcrh_tx); + } writew(old_cr, port->membase + UART011_CR); spin_unlock_irqrestore(&port->lock, flags); @@ -688,7 +725,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) { unsigned int lcr_h, ibrd, fbrd; - lcr_h = readw(uap->port.membase + UART011_LCRH); + lcr_h = readw(uap->port.membase + uap->lcrh_tx); *parity = 'n'; if (lcr_h & UART01x_LCRH_PEN) { @@ -800,6 +837,8 @@ static int pl011_probe(struct amba_device *dev, struct amba_id *id) } uap->ifls = vendor->ifls; + uap->lcrh_rx = vendor->lcrh_rx; + uap->lcrh_tx = vendor->lcrh_tx; uap->port.dev = &dev->dev; uap->port.mapbase = dev->res.start; uap->port.membase = base; diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 5a5a7fd62490..93c96a66c518 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -38,10 +38,12 @@ #define UART01x_FR 0x18 /* Flag register (Read only). */ #define UART010_IIR 0x1C /* Interrupt indentification register (Read). */ #define UART010_ICR 0x1C /* Interrupt clear register (Write). */ +#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */ #define UART01x_ILPR 0x20 /* IrDA low power counter register. */ #define UART011_IBRD 0x24 /* Integer baud rate divisor register. */ #define UART011_FBRD 0x28 /* Fractional baud rate divisor register. */ #define UART011_LCRH 0x2c /* Line control register. */ +#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */ #define UART011_CR 0x30 /* Control register. */ #define UART011_IFLS 0x34 /* Interrupt fifo level select. */ #define UART011_IMSC 0x38 /* Interrupt mask. */ -- cgit v1.2.3 From ac3e3fb424d44109dda3b1a3459e1b30fa60ac4a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 2 Jun 2010 20:40:22 +0100 Subject: ARM: 6158/2: PL011 baudrate extension for ST-Ericssons derivative Implementation of the ST-Ericsson baudrate extension in the PL011 block. In this modified variant it is possible to change the sampling factor from 16 to 8, and thanks to this we can get higher baudrates while still using the same peripheral clock. Also replace the simple division to determine the baud divisor with DIV_ROUND_CLOSEST() rather than a simple integer division. 
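A worked example of the resulting divisor arithmetic (the 26 MHz peripheral clock and 3 Mbaud target are assumed values for illustration, not taken from the patch):

	/*
	 * With uartclk = 26 MHz the 16x-sampling ceiling is 26000000/16 = 1625000
	 * baud.  Requesting 3000000 baud exceeds that, so OVSFACT (8x sampling)
	 * is selected and:
	 *     quot = DIV_ROUND_CLOSEST(26000000 * 8, 3000000) = 69
	 * which is programmed as IBRD = quot >> 6 = 1 and FBRD = quot & 0x3f = 5,
	 * i.e. a divisor of 1 + 5/64 (about 1.078) against the exact 26/24
	 * (about 1.083).
	 */
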
Cc: Alessandro Rubini Cc: Jerzy Kasenberg Signed-off-by: Marcin Mielczarczyk Signed-off-by: Linus Walleij Signed-off-by: Russell King --- drivers/serial/amba-pl011.c | 27 +++++++++++++++++++++++++-- include/linux/amba/serial.h | 1 + 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c index 5644cf2385bb..f67e09da6d33 100644 --- a/drivers/serial/amba-pl011.c +++ b/drivers/serial/amba-pl011.c @@ -74,6 +74,7 @@ struct uart_amba_port { unsigned int ifls; /* vendor-specific */ unsigned int lcrh_tx; /* vendor-specific */ unsigned int lcrh_rx; /* vendor-specific */ + bool oversampling; /* vendor-specific */ bool autorts; }; @@ -83,6 +84,7 @@ struct vendor_data { unsigned int fifosize; unsigned int lcrh_tx; unsigned int lcrh_rx; + bool oversampling; }; static struct vendor_data vendor_arm = { @@ -90,6 +92,7 @@ static struct vendor_data vendor_arm = { .fifosize = 16, .lcrh_tx = UART011_LCRH, .lcrh_rx = UART011_LCRH, + .oversampling = false, }; static struct vendor_data vendor_st = { @@ -97,6 +100,7 @@ static struct vendor_data vendor_st = { .fifosize = 64, .lcrh_tx = ST_UART011_LCRH_TX, .lcrh_rx = ST_UART011_LCRH_RX, + .oversampling = true, }; static void pl011_stop_tx(struct uart_port *port) @@ -499,8 +503,13 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, /* * Ask the core to calculate the divisor for us. */ - baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); - quot = port->uartclk * 4 / baud; + baud = uart_get_baud_rate(port, termios, old, 0, + port->uartclk/(uap->oversampling ? 8 : 16)); + + if (baud > port->uartclk/16) + quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); + else + quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); switch (termios->c_cflag & CSIZE) { case CS5: @@ -579,6 +588,13 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, uap->autorts = false; } + if (uap->oversampling) { + if (baud > port->uartclk/16) + old_cr |= ST_UART011_CR_OVSFACT; + else + old_cr &= ~ST_UART011_CR_OVSFACT; + } + /* Set baud rate */ writew(quot & 0x3f, port->membase + UART011_FBRD); writew(quot >> 6, port->membase + UART011_IBRD); @@ -744,6 +760,12 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, fbrd = readw(uap->port.membase + UART011_FBRD); *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd); + + if (uap->oversampling) { + if (readw(uap->port.membase + UART011_CR) + & ST_UART011_CR_OVSFACT) + *baud *= 2; + } } } @@ -839,6 +861,7 @@ static int pl011_probe(struct amba_device *dev, struct amba_id *id) uap->ifls = vendor->ifls; uap->lcrh_rx = vendor->lcrh_rx; uap->lcrh_tx = vendor->lcrh_tx; + uap->oversampling = vendor->oversampling; uap->port.dev = &dev->dev; uap->port.mapbase = dev->res.start; uap->port.membase = base; diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 93c96a66c518..e1b634b635f2 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -86,6 +86,7 @@ #define UART010_CR_TIE 0x0020 #define UART010_CR_RIE 0x0010 #define UART010_CR_MSIE 0x0008 +#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */ #define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */ #define UART01x_CR_SIREN 0x0002 /* SIR enable */ #define UART01x_CR_UARTEN 0x0001 /* UART enable */ -- cgit v1.2.3 From 5d4a2e29fba5b2bef95b96a46b338ec4d76fa4fd Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Fri, 11 Jun 2010 16:43:59 -0700 Subject: [SCSI] fcoe: clean up TBD comments in FCoE prototype header Some old 
comments in fc_fcoe.h say TBD long after the standard has been passed by T11. Clean them up. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- include/scsi/fc/fc_fcoe.h | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/scsi/fc/fc_fcoe.h b/include/scsi/fc/fc_fcoe.h index e6ad3d2ae475..d5dcd6062815 100644 --- a/include/scsi/fc/fc_fcoe.h +++ b/include/scsi/fc/fc_fcoe.h @@ -22,23 +22,18 @@ /* * FCoE - Fibre Channel over Ethernet. + * See T11 FC-BB-5 Rev 2.00 (09-056v5.pdf) */ /* - * FC_FCOE_OUI hasn't been standardized yet. XXX TBD. + * Default FC_FCOE_OUI / FC-MAP value. */ -#ifndef FC_FCOE_OUI -#define FC_FCOE_OUI 0x0efc00 /* upper 24 bits of FCOE dest MAC TBD */ -#endif +#define FC_FCOE_OUI 0x0efc00 /* upper 24 bits of FCOE MAC */ /* - * The destination MAC address for the fabric login may get a different OUI. - * This isn't standardized yet. + * Fabric Login (FLOGI) MAC for non-FIP use. Non-FIP use is deprecated. */ -#ifndef FC_FCOE_FLOGI_MAC -/* gateway MAC - TBD */ #define FC_FCOE_FLOGI_MAC { 0x0e, 0xfc, 0x00, 0xff, 0xff, 0xfe } -#endif #define FC_FCOE_VER 0 /* version */ @@ -51,8 +46,6 @@ /* * FCoE frame header - 14 bytes - * - * This is the August 2007 version of the FCoE header as defined by T11. * This follows the VLAN header, which includes the ethertype. */ struct fcoe_hdr { -- cgit v1.2.3 From f8fc6c2c99b8085368119d6cf39b997255052826 Mon Sep 17 00:00:00 2001 From: Bhanu Prakash Gollapudi Date: Fri, 11 Jun 2010 16:44:04 -0700 Subject: [SCSI] libfc: Handle unsolicited PRLO request Resubmitting after incorporating Joe's review comment. Unsolicited PRLO request is now handled by sending LS_ACC, and then relogin to the remote port if an N-port login session exists for that remote port. Note that this patch should be applied on top of Joe Eykholt's "Fix remote port restart problem" patch. Signed-off-by: Bhanu Prakash Gollapudi Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 71 ++++++++++++++++++++++++++++++++++++++----- include/scsi/fc/fc_els.h | 9 ++++++ 2 files changed, 72 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index e33c5c7961a2..df85e19079fb 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1573,30 +1573,85 @@ drop: * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests * @rdata: The remote port that sent the PRLO request * @sp: The sequence that the PRLO was on - * @fp: The PRLO request frame + * @rx_fp: The PRLO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. 
*/ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, struct fc_seq *sp, - struct fc_frame *fp) + struct fc_frame *rx_fp) { struct fc_lport *lport = rdata->local_port; - struct fc_frame_header *fh; + struct fc_exch *ep; + struct fc_frame *fp; + struct { + struct fc_els_prlo prlo; + struct fc_els_spp spp; + } *pp; + struct fc_els_spp *rspp; /* request service param page */ + struct fc_els_spp *spp; /* response spp */ + unsigned int len; + unsigned int plen; + u32 f_ctl; struct fc_seq_els_data rjt_data; - fh = fc_frame_header_get(fp); + rjt_data.fp = NULL; + fh = fc_frame_header_get(rx_fp); FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", fc_rport_state(rdata)); - rjt_data.fp = NULL; - rjt_data.reason = ELS_RJT_UNAB; - rjt_data.explan = ELS_EXPL_NONE; + len = fr_len(rx_fp) - sizeof(*fh); + pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); + if (!pp) + goto reject_len; + plen = ntohs(pp->prlo.prlo_len); + if (plen != 20) + goto reject_len; + if (plen < len) + len = plen; + + rspp = &pp->spp; + + fp = fc_frame_alloc(lport, len); + if (!fp) { + rjt_data.reason = ELS_RJT_UNAB; + rjt_data.explan = ELS_EXPL_INSUF_RES; + goto reject; + } + + sp = lport->tt.seq_start_next(sp); + WARN_ON(!sp); + pp = fc_frame_payload_get(fp, len); + WARN_ON(!pp); + memset(pp, 0, len); + pp->prlo.prlo_cmd = ELS_LS_ACC; + pp->prlo.prlo_obs = 0x10; + pp->prlo.prlo_len = htons(len); + spp = &pp->spp; + spp->spp_type = rspp->spp_type; + spp->spp_type_ext = rspp->spp_type_ext; + spp->spp_flags = FC_SPP_RESP_ACK; + + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); + + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; + f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; + ep = fc_seq_exch(sp); + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); + goto drop; + +reject_len: + rjt_data.reason = ELS_RJT_PROT; + rjt_data.explan = ELS_EXPL_INV_LEN; +reject: lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); - fc_frame_free(fp); +drop: + fc_frame_free(rx_fp); } /** diff --git a/include/scsi/fc/fc_els.h b/include/scsi/fc/fc_els.h index f94328132a26..70a7e92a7664 100644 --- a/include/scsi/fc/fc_els.h +++ b/include/scsi/fc/fc_els.h @@ -404,6 +404,15 @@ struct fc_els_prli { /* service parameter pages follow */ }; +/* + * ELS_PRLO - Process logout request and response. + */ +struct fc_els_prlo { + __u8 prlo_cmd; /* command */ + __u8 prlo_obs; /* obsolete, but shall be set to 10h */ + __be16 prlo_len; /* payload length */ +}; + /* * ELS_ADISC payload */ -- cgit v1.2.3 From 4b2164d4d212e437c9f080023a67f8f9356d2c4c Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Fri, 11 Jun 2010 16:44:51 -0700 Subject: [SCSI] libfc: Fix remote port restart problem This patch somewhat combines two fixes to remote port handing in libfc. The first problem was that rport work could be queued on a deleted and freed rport. This is handled by not resetting rdata->event ton NONE if the rdata is about to be deleted. However, that fix led to the second problem, described by Bhanu Gollapudi, as follows: > Here is the sequence of events. T1 is first LOGO receive thread, T2 is > fc_rport_work() scheduled by T1 and T3 is second LOGO receive thread and > T4 is fc_rport_work scheduled by T3. > > 1. (T1)Received 1st LOGO in state Ready > 2. (T1)Delete port & enter to RESTART state. > 3. (T1)schdule event_work, since event is RPORT_EV_NONE. > 4. (T1)set event = RPORT_EV_LOGO > 5. (T1)Enter RESTART state as disc_id is set. > 6. (T2)remember to PLOGI, and set event = RPORT_EV_NONE > 6. 
(T3)Received 2nd LOGO > 7. (T3)Delete Port & enter to RESTART state. > 8. (T3)schedule event_work, since event is RPORT_EV_NONE. > 9. (T3)Enter RESTART state as disc_id is set. > 9. (T3)set event = RPORT_EV_LOGO > 10.(T2)work restart, enter PLOGI state and issues PLOGI > 11.(T4)Since state is not RESTART anymore, restart is not set, and the > event is not reset to RPORT_EV_NONE. (current event is RPORT_EV_LOGO). > 12. Now, PLOGI succeeds and fc_rport_enter_ready() will not schedule > event_work, and hence the rport will never be created, eventually losing > the target after dev_loss_tmo. So, the problem here is that we were tracking the desire for the rport be restarted by state RESTART, which was otherwise equivalent to DELETE. A contributing factor is that we dropped the lock between steps 6 and 10 in thread T2, which allows the state to change, and we didn't completely re-evaluate then. This is hopefully corrected by the following minor redesign: Simplify the rport restart logic by making the decision to restart after deleting the transport rport. That decision is based on a new STARTED flag that indicates fc_rport_login() has been called and fc_rport_logoff() has not been called since then. This replaces the need for the RESTART state. Only restart if the rdata is still in DELETED state and only if it still has the STARTED flag set. Also now, since we clear the event code much later in the work thread, allow for the possibility that the rport may have become READY again via incoming PLOGI, and if so, queue another event to handle that. In the problem scenario, the second LOGO received will cause the LOGO event to occur again. Reported-by: Bhanu Gollapudi Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 75 ++++++++++++++++++------------------------- include/scsi/libfc.h | 5 ++- 2 files changed, 33 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index df85e19079fb..d385efc68c15 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -89,7 +89,6 @@ static const char *fc_rport_state_names[] = { [RPORT_ST_LOGO] = "LOGO", [RPORT_ST_ADISC] = "ADISC", [RPORT_ST_DELETE] = "Delete", - [RPORT_ST_RESTART] = "Restart", }; /** @@ -246,7 +245,6 @@ static void fc_rport_work(struct work_struct *work) struct fc_rport_operations *rport_ops; struct fc_rport_identifiers ids; struct fc_rport *rport; - int restart = 0; mutex_lock(&rdata->rp_mutex); event = rdata->event; @@ -298,24 +296,6 @@ static void fc_rport_work(struct work_struct *work) port_id = rdata->ids.port_id; mutex_unlock(&rdata->rp_mutex); - if (port_id != FC_FID_DIR_SERV) { - /* - * We must drop rp_mutex before taking disc_mutex. - * Re-evaluate state to allow for restart. - * A transition to RESTART state must only happen - * while disc_mutex is held and rdata is on the list. 
- */ - mutex_lock(&lport->disc.disc_mutex); - mutex_lock(&rdata->rp_mutex); - if (rdata->rp_state == RPORT_ST_RESTART) - restart = 1; - else - list_del(&rdata->peers); - rdata->event = RPORT_EV_NONE; - mutex_unlock(&rdata->rp_mutex); - mutex_unlock(&lport->disc.disc_mutex); - } - if (rport_ops && rport_ops->event_callback) { FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); @@ -336,13 +316,34 @@ static void fc_rport_work(struct work_struct *work) mutex_unlock(&rdata->rp_mutex); fc_remote_port_delete(rport); } - if (restart) { - mutex_lock(&rdata->rp_mutex); - FC_RPORT_DBG(rdata, "work restart\n"); - fc_rport_enter_plogi(rdata); + + mutex_lock(&lport->disc.disc_mutex); + mutex_lock(&rdata->rp_mutex); + if (rdata->rp_state == RPORT_ST_DELETE) { + if (port_id == FC_FID_DIR_SERV) { + rdata->event = RPORT_EV_NONE; + mutex_unlock(&rdata->rp_mutex); + } else if (rdata->flags & FC_RP_STARTED) { + rdata->event = RPORT_EV_NONE; + FC_RPORT_DBG(rdata, "work restart\n"); + fc_rport_enter_plogi(rdata); + mutex_unlock(&rdata->rp_mutex); + } else { + FC_RPORT_DBG(rdata, "work delete\n"); + list_del(&rdata->peers); + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, lport->tt.rport_destroy); + } + } else { + /* + * Re-open for events. Reissue READY event if ready. + */ + rdata->event = RPORT_EV_NONE; + if (rdata->rp_state == RPORT_ST_READY) + fc_rport_enter_ready(rdata); mutex_unlock(&rdata->rp_mutex); - } else - kref_put(&rdata->kref, lport->tt.rport_destroy); + } + mutex_unlock(&lport->disc.disc_mutex); break; default: @@ -367,16 +368,14 @@ int fc_rport_login(struct fc_rport_priv *rdata) { mutex_lock(&rdata->rp_mutex); + rdata->flags |= FC_RP_STARTED; switch (rdata->rp_state) { case RPORT_ST_READY: FC_RPORT_DBG(rdata, "ADISC port\n"); fc_rport_enter_adisc(rdata); break; - case RPORT_ST_RESTART: - break; case RPORT_ST_DELETE: FC_RPORT_DBG(rdata, "Restart deleted port\n"); - fc_rport_state_enter(rdata, RPORT_ST_RESTART); break; default: FC_RPORT_DBG(rdata, "Login to port\n"); @@ -431,15 +430,12 @@ int fc_rport_logoff(struct fc_rport_priv *rdata) FC_RPORT_DBG(rdata, "Remove port\n"); + rdata->flags &= ~FC_RP_STARTED; if (rdata->rp_state == RPORT_ST_DELETE) { FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); goto out; } - - if (rdata->rp_state == RPORT_ST_RESTART) - FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n"); - else - fc_rport_enter_logo(rdata); + fc_rport_enter_logo(rdata); /* * Change the state to Delete so that we discard @@ -503,7 +499,6 @@ static void fc_rport_timeout(struct work_struct *work) case RPORT_ST_READY: case RPORT_ST_INIT: case RPORT_ST_DELETE: - case RPORT_ST_RESTART: break; } @@ -527,6 +522,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) switch (rdata->rp_state) { case RPORT_ST_PLOGI: case RPORT_ST_LOGO: + rdata->flags &= ~FC_RP_STARTED; fc_rport_enter_delete(rdata, RPORT_EV_FAILED); break; case RPORT_ST_RTV: @@ -537,7 +533,6 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) fc_rport_enter_logo(rdata); break; case RPORT_ST_DELETE: - case RPORT_ST_RESTART: case RPORT_ST_READY: case RPORT_ST_INIT: break; @@ -1392,7 +1387,6 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, break; case RPORT_ST_DELETE: case RPORT_ST_LOGO: - case RPORT_ST_RESTART: FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", fc_rport_state(rdata)); mutex_unlock(&rdata->rp_mutex); @@ -1684,13 +1678,6 @@ static void fc_rport_recv_logo_req(struct fc_lport 
*lport, fc_rport_state(rdata)); fc_rport_enter_delete(rdata, RPORT_EV_LOGO); - - /* - * If the remote port was created due to discovery, set state - * to log back in. It may have seen a stale RSCN about us. - */ - if (rdata->disc_id) - fc_rport_state_enter(rdata, RPORT_ST_RESTART); mutex_unlock(&rdata->rp_mutex); } else FC_RPORT_ID_DBG(lport, sid, diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 7495c0ba67ee..db54c4a2d14b 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -104,7 +104,6 @@ enum fc_disc_event { * @RPORT_ST_LOGO: Remote port logout (LOGO) sent * @RPORT_ST_ADISC: Discover Address sent * @RPORT_ST_DELETE: Remote port being deleted - * @RPORT_ST_RESTART: Remote port being deleted and will restart */ enum fc_rport_state { RPORT_ST_INIT, @@ -115,7 +114,6 @@ enum fc_rport_state { RPORT_ST_LOGO, RPORT_ST_ADISC, RPORT_ST_DELETE, - RPORT_ST_RESTART, }; /** @@ -173,6 +171,7 @@ struct fc_rport_libfc_priv { u16 flags; #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) #define FC_RP_FLAGS_RETRY (1 << 1) + #define FC_RP_STARTED (1 << 2) unsigned int e_d_tov; unsigned int r_a_tov; }; @@ -185,7 +184,7 @@ struct fc_rport_libfc_priv { * @rp_state: Enumeration that tracks progress of PLOGI, PRLI, * and RTV exchanges * @ids: The remote port identifiers and roles - * @flags: REC and RETRY supported flags + * @flags: STARTED, REC and RETRY_SUPPORTED flags * @max_seq: Maximum number of concurrent sequences * @disc_id: The discovery identifier * @maxframe_size: The maximum frame size -- cgit v1.2.3 From f034260db330bb3ffc815fcb682b1c84aca09591 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Fri, 11 Jun 2010 16:44:57 -0700 Subject: [SCSI] libfc: fix indefinite rport restart Remote ports were restarting indefinitely after getting rejects in PRLI. Fix by adding a counter of restarts and limiting that with the port login retry limit as well. 
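In outline, the bounded-restart decision added to fc_rport_work() looks like the sketch below (a condensed illustration only; field and limit names are those used in the diff that follows, and the locking and debug messages are omitted):

	if ((rdata->flags & FC_RP_STARTED) &&
	    rdata->major_retries < lport->max_rport_retry_count) {
		rdata->major_retries++;		/* count this restart attempt */
		rdata->event = RPORT_EV_NONE;
		fc_rport_enter_plogi(rdata);	/* log back in to the remote port */
	} else {
		list_del(&rdata->peers);	/* retries exhausted: delete for good */
		kref_put(&rdata->kref, lport->tt.rport_destroy);
	}
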
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 6 +++++- include/scsi/libfc.h | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index d385efc68c15..363cde30c940 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -257,6 +257,7 @@ static void fc_rport_work(struct work_struct *work) case RPORT_EV_READY: ids = rdata->ids; rdata->event = RPORT_EV_NONE; + rdata->major_retries = 0; kref_get(&rdata->kref); mutex_unlock(&rdata->rp_mutex); @@ -323,7 +324,10 @@ static void fc_rport_work(struct work_struct *work) if (port_id == FC_FID_DIR_SERV) { rdata->event = RPORT_EV_NONE; mutex_unlock(&rdata->rp_mutex); - } else if (rdata->flags & FC_RP_STARTED) { + } else if ((rdata->flags & FC_RP_STARTED) && + rdata->major_retries < + lport->max_rport_retry_count) { + rdata->major_retries++; rdata->event = RPORT_EV_NONE; FC_RPORT_DBG(rdata, "work restart\n"); fc_rport_enter_plogi(rdata); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index db54c4a2d14b..6d78df77dab6 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -189,6 +189,7 @@ struct fc_rport_libfc_priv { * @disc_id: The discovery identifier * @maxframe_size: The maximum frame size * @retries: The retry count for the current state + * @major_retries: The retry count for the entire PLOGI/PRLI state machine * @e_d_tov: Error detect timeout value (in msec) * @r_a_tov: Resource allocation timeout value (in msec) * @rp_mutex: The mutex that protects the remote port @@ -206,6 +207,7 @@ struct fc_rport_priv { u16 disc_id; u16 maxframe_size; unsigned int retries; + unsigned int major_retries; unsigned int e_d_tov; unsigned int r_a_tov; struct mutex rp_mutex; -- cgit v1.2.3 From 3b2bef1fc85f127a99ad6b90a94b033fdc57341c Mon Sep 17 00:00:00 2001 From: Vikas Chaudhary Date: Sat, 10 Jul 2010 14:51:30 +0530 Subject: [SCSI] iscsi_transport: added new iscsi_param to display target alias in sysfs Signed-off-by: Vikas Chaudhary Signed-off-by: Ravi Anand Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/qla4xxx/ql4_init.c | 3 +++ drivers/scsi/qla4xxx/ql4_os.c | 7 ++++++- drivers/scsi/scsi_transport_iscsi.c | 6 ++++-- include/scsi/iscsi_if.h | 2 ++ 4 files changed, 15 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index e6b73b9fcc5a..266ebd45396d 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -594,6 +594,9 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha, memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], min(sizeof(ddb_entry->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); + memcpy(&ddb_entry->iscsi_alias[0], &fw_ddb_entry->iscsi_alias[0], + min(sizeof(ddb_entry->iscsi_alias), + sizeof(fw_ddb_entry->iscsi_alias))); memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0], min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr))); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index daf5a4bf9b0a..821384147a41 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -126,7 +126,8 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD | CAP_DATA_PATH_OFFLOAD, .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | - ISCSI_TARGET_NAME | ISCSI_TPGT, + ISCSI_TARGET_NAME | 
ISCSI_TPGT | + ISCSI_TARGET_ALIAS, .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | ISCSI_HOST_INITIATOR_NAME, @@ -210,6 +211,10 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess, case ISCSI_PARAM_TPGT: len = sprintf(buf, "%u\n", ddb_entry->tpgt); break; + case ISCSI_PARAM_TARGET_ALIAS: + len = snprintf(buf, PAGE_SIZE - 1, "%s\n", + ddb_entry->iscsi_alias); + break; default: return -ENOSYS; } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 1e6d4793542c..b9aec304872c 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -31,7 +31,7 @@ #include #include -#define ISCSI_SESSION_ATTRS 22 +#define ISCSI_SESSION_ATTRS 23 #define ISCSI_CONN_ATTRS 13 #define ISCSI_HOST_ATTRS 4 @@ -1763,7 +1763,8 @@ iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0); iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); -iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0) +iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0); +iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0); static ssize_t show_priv_session_state(struct device *dev, struct device_attribute *attr, @@ -2006,6 +2007,7 @@ iscsi_register_transport(struct iscsi_transport *tt) SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO); SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME); SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME); + SETUP_SESSION_RD_ATTR(targetalias, ISCSI_TARGET_ALIAS); SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); SETUP_PRIV_SESSION_RD_ATTR(state); diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index 66d377b9c72b..a8631acd37c3 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -313,6 +313,7 @@ enum iscsi_param { ISCSI_PARAM_INITIATOR_NAME, ISCSI_PARAM_TGT_RESET_TMO, + ISCSI_PARAM_TARGET_ALIAS, /* must always be last */ ISCSI_PARAM_MAX, }; @@ -353,6 +354,7 @@ enum iscsi_param { #define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID) #define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME) #define ISCSI_TGT_RESET_TMO (1ULL << ISCSI_PARAM_TGT_RESET_TMO) +#define ISCSI_TARGET_ALIAS (1ULL << ISCSI_PARAM_TARGET_ALIAS) /* iSCSI HBA params */ enum iscsi_host_param { -- cgit v1.2.3 From d058fd31c7f44960b00566bda39c85377f461a7b Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 20 Jul 2010 15:19:15 -0700 Subject: [SCSI] fcoe: make it possible to verify fcoe with sparse Analyzing fcoe with sparse currently fails. This is because struct fcoe_rcv_info contains two enum members that have been declared with __attribute__((packed)). Apparently gcc honors this attribute while sparse ignores it. The result is that sizeof(struct fcoe_rcv_info) == sizeof(struct sk_buff::cb) == 48 on a 64-bit system according to gcc, but not according to sparse. The patch below modifies the definition of struct fcoe_rcv_info such that gcc and sparse interpret this structure definition in the same way. The current sparse output is as follows: $ cd linux-2.6.34 $ make C=2 M=drivers/scsi/fcoe modules CHECK drivers/scsi/fcoe/fcoe.c include/scsi/fc_frame.h:81:9: error: invalid bitfield width, -1. CC [M] drivers/scsi/fcoe/fcoe.o CHECK drivers/scsi/fcoe/libfcoe.c include/scsi/fc_frame.h:81:9: error: invalid bitfield width, -1. 
drivers/scsi/fcoe/libfcoe.c:56:37: error: invalid initializer Signed-off-by: Bart Van Assche Cc: jeykholt@cisco.com Signed-off-by: Robert Love Signed-off-by: James Bottomley --- include/scsi/fc_frame.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h index 4d3e9c7b7c57..15427fab8a57 100644 --- a/include/scsi/fc_frame.h +++ b/include/scsi/fc_frame.h @@ -66,8 +66,8 @@ struct fcoe_rcv_info { struct fc_fcp_pkt *fr_fsp; /* for the corresponding fcp I/O */ u32 fr_crc; u16 fr_max_payload; /* max FC payload */ - enum fc_sof fr_sof; /* start of frame delimiter */ - enum fc_eof fr_eof; /* end of frame delimiter */ + u8 fr_sof; /* start of frame delimiter */ + u8 fr_eof; /* end of frame delimiter */ u8 fr_flags; /* flags - see below */ u8 granted_mac[ETH_ALEN]; /* FCoE MAC address */ }; -- cgit v1.2.3 From 519e5135e2537c9dbc1cbcc0891b0a936ff5dcd2 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 20 Jul 2010 15:19:32 -0700 Subject: [SCSI] fcoe: adds src and dest mac address checking for fcoe frames This is per FC-BB-5 Annex-D recommendation and per that if address checking fails then drop the frame. FIP code paths are already doing this so only needed for fcoe frames. The src address checking is limited to only fip mode since this might break non-fip mode used in p2p due to used OUI based addressing in some p2p code paths, going forward FIP will be the only mode, therefore limited this to only FIP mode so that it won't break non-fip p2p mode for now. -v2 Removes FCOE packet type checking since fcoe_rcv is registered to receive only FCoE type packets from netdev and it is already checked by netdev. Signed-off-by: Vasu Dev Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 20 +++++++++++++++++--- include/scsi/libfcoe.h | 10 ++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index d340cf2d857a..a120962b25b8 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1210,6 +1210,8 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, struct fcoe_interface *fcoe; struct fc_frame_header *fh; struct fcoe_percpu_s *fps; + struct fcoe_port *port; + struct ethhdr *eh; unsigned int cpu; fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); @@ -1227,9 +1229,21 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, skb_tail_pointer(skb), skb_end_pointer(skb), skb->csum, skb->dev ? 
skb->dev->name : ""); - /* check for FCOE packet type */ - if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { - FCOE_NETDEV_DBG(netdev, "Wrong FC type frame"); + /* check for mac addresses */ + eh = eth_hdr(skb); + port = lport_priv(lport); + if (compare_ether_addr(eh->h_dest, port->data_src_addr) && + compare_ether_addr(eh->h_dest, fcoe->ctlr.ctl_src_addr) && + compare_ether_addr(eh->h_dest, (u8[6])FC_FCOE_FLOGI_MAC)) { + FCOE_NETDEV_DBG(netdev, "wrong destination mac address:%pM\n", + eh->h_dest); + goto err; + } + + if (is_fip_mode(&fcoe->ctlr) && + compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) { + FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", + eh->h_source); goto err; } diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index ec13f51531f8..81aee1c4c2f3 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -170,4 +170,14 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *, u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *); +/** + * is_fip_mode() - returns true if FIP mode selected. + * @fip: FCoE controller. + */ +static inline bool is_fip_mode(struct fcoe_ctlr *fip) +{ + return fip->state == FIP_ST_ENABLED; +} + + #endif /* _LIBFCOE_H */ -- cgit v1.2.3 From 42e9041467cf5fd33501b91b27e26807c259c896 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:19:37 -0700 Subject: [SCSI] libfc: convert rport lookup to be RCU safe To allow LLD to do lookups on rports without grabbing a mutex, make them RCU-safe. The caller of lport->tt.rport_lookup will have the choice of holding disc_mutex or the rcu_read_lock(). Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 6 +++--- drivers/scsi/libfc/fc_rport.c | 22 ++++++++++++++++++---- include/scsi/libfc.h | 2 ++ 3 files changed, 23 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index c7985da88099..d0fa9a0ddc8d 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -63,12 +63,12 @@ static void fc_disc_restart(struct fc_disc *); void fc_disc_stop_rports(struct fc_disc *disc) { struct fc_lport *lport; - struct fc_rport_priv *rdata, *next; + struct fc_rport_priv *rdata; lport = disc->lport; mutex_lock(&disc->disc_mutex); - list_for_each_entry_safe(rdata, next, &disc->rports, peers) + list_for_each_entry_rcu(rdata, &disc->rports, peers) lport->tt.rport_logoff(rdata); mutex_unlock(&disc->disc_mutex); } @@ -292,7 +292,7 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) * Skip ports which were never discovered. These are the dNS port * and ports which were created by PLOGI. */ - list_for_each_entry(rdata, &disc->rports, peers) { + list_for_each_entry_rcu(rdata, &disc->rports, peers) { if (!rdata->disc_id) continue; if (rdata->disc_id == disc->disc_id) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 363cde30c940..6b569732f892 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -95,13 +95,15 @@ static const char *fc_rport_state_names[] = { * fc_rport_lookup() - Lookup a remote port by port_id * @lport: The local port to lookup the remote port on * @port_id: The remote port ID to look up + * + * The caller must hold either disc_mutex or rcu_read_lock(). 
*/ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, u32 port_id) { struct fc_rport_priv *rdata; - list_for_each_entry(rdata, &lport->disc.rports, peers) + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) if (rdata->ids.port_id == port_id) return rdata; return NULL; @@ -146,10 +148,22 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); INIT_WORK(&rdata->event_work, fc_rport_work); if (port_id != FC_FID_DIR_SERV) - list_add(&rdata->peers, &lport->disc.rports); + list_add_rcu(&rdata->peers, &lport->disc.rports); return rdata; } +/** + * fc_rport_free_rcu() - Free a remote port + * @rcu: The rcu_head structure inside the remote port + */ +static void fc_rport_free_rcu(struct rcu_head *rcu) +{ + struct fc_rport_priv *rdata; + + rdata = container_of(rcu, struct fc_rport_priv, rcu); + kfree(rdata); +} + /** * fc_rport_destroy() - Free a remote port after last reference is released * @kref: The remote port's kref @@ -159,7 +173,7 @@ static void fc_rport_destroy(struct kref *kref) struct fc_rport_priv *rdata; rdata = container_of(kref, struct fc_rport_priv, kref); - kfree(rdata); + call_rcu(&rdata->rcu, fc_rport_free_rcu); } /** @@ -334,7 +348,7 @@ static void fc_rport_work(struct work_struct *work) mutex_unlock(&rdata->rp_mutex); } else { FC_RPORT_DBG(rdata, "work delete\n"); - list_del(&rdata->peers); + list_del_rcu(&rdata->peers); mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, lport->tt.rport_destroy); } diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 6d78df77dab6..b0310b9b3469 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -195,6 +195,7 @@ struct fc_rport_libfc_priv { * @rp_mutex: The mutex that protects the remote port * @retry_work: Handle for retries * @event_callback: Callback when READY, FAILED or LOGO states complete + * @rcu: Structure used for freeing in an RCU-safe manner */ struct fc_rport_priv { struct fc_lport *local_port; @@ -217,6 +218,7 @@ struct fc_rport_priv { struct list_head peers; struct work_struct event_work; u32 supported_classes; + struct rcu_head rcu; }; /** -- cgit v1.2.3 From f90377abcab2e305450ee76a0f9042907560c5d8 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:19:42 -0700 Subject: [SCSI] libfc: provide space for LLD after remote port structure Add pre-zeroed space after the allocation for fc_rport_priv for use by the lower-level driver. This is primarily for VN2VN FIP mode, but could be used in other ways someday. The space required is specified in lport->rport_priv_size. 
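As an illustration only (the structure and accessor below are hypothetical; the patch itself just reserves and zeroes the extra space), a lower-level driver might use the new field roughly like this:

	struct my_rport_priv {			/* hypothetical per-rport LLD state */
		u32 flags;
	};

	/* at lport setup time, before any remote ports are created: */
	lport->rport_priv_size = sizeof(struct my_rport_priv);

	/* later, for an rdata allocated by libfc, the pre-zeroed LLD area
	 * sits immediately after struct fc_rport_priv: */
	struct my_rport_priv *priv = (struct my_rport_priv *)(rdata + 1);
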
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 2 +- include/scsi/libfc.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 6b569732f892..6d68482649c9 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -127,7 +127,7 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, if (rdata) return rdata; - rdata = kzalloc(sizeof(*rdata), GFP_KERNEL); + rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL); if (!rdata) return NULL; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index b0310b9b3469..fcbee8c38b0c 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -799,6 +799,7 @@ struct fc_disc { * @mfs: The maximum Fibre Channel payload size * @max_retry_count: The maximum retry attempts * @max_rport_retry_count: The maximum remote port retry attempts + * @rport_priv_size: Size needed by driver after struct fc_rport_priv * @lro_xid: The maximum XID for LRO * @lso_max: The maximum large offload send size * @fcts: FC-4 type mask @@ -848,6 +849,7 @@ struct fc_lport { u32 mfs; u8 max_retry_count; u8 max_rport_retry_count; + u16 rport_priv_size; u16 link_speed; u16 link_supported_speeds; u16 lro_xid; -- cgit v1.2.3 From fdb068c6cd6e30d43664f856d3530715a5742713 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:19:47 -0700 Subject: [SCSI] libfcoe: convert FIP to lock with mutex instead of spin lock It turns out most of the FIP work is now done from worker threads or process context now, so there's no need to use a spin lock. Change to use mutex instead of spin lock and delayed_work instead of a timer. This will make it nicer for the VN_port to VN_port feature that will interact more with the libfc layers requiring that spinlocks not be held. 
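The change follows the usual pattern for state that is now touched only from process context (a sketch, not the full conversion; the field name matches the patch below, and the elided comments stand for the existing FIP logic):

	/* before: FIP state could be reached from timer/softirq context,
	 * so bottom halves had to be disabled around a spin lock */
	spin_lock_bh(&fip->lock);
	/* inspect or update fip->state */
	spin_unlock_bh(&fip->lock);

	/* after: all users run from worker threads or process context,
	 * so a mutex suffices and holders are allowed to sleep */
	mutex_lock(&fip->ctlr_mutex);
	/* inspect or update fip->state */
	mutex_unlock(&fip->ctlr_mutex);
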
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 116 +++++++++++++++++++++----------------------- include/scsi/libfcoe.h | 9 +--- 2 files changed, 57 insertions(+), 68 deletions(-) (limited to 'include') diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index f009191063f1..e510888e78ca 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -113,7 +113,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip) fip->state = FIP_ST_LINK_WAIT; fip->mode = FIP_ST_AUTO; INIT_LIST_HEAD(&fip->fcfs); - spin_lock_init(&fip->lock); + mutex_init(&fip->ctlr_mutex); fip->flogi_oxid = FC_XID_UNKNOWN; setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip); INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work); @@ -159,10 +159,10 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) cancel_work_sync(&fip->recv_work); skb_queue_purge(&fip->fip_recv_list); - spin_lock_bh(&fip->lock); + mutex_lock(&fip->ctlr_mutex); fip->state = FIP_ST_DISABLED; fcoe_ctlr_reset_fcfs(fip); - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); del_timer_sync(&fip->timer); cancel_work_sync(&fip->timer_work); } @@ -255,19 +255,19 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) */ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) { - spin_lock_bh(&fip->lock); + mutex_lock(&fip->ctlr_mutex); if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) { - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); fc_linkup(fip->lp); } else if (fip->state == FIP_ST_LINK_WAIT) { fip->state = fip->mode; - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); if (fip->state == FIP_ST_AUTO) LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); fc_linkup(fip->lp); fcoe_ctlr_solicit(fip, NULL); } else - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); } EXPORT_SYMBOL(fcoe_ctlr_link_up); @@ -300,11 +300,11 @@ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip) int link_dropped; LIBFCOE_FIP_DBG(fip, "link down.\n"); - spin_lock_bh(&fip->lock); + mutex_lock(&fip->ctlr_mutex); fcoe_ctlr_reset(fip); link_dropped = fip->state != FIP_ST_LINK_WAIT; fip->state = FIP_ST_LINK_WAIT; - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); if (link_dropped) fc_linkdown(fip->lp); @@ -577,12 +577,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) unsigned long sel_time = 0; struct fcoe_dev_stats *stats; + stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu()); + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; if (fip->sel_fcf == fcf) { if (time_after(jiffies, deadline)) { - stats = per_cpu_ptr(fip->lp->dev_stats, - smp_processor_id()); stats->MissDiscAdvCount++; printk(KERN_INFO "libfcoe: host%d: " "Missing Discovery Advertisement " @@ -601,8 +601,6 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) WARN_ON(!fip->fcf_count); fip->fcf_count--; kfree(fcf); - stats = per_cpu_ptr(fip->lp->dev_stats, - smp_processor_id()); stats->VLinkFailureCount++; } else { if (time_after(next_timer, deadline)) @@ -612,6 +610,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) sel_time = fcf->time; } } + put_cpu(); if (sel_time && !fip->sel_fcf && !fip->sel_time) { sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); fip->sel_time = sel_time; @@ -768,7 +767,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) if (fcoe_ctlr_parse_adv(fip, skb, &new)) return; - 
spin_lock_bh(&fip->lock); + mutex_lock(&fip->ctlr_mutex); first = list_empty(&fip->fcfs); found = NULL; list_for_each_entry(fcf, &fip->fcfs, list) { @@ -847,7 +846,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) mod_timer(&fip->timer, fip->sel_time); } out: - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); } /** @@ -1108,11 +1107,12 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, if (is_vn_port) fc_lport_reset(vn_port); else { - spin_lock_bh(&fip->lock); + mutex_lock(&fip->ctlr_mutex); per_cpu_ptr(lport->dev_stats, - smp_processor_id())->VLinkFailureCount++; + get_cpu())->VLinkFailureCount++; + put_cpu(); fcoe_ctlr_reset(fip); - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); fc_lport_reset(fip->lp); fcoe_ctlr_solicit(fip, NULL); @@ -1166,7 +1166,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) goto drop; - spin_lock_bh(&fip->lock); + mutex_lock(&fip->ctlr_mutex); state = fip->state; if (state == FIP_ST_AUTO) { fip->map_dest = 0; @@ -1174,7 +1174,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) state = FIP_ST_ENABLED; LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); } - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); if (state != FIP_ST_ENABLED) goto drop; @@ -1240,19 +1240,38 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) /** * fcoe_ctlr_timeout() - FIP timeout handler * @arg: The FCoE controller that timed out - * - * Ages FCFs. Triggers FCF selection if possible. Sends keep-alives. */ static void fcoe_ctlr_timeout(unsigned long arg) { struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg; + + schedule_work(&fip->timer_work); +} + +/** + * fcoe_ctlr_timer_work() - Worker thread function for timer work + * @work: Handle to a FCoE controller + * + * Ages FCFs. Triggers FCF selection if possible. + * Sends keep-alives and resets. + */ +static void fcoe_ctlr_timer_work(struct work_struct *work) +{ + struct fcoe_ctlr *fip; + struct fc_lport *vport; + u8 *mac; + u8 reset = 0; + u8 send_ctlr_ka = 0; + u8 send_port_ka = 0; struct fcoe_fcf *sel; struct fcoe_fcf *fcf; unsigned long next_timer; - spin_lock_bh(&fip->lock); + fip = container_of(work, struct fcoe_ctlr, timer_work); + + mutex_lock(&fip->ctlr_mutex); if (fip->state == FIP_ST_DISABLED) { - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); return; } @@ -1286,7 +1305,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) "FIP Fibre-Channel Forwarder timed out. 
" "Starting FCF discovery.\n", fip->lp->host->host_no); - fip->reset_req = 1; + reset = 1; schedule_work(&fip->timer_work); } } @@ -1294,7 +1313,7 @@ static void fcoe_ctlr_timeout(unsigned long arg) if (sel && !sel->fd_flags) { if (time_after_eq(jiffies, fip->ctlr_ka_time)) { fip->ctlr_ka_time = jiffies + sel->fka_period; - fip->send_ctlr_ka = 1; + send_ctlr_ka = 1; } if (time_after(next_timer, fip->ctlr_ka_time)) next_timer = fip->ctlr_ka_time; @@ -1302,37 +1321,14 @@ static void fcoe_ctlr_timeout(unsigned long arg) if (time_after_eq(jiffies, fip->port_ka_time)) { fip->port_ka_time = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); - fip->send_port_ka = 1; + send_port_ka = 1; } if (time_after(next_timer, fip->port_ka_time)) next_timer = fip->port_ka_time; } if (!list_empty(&fip->fcfs)) mod_timer(&fip->timer, next_timer); - if (fip->send_ctlr_ka || fip->send_port_ka) - schedule_work(&fip->timer_work); - spin_unlock_bh(&fip->lock); -} - -/** - * fcoe_ctlr_timer_work() - Worker thread function for timer work - * @work: Handle to a FCoE controller - * - * Sends keep-alives and resets which must not - * be called from the timer directly, since they use a mutex. - */ -static void fcoe_ctlr_timer_work(struct work_struct *work) -{ - struct fcoe_ctlr *fip; - struct fc_lport *vport; - u8 *mac; - int reset; - - fip = container_of(work, struct fcoe_ctlr, timer_work); - spin_lock_bh(&fip->lock); - reset = fip->reset_req; - fip->reset_req = 0; - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); if (reset) { fc_lport_reset(fip->lp); @@ -1340,12 +1336,10 @@ static void fcoe_ctlr_timer_work(struct work_struct *work) fcoe_ctlr_solicit(fip, NULL); } - if (fip->send_ctlr_ka) { - fip->send_ctlr_ka = 0; + if (send_ctlr_ka) fcoe_ctlr_send_keep_alive(fip, NULL, 0, fip->ctl_src_addr); - } - if (fip->send_port_ka) { - fip->send_port_ka = 0; + + if (send_port_ka) { mutex_lock(&fip->lp->lp_mutex); mac = fip->get_src_addr(fip->lp); fcoe_ctlr_send_keep_alive(fip, fip->lp, 1, mac); @@ -1402,9 +1396,9 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP && fip->flogi_oxid == ntohs(fh->fh_ox_id)) { - spin_lock_bh(&fip->lock); + mutex_lock(&fip->ctlr_mutex); if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) { - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); return -EINVAL; } fip->state = FIP_ST_NON_FIP; @@ -1424,13 +1418,13 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, fip->map_dest = 0; } fip->flogi_oxid = FC_XID_UNKNOWN; - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id); } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) { /* * Save source MAC for point-to-point responses. */ - spin_lock_bh(&fip->lock); + mutex_lock(&fip->ctlr_mutex); if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) { memcpy(fip->dest_addr, sa, ETH_ALEN); fip->map_dest = 0; @@ -1439,7 +1433,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, "Setting non-FIP mode\n"); fip->state = FIP_ST_NON_FIP; } - spin_unlock_bh(&fip->lock); + mutex_unlock(&fip->ctlr_mutex); } return 0; } diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 81aee1c4c2f3..7d18b500f2c1 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -75,14 +75,12 @@ enum fip_state { * @flogi_count: number of FLOGI attempts in AUTO mode. * @map_dest: use the FC_MAP mode for destination MAC addresses. 
* @spma: supports SPMA server-provided MACs mode - * @send_ctlr_ka: need to send controller keep alive - * @send_port_ka: need to send port keep alives * @dest_addr: MAC address of the selected FC forwarder. * @ctl_src_addr: the native MAC address of our local port. * @send: LLD-supplied function to handle sending FIP Ethernet frames * @update_mac: LLD-supplied function to handle changes to MAC addresses. * @get_src_addr: LLD-supplied function to supply a source MAC address. - * @lock: lock protecting this structure. + * @ctlr_mutex: lock protecting this structure. * * This structure is used by all FCoE drivers. It contains information * needed by all FCoE low-level drivers (LLDs) as well as internal state @@ -106,18 +104,15 @@ struct fcoe_ctlr { u16 user_mfs; u16 flogi_oxid; u8 flogi_count; - u8 reset_req; u8 map_dest; u8 spma; - u8 send_ctlr_ka; - u8 send_port_ka; u8 dest_addr[ETH_ALEN]; u8 ctl_src_addr[ETH_ALEN]; void (*send)(struct fcoe_ctlr *, struct sk_buff *); void (*update_mac)(struct fc_lport *, u8 *addr); u8 * (*get_src_addr)(struct fc_lport *); - spinlock_t lock; + struct mutex ctlr_mutex; }; /** -- cgit v1.2.3 From 0685230c59b5482e04ab50e7afc51119ceaba651 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:19:53 -0700 Subject: [SCSI] libfc: add discovery-private pointer for LLD For VN_port to VN_port mode, FIP will do discovery and needs a way to find its state from the local port or discovery structure. It seems that any other LLD that implements its own discovery would also need something like this. Replace disc->lport with disc->priv, and use container_of to find the lport. We could use disc->priv for that, but container_of is smaller and faster. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_disc.c | 14 +++++++------- drivers/scsi/libfc/fc_libfc.h | 2 +- include/scsi/libfc.h | 9 +++++++-- 3 files changed, 15 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index d0fa9a0ddc8d..04474556f2dc 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -65,7 +65,7 @@ void fc_disc_stop_rports(struct fc_disc *disc) struct fc_lport *lport; struct fc_rport_priv *rdata; - lport = disc->lport; + lport = fc_disc_lport(disc); mutex_lock(&disc->disc_mutex); list_for_each_entry_rcu(rdata, &disc->rports, peers) @@ -96,7 +96,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, LIST_HEAD(disc_ports); struct fc_disc_port *dp, *next; - lport = disc->lport; + lport = fc_disc_lport(disc); FC_DISC_DBG(disc, "Received an RSCN event\n"); @@ -275,7 +275,7 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, */ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) { - struct fc_lport *lport = disc->lport; + struct fc_lport *lport = fc_disc_lport(disc); struct fc_rport_priv *rdata; FC_DISC_DBG(disc, "Discovery complete\n"); @@ -313,7 +313,7 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) */ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) { - struct fc_lport *lport = disc->lport; + struct fc_lport *lport = fc_disc_lport(disc); unsigned long delay = 0; FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n", @@ -353,7 +353,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) static void fc_disc_gpn_ft_req(struct fc_disc *disc) { struct fc_frame *fp; - struct fc_lport *lport = disc->lport; + struct fc_lport 
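For illustration (the structure below is hypothetical; fc_disc_lport() is the helper added by this patch), an LLD that performs its own discovery could now do roughly the following:

	struct my_disc_state {			/* hypothetical LLD discovery state */
		int probes_pending;
	};
	static struct my_disc_state my_state;

	/* the LLD publishes its own state through the private pointer: */
	lport->disc.priv = &my_state;

	/* while code that only holds the fc_disc can still reach the lport: */
	struct fc_lport *lport = fc_disc_lport(disc);
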
*lport = fc_disc_lport(disc); WARN_ON(!fc_lport_test_ready(lport)); @@ -396,7 +396,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) struct fc_rport_identifiers ids; struct fc_rport_priv *rdata; - lport = disc->lport; + lport = fc_disc_lport(disc); disc->seq_count++; /* @@ -733,7 +733,7 @@ int fc_disc_init(struct fc_lport *lport) mutex_init(&disc->disc_mutex); INIT_LIST_HEAD(&disc->rports); - disc->lport = lport; + disc->priv = lport; return 0; } diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index f5c0ca4b6ef8..16d2162dda1f 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h @@ -52,7 +52,7 @@ extern unsigned int fc_debug_logging; #define FC_DISC_DBG(disc, fmt, args...) \ FC_CHECK_LOGGING(FC_DISC_LOGGING, \ printk(KERN_INFO "host%u: disc: " fmt, \ - (disc)->lport->host->host_no, \ + fc_disc_lport(disc)->host->host_no, \ ##args)) #define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index fcbee8c38b0c..5f64e593cca8 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -739,7 +739,7 @@ struct libfc_function_template { * @buf_len: Length of the discovery buffer * @disc_id: Discovery ID * @rports: List of discovered remote ports - * @lport: The local port that discovery is for + * @priv: Private pointer for use by discovery code * @disc_mutex: Mutex that protects the discovery context * @partial_buf: Partial name buffer (if names are returned * in multiple frames) @@ -755,7 +755,7 @@ struct fc_disc { u16 disc_id; struct list_head rports; - struct fc_lport *lport; + void *priv; struct mutex disc_mutex; struct fc_gpn_ft_resp partial_buf; struct delayed_work disc_work; @@ -1003,6 +1003,11 @@ void fc_rport_terminate_io(struct fc_rport *); *****************************/ int fc_disc_init(struct fc_lport *); +static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc) +{ + return container_of(disc, struct fc_lport, disc); +} + /* * FCP LAYER *****************************/ -- cgit v1.2.3 From 3d902ac09a2812b359edf633425d1327a18399e9 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:19:58 -0700 Subject: [SCSI] libfcoe: fcoe: fnic: change fcoe_ctlr_init interface to specify mode There are three modes that libfcoe currently supports, and a new one is coming. Change the fcoe_ctlr_init() interface to add the mode desired. This should not change any functionality. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 2 +- drivers/scsi/fcoe/libfcoe.c | 4 ++-- drivers/scsi/fnic/fnic_main.c | 4 ++-- include/scsi/libfcoe.h | 11 ++++++++++- 4 files changed, 15 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index a120962b25b8..9d64e08305c7 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -357,7 +357,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev) /* * Initialize FIP. 
*/ - fcoe_ctlr_init(&fcoe->ctlr); + fcoe_ctlr_init(&fcoe->ctlr, FIP_MODE_AUTO); fcoe->ctlr.send = fcoe_fip_send; fcoe->ctlr.update_mac = fcoe_update_src_mac; fcoe->ctlr.get_src_addr = fcoe_get_src_mac; diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index e510888e78ca..76056e4c9297 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -108,10 +108,10 @@ static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf) * fcoe_ctlr_init() - Initialize the FCoE Controller instance * @fip: The FCoE controller to initialize */ -void fcoe_ctlr_init(struct fcoe_ctlr *fip) +void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) { fip->state = FIP_ST_LINK_WAIT; - fip->mode = FIP_ST_AUTO; + fip->mode = mode; INIT_LIST_HEAD(&fip->fcfs); mutex_init(&fip->ctlr_mutex); fip->flogi_oxid = FC_XID_UNKNOWN; diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 265e73d9cd6f..d0fe1c3345b8 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -617,7 +617,6 @@ static int __devinit fnic_probe(struct pci_dev *pdev, fnic->ctlr.send = fnic_eth_send; fnic->ctlr.update_mac = fnic_update_mac; fnic->ctlr.get_src_addr = fnic_get_mac; - fcoe_ctlr_init(&fnic->ctlr); if (fnic->config.flags & VFCF_FIP_CAPABLE) { shost_printk(KERN_INFO, fnic->lport->host, "firmware supports FIP\n"); @@ -625,10 +624,11 @@ static int __devinit fnic_probe(struct pci_dev *pdev, vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); } else { shost_printk(KERN_INFO, fnic->lport->host, "firmware uses non-FIP mode\n"); - fnic->ctlr.mode = FIP_ST_NON_FIP; + fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); } fnic->state = FNIC_IN_FC_MODE; diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 7d18b500f2c1..1a84a3182da0 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -54,6 +54,15 @@ enum fip_state { FIP_ST_ENABLED, }; +/* + * Modes: + * The mode is the state that is to be entered after link up. + * It must not change after fcoe_ctlr_init() sets it. + */ +#define FIP_MODE_AUTO FIP_ST_AUTO +#define FIP_MODE_NON_FIP FIP_ST_NON_FIP +#define FIP_MODE_FABRIC FIP_ST_ENABLED + /** * struct fcoe_ctlr - FCoE Controller and FIP state * @state: internal FIP state for network link and FIP or non-FIP mode. @@ -152,7 +161,7 @@ struct fcoe_fcf { }; /* FIP API functions */ -void fcoe_ctlr_init(struct fcoe_ctlr *); +void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_state); void fcoe_ctlr_destroy(struct fcoe_ctlr *); void fcoe_ctlr_link_up(struct fcoe_ctlr *); int fcoe_ctlr_link_down(struct fcoe_ctlr *); -- cgit v1.2.3 From 3726f3584e113697b68d3d4ff1ecf1042a06f800 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:20:03 -0700 Subject: [SCSI] libfc: Add local port point-to-multipoint flag For VN_port to VN_port mode, the transport sets the port_id and there's no lport FLOGI. This is similar to FC loop mode. Add a point_to_multipoint flag that indicates the local port is in point-to-multipoint mode. This skips FLOGI and discovery. It also skips resetting the port_id on resets other than link down. Add function fc_lport_set_local_id() that sets the local port_id. This is called by libfcoe on behalf of the low-level driver to set the port_id when the link comes up. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 36 +++++++++++++++++++++++++++++++++++- include/scsi/libfc.h | 2 ++ 2 files changed, 37 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 79c9e3ccd341..f7bff2cad4ee 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -754,6 +754,34 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id, lport->tt.lport_set_port_id(lport, port_id, fp); } +/** + * fc_lport_set_port_id() - set the local port Port ID for point-to-multipoint + * @lport: The local port which will have its Port ID set. + * @port_id: The new port ID. + * + * Called by the lower-level driver when transport sets the local port_id. + * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and + * discovery to be skipped. + */ +void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id) +{ + mutex_lock(&lport->lp_mutex); + + fc_lport_set_port_id(lport, port_id, NULL); + + switch (lport->state) { + case LPORT_ST_RESET: + case LPORT_ST_FLOGI: + if (port_id) + fc_lport_enter_ready(lport); + break; + default: + break; + } + mutex_unlock(&lport->lp_mutex); +} +EXPORT_SYMBOL(fc_lport_set_local_id); + /** * fc_lport_recv_flogi_req() - Receive a FLOGI request * @sp_in: The sequence the FLOGI is on @@ -954,7 +982,7 @@ static void fc_lport_reset_locked(struct fc_lport *lport) lport->tt.exch_mgr_reset(lport, 0, 0); fc_host_fabric_name(lport->host) = 0; - if (lport->port_id) + if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up)) fc_lport_set_port_id(lport, 0, NULL); } @@ -1536,6 +1564,12 @@ void fc_lport_enter_flogi(struct fc_lport *lport) fc_lport_state_enter(lport, LPORT_ST_FLOGI); + if (lport->point_to_multipoint) { + if (lport->port_id) + fc_lport_enter_ready(lport); + return; + } + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) return fc_lport_error(lport, fp); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 5f64e593cca8..bd0560509ce6 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -846,6 +846,7 @@ struct fc_lport { u32 lro_enabled:1; u32 does_npiv:1; u32 npiv_enabled:1; + u32 point_to_multipoint:1; u32 mfs; u8 max_retry_count; u8 max_rport_retry_count; @@ -991,6 +992,7 @@ int fc_set_mfs(struct fc_lport *, u32 mfs); struct fc_lport *libfc_vport_create(struct fc_vport *, int privsize); struct fc_lport *fc_vport_id_lookup(struct fc_lport *, u32 port_id); int fc_lport_bsg_request(struct fc_bsg_job *); +void fc_lport_set_local_id(struct fc_lport *, u32 port_id); /* * REMOTE PORT LAYER -- cgit v1.2.3 From a7b12a279faaad26837276065104a1f9cf60e962 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:20:08 -0700 Subject: [SCSI] libfc: add FLOGI state to rport for VN2VN The FIP proposal for VN_port to VN_port point-to-multipoint operation requires a FLOGI be sent to each remote port. The FLOGI is sent with the assigned S_ID and D_IDs of the local and remote ports. This and the response get FIP-encapsulated for Ethernet. Add FLOGI state to the remote port state machine. This will be skipped if not in point-to-multipoint mode. To reduce a little duplication between PLOGI and FLOGI response handling, added fc_rport_login_complete(), which handles the parameters for the rdata struct. 
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 4 +- drivers/scsi/libfc/fc_rport.c | 290 ++++++++++++++++++++++++++++++++++++++++-- include/scsi/fc/fc_els.h | 2 + include/scsi/libfc.h | 4 + 4 files changed, 286 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index f7bff2cad4ee..ec9850c46170 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -906,10 +906,10 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, recv = lport->tt.rport_recv_req; switch (fc_frame_payload_op(fp)) { case ELS_FLOGI: - recv = fc_lport_recv_flogi_req; + if (!lport->point_to_multipoint) + recv = fc_lport_recv_flogi_req; break; case ELS_LOGO: - fh = fc_frame_header_get(fp); if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) recv = fc_lport_recv_logo_req; break; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 6d68482649c9..4d6adf29b4f8 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -60,6 +60,7 @@ struct workqueue_struct *rport_event_queue; +static void fc_rport_enter_flogi(struct fc_rport_priv *); static void fc_rport_enter_plogi(struct fc_rport_priv *); static void fc_rport_enter_prli(struct fc_rport_priv *); static void fc_rport_enter_rtv(struct fc_rport_priv *); @@ -82,6 +83,8 @@ static void fc_rport_work(struct work_struct *); static const char *fc_rport_state_names[] = { [RPORT_ST_INIT] = "Init", + [RPORT_ST_FLOGI] = "FLOGI", + [RPORT_ST_PLOGI_WAIT] = "PLOGI_WAIT", [RPORT_ST_PLOGI] = "PLOGI", [RPORT_ST_PRLI] = "PRLI", [RPORT_ST_RTV] = "RTV", @@ -207,7 +210,7 @@ EXPORT_SYMBOL(fc_set_rport_loss_tmo); /** * fc_plogi_get_maxframe() - Get the maximum payload from the common service * parameters in a FLOGI frame - * @flp: The FLOGI payload + * @flp: The FLOGI or PLOGI payload * @maxval: The maximum frame size upper limit; this may be less than what * is in the service parameters */ @@ -344,7 +347,7 @@ static void fc_rport_work(struct work_struct *work) rdata->major_retries++; rdata->event = RPORT_EV_NONE; FC_RPORT_DBG(rdata, "work restart\n"); - fc_rport_enter_plogi(rdata); + fc_rport_enter_flogi(rdata); mutex_unlock(&rdata->rp_mutex); } else { FC_RPORT_DBG(rdata, "work delete\n"); @@ -397,7 +400,7 @@ int fc_rport_login(struct fc_rport_priv *rdata) break; default: FC_RPORT_DBG(rdata, "Login to port\n"); - fc_rport_enter_plogi(rdata); + fc_rport_enter_flogi(rdata); break; } mutex_unlock(&rdata->rp_mutex); @@ -499,6 +502,9 @@ static void fc_rport_timeout(struct work_struct *work) mutex_lock(&rdata->rp_mutex); switch (rdata->rp_state) { + case RPORT_ST_FLOGI: + fc_rport_enter_flogi(rdata); + break; case RPORT_ST_PLOGI: fc_rport_enter_plogi(rdata); break; @@ -514,6 +520,7 @@ static void fc_rport_timeout(struct work_struct *work) case RPORT_ST_ADISC: fc_rport_enter_adisc(rdata); break; + case RPORT_ST_PLOGI_WAIT: case RPORT_ST_READY: case RPORT_ST_INIT: case RPORT_ST_DELETE: @@ -538,6 +545,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) fc_rport_state(rdata), rdata->retries); switch (rdata->rp_state) { + case RPORT_ST_FLOGI: case RPORT_ST_PLOGI: case RPORT_ST_LOGO: rdata->flags &= ~FC_RP_STARTED; @@ -550,6 +558,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) case RPORT_ST_ADISC: fc_rport_enter_logo(rdata); break; + case RPORT_ST_PLOGI_WAIT: case RPORT_ST_DELETE: case RPORT_ST_READY: case 
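In outline, once the rport FLOGI exchange completes, the local port originates the PLOGI only if its own WWPN is the higher of the two; otherwise it waits in the new PLOGI_WAIT state for the peer's PLOGI. A sketch of that decision, as made in fc_rport_flogi_resp() in the diff below:

	if (rdata->ids.port_name < lport->wwpn)
		fc_rport_enter_plogi(rdata);	/* our WWPN is higher: we log in */
	else					/* peer will send the PLOGI */
		fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
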
RPORT_ST_INIT: @@ -592,7 +601,260 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata, } /** - * fc_rport_plogi_recv_resp() - Handler for ELS PLOGI responses + * fc_rport_login_complete() - Handle parameters and completion of p-mp login. + * @rdata: The remote port which we logged into or which logged into us. + * @fp: The FLOGI or PLOGI request or response frame + * + * Returns non-zero error if a problem is detected with the frame. + * Does not free the frame. + * + * This is only used in point-to-multipoint mode for FIP currently. + */ +static int fc_rport_login_complete(struct fc_rport_priv *rdata, + struct fc_frame *fp) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_els_flogi *flogi; + unsigned int e_d_tov; + u16 csp_flags; + + flogi = fc_frame_payload_get(fp, sizeof(*flogi)); + if (!flogi) + return -EINVAL; + + csp_flags = ntohs(flogi->fl_csp.sp_features); + + if (fc_frame_payload_op(fp) == ELS_FLOGI) { + if (csp_flags & FC_SP_FT_FPORT) { + FC_RPORT_DBG(rdata, "Fabric bit set in FLOGI\n"); + return -EINVAL; + } + } else { + + /* + * E_D_TOV is not valid on an incoming FLOGI request. + */ + e_d_tov = ntohl(flogi->fl_csp.sp_e_d_tov); + if (csp_flags & FC_SP_FT_EDTR) + e_d_tov /= 1000000; + if (e_d_tov > rdata->e_d_tov) + rdata->e_d_tov = e_d_tov; + } + rdata->maxframe_size = fc_plogi_get_maxframe(flogi, lport->mfs); + return 0; +} + +/** + * fc_rport_flogi_resp() - Handle response to FLOGI request for p-mp mode + * @sp: The sequence that the FLOGI was on + * @fp: The FLOGI response frame + * @rp_arg: The remote port that received the FLOGI response + */ +void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rp_arg) +{ + struct fc_rport_priv *rdata = rp_arg; + struct fc_lport *lport = rdata->local_port; + struct fc_els_flogi *flogi; + unsigned int r_a_tov; + + FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp)); + + if (fp == ERR_PTR(-FC_EX_CLOSED)) + return; + + mutex_lock(&rdata->rp_mutex); + + if (rdata->rp_state != RPORT_ST_FLOGI) { + FC_RPORT_DBG(rdata, "Received a FLOGI response, but in state " + "%s\n", fc_rport_state(rdata)); + if (IS_ERR(fp)) + goto err; + goto out; + } + + if (IS_ERR(fp)) { + fc_rport_error(rdata, fp); + goto err; + } + + if (fc_frame_payload_op(fp) != ELS_LS_ACC) + goto bad; + if (fc_rport_login_complete(rdata, fp)) + goto bad; + + flogi = fc_frame_payload_get(fp, sizeof(*flogi)); + if (!flogi) + goto bad; + r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov); + if (r_a_tov > rdata->r_a_tov) + rdata->r_a_tov = r_a_tov; + + if (rdata->ids.port_name < lport->wwpn) + fc_rport_enter_plogi(rdata); + else + fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); +out: + fc_frame_free(fp); +err: + mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); + return; +bad: + FC_RPORT_DBG(rdata, "Bad FLOGI response\n"); + fc_rport_error_retry(rdata, fp); + goto out; +} + +/** + * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp + * @rdata: The remote port to send a FLOGI to + * + * Locking Note: The rport lock is expected to be held before calling + * this routine. 
+ */ +static void fc_rport_enter_flogi(struct fc_rport_priv *rdata) +{ + struct fc_lport *lport = rdata->local_port; + struct fc_frame *fp; + + if (!lport->point_to_multipoint) + return fc_rport_enter_plogi(rdata); + + FC_RPORT_DBG(rdata, "Entered FLOGI state from %s state\n", + fc_rport_state(rdata)); + + fc_rport_state_enter(rdata, RPORT_ST_FLOGI); + + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); + if (!fp) + return fc_rport_error_retry(rdata, fp); + + if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI, + fc_rport_flogi_resp, rdata, + 2 * lport->r_a_tov)) + fc_rport_error_retry(rdata, NULL); + else + kref_get(&rdata->kref); +} + +/** + * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode + * @lport: The local port that received the PLOGI request + * @sp: The sequence that the PLOGI request was on + * @rx_fp: The PLOGI request frame + */ +static void fc_rport_recv_flogi_req(struct fc_lport *lport, + struct fc_seq *sp, struct fc_frame *rx_fp) +{ + struct fc_disc *disc; + struct fc_els_flogi *flp; + struct fc_rport_priv *rdata; + struct fc_frame *fp = rx_fp; + struct fc_exch *ep; + struct fc_frame_header *fh; + struct fc_seq_els_data rjt_data; + u32 sid, f_ctl; + + rjt_data.fp = NULL; + fh = fc_frame_header_get(fp); + sid = ntoh24(fh->fh_s_id); + + FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n"); + + disc = &lport->disc; + mutex_lock(&disc->disc_mutex); + + if (!lport->point_to_multipoint) { + rjt_data.reason = ELS_RJT_UNSUP; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; + } + + flp = fc_frame_payload_get(fp, sizeof(*flp)); + if (!flp) { + rjt_data.reason = ELS_RJT_LOGIC; + rjt_data.explan = ELS_EXPL_INV_LEN; + goto reject; + } + + rdata = lport->tt.rport_lookup(lport, sid); + if (!rdata) { + rjt_data.reason = ELS_RJT_FIP; + rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR; + goto reject; + } + mutex_lock(&rdata->rp_mutex); + + FC_RPORT_DBG(rdata, "Received FLOGI in %s state\n", + fc_rport_state(rdata)); + + switch (rdata->rp_state) { + case RPORT_ST_INIT: + case RPORT_ST_LOGO: + case RPORT_ST_DELETE: + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_FIP; + rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR; + goto reject; + case RPORT_ST_FLOGI: + case RPORT_ST_PLOGI_WAIT: + case RPORT_ST_PLOGI: + break; + case RPORT_ST_PRLI: + case RPORT_ST_RTV: + case RPORT_ST_READY: + case RPORT_ST_ADISC: + /* + * Set the remote port to be deleted and to then restart. + * This queues work to be sure exchanges are reset. 
+ */ + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_BUSY; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; + } + if (fc_rport_login_complete(rdata, fp)) { + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_LOGIC; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; + } + fc_frame_free(rx_fp); + + fp = fc_frame_alloc(lport, sizeof(*flp)); + if (!fp) + goto out; + + sp = lport->tt.seq_start_next(sp); + fc_flogi_fill(lport, fp); + flp = fc_frame_payload_get(fp, sizeof(*flp)); + flp->fl_cmd = ELS_LS_ACC; + + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; + ep = fc_seq_exch(sp); + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, + FC_TYPE_ELS, f_ctl, 0); + lport->tt.seq_send(lport, sp, fp); + + if (rdata->ids.port_name < lport->wwpn) + fc_rport_enter_plogi(rdata); + else + fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); +out: + mutex_unlock(&rdata->rp_mutex); + mutex_unlock(&disc->disc_mutex); + return; + +reject: + mutex_unlock(&disc->disc_mutex); + lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + fc_frame_free(fp); +} + +/** + * fc_rport_plogi_resp() - Handler for ELS PLOGI responses * @sp: The sequence the PLOGI is on * @fp: The PLOGI response frame * @rdata_arg: The remote port that sent the PLOGI response @@ -607,7 +869,6 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_rport_priv *rdata = rdata_arg; struct fc_lport *lport = rdata->local_port; struct fc_els_flogi *plp = NULL; - unsigned int tov; u16 csp_seq; u16 cssp_seq; u8 op; @@ -635,11 +896,8 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn); rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn); - tov = ntohl(plp->fl_csp.sp_e_d_tov); - if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) - tov /= 1000000; - if (tov > rdata->e_d_tov) - rdata->e_d_tov = tov; + if (lport->point_to_multipoint) + fc_rport_login_complete(rdata, fp); csp_seq = ntohs(plp->fl_csp.sp_tot_seq); cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq); if (cssp_seq < csp_seq) @@ -677,6 +935,7 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) { + FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__); fc_rport_error_retry(rdata, fp); return; } @@ -1041,7 +1300,7 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp, get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name || get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) { FC_RPORT_DBG(rdata, "ADISC error or mismatch\n"); - fc_rport_enter_plogi(rdata); + fc_rport_enter_flogi(rdata); } else { FC_RPORT_DBG(rdata, "ADISC OK\n"); fc_rport_enter_ready(rdata); @@ -1291,12 +1550,15 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, struct fc_seq_els_data els_data; /* - * Handle PLOGI and LOGO requests separately, since they + * Handle FLOGI, PLOGI and LOGO requests separately, since they * don't require prior login. * Check for unsupported opcodes first and reject them. * For some ops, it would be incorrect to reject with "PLOGI required". 
*/ switch (fc_frame_payload_op(fp)) { + case ELS_FLOGI: + fc_rport_recv_flogi_req(lport, sp, fp); + break; case ELS_PLOGI: fc_rport_recv_plogi_req(lport, sp, fp); break; @@ -1386,6 +1648,9 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, case RPORT_ST_INIT: FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n"); break; + case RPORT_ST_PLOGI_WAIT: + FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI_WAIT state\n"); + break; case RPORT_ST_PLOGI: FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n"); if (rdata->ids.port_name < lport->wwpn) { @@ -1403,6 +1668,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, "- ignored for now\n", rdata->rp_state); /* XXX TBD - should reset */ break; + case RPORT_ST_FLOGI: case RPORT_ST_DELETE: case RPORT_ST_LOGO: FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", diff --git a/include/scsi/fc/fc_els.h b/include/scsi/fc/fc_els.h index 70a7e92a7664..481abbd48e39 100644 --- a/include/scsi/fc/fc_els.h +++ b/include/scsi/fc/fc_els.h @@ -191,6 +191,7 @@ enum fc_els_rjt_reason { ELS_RJT_UNAB = 0x09, /* unable to perform command request */ ELS_RJT_UNSUP = 0x0b, /* command not supported */ ELS_RJT_INPROG = 0x0e, /* command already in progress */ + ELS_RJT_FIP = 0x20, /* FIP error */ ELS_RJT_VENDOR = 0xff, /* vendor specific error */ }; @@ -212,6 +213,7 @@ enum fc_els_rjt_explan { ELS_EXPL_UNAB_DATA = 0x2a, /* unable to supply requested data */ ELS_EXPL_UNSUPR = 0x2c, /* Request not supported */ ELS_EXPL_INV_LEN = 0x2d, /* Invalid payload length */ + ELS_EXPL_NOT_NEIGHBOR = 0x62, /* VN2VN_Port not in neighbor set */ /* TBD - above definitions incomplete */ }; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index bd0560509ce6..24b91c922055 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -97,6 +97,8 @@ enum fc_disc_event { /** * enum fc_rport_state - Remote port states * @RPORT_ST_INIT: Initialized + * @RPORT_ST_FLOGI: Waiting for FLOGI completion for point-to-multipoint + * @RPORT_ST_PLOGI_WAIT: Waiting for peer to login for point-to-multipoint * @RPORT_ST_PLOGI: Waiting for PLOGI completion * @RPORT_ST_PRLI: Waiting for PRLI completion * @RPORT_ST_RTV: Waiting for RTV completion @@ -107,6 +109,8 @@ enum fc_disc_event { */ enum fc_rport_state { RPORT_ST_INIT, + RPORT_ST_FLOGI, + RPORT_ST_PLOGI_WAIT, RPORT_ST_PLOGI, RPORT_ST_PRLI, RPORT_ST_RTV, -- cgit v1.2.3 From f60e12e9c778c8256a646f80603d1b88ba5ce891 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:20:14 -0700 Subject: [SCSI] libfc: track FIP exchanges When an exchange is received with a FIP encapsulation, we need to know that the response must be sent via FIP and what the original ELS opcode was. This becomes important for VN2VN mode, where we may receive FLOGI or LOGO from several peer VN_ports, and the LS_ACC or LS_RJT must be sent FIP-encapsulated with the correct sub-type. Add a field to the struct fc_frame, fr_encaps, to indicate the encapsulation values. That term is chosen to be neutral and LLD-agnostic in case non-FCoE/FIP LLDs might find it useful. The frame fr_encaps is transferred from the ingress frame to the exchange by fc_exch_recv_req(), and back to the outgoing frame by fc_seq_send(). This is taking the last byte in the skb->cb array. If needed, we could combine the info in sof, eof, flags, and encaps together into one field, but it'd be better to do that if and when its needed. 
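As a rough sketch of the round trip described above (assuming only the fr_encaps() accessor and the fc_exch.encaps field added by this patch; the example_* helper names are hypothetical), the encapsulation hint is copied from the received frame into the exchange, and back onto any frame later sent on that exchange:

/* receive path: remember how the request arrived (e.g. FIP-encapsulated) */
static void example_save_encaps(struct fc_exch *ep, struct fc_frame *fp)
{
	ep->encaps = fr_encaps(fp);
}

/* send path: tag the reply so the LLD encapsulates it the same way */
static void example_restore_encaps(struct fc_exch *ep, struct fc_frame *fp)
{
	fr_encaps(fp) = ep->encaps;
}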
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 2 ++ include/scsi/fc_frame.h | 3 +++ include/scsi/libfc.h | 2 ++ 3 files changed, 7 insertions(+) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 104e0fba7c43..61eabd3ce436 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -464,6 +464,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, f_ctl = ntoh24(fh->fh_f_ctl); fc_exch_setup_hdr(ep, fp, f_ctl); + fr_encaps(fp) = ep->encaps; /* * update sequence count if this frame is carrying @@ -1259,6 +1260,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, sp = fr_seq(fp); /* sequence will be held */ ep = fc_seq_exch(sp); fc_seq_send_ack(sp, fp); + ep->encaps = fr_encaps(fp); /* * Call the receive function. diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h index 15427fab8a57..29dd97d5b53a 100644 --- a/include/scsi/fc_frame.h +++ b/include/scsi/fc_frame.h @@ -51,6 +51,7 @@ #define fr_sof(fp) (fr_cb(fp)->fr_sof) #define fr_eof(fp) (fr_cb(fp)->fr_eof) #define fr_flags(fp) (fr_cb(fp)->fr_flags) +#define fr_encaps(fp) (fr_cb(fp)->fr_encaps) #define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload) #define fr_fsp(fp) (fr_cb(fp)->fr_fsp) #define fr_crc(fp) (fr_cb(fp)->fr_crc) @@ -69,6 +70,7 @@ struct fcoe_rcv_info { u8 fr_sof; /* start of frame delimiter */ u8 fr_eof; /* end of frame delimiter */ u8 fr_flags; /* flags - see below */ + u8 fr_encaps; /* LLD encapsulation info (e.g. FIP) */ u8 granted_mac[ETH_ALEN]; /* FCoE MAC address */ }; @@ -97,6 +99,7 @@ static inline void fc_frame_init(struct fc_frame *fp) fr_dev(fp) = NULL; fr_seq(fp) = NULL; fr_flags(fp) = 0; + fr_encaps(fp) = 0; } struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 24b91c922055..8d297f9a0a47 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -412,6 +412,7 @@ struct fc_seq { * @esb_stat: ESB exchange status * @r_a_tov: Resouce allocation time out value (in msecs) * @seq_id: The next sequence ID to use + * @encaps: encapsulation information for lower-level driver * @f_ctl: F_CTL flags for the sequence * @fh_type: The frame type * @class: The class of service @@ -443,6 +444,7 @@ struct fc_exch { u32 esb_stat; u32 r_a_tov; u8 seq_id; + u8 encaps; u32 f_ctl; u8 fh_type; enum fc_class class; -- cgit v1.2.3 From edcbb4395ecd2f2731fbf38ecbff5be0316513cb Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:20:19 -0700 Subject: [SCSI] libfcoe: add protocol description of FIP VN2VN mode The FC-BB-6 committee is proposing a new FIP usage model called VN_port to VN_port mode. It allows VN_ports to discover each other over a loss-free L2 Ethernet without any FCF or Fibre-channel fabric services. This is point-to-multipoint. There is also a variant of this called point-to-point which provides for making sure there is just one pair of ports operating over the Ethernet fabric. This patch defines the new message type and subtypes as well as one new descriptor type used by VN2VN mode. These are all still at the proposed stage and subject to change. 
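As a small illustration of how the proposed constants combine (a sketch only; example_vn2vn_mac() is a hypothetical helper, while FIP_VN_FC_MAP and the libfc hton24() helper are the definitions this series relies on), a VN2VN VN_Port MAC address is the proposed FC_MAP OUI followed by the 24-bit locally chosen port ID:

static void example_vn2vn_mac(u8 mac[6], u32 port_id)
{
	hton24(mac, FIP_VN_FC_MAP);	/* upper 3 bytes: 0x0e 0xfd 0x00 OUI */
	hton24(mac + 3, port_id);	/* lower 3 bytes: proposed local port ID */
}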
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- include/scsi/fc/fc_fip.h | 46 +++++++++++++++++++++++++++++++++++++++++++--- include/scsi/fc/fc_ns.h | 7 +++++++ 2 files changed, 50 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h index 17baa19380f0..ae25d4ab2548 100644 --- a/include/scsi/fc/fc_fip.h +++ b/include/scsi/fc/fc_fip.h @@ -17,9 +17,12 @@ #ifndef _FC_FIP_H_ #define _FC_FIP_H_ +#include + /* * This version is based on: * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf + * and T11 FC-BB-6 10-019v4.pdf (June 2010 VN2VN proposal) */ #define FIP_DEF_PRI 128 /* default selection priority */ @@ -28,12 +31,25 @@ #define FIP_VN_KA_PERIOD 90000 /* required VN_port keep-alive period (mS) */ #define FIP_FCF_FUZZ 100 /* random time added by FCF (mS) */ +/* + * VN2VN proposed-standard values. + */ +#define FIP_VN_FC_MAP 0x0efd00 /* MAC OUI for VN2VN use */ +#define FIP_VN_PROBE_WAIT 100 /* interval between VN2VN probes (ms) */ +#define FIP_VN_ANN_WAIT 400 /* interval between VN2VN announcements (ms) */ +#define FIP_VN_RLIM_INT 10000 /* interval between probes when rate limited */ +#define FIP_VN_RLIM_COUNT 10 /* number of probes before rate limiting */ +#define FIP_VN_BEACON_INT 8000 /* interval between VN2VN beacons */ +#define FIP_VN_BEACON_FUZZ 100 /* random time to add to beacon period (ms) */ + /* * Multicast MAC addresses. T11-adopted. */ -#define FIP_ALL_FCOE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 0 }) -#define FIP_ALL_ENODE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 1 }) -#define FIP_ALL_FCF_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) +#define FIP_ALL_FCOE_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 0 }) +#define FIP_ALL_ENODE_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 1 }) +#define FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) +#define FIP_ALL_VN2VN_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 4 }) +#define FIP_ALL_P2P_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 5 }) #define FIP_VER 1 /* version for fip_header */ @@ -60,6 +76,7 @@ enum fip_opcode { FIP_OP_LS = 2, /* Link Service request or reply */ FIP_OP_CTRL = 3, /* Keep Alive / Link Reset */ FIP_OP_VLAN = 4, /* VLAN discovery */ + FIP_OP_VN2VN = 5, /* VN2VN operation */ FIP_OP_VENDOR_MIN = 0xfff8, /* min vendor-specific opcode */ FIP_OP_VENDOR_MAX = 0xfffe, /* max vendor-specific opcode */ }; @@ -96,12 +113,24 @@ enum fip_vlan_subcode { FIP_SC_VL_REP = 2, /* reply */ }; +/* + * Subcodes for FIP_OP_VN2VN. + */ +enum fip_vn2vn_subcode { + FIP_SC_VN_PROBE_REQ = 1, /* probe request */ + FIP_SC_VN_PROBE_REP = 2, /* probe reply */ + FIP_SC_VN_CLAIM_NOTIFY = 3, /* claim notification */ + FIP_SC_VN_CLAIM_REP = 4, /* claim response */ + FIP_SC_VN_BEACON = 5, /* beacon */ +}; + /* * flags in header fip_flags. 
*/ enum fip_flag { FIP_FL_FPMA = 0x8000, /* supports FPMA fabric-provided MACs */ FIP_FL_SPMA = 0x4000, /* supports SPMA server-provided MACs */ + FIP_FL_REC_OR_P2P = 0x0008, /* configured addr or point-to-point */ FIP_FL_AVAIL = 0x0004, /* available for FLOGI/ELP */ FIP_FL_SOL = 0x0002, /* this is a solicited message */ FIP_FL_FPORT = 0x0001, /* sent from an F port */ @@ -130,6 +159,7 @@ enum fip_desc_type { FIP_DT_FKA = 12, /* advertisement keep-alive period */ FIP_DT_VENDOR = 13, /* vendor ID */ FIP_DT_VLAN = 14, /* vlan number */ + FIP_DT_FC4F = 15, /* FC-4 features */ FIP_DT_LIMIT, /* max defined desc_type + 1 */ FIP_DT_VENDOR_BASE = 128, /* first vendor-specific desc_type */ }; @@ -228,6 +258,16 @@ enum fip_fka_flags { /* FIP_DT_FKA flags */ +/* + * FIP_DT_FC4F - FC-4 features. + */ +struct fip_fc4_feat { + struct fip_desc fd_desc; + __u8 fd_resvd[2]; + struct fc_ns_fts fd_fts; + struct fc_ns_ff fd_ff; +} __attribute__((packed)); + /* * FIP_DT_VENDOR descriptor. */ diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h index e7d3ac497d7d..185015dd1166 100644 --- a/include/scsi/fc/fc_ns.h +++ b/include/scsi/fc/fc_ns.h @@ -99,6 +99,13 @@ struct fc_ns_fts { __be32 ff_type_map[FC_NS_TYPES / FC_NS_BPW]; /* bitmap of FC-4 types */ }; +/* + * FC4-features object. + */ +struct fc_ns_ff { + __be32 fd_feat[FC_NS_TYPES * 4 / FC_NS_BPW]; /* 4-bits per FC-type */ +}; + /* * GID_PT request. */ -- cgit v1.2.3 From e10f8c667b874a57512c936089092a3d1ef7ab8a Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:20:30 -0700 Subject: [SCSI] libfcoe: fcoe: fnic: add FIP VN2VN point-to-multipoint support The FC-BB-6 committee is proposing a new FIP usage model called VN_port to VN_port mode. It allows VN_ports to discover each other over a loss-free L2 Ethernet without any FCF or Fibre-channel fabric services. This is point-to-multipoint. There is also a variant of this called point-to-point which provides for making sure there is just one pair of ports operating over the Ethernet fabric. We add these new states: VNMP_START, _PROBE1, _PROBE2, _CLAIM, and _UP. These usually go quickly in that sequence. After waiting a random amount of time up to 100 ms in START, we select a pseudo-random proposed locally-unique port ID and send out probes in states PROBE1 and PROBE2, 100 ms apart. If no probe responses are heard, we proceed to CLAIM state 400 ms later and send a claim notification. We wait another 400 ms to receive claim responses, which give us a list of the other nodes on the network, including their FC-4 capabilities. After another 400 ms we go to VNMP_UP state and should start interoperating with any of the nodes for which we received claim responses. More details are in the spec. Add the new mode as FIP_MODE_VN2VN. The driver must specify explicitly that it wants to operate in this mode. There is no automatic detection between point-to-multipoint and fabric mode, and the local port initialization is affected, so it isn't anticipated that there will ever be any such automatic switchover. It may eventually be possible to have both fabric and VN2VN modes on the same L2 network, which may be done by two separate local VN_ports (lports). When in VN2VN mode, FIP replaces libfc's fabric-oriented discovery module with its own simple code that adds remote ports as they are discovered from incoming claim notifications and responses. These hooks are placed by fcoe_disc_init(). A linear list of discovered vn_ports is maintained under the fcoe_ctlr struct.
It is expected to be short for now, and accessed infrequently. It is kept under RCU for lock-ordering reasons. The lport and/or rport mutexes may be held when we need to lookup a fcoe_vnport during an ELS send. Change fcoe_ctlr_encaps() to lookup the destination vn_port in the list of peers for the destination MAC address of the FIP-encapsulated frame. Add a new function fcoe_disc_init() to initialize just the discovery portion of libfcoe for VN2VN mode. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/fcoe.c | 16 +- drivers/scsi/fcoe/libfcoe.c | 1075 ++++++++++++++++++++++++++++++++++++++--- drivers/scsi/fnic/fnic_main.c | 7 +- include/scsi/libfcoe.h | 42 +- 4 files changed, 1071 insertions(+), 69 deletions(-) (limited to 'include') diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 9d64e08305c7..216aba375fe1 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -315,7 +315,11 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, dev_uc_add(netdev, flogi_maddr); if (fip->spma) dev_uc_add(netdev, fip->ctl_src_addr); - dev_mc_add(netdev, FIP_ALL_ENODE_MACS); + if (fip->mode == FIP_MODE_VN2VN) { + dev_mc_add(netdev, FIP_ALL_VN2VN_MACS); + dev_mc_add(netdev, FIP_ALL_P2P_MACS); + } else + dev_mc_add(netdev, FIP_ALL_ENODE_MACS); /* * setup the receive function from ethernet driver @@ -401,7 +405,11 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) dev_uc_del(netdev, flogi_maddr); if (fip->spma) dev_uc_del(netdev, fip->ctl_src_addr); - dev_mc_del(netdev, FIP_ALL_ENODE_MACS); + if (fip->mode == FIP_MODE_VN2VN) { + dev_mc_del(netdev, FIP_ALL_VN2VN_MACS); + dev_mc_del(netdev, FIP_ALL_P2P_MACS); + } else + dev_mc_del(netdev, FIP_ALL_ENODE_MACS); /* Tell the LLD we are done w/ FCoE */ ops = netdev->netdev_ops; @@ -967,7 +975,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, } /* Initialize the library */ - rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ); + rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1); if (rc) { FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " "interface\n"); @@ -2533,6 +2541,8 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did, switch (op) { case ELS_FLOGI: case ELS_FDISC: + if (lport->point_to_multipoint) + break; return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp, fip, timeout); case ELS_LOGO: diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 11f3db5e506b..79df78f2b085 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -54,7 +55,15 @@ static void fcoe_ctlr_timeout(unsigned long); static void fcoe_ctlr_timer_work(struct work_struct *); static void fcoe_ctlr_recv_work(struct work_struct *); +static void fcoe_ctlr_vn_start(struct fcoe_ctlr *); +static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); +static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *); +static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *); + static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; +static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; +static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; +static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS; unsigned int libfcoe_debug_logging; module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); @@ -86,6 +95,11 @@ static const char *fcoe_ctlr_states[] = { [FIP_ST_AUTO] = "AUTO", 
[FIP_ST_NON_FIP] = "NON_FIP", [FIP_ST_ENABLED] = "ENABLED", + [FIP_ST_VNMP_START] = "VNMP_START", + [FIP_ST_VNMP_PROBE1] = "VNMP_PROBE1", + [FIP_ST_VNMP_PROBE2] = "VNMP_PROBE2", + [FIP_ST_VNMP_CLAIM] = "VNMP_CLAIM", + [FIP_ST_VNMP_UP] = "VNMP_UP", }; static const char *fcoe_ctlr_state(enum fip_state state) @@ -295,11 +309,25 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) fc_linkup(fip->lp); } else if (fip->state == FIP_ST_LINK_WAIT) { fcoe_ctlr_set_state(fip, fip->mode); - mutex_unlock(&fip->ctlr_mutex); - if (fip->state == FIP_ST_AUTO) + switch (fip->mode) { + default: + LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode); + /* fall-through */ + case FIP_MODE_AUTO: LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); - fc_linkup(fip->lp); - fcoe_ctlr_solicit(fip, NULL); + /* fall-through */ + case FIP_MODE_FABRIC: + case FIP_MODE_NON_FIP: + mutex_unlock(&fip->ctlr_mutex); + fc_linkup(fip->lp); + fcoe_ctlr_solicit(fip, NULL); + break; + case FIP_MODE_VN2VN: + fcoe_ctlr_vn_start(fip); + mutex_unlock(&fip->ctlr_mutex); + fc_linkup(fip->lp); + break; + } } else mutex_unlock(&fip->ctlr_mutex); } @@ -423,6 +451,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, * @fip: The FCoE controller for the ELS frame * @dtype: The FIP descriptor type for the frame * @skb: The FCoE ELS frame including FC header but no FCoE headers + * @d_id: The destination port ID. * * Returns non-zero error code on failure. * @@ -433,7 +462,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, * Ethernet header. The tailroom is for the FIP MAC descriptor. */ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, - u8 dtype, struct sk_buff *skb) + u8 dtype, struct sk_buff *skb, u32 d_id) { struct fip_encaps_head { struct ethhdr eth; @@ -445,21 +474,24 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, size_t dlen; u16 fip_flags; - fcf = fip->sel_fcf; - if (!fcf) - return -ENODEV; - - /* set flags according to both FCF and lport's capability on SPMA */ - fip_flags = fcf->flags; - fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA : FIP_FL_FPMA; - if (!fip_flags) - return -ENODEV; - dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */ cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap)); - memset(cap, 0, sizeof(*cap)); - memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN); + + if (lport->point_to_multipoint) { + if (fcoe_ctlr_vn_lookup(fip, d_id, cap->eth.h_dest)) + return -ENODEV; + } else { + fcf = fip->sel_fcf; + if (!fcf) + return -ENODEV; + fip_flags = fcf->flags; + fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA : + FIP_FL_FPMA; + if (!fip_flags) + return -ENODEV; + memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN); + } memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN); cap->eth.h_proto = htons(ETH_P_FIP); @@ -503,19 +535,22 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, * * The caller must check that the length is a multiple of 4. * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). + * The the skb must also be an fc_frame. 
*/ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, struct sk_buff *skb) { + struct fc_frame *fp; struct fc_frame_header *fh; u16 old_xid; u8 op; u8 mac[ETH_ALEN]; + fp = container_of(skb, struct fc_frame, skb); fh = (struct fc_frame_header *)skb->data; op = *(u8 *)(fh + 1); - if (op == ELS_FLOGI) { + if (op == ELS_FLOGI && fip->mode != FIP_MODE_VN2VN) { old_xid = fip->flogi_oxid; fip->flogi_oxid = ntohs(fh->fh_ox_id); if (fip->state == FIP_ST_AUTO) { @@ -533,9 +568,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, if (fip->state == FIP_ST_NON_FIP) return 0; - if (!fip->sel_fcf) + if (!fip->sel_fcf && fip->mode != FIP_MODE_VN2VN) goto drop; - switch (op) { case ELS_FLOGI: op = FIP_DT_FLOGI; @@ -546,36 +580,49 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, op = FIP_DT_FDISC; break; case ELS_LOGO: - if (fip->state != FIP_ST_ENABLED) - return 0; - if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) - return 0; + if (fip->mode == FIP_MODE_VN2VN) { + if (fip->state != FIP_ST_VNMP_UP) + return -EINVAL; + if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) + return -EINVAL; + } else { + if (fip->state != FIP_ST_ENABLED) + return 0; + if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) + return 0; + } op = FIP_DT_LOGO; break; case ELS_LS_ACC: - if (fip->flogi_oxid == FC_XID_UNKNOWN) - return 0; - if (!ntoh24(fh->fh_s_id)) - return 0; - if (fip->state == FIP_ST_AUTO) - return 0; /* - * Here we must've gotten an SID by accepting an FLOGI + * If non-FIP, we may have gotten an SID by accepting an FLOGI * from a point-to-point connection. Switch to using * the source mac based on the SID. The destination * MAC in this case would have been set by receving the * FLOGI. */ - fip->flogi_oxid = FC_XID_UNKNOWN; - fc_fcoe_set_mac(mac, fh->fh_d_id); - fip->update_mac(lport, mac); + if (fip->state == FIP_ST_NON_FIP) { + if (fip->flogi_oxid == FC_XID_UNKNOWN) + return 0; + fip->flogi_oxid = FC_XID_UNKNOWN; + fc_fcoe_set_mac(mac, fh->fh_d_id); + fip->update_mac(lport, mac); + } + /* fall through */ + case ELS_LS_RJT: + op = fr_encaps(fp); + if (op) + break; return 0; default: - if (fip->state != FIP_ST_ENABLED) + if (fip->state != FIP_ST_ENABLED && + fip->state != FIP_ST_VNMP_UP) goto drop; return 0; } - if (fcoe_ctlr_encaps(fip, lport, op, skb)) + LIBFCOE_FIP_DBG(fip, "els_send op %u d_id %x\n", + op, ntoh24(fh->fh_d_id)); + if (fcoe_ctlr_encaps(fip, lport, op, skb, ntoh24(fh->fh_d_id))) goto drop; fip->send(fip, skb); return -EINPROGRESS; @@ -717,8 +764,9 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip, ((struct fip_mac_desc *)desc)->fd_mac, ETH_ALEN); if (!is_valid_ether_addr(fcf->fcf_mac)) { - LIBFCOE_FIP_DBG(fip, "Invalid MAC address " - "in FIP adv\n"); + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP adv\n", + fcf->fcf_mac); return -EINVAL; } desc_mask &= ~BIT(FIP_DT_MAC); @@ -944,12 +992,6 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) memcpy(granted_mac, ((struct fip_mac_desc *)desc)->fd_mac, ETH_ALEN); - if (!is_valid_ether_addr(granted_mac)) { - LIBFCOE_FIP_DBG(fip, "Invalid MAC address " - "in FIP ELS\n"); - goto drop; - } - memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); break; case FIP_DT_FLOGI: case FIP_DT_FDISC: @@ -990,10 +1032,20 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) goto drop; els_op = *(u8 *)(fh + 1); - if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP && - fip->flogi_oxid == ntohs(fh->fh_ox_id) && - els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) - 
fip->flogi_oxid = FC_XID_UNKNOWN; + if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && + sub == FIP_SC_REP && els_op == ELS_LS_ACC && + fip->mode != FIP_MODE_VN2VN) { + if (!is_valid_ether_addr(granted_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC address %pM in FIP ELS\n", + granted_mac); + goto drop; + } + memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); + + if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) + fip->flogi_oxid = FC_XID_UNKNOWN; + } if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) && (!(1U << FIP_DT_MAC & desc_mask)))) { @@ -1012,6 +1064,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) fr_sof(fp) = FC_SOF_I3; fr_eof(fp) = FC_EOF_T; fr_dev(fp) = lport; + fr_encaps(fp) = els_dtype; stats = per_cpu_ptr(lport->dev_stats, get_cpu()); stats->RxFrames++; @@ -1188,8 +1241,13 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) if (skb->len < sizeof(*fiph)) goto drop; eh = eth_hdr(skb); - if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && - compare_ether_addr(eh->h_dest, FIP_ALL_ENODE_MACS)) + if (fip->mode == FIP_MODE_VN2VN) { + if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && + compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) && + compare_ether_addr(eh->h_dest, fcoe_all_p2p)) + goto drop; + } else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && + compare_ether_addr(eh->h_dest, fcoe_all_enode)) goto drop; fiph = (struct fip_header *)skb->data; op = ntohs(fiph->fip_op); @@ -1209,13 +1267,22 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) LIBFCOE_FIP_DBG(fip, "Using FIP mode\n"); } mutex_unlock(&fip->ctlr_mutex); - if (state != FIP_ST_ENABLED) + + if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN) + return fcoe_ctlr_vn_recv(fip, skb); + + if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP && + state != FIP_ST_VNMP_CLAIM) goto drop; if (op == FIP_OP_LS) { fcoe_ctlr_recv_els(fip, skb); /* consumes skb */ return 0; } + + if (state != FIP_ST_ENABLED) + goto drop; + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) fcoe_ctlr_recv_adv(fip, skb); else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) @@ -1302,7 +1369,8 @@ static void fcoe_ctlr_timer_work(struct work_struct *work) unsigned long next_timer; fip = container_of(work, struct fcoe_ctlr, timer_work); - + if (fip->mode == FIP_MODE_VN2VN) + return fcoe_ctlr_vn_timeout(fip); mutex_lock(&fip->ctlr_mutex); if (fip->state == FIP_ST_DISABLED) { mutex_unlock(&fip->ctlr_mutex); @@ -1340,7 +1408,6 @@ static void fcoe_ctlr_timer_work(struct work_struct *work) "Starting FCF discovery.\n", fip->lp->host->host_no); reset = 1; - schedule_work(&fip->timer_work); } } @@ -1514,27 +1581,917 @@ u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], } EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); +/** + * fcoe_ctlr_rport() - return the fcoe_rport for a given fc_rport_priv + * @rdata: libfc remote port + */ +static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata) +{ + return (struct fcoe_rport *)(rdata + 1); +} + +/** + * fcoe_ctlr_vn_send() - Send a FIP VN2VN Probe Request or Reply. + * @fip: The FCoE controller + * @sub: sub-opcode for probe request, reply, or advertisement. 
+ * @dest: The destination Ethernet MAC address + * @min_len: minimum size of the Ethernet payload to be sent + */ +static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, + enum fip_vn2vn_subcode sub, + const u8 *dest, size_t min_len) +{ + struct sk_buff *skb; + struct fip_frame { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + struct fip_vn_desc vn; + } __attribute__((packed)) *frame; + struct fip_fc4_feat *ff; + struct fip_size_desc *size; + u32 fcp_feat; + size_t len; + size_t dlen; + + len = sizeof(*frame); + dlen = 0; + if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) { + dlen = sizeof(struct fip_fc4_feat) + + sizeof(struct fip_size_desc); + len += dlen; + } + dlen += sizeof(frame->mac) + sizeof(frame->wwnn) + sizeof(frame->vn); + len = max(len, min_len + sizeof(struct ethhdr)); + + skb = dev_alloc_skb(len); + if (!skb) + return; + + frame = (struct fip_frame *)skb->data; + memset(frame, 0, len); + memcpy(frame->eth.h_dest, dest, ETH_ALEN); + memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + frame->eth.h_proto = htons(ETH_P_FIP); + + frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + frame->fip.fip_op = htons(FIP_OP_VN2VN); + frame->fip.fip_subcode = sub; + frame->fip.fip_dl_len = htons(dlen / FIP_BPW); + + frame->mac.fd_desc.fip_dtype = FIP_DT_MAC; + frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW; + memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + frame->wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + frame->wwnn.fd_desc.fip_dlen = sizeof(frame->wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &frame->wwnn.fd_wwn); + + frame->vn.fd_desc.fip_dtype = FIP_DT_VN_ID; + frame->vn.fd_desc.fip_dlen = sizeof(frame->vn) / FIP_BPW; + hton24(frame->vn.fd_mac, FIP_VN_FC_MAP); + hton24(frame->vn.fd_mac + 3, fip->port_id); + hton24(frame->vn.fd_fc_id, fip->port_id); + put_unaligned_be64(fip->lp->wwpn, &frame->vn.fd_wwpn); + + /* + * For claims, add FC-4 features. + * TBD: Add interface to get fc-4 types and features from libfc. + */ + if (sub == FIP_SC_VN_CLAIM_NOTIFY || sub == FIP_SC_VN_CLAIM_REP) { + ff = (struct fip_fc4_feat *)(frame + 1); + ff->fd_desc.fip_dtype = FIP_DT_FC4F; + ff->fd_desc.fip_dlen = sizeof(*ff) / FIP_BPW; + ff->fd_fts = fip->lp->fcts; + + fcp_feat = 0; + if (fip->lp->service_params & FCP_SPPF_INIT_FCN) + fcp_feat |= FCP_FEAT_INIT; + if (fip->lp->service_params & FCP_SPPF_TARG_FCN) + fcp_feat |= FCP_FEAT_TARG; + fcp_feat <<= (FC_TYPE_FCP * 4) % 32; + ff->fd_ff.fd_feat[FC_TYPE_FCP * 4 / 32] = htonl(fcp_feat); + + size = (struct fip_size_desc *)(ff + 1); + size->fd_desc.fip_dtype = FIP_DT_FCOE_SIZE; + size->fd_desc.fip_dlen = sizeof(*size) / FIP_BPW; + size->fd_size = htons(fcoe_ctlr_fcoe_size(fip)); + } + + skb_put(skb, len); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + + fip->send(fip, skb); +} + +/** + * fcoe_ctlr_vn_rport_callback - Event handler for rport events. + * @lport: The lport which is receiving the event + * @rdata: remote port private data + * @event: The event that occured + * + * Locking Note: The rport lock must not be held when calling this function. 
+ */ +static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport, + struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + struct fcoe_ctlr *fip = lport->disc.priv; + struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); + + LIBFCOE_FIP_DBG(fip, "vn_rport_callback %x event %d\n", + rdata->ids.port_id, event); + + mutex_lock(&fip->ctlr_mutex); + switch (event) { + case RPORT_EV_READY: + frport->login_count = 0; + break; + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + frport->login_count++; + if (frport->login_count > FCOE_CTLR_VN2VN_LOGIN_LIMIT) { + LIBFCOE_FIP_DBG(fip, + "rport FLOGI limited port_id %6.6x\n", + rdata->ids.port_id); + lport->tt.rport_logoff(rdata); + } + break; + default: + break; + } + mutex_unlock(&fip->ctlr_mutex); +} + +static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = { + .event_callback = fcoe_ctlr_vn_rport_callback, +}; + +/** + * fcoe_ctlr_disc_stop_locked() - stop discovery in VN2VN mode + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport) +{ + mutex_lock(&lport->disc.disc_mutex); + lport->disc.disc_callback = NULL; + mutex_unlock(&lport->disc.disc_mutex); +} + +/** + * fcoe_ctlr_disc_stop() - stop discovery in VN2VN mode + * @fip: The FCoE controller + * + * Called through the local port template for discovery. + * Called without the ctlr_mutex held. + */ +static void fcoe_ctlr_disc_stop(struct fc_lport *lport) +{ + struct fcoe_ctlr *fip = lport->disc.priv; + + mutex_lock(&fip->ctlr_mutex); + fcoe_ctlr_disc_stop_locked(lport); + mutex_unlock(&fip->ctlr_mutex); +} + +/** + * fcoe_ctlr_disc_stop_final() - stop discovery for shutdown in VN2VN mode + * @fip: The FCoE controller + * + * Called through the local port template for discovery. + * Called without the ctlr_mutex held. + */ +static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport) +{ + fcoe_ctlr_disc_stop(lport); + lport->tt.rport_flush_queue(); + synchronize_rcu(); +} + +/** + * fcoe_ctlr_vn_restart() - VN2VN probe restart with new port_id + * @fip: The FCoE controller + * + * Called with fcoe_ctlr lock held. + */ +static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip) +{ + unsigned long wait; + u32 port_id; + + fcoe_ctlr_disc_stop_locked(fip->lp); + + /* + * Get proposed port ID. + * If this is the first try after link up, use any previous port_id. + * If there was none, use the low bits of the port_name. + * On subsequent tries, get the next random one. + * Don't use reserved IDs, use another non-zero value, just as random. + */ + port_id = fip->port_id; + if (fip->probe_tries) + port_id = prandom32(&fip->rnd_state) & 0xffff; + else if (!port_id) + port_id = fip->lp->wwpn & 0xffff; + if (!port_id || port_id == 0xffff) + port_id = 1; + fip->port_id = port_id; + + if (fip->probe_tries < FIP_VN_RLIM_COUNT) { + fip->probe_tries++; + wait = random32() % FIP_VN_PROBE_WAIT; + } else + wait = FIP_VN_RLIM_INT; + mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait)); + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_START); +} + +/** + * fcoe_ctlr_vn_start() - Start in VN2VN mode + * @fip: The FCoE controller + * + * Called with fcoe_ctlr lock held. 
+ */ +static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip) +{ + fip->probe_tries = 0; + prandom32_seed(&fip->rnd_state, fip->lp->wwpn); + fcoe_ctlr_vn_restart(fip); +} + +/** + * fcoe_ctlr_vn_parse - parse probe request or response + * @fip: The FCoE controller + * @skb: incoming packet + * @rdata: buffer for resulting parsed VN entry plus fcoe_rport + * + * Returns non-zero error number on error. + * Does not consume the packet. + */ +static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, + struct sk_buff *skb, + struct fc_rport_priv *rdata) +{ + struct fip_header *fiph; + struct fip_desc *desc = NULL; + struct fip_mac_desc *macd = NULL; + struct fip_wwn_desc *wwn = NULL; + struct fip_vn_desc *vn = NULL; + struct fip_size_desc *size = NULL; + struct fcoe_rport *frport; + size_t rlen; + size_t dlen; + u32 desc_mask = 0; + u32 dtype; + u8 sub; + + memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); + frport = fcoe_ctlr_rport(rdata); + + fiph = (struct fip_header *)skb->data; + frport->flags = ntohs(fiph->fip_flags); + + sub = fiph->fip_subcode; + switch (sub) { + case FIP_SC_VN_PROBE_REQ: + case FIP_SC_VN_PROBE_REP: + case FIP_SC_VN_BEACON: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | + BIT(FIP_DT_VN_ID); + break; + case FIP_SC_VN_CLAIM_NOTIFY: + case FIP_SC_VN_CLAIM_REP: + desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | + BIT(FIP_DT_VN_ID) | BIT(FIP_DT_FC4F) | + BIT(FIP_DT_FCOE_SIZE); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_parse unknown subcode %u\n", sub); + return -EINVAL; + } + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return -EINVAL; + + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + if (dlen < sizeof(*desc) || dlen > rlen) + return -EINVAL; + + dtype = desc->fip_dtype; + if (dtype < 32) { + if (!(desc_mask & BIT(dtype))) { + LIBFCOE_FIP_DBG(fip, + "unexpected or duplicated desc " + "desc type %u in " + "FIP VN2VN subtype %u\n", + dtype, sub); + return -EINVAL; + } + desc_mask &= ~BIT(dtype); + } + + switch (dtype) { + case FIP_DT_MAC: + if (dlen != sizeof(struct fip_mac_desc)) + goto len_err; + macd = (struct fip_mac_desc *)desc; + if (!is_valid_ether_addr(macd->fd_mac)) { + LIBFCOE_FIP_DBG(fip, + "Invalid MAC addr %pM in FIP VN2VN\n", + macd->fd_mac); + return -EINVAL; + } + memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN); + break; + case FIP_DT_NAME: + if (dlen != sizeof(struct fip_wwn_desc)) + goto len_err; + wwn = (struct fip_wwn_desc *)desc; + rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); + break; + case FIP_DT_VN_ID: + if (dlen != sizeof(struct fip_vn_desc)) + goto len_err; + vn = (struct fip_vn_desc *)desc; + memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); + rdata->ids.port_id = ntoh24(vn->fd_fc_id); + rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn); + break; + case FIP_DT_FC4F: + if (dlen != sizeof(struct fip_fc4_feat)) + goto len_err; + break; + case FIP_DT_FCOE_SIZE: + if (dlen != sizeof(struct fip_size_desc)) + goto len_err; + size = (struct fip_size_desc *)desc; + frport->fcoe_len = ntohs(size->fd_size); + break; + default: + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " + "in FIP probe\n", dtype); + /* standard says ignore unknown descriptors >= 128 */ + if (dtype < FIP_DT_VENDOR_BASE) + return -EINVAL; + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + return 0; + +len_err: + LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n", + dtype, dlen); + return -EINVAL; +} + +/** + * 
fcoe_ctlr_vn_send_claim() - send multicast FIP VN2VN Claim Notification. + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip) +{ + fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_NOTIFY, fcoe_all_vn2vn, 0); + fip->sol_time = jiffies; +} + +/** + * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. + * @fip: The FCoE controller + * @rdata: parsed remote port with frport from the probe request + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, + struct fc_rport_priv *rdata) +{ + struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); + + if (rdata->ids.port_id != fip->port_id) + return; + + switch (fip->state) { + case FIP_ST_VNMP_CLAIM: + case FIP_ST_VNMP_UP: + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, + frport->enode_mac, 0); + break; + case FIP_ST_VNMP_PROBE1: + case FIP_ST_VNMP_PROBE2: + /* + * Decide whether to reply to the Probe. + * Our selected address is never a "recorded" one, so + * only reply if our WWPN is greater and the + * Probe's REC bit is not set. + * If we don't reply, we will change our address. + */ + if (fip->lp->wwpn > rdata->ids.port_name && + !(frport->flags & FIP_FL_REC_OR_P2P)) { + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP, + frport->enode_mac, 0); + break; + } + /* fall through */ + case FIP_ST_VNMP_START: + fcoe_ctlr_vn_restart(fip); + break; + default: + break; + } +} + +/** + * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply. + * @fip: The FCoE controller + * @rdata: parsed remote port with frport from the probe request + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, + struct fc_rport_priv *rdata) +{ + if (rdata->ids.port_id != fip->port_id) + return; + switch (fip->state) { + case FIP_ST_VNMP_START: + case FIP_ST_VNMP_PROBE1: + case FIP_ST_VNMP_PROBE2: + case FIP_ST_VNMP_CLAIM: + fcoe_ctlr_vn_restart(fip); + break; + case FIP_ST_VNMP_UP: + fcoe_ctlr_vn_send_claim(fip); + break; + default: + break; + } +} + +/** + * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply. + * @fip: The FCoE controller + * @new: newly-parsed remote port with frport as a template for new rdata + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fc_rport_identifiers *ids; + struct fcoe_rport *frport; + u32 port_id; + + port_id = new->ids.port_id; + if (port_id == fip->port_id) + return; + + mutex_lock(&lport->disc.disc_mutex); + rdata = lport->tt.rport_create(lport, port_id); + if (!rdata) { + mutex_unlock(&lport->disc.disc_mutex); + return; + } + + rdata->ops = &fcoe_ctlr_vn_rport_ops; + rdata->disc_id = lport->disc.disc_id; + + ids = &rdata->ids; + if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) || + (ids->node_name != -1 && ids->node_name != new->ids.node_name)) + lport->tt.rport_logoff(rdata); + ids->port_name = new->ids.port_name; + ids->node_name = new->ids.node_name; + mutex_unlock(&lport->disc.disc_mutex); + + frport = fcoe_ctlr_rport(rdata); + LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n", + port_id, frport->fcoe_len ? 
"old" : "new"); + *frport = *fcoe_ctlr_rport(new); + frport->time = 0; +} + +/** + * fcoe_ctlr_vn_lookup() - Find VN remote port's MAC address + * @fip: The FCoE controller + * @port_id: The port_id of the remote VN_node + * @mac: buffer which will hold the VN_NODE destination MAC address, if found. + * + * Returns non-zero error if no remote port found. + */ +static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + int ret = -1; + + rcu_read_lock(); + rdata = lport->tt.rport_lookup(lport, port_id); + if (rdata) { + frport = fcoe_ctlr_rport(rdata); + memcpy(mac, frport->enode_mac, ETH_ALEN); + ret = 0; + } + rcu_read_unlock(); + return ret; +} + +/** + * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification + * @fip: The FCoE controller + * @new: newly-parsed remote port with frport as a template for new rdata + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, + struct fc_rport_priv *new) +{ + struct fcoe_rport *frport = fcoe_ctlr_rport(new); + + if (frport->flags & FIP_FL_REC_OR_P2P) { + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + return; + } + switch (fip->state) { + case FIP_ST_VNMP_START: + case FIP_ST_VNMP_PROBE1: + case FIP_ST_VNMP_PROBE2: + if (new->ids.port_id == fip->port_id) + fcoe_ctlr_vn_restart(fip); + break; + case FIP_ST_VNMP_CLAIM: + case FIP_ST_VNMP_UP: + if (new->ids.port_id == fip->port_id) { + if (new->ids.port_name > fip->lp->wwpn) { + fcoe_ctlr_vn_restart(fip); + break; + } + fcoe_ctlr_vn_send_claim(fip); + break; + } + fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac, + min((u32)frport->fcoe_len, + fcoe_ctlr_fcoe_size(fip))); + fcoe_ctlr_vn_add(fip, new); + break; + default: + break; + } +} + +/** + * fcoe_ctlr_vn_claim_resp() - handle received Claim Response + * @fip: The FCoE controller that received the frame + * @new: newly-parsed remote port with frport from the Claim Response + * + * Called with ctlr_mutex held. + */ +static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, + struct fc_rport_priv *new) +{ + LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", + new->ids.port_id, fcoe_ctlr_state(fip->state)); + if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) + fcoe_ctlr_vn_add(fip, new); +} + +/** + * fcoe_ctlr_vn_beacon() - handle received beacon. + * @fip: The FCoE controller that received the frame + * @new: newly-parsed remote port with frport from the Beacon + * + * Called with ctlr_mutex held. 
+ */ +static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, + struct fc_rport_priv *new) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + + frport = fcoe_ctlr_rport(new); + if (frport->flags & FIP_FL_REC_OR_P2P) { + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + return; + } + mutex_lock(&lport->disc.disc_mutex); + rdata = lport->tt.rport_lookup(lport, new->ids.port_id); + if (rdata) + kref_get(&rdata->kref); + mutex_unlock(&lport->disc.disc_mutex); + if (rdata) { + if (rdata->ids.node_name == new->ids.node_name && + rdata->ids.port_name == new->ids.port_name) { + frport = fcoe_ctlr_rport(rdata); + if (!frport->time && fip->state == FIP_ST_VNMP_UP) + lport->tt.rport_login(rdata); + frport->time = jiffies; + } + kref_put(&rdata->kref, lport->tt.rport_destroy); + return; + } + if (fip->state != FIP_ST_VNMP_UP) + return; + + /* + * Beacon from a new neighbor. + * Send a claim notify if one hasn't been sent recently. + * Don't add the neighbor yet. + */ + LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", + new->ids.port_id); + if (time_after(jiffies, + fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) + fcoe_ctlr_vn_send_claim(fip); +} + +/** + * fcoe_ctlr_vn_age() - Check for VN_ports without recent beacons + * @fip: The FCoE controller + * + * Called with ctlr_mutex held. + * Called only in state FIP_ST_VNMP_UP. + * Returns the soonest time for next age-out or a time far in the future. + */ +static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip) +{ + struct fc_lport *lport = fip->lp; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + unsigned long next_time; + unsigned long deadline; + + next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10); + mutex_lock(&lport->disc.disc_mutex); + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { + frport = fcoe_ctlr_rport(rdata); + if (!frport->time) + continue; + deadline = frport->time + + msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10); + if (time_after_eq(jiffies, deadline)) { + frport->time = 0; + LIBFCOE_FIP_DBG(fip, + "port %16.16llx fc_id %6.6x beacon expired\n", + rdata->ids.port_name, rdata->ids.port_id); + lport->tt.rport_logoff(rdata); + } else if (time_before(deadline, next_time)) + next_time = deadline; + } + mutex_unlock(&lport->disc.disc_mutex); + return next_time; +} + +/** + * fcoe_ctlr_vn_recv() - Receive a FIP frame + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is dropped. + * Always consumes the frame. 
+ */ +static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + enum fip_vn2vn_subcode sub; + union { + struct fc_rport_priv rdata; + struct fcoe_rport frport; + } buf; + int rc; + + fiph = (struct fip_header *)skb->data; + sub = fiph->fip_subcode; + + rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); + if (rc) { + LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); + goto drop; + } + + mutex_lock(&fip->ctlr_mutex); + switch (sub) { + case FIP_SC_VN_PROBE_REQ: + fcoe_ctlr_vn_probe_req(fip, &buf.rdata); + break; + case FIP_SC_VN_PROBE_REP: + fcoe_ctlr_vn_probe_reply(fip, &buf.rdata); + break; + case FIP_SC_VN_CLAIM_NOTIFY: + fcoe_ctlr_vn_claim_notify(fip, &buf.rdata); + break; + case FIP_SC_VN_CLAIM_REP: + fcoe_ctlr_vn_claim_resp(fip, &buf.rdata); + break; + case FIP_SC_VN_BEACON: + fcoe_ctlr_vn_beacon(fip, &buf.rdata); + break; + default: + LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); + rc = -1; + break; + } + mutex_unlock(&fip->ctlr_mutex); +drop: + kfree_skb(skb); + return rc; +} + +/** + * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode. + * @fip: The FCoE controller + * + * This should never be called since we don't see RSCNs or other + * fabric-generated ELSes. + */ +static void fcoe_ctlr_disc_recv(struct fc_seq *seq, struct fc_frame *fp, + struct fc_lport *lport) +{ + struct fc_seq_els_data rjt_data; + + rjt_data.fp = NULL; + rjt_data.reason = ELS_RJT_UNSUP; + rjt_data.explan = ELS_EXPL_NONE; + lport->tt.seq_els_rsp_send(seq, ELS_LS_RJT, &rjt_data); + fc_frame_free(fp); +} + +/** + * fcoe_ctlr_disc_recv - start discovery for VN2VN mode. + * @fip: The FCoE controller + * + * This sets a flag indicating that remote ports should be created + * and started for the peers we discover. We use the disc_callback + * pointer as that flag. Peers already discovered are created here. + * + * The lport lock is held during this call. The callback must be done + * later, without holding either the lport or discovery locks. + * The fcoe_ctlr lock may also be held during this call. + */ +static void fcoe_ctlr_disc_start(void (*callback)(struct fc_lport *, + enum fc_disc_event), + struct fc_lport *lport) +{ + struct fc_disc *disc = &lport->disc; + struct fcoe_ctlr *fip = disc->priv; + + mutex_lock(&disc->disc_mutex); + disc->disc_callback = callback; + disc->disc_id = (disc->disc_id + 2) | 1; + disc->pending = 1; + schedule_work(&fip->timer_work); + mutex_unlock(&disc->disc_mutex); +} + +/** + * fcoe_ctlr_vn_disc() - report FIP VN_port discovery results after claim state. + * @fip: The FCoE controller + * + * Starts the FLOGI and PLOGI login process to each discovered rport for which + * we've received at least one beacon. + * Performs the discovery complete callback. + */ +static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip) +{ + struct fc_lport *lport = fip->lp; + struct fc_disc *disc = &lport->disc; + struct fc_rport_priv *rdata; + struct fcoe_rport *frport; + void (*callback)(struct fc_lport *, enum fc_disc_event); + + mutex_lock(&disc->disc_mutex); + callback = disc->pending ? disc->disc_callback : NULL; + disc->pending = 0; + list_for_each_entry_rcu(rdata, &disc->rports, peers) { + frport = fcoe_ctlr_rport(rdata); + if (frport->time) + lport->tt.rport_login(rdata); + } + mutex_unlock(&disc->disc_mutex); + if (callback) + callback(lport, DISC_EV_SUCCESS); +} + +/** + * fcoe_ctlr_vn_timeout - timer work function for VN2VN mode. 
+ * @fip: The FCoE controller + */ +static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) +{ + unsigned long next_time; + u8 mac[ETH_ALEN]; + u32 new_port_id = 0; + + mutex_lock(&fip->ctlr_mutex); + switch (fip->state) { + case FIP_ST_VNMP_START: + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT); + break; + case FIP_ST_VNMP_PROBE1: + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); + next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); + break; + case FIP_ST_VNMP_PROBE2: + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_CLAIM); + new_port_id = fip->port_id; + hton24(mac, FIP_VN_FC_MAP); + hton24(mac + 3, new_port_id); + fip->update_mac(fip->lp, mac); + fcoe_ctlr_vn_send_claim(fip); + next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); + break; + case FIP_ST_VNMP_CLAIM: + /* + * This may be invoked either by starting discovery so don't + * go to the next state unless it's been long enough. + */ + next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT); + if (time_after_eq(jiffies, next_time)) { + fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP); + fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, + fcoe_all_vn2vn, 0); + next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT); + fip->port_ka_time = next_time; + } + fcoe_ctlr_vn_disc(fip); + break; + case FIP_ST_VNMP_UP: + next_time = fcoe_ctlr_vn_age(fip); + if (time_after_eq(jiffies, fip->port_ka_time)) { + fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON, + fcoe_all_vn2vn, 0); + fip->port_ka_time = jiffies + + msecs_to_jiffies(FIP_VN_BEACON_INT + + (random32() % FIP_VN_BEACON_FUZZ)); + } + if (time_before(fip->port_ka_time, next_time)) + next_time = fip->port_ka_time; + break; + case FIP_ST_LINK_WAIT: + goto unlock; + default: + WARN(1, "unexpected state %d", fip->state); + goto unlock; + } + mod_timer(&fip->timer, next_time); +unlock: + mutex_unlock(&fip->ctlr_mutex); + + /* If port ID is new, notify local port after dropping ctlr_mutex */ + if (new_port_id) + fc_lport_set_local_id(fip->lp, new_port_id); +} + /** * fcoe_libfc_config() - Sets up libfc related properties for local port * @lp: The local port to configure libfc for + * @fip: The FCoE controller in use by the local port * @tt: The libfc function template + * @init_fcp: If non-zero, the FCP portion of libfc should be initialized * * Returns : 0 for success */ -int fcoe_libfc_config(struct fc_lport *lport, - struct libfc_function_template *tt) +int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, + const struct libfc_function_template *tt, int init_fcp) { /* Set the function pointers set by the LLDD */ memcpy(&lport->tt, tt, sizeof(*tt)); - if (fc_fcp_init(lport)) + if (init_fcp && fc_fcp_init(lport)) return -ENOMEM; fc_exch_init(lport); fc_elsct_init(lport); fc_lport_init(lport); + if (fip->mode == FIP_MODE_VN2VN) + lport->rport_priv_size = sizeof(struct fcoe_rport); fc_rport_init(lport); - fc_disc_init(lport); - + if (fip->mode == FIP_MODE_VN2VN) { + lport->point_to_multipoint = 1; + lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; + lport->tt.disc_start = fcoe_ctlr_disc_start; + lport->tt.disc_stop = fcoe_ctlr_disc_stop; + lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; + mutex_init(&lport->disc.disc_mutex); + INIT_LIST_HEAD(&lport->disc.rports); + lport->disc.priv = fip; + } else { + fc_disc_init(lport); + } return 0; } EXPORT_SYMBOL_GPL(fcoe_libfc_config); - diff --git 
a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index d0fe1c3345b8..9eb7a9ebccae 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -673,7 +673,6 @@ static int __devinit fnic_probe(struct pci_dev *pdev, /* Start local port initiatialization */ lp->link_up = 0; - lp->tt = fnic_transport_template; lp->max_retry_count = fnic->config.flogi_retries; lp->max_rport_retry_count = fnic->config.plogi_retries; @@ -689,11 +688,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, fc_set_wwnn(lp, fnic->config.node_wwn); fc_set_wwpn(lp, fnic->config.port_wwn); - fc_lport_init(lp); - fc_exch_init(lp); - fc_elsct_init(lp); - fc_rport_init(lp); - fc_disc_init(lp); + fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0); if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, FCPIO_HOST_EXCH_RANGE_END, NULL)) { diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 1a84a3182da0..06f1b5a8ed19 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -37,6 +38,7 @@ #define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */ #define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */ #define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */ +#define FCOE_CTLR_VN2VN_LOGIN_LIMIT 3 /* max. VN2VN rport login retries */ /** * enum fip_state - internal state of FCoE controller. @@ -45,6 +47,11 @@ * @FIP_ST_AUTO: determining whether to use FIP or non-FIP mode. * @FIP_ST_NON_FIP: non-FIP mode selected. * @FIP_ST_ENABLED: FIP mode selected. + * @FIP_ST_VNMP_START: VN2VN multipath mode start, wait + * @FIP_ST_VNMP_PROBE1: VN2VN sent first probe, listening + * @FIP_ST_VNMP_PROBE2: VN2VN sent second probe, listening + * @FIP_ST_VNMP_CLAIM: VN2VN sent claim, waiting for responses + * @FIP_ST_VNMP_UP: VN2VN multipath mode operation */ enum fip_state { FIP_ST_DISABLED, @@ -52,6 +59,11 @@ enum fip_state { FIP_ST_AUTO, FIP_ST_NON_FIP, FIP_ST_ENABLED, + FIP_ST_VNMP_START, + FIP_ST_VNMP_PROBE1, + FIP_ST_VNMP_PROBE2, + FIP_ST_VNMP_CLAIM, + FIP_ST_VNMP_UP, }; /* @@ -62,6 +74,7 @@ enum fip_state { #define FIP_MODE_AUTO FIP_ST_AUTO #define FIP_MODE_NON_FIP FIP_ST_NON_FIP #define FIP_MODE_FABRIC FIP_ST_ENABLED +#define FIP_MODE_VN2VN FIP_ST_VNMP_START /** * struct fcoe_ctlr - FCoE Controller and FIP state @@ -79,11 +92,14 @@ enum fip_state { * @timer_work: &work_struct for doing keep-alives and resets. * @recv_work: &work_struct for receiving FIP frames. * @fip_recv_list: list of received FIP frames. + * @rnd_state: state for pseudo-random number generator. + * @port_id: proposed or selected local-port ID. * @user_mfs: configured maximum FC frame size, including FC header. * @flogi_oxid: exchange ID of most recent fabric login. * @flogi_count: number of FLOGI attempts in AUTO mode. * @map_dest: use the FC_MAP mode for destination MAC addresses. * @spma: supports SPMA server-provided MACs mode + * @probe_tries: number of FC_IDs probed * @dest_addr: MAC address of the selected FC forwarder. * @ctl_src_addr: the native MAC address of our local port. 
* @send: LLD-supplied function to handle sending FIP Ethernet frames @@ -110,11 +126,16 @@ struct fcoe_ctlr { struct work_struct timer_work; struct work_struct recv_work; struct sk_buff_head fip_recv_list; + + struct rnd_state rnd_state; + u32 port_id; + u16 user_mfs; u16 flogi_oxid; u8 flogi_count; u8 map_dest; u8 spma; + u8 probe_tries; u8 dest_addr[ETH_ALEN]; u8 ctl_src_addr[ETH_ALEN]; @@ -160,6 +181,24 @@ struct fcoe_fcf { u8 fd_flags:1; }; +/** + * struct fcoe_rport - VN2VN remote port + * @time: time of create or last beacon packet received from node + * @fcoe_len: max FCoE frame size, not including VLAN or Ethernet headers + * @flags: flags from probe or claim + * @login_count: number of unsuccessful rport logins to this port + * @enode_mac: E_Node control MAC address + * @vn_mac: VN_Node assigned MAC address for data + */ +struct fcoe_rport { + unsigned long time; + u16 fcoe_len; + u16 flags; + u8 login_count; + u8 enode_mac[ETH_ALEN]; + u8 vn_mac[ETH_ALEN]; +}; + /* FIP API functions */ void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_state); void fcoe_ctlr_destroy(struct fcoe_ctlr *); @@ -172,7 +211,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *, /* libfcoe funcs */ u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); -int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *); +int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *, + const struct libfc_function_template *, int init_fcp); /** * is_fip_mode() - returns true if FIP mode selected. -- cgit v1.2.3 From 079ecd8cfe95dfd28b74f3a00d66fdbcdfc8c611 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:20:51 -0700 Subject: [SCSI] libfc: eliminate rport LOGO state The LOGO state hasn't been used in a while, except in a brief transition to DELETE state while holding the rport mutex. All port LOGO responses have been ignored as well as any timeout if we don't get a response. So this patch just removes LOGO state and simplifies the response handler. 
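For illustration only (not part of the patch): a minimal sketch of the fire-and-forget pattern the LOGO path is reduced to. The example_* names are hypothetical; the elsct_send() call, the lport callback argument, and the zero timeout mirror the hunks below.

static void example_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *lport_arg)
{
	/* The response is informational only; no rport state depends on it. */
	if (!IS_ERR(fp))
		fc_frame_free(fp);
}

static void example_send_logo(struct fc_lport *lport, u32 did)
{
	struct fc_frame *fp;

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp)
		return;		/* nothing to retry or clean up */
	/* Zero timeout: no timer, no retry bookkeeping, no LOGO state. */
	(void)lport->tt.elsct_send(lport, did, fp, ELS_LOGO,
				   example_logo_resp, lport, 0);
}
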
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_rport.c | 88 +++++++++++-------------------------------- include/scsi/libfc.h | 2 - 2 files changed, 22 insertions(+), 68 deletions(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 4d6adf29b4f8..c06d63e4a00f 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -89,7 +89,6 @@ static const char *fc_rport_state_names[] = { [RPORT_ST_PRLI] = "PRLI", [RPORT_ST_RTV] = "RTV", [RPORT_ST_READY] = "Ready", - [RPORT_ST_LOGO] = "LOGO", [RPORT_ST_ADISC] = "ADISC", [RPORT_ST_DELETE] = "Delete", }; @@ -514,9 +513,6 @@ static void fc_rport_timeout(struct work_struct *work) case RPORT_ST_RTV: fc_rport_enter_rtv(rdata); break; - case RPORT_ST_LOGO: - fc_rport_enter_logo(rdata); - break; case RPORT_ST_ADISC: fc_rport_enter_adisc(rdata); break; @@ -547,7 +543,6 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) switch (rdata->rp_state) { case RPORT_ST_FLOGI: case RPORT_ST_PLOGI: - case RPORT_ST_LOGO: rdata->flags &= ~FC_RP_STARTED; fc_rport_enter_delete(rdata, RPORT_EV_FAILED); break; @@ -791,7 +786,6 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, switch (rdata->rp_state) { case RPORT_ST_INIT: - case RPORT_ST_LOGO: case RPORT_ST_DELETE: mutex_unlock(&rdata->rp_mutex); rjt_data.reason = ELS_RJT_FIP; @@ -1036,52 +1030,6 @@ err: kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } -/** - * fc_rport_logo_resp() - Handler for logout (LOGO) responses - * @sp: The sequence the LOGO was on - * @fp: The LOGO response frame - * @rdata_arg: The remote port that sent the LOGO response - * - * Locking Note: This function will be called without the rport lock - * held, but it will lock, call an _enter_* function or fc_rport_error - * and then unlock the rport. 
- */ -static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, - void *rdata_arg) -{ - struct fc_rport_priv *rdata = rdata_arg; - u8 op; - - mutex_lock(&rdata->rp_mutex); - - FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp)); - - if (rdata->rp_state != RPORT_ST_LOGO) { - FC_RPORT_DBG(rdata, "Received a LOGO response, but in state " - "%s\n", fc_rport_state(rdata)); - if (IS_ERR(fp)) - goto err; - goto out; - } - - if (IS_ERR(fp)) { - fc_rport_error_retry(rdata, fp); - goto err; - } - - op = fc_frame_payload_op(fp); - if (op != ELS_LS_ACC) - FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n", - op); - fc_rport_enter_delete(rdata, RPORT_EV_LOGO); - -out: - fc_frame_free(fp); -err: - mutex_unlock(&rdata->rp_mutex); - kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); -} - /** * fc_rport_enter_prli() - Send Process Login (PRLI) request * @rdata: The remote port to send the PRLI request to @@ -1223,6 +1171,24 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) kref_get(&rdata->kref); } +/** + * fc_rport_logo_resp() - Handler for logout (LOGO) responses + * @sp: The sequence the LOGO was on + * @fp: The LOGO response frame + * @lport_arg: The local port + */ +static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, + void *lport_arg) +{ + struct fc_lport *lport = lport_arg; + + FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did, + "Received a LOGO %s\n", fc_els_resp_type(fp)); + if (IS_ERR(fp)) + return; + fc_frame_free(fp); +} + /** * fc_rport_enter_logo() - Send a logout (LOGO) request * @rdata: The remote port to send the LOGO request to @@ -1235,23 +1201,14 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; - FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n", + FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n", fc_rport_state(rdata)); - fc_rport_state_enter(rdata, RPORT_ST_LOGO); - fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); - if (!fp) { - fc_rport_error_retry(rdata, fp); + if (!fp) return; - } - - if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, - fc_rport_logo_resp, rdata, - 2 * lport->r_a_tov)) - fc_rport_error_retry(rdata, NULL); - else - kref_get(&rdata->kref); + (void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, + fc_rport_logo_resp, lport, 0); } /** @@ -1670,7 +1627,6 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, break; case RPORT_ST_FLOGI: case RPORT_ST_DELETE: - case RPORT_ST_LOGO: FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", fc_rport_state(rdata)); mutex_unlock(&rdata->rp_mutex); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 8d297f9a0a47..e6f07fba432c 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -103,7 +103,6 @@ enum fc_disc_event { * @RPORT_ST_PRLI: Waiting for PRLI completion * @RPORT_ST_RTV: Waiting for RTV completion * @RPORT_ST_READY: Ready for use - * @RPORT_ST_LOGO: Remote port logout (LOGO) sent * @RPORT_ST_ADISC: Discover Address sent * @RPORT_ST_DELETE: Remote port being deleted */ @@ -115,7 +114,6 @@ enum fc_rport_state { RPORT_ST_PRLI, RPORT_ST_RTV, RPORT_ST_READY, - RPORT_ST_LOGO, RPORT_ST_ADISC, RPORT_ST_DELETE, }; -- cgit v1.2.3 From 251748a99e631a2c46edcf9e519cfc60fae8153d Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:20:56 -0700 Subject: [SCSI] libfc: add fc_frame_sid() and fc_frame_did() functions To pave the way for eliminating exchanges from incoming 
requests, add simple inline fc_frame_sid() and fc_frame_did() functions which get the FC_IDs from the frame header. This can be almost as efficient as getting them from the sequence/exchange. Move ntohll, htonll, ntoh24 and hton24 to since we need them there and that's included by Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_lport.c | 12 ++++-------- drivers/scsi/libfc/fc_rport.c | 26 ++++++------------------- include/scsi/fc_frame.h | 45 ++++++++++++++++++++++++++++++++++++++++++- include/scsi/libfc.h | 18 ----------------- 4 files changed, 54 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index ec9850c46170..be3c2cee829f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -800,7 +800,6 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, struct fc_lport *lport) { struct fc_frame *fp; - struct fc_frame_header *fh; struct fc_seq *sp; struct fc_exch *ep; struct fc_els_flogi *flp; @@ -813,8 +812,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", fc_lport_state(lport)); - fh = fc_frame_header_get(rx_fp); - remote_fid = ntoh24(fh->fh_s_id); + remote_fid = fc_frame_sid(rx_fp); flp = fc_frame_payload_get(rx_fp, sizeof(*flp)); if (!flp) goto out; @@ -910,7 +908,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, recv = fc_lport_recv_flogi_req; break; case ELS_LOGO: - if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) + if (fc_frame_sid(fp) == FC_FID_FLOGI) recv = fc_lport_recv_logo_req; break; case ELS_RSCN: @@ -1468,7 +1466,6 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg) { struct fc_lport *lport = lp_arg; - struct fc_frame_header *fh; struct fc_els_flogi *flp; u32 did; u16 csp_flags; @@ -1496,8 +1493,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, goto err; } - fh = fc_frame_header_get(fp); - did = ntoh24(fh->fh_d_id); + did = fc_frame_did(fp); if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { flp = fc_frame_payload_get(fp, sizeof(*flp)); if (flp) { @@ -1523,7 +1519,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, "Port (%6.6x) entered " "point-to-point mode\n", lport->host->host_no, did); - fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), + fc_lport_ptp_setup(lport, fc_frame_sid(fp), get_unaligned_be64( &flp->fl_wwpn), get_unaligned_be64( diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index c06d63e4a00f..123493166824 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -747,13 +747,11 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, struct fc_rport_priv *rdata; struct fc_frame *fp = rx_fp; struct fc_exch *ep; - struct fc_frame_header *fh; struct fc_seq_els_data rjt_data; u32 sid, f_ctl; rjt_data.fp = NULL; - fh = fc_frame_header_get(fp); - sid = ntoh24(fh->fh_s_id); + sid = fc_frame_sid(fp); FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n"); @@ -1430,17 +1428,14 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) { struct fc_rport_priv *rdata; - struct fc_frame_header *fh; struct fc_seq_els_data els_data; els_data.fp = NULL; els_data.reason = ELS_RJT_UNAB; els_data.explan = ELS_EXPL_PLOGI_REQD; - fh = fc_frame_header_get(fp); - mutex_lock(&lport->disc.disc_mutex); - rdata = lport->tt.rport_lookup(lport, 
ntoh24(fh->fh_s_id)); + rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp)); if (!rdata) { mutex_unlock(&lport->disc.disc_mutex); goto reject; @@ -1555,14 +1550,12 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, struct fc_rport_priv *rdata; struct fc_frame *fp = rx_fp; struct fc_exch *ep; - struct fc_frame_header *fh; struct fc_els_flogi *pl; struct fc_seq_els_data rjt_data; u32 sid, f_ctl; rjt_data.fp = NULL; - fh = fc_frame_header_get(fp); - sid = ntoh24(fh->fh_s_id); + sid = fc_frame_sid(fp); FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n"); @@ -1682,7 +1675,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, struct fc_lport *lport = rdata->local_port; struct fc_exch *ep; struct fc_frame *fp; - struct fc_frame_header *fh; struct { struct fc_els_prli prli; struct fc_els_spp spp; @@ -1698,12 +1690,10 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, u32 roles = FC_RPORT_ROLE_UNKNOWN; rjt_data.fp = NULL; - fh = fc_frame_header_get(rx_fp); - FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", fc_rport_state(rdata)); - len = fr_len(rx_fp) - sizeof(*fh); + len = fr_len(rx_fp) - sizeof(struct fc_frame_header); pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); if (!pp) goto reject_len; @@ -1817,7 +1807,6 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, struct fc_frame *rx_fp) { struct fc_lport *lport = rdata->local_port; - struct fc_frame_header *fh; struct fc_exch *ep; struct fc_frame *fp; struct { @@ -1832,12 +1821,11 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, struct fc_seq_els_data rjt_data; rjt_data.fp = NULL; - fh = fc_frame_header_get(rx_fp); FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", fc_rport_state(rdata)); - len = fr_len(rx_fp) - sizeof(*fh); + len = fr_len(rx_fp) - sizeof(struct fc_frame_header); pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); if (!pp) goto reject_len; @@ -1901,14 +1889,12 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) { - struct fc_frame_header *fh; struct fc_rport_priv *rdata; u32 sid; lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); - fh = fc_frame_header_get(fp); - sid = ntoh24(fh->fh_s_id); + sid = fc_frame_sid(fp); mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, sid); diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h index 29dd97d5b53a..4ad02041b667 100644 --- a/include/scsi/fc_frame.h +++ b/include/scsi/fc_frame.h @@ -30,6 +30,23 @@ #include +/* some helpful macros */ + +#define ntohll(x) be64_to_cpu(x) +#define htonll(x) cpu_to_be64(x) + +static inline u32 ntoh24(const u8 *p) +{ + return (p[0] << 16) | (p[1] << 8) | p[2]; +} + +static inline void hton24(u8 *p, u32 v) +{ + p[0] = (v >> 16) & 0xff; + p[1] = (v >> 8) & 0xff; + p[2] = v & 0xff; +} + /* * The fc_frame interface is used to pass frame data between functions. * The frame includes the data buffer, length, and SOF / EOF delimiter types. @@ -137,6 +154,16 @@ static inline int fc_frame_is_linear(struct fc_frame *fp) return !skb_is_nonlinear(fp_skb(fp)); } +/* + * Get frame header from message in fc_frame structure. + * This version doesn't do a length check. + */ +static inline +struct fc_frame_header *__fc_frame_header_get(const struct fc_frame *fp) +{ + return (struct fc_frame_header *)fr_hdr(fp); +} + /* * Get frame header from message in fc_frame structure. * This hides a cast and provides a place to add some checking. 
@@ -145,7 +172,23 @@ static inline struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp) { WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header)); - return (struct fc_frame_header *) fr_hdr(fp); + return __fc_frame_header_get(fp); +} + +/* + * Get source FC_ID (S_ID) from frame header in message. + */ +static inline u32 fc_frame_sid(const struct fc_frame *fp) +{ + return ntoh24(__fc_frame_header_get(fp)->fh_s_id); +} + +/* + * Get destination FC_ID (D_ID) from frame header in message. + */ +static inline u32 fc_frame_did(const struct fc_frame *fp) +{ + return ntoh24(__fc_frame_header_get(fp)->fh_d_id); } /* diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index e6f07fba432c..f1ce793f33b3 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -42,24 +42,6 @@ #define FC_EX_TIMEOUT 1 /* Exchange timeout */ #define FC_EX_CLOSED 2 /* Exchange closed */ -/* some helpful macros */ - -#define ntohll(x) be64_to_cpu(x) -#define htonll(x) cpu_to_be64(x) - - -static inline u32 ntoh24(const u8 *p) -{ - return (p[0] << 16) | (p[1] << 8) | p[2]; -} - -static inline void hton24(u8 *p, u32 v) -{ - p[0] = (v >> 16) & 0xff; - p[1] = (v >> 8) & 0xff; - p[2] = v & 0xff; -} - /** * enum fc_lport_state - Local port states * @LPORT_ST_DISABLED: Disabled -- cgit v1.2.3 From 24f089e2f2c800f88039e9d536d558ec6e349fad Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:21:01 -0700 Subject: [SCSI] libfc: add fc_fill_reply_hdr() and fc_fill_hdr() Add functions to fill in an FC header given a request header. These reduces code lines in fc_lport and fc_rport and works without an exchange/sequence assigned. fc_fill_reply_hdr() fills a header for a final reply frame. fc_fill_hdr() which is similar but allows specifying the f_ctl parameter. Add defines for F_CTL values FC_FCTL_REQ and FC_FCTL_RESP. These can be used for most request and response sequences. v2 of patch adds a line to copy the frame encapsulation info from the received frame. 
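As a hedged sketch of how the new helpers combine (the handler name is hypothetical; the calls are the API added in this and the previous patch), a request handler can answer a received ELS frame without touching a sequence or exchange:

static void example_send_ls_acc(struct fc_lport *lport, struct fc_frame *rx_fp)
{
	struct fc_els_ls_acc *acc;
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "accepting ELS from %6.6x\n", fc_frame_sid(rx_fp));

	fp = fc_frame_alloc(lport, sizeof(*acc));
	if (!fp)
		return;			/* originator is expected to retry */
	acc = fc_frame_payload_get(fp, sizeof(*acc));
	memset(acc, 0, sizeof(*acc));
	acc->la_cmd = ELS_LS_ACC;
	/* Swap S_ID/D_ID, copy OX_ID/RX_ID, type and encapsulation from rx_fp. */
	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
}
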
Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_elsct.c | 2 +- drivers/scsi/libfc/fc_fcp.c | 6 ++-- drivers/scsi/libfc/fc_libfc.c | 78 +++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/libfc/fc_lport.c | 39 +++++++--------------- drivers/scsi/libfc/fc_rport.c | 64 +++++++++-------------------------- include/scsi/fc_encode.h | 7 ++++ include/scsi/libfc.h | 4 +++ 7 files changed, 121 insertions(+), 79 deletions(-) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index e9412b710fab..9b25969e2ad0 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -64,7 +64,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did, } fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type, - FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + FC_FCTL_REQ, 0); return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); } diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 61a12970bd14..eac4d09314eb 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -1108,7 +1108,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, rpriv->local_port->port_id, FC_TYPE_FCP, - FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + FC_FCTL_REQ, 0); seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0); @@ -1381,7 +1381,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) fr_seq(fp) = fsp->seq_ptr; fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, rpriv->local_port->port_id, FC_TYPE_ELS, - FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + FC_FCTL_REQ, 0); if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp, fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) { @@ -1639,7 +1639,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, rpriv->local_port->port_id, FC_TYPE_FCP, - FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + FC_FCTL_REQ, 0); seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index 39f4b6ab04b4..6a48c28e4420 100644 --- a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -23,6 +23,7 @@ #include #include +#include #include "fc_libfc.h" @@ -132,3 +133,80 @@ u32 fc_copy_buffer_to_sglist(void *buf, size_t len, } return copy_len; } + +/** + * fc_fill_hdr() - fill FC header fields based on request + * @fp: reply frame containing header to be filled in + * @in_fp: request frame containing header to use in filling in reply + * @r_ctl: R_CTL value for header + * @f_ctl: F_CTL value for header, with 0 pad + * @seq_cnt: sequence count for the header, ignored if frame has a sequence + * @parm_offset: parameter / offset value + */ +void fc_fill_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, + enum fc_rctl r_ctl, u32 f_ctl, u16 seq_cnt, u32 parm_offset) +{ + struct fc_frame_header *fh; + struct fc_frame_header *in_fh; + struct fc_seq *sp; + u32 fill; + + fh = __fc_frame_header_get(fp); + in_fh = __fc_frame_header_get(in_fp); + + if (f_ctl & FC_FC_END_SEQ) { + fill = -fr_len(fp) & 3; + if (fill) { + /* TODO, this may be a problem with fragmented skb */ + memset(skb_put(fp_skb(fp), fill), 0, fill); + f_ctl |= fill; + } + fr_eof(fp) = FC_EOF_T; + } else { 
+ WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */ + fr_eof(fp) = FC_EOF_N; + } + + fh->fh_r_ctl = r_ctl; + memcpy(fh->fh_d_id, in_fh->fh_s_id, sizeof(fh->fh_d_id)); + memcpy(fh->fh_s_id, in_fh->fh_d_id, sizeof(fh->fh_s_id)); + fh->fh_type = in_fh->fh_type; + hton24(fh->fh_f_ctl, f_ctl); + fh->fh_ox_id = in_fh->fh_ox_id; + fh->fh_rx_id = in_fh->fh_rx_id; + fh->fh_cs_ctl = 0; + fh->fh_df_ctl = 0; + fh->fh_parm_offset = htonl(parm_offset); + + sp = fr_seq(in_fp); + if (sp) { + fr_seq(fp) = sp; + fh->fh_seq_id = sp->id; + seq_cnt = sp->cnt; + } else { + fh->fh_seq_id = 0; + } + fh->fh_seq_cnt = ntohs(seq_cnt); + fr_sof(fp) = seq_cnt ? FC_SOF_N3 : FC_SOF_I3; + fr_encaps(fp) = fr_encaps(in_fp); +} +EXPORT_SYMBOL(fc_fill_hdr); + +/** + * fc_fill_reply_hdr() - fill FC reply header fields based on request + * @fp: reply frame containing header to be filled in + * @in_fp: request frame containing header to use in filling in reply + * @r_ctl: R_CTL value for reply + * @parm_offset: parameter / offset value + */ +void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, + enum fc_rctl r_ctl, u32 parm_offset) +{ + struct fc_seq *sp; + + sp = fr_seq(in_fp); + if (sp) + fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp); + fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset); +} +EXPORT_SYMBOL(fc_fill_reply_hdr); diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index be3c2cee829f..e50a6606d4bf 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -405,11 +405,9 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, struct fc_lport *lport) { struct fc_frame *fp; - struct fc_exch *ep = fc_seq_exch(sp); unsigned int len; void *pp; void *dp; - u32 f_ctl; FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", fc_lport_state(lport)); @@ -425,11 +423,8 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, dp = fc_frame_payload_get(fp, len); memcpy(dp, pp, len); *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); - sp = lport->tt.seq_start_next(sp); - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); + fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); } fc_frame_free(in_fp); } @@ -447,7 +442,6 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, struct fc_lport *lport) { struct fc_frame *fp; - struct fc_exch *ep = fc_seq_exch(sp); struct fc_els_rnid *req; struct { struct fc_els_rnid_resp rnid; @@ -457,7 +451,6 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, struct fc_seq_els_data rjt_data; u8 fmt; size_t len; - u32 f_ctl; FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", fc_lport_state(lport)); @@ -490,12 +483,8 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, memcpy(&rp->gen, &lport->rnid_gen, sizeof(rp->gen)); } - sp = lport->tt.seq_start_next(sp); - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; - f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); + fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); } } fc_frame_free(in_fp); @@ -800,14 +789,13 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, struct fc_lport *lport) { struct fc_frame *fp; + struct fc_frame_header 
*fh; struct fc_seq *sp; - struct fc_exch *ep; struct fc_els_flogi *flp; struct fc_els_flogi *new_flp; u64 remote_wwpn; u32 remote_fid; u32 local_fid; - u32 f_ctl; FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", fc_lport_state(lport)); @@ -843,7 +831,6 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, fp = fc_frame_alloc(lport, sizeof(*flp)); if (fp) { - sp = lport->tt.seq_start_next(fr_seq(rx_fp)); new_flp = fc_frame_payload_get(fp, sizeof(*flp)); fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI); new_flp->fl_cmd = (u8) ELS_LS_ACC; @@ -852,11 +839,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, * Send the response. If this fails, the originator should * repeat the sequence. */ - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; - ep = fc_seq_exch(sp); - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, remote_fid, local_fid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + fh = fc_frame_header_get(fp); + hton24(fh->fh_s_id, local_fid); + hton24(fh->fh_d_id, remote_fid); + lport->tt.frame_send(lport, fp); } else { fc_lport_error(lport, fp); @@ -1731,8 +1718,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job, hton24(fh->fh_d_id, did); hton24(fh->fh_s_id, lport->port_id); fh->fh_type = FC_TYPE_ELS; - hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | - FC_FC_END_SEQ | FC_FC_SEQ_INIT); + hton24(fh->fh_f_ctl, FC_FCTL_REQ); fh->fh_cs_ctl = 0; fh->fh_df_ctl = 0; fh->fh_parm_offset = 0; @@ -1791,8 +1777,7 @@ static int fc_lport_ct_request(struct fc_bsg_job *job, hton24(fh->fh_d_id, did); hton24(fh->fh_s_id, lport->port_id); fh->fh_type = FC_TYPE_CT; - hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | - FC_FC_END_SEQ | FC_FC_SEQ_INIT); + hton24(fh->fh_f_ctl, FC_FCTL_REQ); fh->fh_cs_ctl = 0; fh->fh_df_ctl = 0; fh->fh_parm_offset = 0; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 123493166824..598795123211 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -746,9 +746,8 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, struct fc_els_flogi *flp; struct fc_rport_priv *rdata; struct fc_frame *fp = rx_fp; - struct fc_exch *ep; struct fc_seq_els_data rjt_data; - u32 sid, f_ctl; + u32 sid; rjt_data.fp = NULL; sid = fc_frame_sid(fp); @@ -813,7 +812,6 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, rjt_data.explan = ELS_EXPL_NONE; goto reject; } - fc_frame_free(rx_fp); fp = fc_frame_alloc(lport, sizeof(*flp)); if (!fp) @@ -824,11 +822,8 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, flp = fc_frame_payload_get(fp, sizeof(*flp)); flp->fl_cmd = ELS_LS_ACC; - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; - ep = fc_seq_exch(sp); - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); if (rdata->ids.port_name < lport->wwpn) fc_rport_enter_plogi(rdata); @@ -837,12 +832,13 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, out: mutex_unlock(&rdata->rp_mutex); mutex_unlock(&disc->disc_mutex); + fc_frame_free(rx_fp); return; reject: mutex_unlock(&disc->disc_mutex); lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); - fc_frame_free(fp); + fc_frame_free(rx_fp); } /** @@ -1310,10 +1306,8 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; - struct fc_exch 
*ep = fc_seq_exch(sp); struct fc_els_adisc *adisc; struct fc_seq_els_data rjt_data; - u32 f_ctl; FC_RPORT_DBG(rdata, "Received ADISC request\n"); @@ -1332,11 +1326,8 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, fc_adisc_fill(lport, fp); adisc = fc_frame_payload_get(fp, sizeof(*adisc)); adisc->adisc_cmd = ELS_LS_ACC; - sp = lport->tt.seq_start_next(sp); - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); + fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); drop: fc_frame_free(in_fp); } @@ -1356,13 +1347,11 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; - struct fc_exch *ep = fc_seq_exch(sp); struct fc_els_rls *rls; struct fc_els_rls_resp *rsp; struct fc_els_lesb *lesb; struct fc_seq_els_data rjt_data; struct fc_host_statistics *hst; - u32 f_ctl; FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n", fc_rport_state(rdata)); @@ -1399,11 +1388,8 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, lesb->lesb_inv_crc = htonl(hst->invalid_crc_count); } - sp = lport->tt.seq_start_next(sp); - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); goto out; out_rjt: @@ -1549,10 +1535,9 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, struct fc_disc *disc; struct fc_rport_priv *rdata; struct fc_frame *fp = rx_fp; - struct fc_exch *ep; struct fc_els_flogi *pl; struct fc_seq_els_data rjt_data; - u32 sid, f_ctl; + u32 sid; rjt_data.fp = NULL; sid = fc_frame_sid(fp); @@ -1632,27 +1617,21 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, * Get session payload size from incoming PLOGI. */ rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs); - fc_frame_free(rx_fp); /* * Send LS_ACC. If this fails, the originator should retry. 
*/ - sp = lport->tt.seq_start_next(sp); - if (!sp) - goto out; fp = fc_frame_alloc(lport, sizeof(*pl)); if (!fp) goto out; fc_plogi_fill(lport, fp, ELS_LS_ACC); - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; - ep = fc_seq_exch(sp); - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); fc_rport_enter_prli(rdata); out: mutex_unlock(&rdata->rp_mutex); + fc_frame_free(rx_fp); return; reject: @@ -1673,7 +1652,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, struct fc_seq *sp, struct fc_frame *rx_fp) { struct fc_lport *lport = rdata->local_port; - struct fc_exch *ep; struct fc_frame *fp; struct { struct fc_els_prli prli; @@ -1685,7 +1663,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, unsigned int plen; enum fc_els_spp_resp resp; struct fc_seq_els_data rjt_data; - u32 f_ctl; u32 fcp_parm; u32 roles = FC_RPORT_ROLE_UNKNOWN; @@ -1714,8 +1691,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, rjt_data.explan = ELS_EXPL_INSUF_RES; goto reject; } - sp = lport->tt.seq_start_next(sp); - WARN_ON(!sp); pp = fc_frame_payload_get(fp, len); WARN_ON(!pp); memset(pp, 0, len); @@ -1768,12 +1743,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, /* * Send LS_ACC. If this fails, the originator should retry. */ - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; - f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; - ep = fc_seq_exch(sp); - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); - lport->tt.seq_send(lport, sp, fp); + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); switch (rdata->rp_state) { case RPORT_ST_PRLI: @@ -1817,7 +1788,6 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, struct fc_els_spp *spp; /* response spp */ unsigned int len; unsigned int plen; - u32 f_ctl; struct fc_seq_els_data rjt_data; rjt_data.fp = NULL; @@ -1859,11 +1829,9 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, fc_rport_enter_delete(rdata, RPORT_EV_LOGO); - f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; - f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; ep = fc_seq_exch(sp); fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, f_ctl, 0); + FC_TYPE_ELS, FC_FCTL_RESP, 0); lport->tt.seq_send(lport, sp, fp); goto drop; diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h index 9b4867c9c2d2..6d293c846a46 100644 --- a/include/scsi/fc_encode.h +++ b/include/scsi/fc_encode.h @@ -21,6 +21,13 @@ #define _FC_ENCODE_H_ #include +/* + * F_CTL values for simple requests and responses. 
+ */ +#define FC_FCTL_REQ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT) +#define FC_FCTL_RESP (FC_FC_EX_CTX | FC_FC_LAST_SEQ | \ + FC_FC_END_SEQ | FC_FC_SEQ_INIT) + struct fc_ns_rft { struct fc_ns_fid fid; /* port ID object */ struct fc_ns_fts fts; /* FC4-types object */ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index f1ce793f33b3..a6414ec63809 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -1027,6 +1027,10 @@ struct fc_seq *fc_elsct_send(struct fc_lport *, u32 did, void *arg, u32 timer_msec); void fc_lport_flogi_resp(struct fc_seq *, struct fc_frame *, void *); void fc_lport_logo_resp(struct fc_seq *, struct fc_frame *, void *); +void fc_fill_reply_hdr(struct fc_frame *, const struct fc_frame *, + enum fc_rctl, u32 parm_offset); +void fc_fill_hdr(struct fc_frame *, const struct fc_frame *, + enum fc_rctl, u32 f_ctl, u16 seq_cnt, u32 parm_offset); /* -- cgit v1.2.3 From 239e81048b7dcd27448db40c845f88ac7c68424e Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:21:07 -0700 Subject: [SCSI] libfc: add interface to allocate a sequence for incoming requests For incoming ELS and FCP requests, we often don't require an exchange and sequence, however, sometimes we do. For those cases, (primarily FCP requests for targets) add a function to set up the exchange and sequence. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/libfc/fc_exch.c | 25 +++++++++++++++++++++++++ include/scsi/libfc.h | 7 +++++++ 2 files changed, 32 insertions(+) (limited to 'include') diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 61eabd3ce436..027042a6de3b 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1230,6 +1230,28 @@ free: fc_frame_free(rx_fp); } +/** + * fc_seq_assign() - Assign exchange and sequence for incoming request + * @lport: The local port that received the request + * @fp: The request frame + * + * On success, the sequence pointer will be returned and also in fr_seq(@fp). + */ +static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) +{ + struct fc_exch_mgr_anchor *ema; + + WARN_ON(lport != fr_dev(fp)); + WARN_ON(fr_seq(fp)); + fr_seq(fp) = NULL; + + list_for_each_entry(ema, &lport->ema_list, ema_list) + if ((!ema->match || ema->match(fp)) && + fc_seq_lookup_recip(lport, ema->mp, fp) != FC_RJT_NONE) + break; + return fr_seq(fp); +} + /** * fc_exch_recv_req() - Handler for an incoming request where is other * end is originating the sequence @@ -2283,6 +2305,9 @@ int fc_exch_init(struct fc_lport *lport) if (!lport->tt.seq_exch_abort) lport->tt.seq_exch_abort = fc_seq_exch_abort; + if (!lport->tt.seq_assign) + lport->tt.seq_assign = fc_seq_assign; + return 0; } EXPORT_SYMBOL(fc_exch_init); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index a6414ec63809..605f1d7861a7 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -555,6 +555,13 @@ struct libfc_function_template { */ struct fc_seq *(*seq_start_next)(struct fc_seq *); + /* + * Assign a sequence for an incoming request frame. + * + * STATUS: OPTIONAL + */ + struct fc_seq *(*seq_assign)(struct fc_lport *, struct fc_frame *); + /* * Reset an exchange manager, completing all sequences and exchanges. * If s_id is non-zero, reset only exchanges originating from that FID. 
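A minimal, hypothetical sketch of the intended use (the handler name is made up; the template entry and its semantics are the ones added here): an upper-layer handler that actually needs an exchange for its response asks for one on demand instead of having the exchange manager allocate one for every incoming request.

static void example_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_seq *sp;

	sp = lport->tt.seq_assign(lport, fp);	/* also stored in fr_seq(fp) */
	if (!sp) {
		fc_frame_free(fp);		/* no exchange could be set up */
		return;
	}
	/* ... build and send a tracked, possibly multi-frame response ... */
}
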
-- cgit v1.2.3 From 922611569572d3c1aa0ed6491d21583fb3fcca22 Mon Sep 17 00:00:00 2001 From: Joe Eykholt Date: Tue, 20 Jul 2010 15:21:12 -0700 Subject: [SCSI] libfc: don't require a local exchange for incoming requests Incoming requests shouldn't require a local exchange if we're just going to reply with one or two frames and don't expect anything further. Don't allocate exchanges for such requests until requested by the upper-layer protocol. The sequence is always NULL for new requests, so remove that as an argument to request handlers. Also change the first argument to lport->tt.seq_els_rsp_send from the sequence pointer to the received frame pointer, to supply the exchange IDs and destination ID info. Signed-off-by: Joe Eykholt Signed-off-by: Robert Love Signed-off-by: James Bottomley --- drivers/scsi/fcoe/libfcoe.c | 9 +- drivers/scsi/libfc/fc_disc.c | 19 ++--- drivers/scsi/libfc/fc_exch.c | 188 ++++++++++++++++++++++-------------------- drivers/scsi/libfc/fc_lport.c | 58 +++++-------- drivers/scsi/libfc/fc_rport.c | 112 +++++++++---------------- include/scsi/libfc.h | 16 ++-- 6 files changed, 174 insertions(+), 228 deletions(-) (limited to 'include') diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 4de8ced1fee7..2c265fe9ab32 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -2341,20 +2341,19 @@ drop: /** * fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode. - * @fip: The FCoE controller + * @lport: The local port + * @fp: The received frame * * This should never be called since we don't see RSCNs or other * fabric-generated ELSes. */ -static void fcoe_ctlr_disc_recv(struct fc_seq *seq, struct fc_frame *fp, - struct fc_lport *lport) +static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp) { struct fc_seq_els_data rjt_data; - rjt_data.fp = NULL; rjt_data.reason = ELS_RJT_UNSUP; rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(seq, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 04474556f2dc..32f67c4b03fc 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -75,15 +75,13 @@ void fc_disc_stop_rports(struct fc_disc *disc) /** * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) - * @sp: The sequence of the RSCN exchange + * @disc: The discovery object to which the RSCN applies * @fp: The RSCN frame - * @lport: The local port that the request will be sent on * * Locking Note: This function expects that the disc_mutex is locked * before it is called. 
*/ -static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_disc *disc) +static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) { struct fc_lport *lport; struct fc_els_rscn *rp; @@ -151,7 +149,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, break; } } - lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); + lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); /* * If not doing a complete rediscovery, do GPN_ID on @@ -177,25 +175,22 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, return; reject: FC_DISC_DBG(disc, "Received a bad RSCN frame\n"); - rjt_data.fp = NULL; rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } /** * fc_disc_recv_req() - Handle incoming requests - * @sp: The sequence of the request exchange - * @fp: The request frame * @lport: The local port receiving the request + * @fp: The request frame * * Locking Note: This function is called from the EM and will lock * the disc_mutex before calling the handler for the * request. */ -static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_lport *lport) +static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp) { u8 op; struct fc_disc *disc = &lport->disc; @@ -204,7 +199,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, switch (op) { case ELS_RSCN: mutex_lock(&disc->disc_mutex); - fc_disc_recv_rscn_req(sp, fp, disc); + fc_disc_recv_rscn_req(disc, fp); mutex_unlock(&disc->disc_mutex); break; default: diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 027042a6de3b..b8560ad8cf66 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -129,11 +129,11 @@ struct fc_exch_mgr_anchor { }; static void fc_exch_rrq(struct fc_exch *); -static void fc_seq_ls_acc(struct fc_seq *); -static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason, +static void fc_seq_ls_acc(struct fc_frame *); +static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason, enum fc_els_rjt_explan); -static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *); -static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *); +static void fc_exch_els_rec(struct fc_frame *); +static void fc_exch_els_rrq(struct fc_frame *); /* * Internal implementation notes. @@ -1003,28 +1003,30 @@ static void fc_exch_set_addr(struct fc_exch *ep, /** * fc_seq_els_rsp_send() - Send an ELS response using infomation from * the existing sequence/exchange. - * @sp: The sequence/exchange to get information from + * @fp: The received frame * @els_cmd: The ELS command to be sent * @els_data: The ELS data to be sent + * + * The received frame is not freed. 
*/ -static void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd, +static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd, struct fc_seq_els_data *els_data) { switch (els_cmd) { case ELS_LS_RJT: - fc_seq_ls_rjt(sp, els_data->reason, els_data->explan); + fc_seq_ls_rjt(fp, els_data->reason, els_data->explan); break; case ELS_LS_ACC: - fc_seq_ls_acc(sp); + fc_seq_ls_acc(fp); break; case ELS_RRQ: - fc_exch_els_rrq(sp, els_data->fp); + fc_exch_els_rrq(fp); break; case ELS_REC: - fc_exch_els_rec(sp, els_data->fp); + fc_exch_els_rec(fp); break; default: - FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd); + FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd); } } @@ -1253,11 +1255,13 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) } /** - * fc_exch_recv_req() - Handler for an incoming request where is other - * end is originating the sequence + * fc_exch_recv_req() - Handler for an incoming request * @lport: The local port that received the request * @mp: The EM that the exchange is on * @fp: The request frame + * + * This is used when the other end is originating the exchange + * and the sequence. */ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, struct fc_frame *fp) @@ -1275,8 +1279,17 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, fc_frame_free(fp); return; } + fr_dev(fp) = lport; + + BUG_ON(fr_seq(fp)); /* XXX remove later */ + + /* + * If the RX_ID is 0xffff, don't allocate an exchange. + * The upper-level protocol may request one later, if needed. + */ + if (fh->fh_rx_id == htons(FC_XID_UNKNOWN)) + return lport->tt.lport_recv(lport, fp); - fr_seq(fp) = NULL; reject = fc_seq_lookup_recip(lport, mp, fp); if (reject == FC_RJT_NONE) { sp = fr_seq(fp); /* sequence will be held */ @@ -1298,7 +1311,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, if (ep->resp) ep->resp(sp, fp, ep->arg); else - lport->tt.lport_recv(lport, sp, fp); + lport->tt.lport_recv(lport, fp); fc_exch_release(ep); /* release from lookup */ } else { FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n", @@ -1566,53 +1579,55 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) /** * fc_seq_ls_acc() - Accept sequence with LS_ACC - * @req_sp: The request sequence + * @rx_fp: The received frame, not freed here. * * If this fails due to allocation or transmit congestion, assume the * originator will repeat the sequence. */ -static void fc_seq_ls_acc(struct fc_seq *req_sp) +static void fc_seq_ls_acc(struct fc_frame *rx_fp) { - struct fc_seq *sp; + struct fc_lport *lport; struct fc_els_ls_acc *acc; struct fc_frame *fp; - sp = fc_seq_start_next(req_sp); - fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc)); - if (fp) { - acc = fc_frame_payload_get(fp, sizeof(*acc)); - memset(acc, 0, sizeof(*acc)); - acc->la_cmd = ELS_LS_ACC; - fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); - } + lport = fr_dev(rx_fp); + fp = fc_frame_alloc(lport, sizeof(*acc)); + if (!fp) + return; + acc = fc_frame_payload_get(fp, sizeof(*acc)); + memset(acc, 0, sizeof(*acc)); + acc->la_cmd = ELS_LS_ACC; + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); } /** * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT - * @req_sp: The request sequence + * @rx_fp: The received frame, not freed here. 
* @reason: The reason the sequence is being rejected - * @explan: The explaination for the rejection + * @explan: The explanation for the rejection * * If this fails due to allocation or transmit congestion, assume the * originator will repeat the sequence. */ -static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason, +static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason, enum fc_els_rjt_explan explan) { - struct fc_seq *sp; + struct fc_lport *lport; struct fc_els_ls_rjt *rjt; struct fc_frame *fp; - sp = fc_seq_start_next(req_sp); - fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt)); - if (fp) { - rjt = fc_frame_payload_get(fp, sizeof(*rjt)); - memset(rjt, 0, sizeof(*rjt)); - rjt->er_cmd = ELS_LS_RJT; - rjt->er_reason = reason; - rjt->er_explan = explan; - fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); - } + lport = fr_dev(rx_fp); + fp = fc_frame_alloc(lport, sizeof(*rjt)); + if (!fp) + return; + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + memset(rjt, 0, sizeof(*rjt)); + rjt->er_cmd = ELS_LS_RJT; + rjt->er_reason = reason; + rjt->er_explan = explan; + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); } /** @@ -1714,18 +1729,34 @@ void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did) } EXPORT_SYMBOL(fc_exch_mgr_reset); +/** + * fc_exch_lookup() - find an exchange + * @lport: The local port + * @xid: The exchange ID + * + * Returns exchange pointer with hold for caller, or NULL if not found. + */ +static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid) +{ + struct fc_exch_mgr_anchor *ema; + + list_for_each_entry(ema, &lport->ema_list, ema_list) + if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid) + return fc_exch_find(ema->mp, xid); + return NULL; +} + /** * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests - * @sp: The sequence the REC is on - * @rfp: The REC frame + * @rfp: The REC frame, not freed here. * * Note that the requesting port may be different than the S_ID in the request. */ -static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) +static void fc_exch_els_rec(struct fc_frame *rfp) { + struct fc_lport *lport; struct fc_frame *fp; struct fc_exch *ep; - struct fc_exch_mgr *em; struct fc_els_rec *rp; struct fc_els_rec_acc *acc; enum fc_els_rjt_reason reason = ELS_RJT_LOGIC; @@ -1734,6 +1765,7 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) u16 rxid; u16 oxid; + lport = fr_dev(rfp); rp = fc_frame_payload_get(rfp, sizeof(*rp)); explan = ELS_EXPL_INV_LEN; if (!rp) @@ -1742,35 +1774,19 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) rxid = ntohs(rp->rec_rx_id); oxid = ntohs(rp->rec_ox_id); - /* - * Currently it's hard to find the local S_ID from the exchange - * manager. This will eventually be fixed, but for now it's easier - * to lookup the subject exchange twice, once as if we were - * the initiator, and then again if we weren't. - */ - em = fc_seq_exch(sp)->em; - ep = fc_exch_find(em, oxid); + ep = fc_exch_lookup(lport, + sid == fc_host_port_id(lport->host) ? 
oxid : rxid); explan = ELS_EXPL_OXID_RXID; - if (ep && ep->oid == sid) { - if (ep->rxid != FC_XID_UNKNOWN && - rxid != FC_XID_UNKNOWN && - ep->rxid != rxid) - goto rel; - } else { - if (ep) - fc_exch_release(ep); - ep = NULL; - if (rxid != FC_XID_UNKNOWN) - ep = fc_exch_find(em, rxid); - if (!ep) - goto reject; - } - - fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc)); - if (!fp) { - fc_exch_done(sp); + if (!ep) + goto reject; + if (ep->oid != sid || oxid != ep->oxid) + goto rel; + if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid) + goto rel; + fp = fc_frame_alloc(lport, sizeof(*acc)); + if (!fp) goto out; - } + acc = fc_frame_payload_get(fp, sizeof(*acc)); memset(acc, 0, sizeof(*acc)); acc->reca_cmd = ELS_LS_ACC; @@ -1785,18 +1801,16 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp) acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP | ESB_ST_SEQ_INIT | ESB_ST_COMPLETE)); - sp = fc_seq_start_next(sp); - fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS); + fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); out: fc_exch_release(ep); - fc_frame_free(rfp); return; rel: fc_exch_release(ep); reject: - fc_seq_ls_rjt(sp, reason, explan); - fc_frame_free(rfp); + fc_seq_ls_rjt(rfp, reason, explan); } /** @@ -1971,20 +1985,20 @@ retry: spin_unlock_bh(&ep->ex_lock); } - /** * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests - * @sp: The sequence that the RRQ is on - * @fp: The RRQ frame + * @fp: The RRQ frame, not freed here. */ -static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) +static void fc_exch_els_rrq(struct fc_frame *fp) { + struct fc_lport *lport; struct fc_exch *ep = NULL; /* request or subject exchange */ struct fc_els_rrq *rp; u32 sid; u16 xid; enum fc_els_rjt_explan explan; + lport = fr_dev(fp); rp = fc_frame_payload_get(fp, sizeof(*rp)); explan = ELS_EXPL_INV_LEN; if (!rp) @@ -1993,11 +2007,10 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) /* * lookup subject exchange. */ - ep = fc_seq_exch(sp); sid = ntoh24(rp->rrq_s_id); /* subject source */ - xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id); - ep = fc_exch_find(ep->em, xid); - + xid = fc_host_port_id(lport->host) == sid ? + ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id); + ep = fc_exch_lookup(lport, xid); explan = ELS_EXPL_OXID_RXID; if (!ep) goto reject; @@ -2028,15 +2041,14 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp) /* * Send LS_ACC. */ - fc_seq_ls_acc(sp); + fc_seq_ls_acc(fp); goto out; unlock_reject: spin_unlock_bh(&ep->ex_lock); reject: - fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan); + fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan); out: - fc_frame_free(fp); if (ep) fc_exch_release(ep); /* drop hold from fc_exch_find */ } @@ -2267,7 +2279,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) fc_exch_recv_seq_resp(ema->mp, fp); else if (f_ctl & FC_FC_SEQ_CTX) fc_exch_recv_resp(ema->mp, fp); - else + else /* no EX_CTX and no SEQ_CTX */ fc_exch_recv_req(lport, ema->mp, fp); break; default: diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e50a6606d4bf..1998c03634da 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -375,34 +375,31 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) /** * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. 
- * @sp: The sequence in the RLIR exchange - * @fp: The RLIR request frame * @lport: Fibre Channel local port recieving the RLIR + * @fp: The RLIR request frame * * Locking Note: The lport lock is expected to be held before calling * this function. */ -static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_lport *lport) +static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp) { FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", fc_lport_state(lport)); - lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); + lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); fc_frame_free(fp); } /** * fc_lport_recv_echo_req() - Handle received ECHO request - * @sp: The sequence in the ECHO exchange - * @fp: ECHO request frame * @lport: The local port recieving the ECHO + * @fp: ECHO request frame * * Locking Note: The lport lock is expected to be held before calling * this function. */ -static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, - struct fc_lport *lport) +static void fc_lport_recv_echo_req(struct fc_lport *lport, + struct fc_frame *in_fp) { struct fc_frame *fp; unsigned int len; @@ -431,15 +428,14 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, /** * fc_lport_recv_rnid_req() - Handle received Request Node ID data request - * @sp: The sequence in the RNID exchange - * @fp: The RNID request frame * @lport: The local port recieving the RNID + * @fp: The RNID request frame * * Locking Note: The lport lock is expected to be held before calling * this function. */ -static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, - struct fc_lport *lport) +static void fc_lport_recv_rnid_req(struct fc_lport *lport, + struct fc_frame *in_fp) { struct fc_frame *fp; struct fc_els_rnid *req; @@ -457,10 +453,9 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, req = fc_frame_payload_get(in_fp, sizeof(*req)); if (!req) { - rjt_data.fp = NULL; rjt_data.reason = ELS_RJT_LOGIC; rjt_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); } else { fmt = req->rnid_fmt; len = sizeof(*rp); @@ -492,17 +487,15 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, /** * fc_lport_recv_logo_req() - Handle received fabric LOGO request - * @sp: The sequence in the LOGO exchange - * @fp: The LOGO request frame * @lport: The local port recieving the LOGO + * @fp: The LOGO request frame * * Locking Note: The lport lock is exected to be held before calling * this function. */ -static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_lport *lport) +static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) { - lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); + lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); fc_lport_enter_reset(lport); fc_frame_free(fp); } @@ -773,9 +766,8 @@ EXPORT_SYMBOL(fc_lport_set_local_id); /** * fc_lport_recv_flogi_req() - Receive a FLOGI request - * @sp_in: The sequence the FLOGI is on - * @rx_fp: The FLOGI frame * @lport: The local port that recieved the request + * @rx_fp: The FLOGI frame * * A received FLOGI request indicates a point-to-point connection. * Accept it with the common service parameters indicating our N port. @@ -784,13 +776,11 @@ EXPORT_SYMBOL(fc_lport_set_local_id); * Locking Note: The lport lock is expected to be held before calling * this function. 
*/ -static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, - struct fc_frame *rx_fp, - struct fc_lport *lport) +static void fc_lport_recv_flogi_req(struct fc_lport *lport, + struct fc_frame *rx_fp) { struct fc_frame *fp; struct fc_frame_header *fh; - struct fc_seq *sp; struct fc_els_flogi *flp; struct fc_els_flogi *new_flp; u64 remote_wwpn; @@ -850,16 +840,13 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, } fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, get_unaligned_be64(&flp->fl_wwnn)); - out: - sp = fr_seq(rx_fp); fc_frame_free(rx_fp); } /** * fc_lport_recv_req() - The generic lport request handler * @lport: The local port that received the request - * @sp: The sequence the request is on * @fp: The request frame * * This function will see if the lport handles the request or @@ -868,11 +855,10 @@ out: * Locking Note: This function should not be called with the lport * lock held becuase it will grab the lock. */ -static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, - struct fc_frame *fp) +static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); - void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *); + void (*recv)(struct fc_lport *, struct fc_frame *); mutex_lock(&lport->lp_mutex); @@ -912,19 +898,13 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, break; } - recv(sp, fp, lport); + recv(lport, fp); } else { FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", fr_eof(fp)); fc_frame_free(fp); } mutex_unlock(&lport->lp_mutex); - - /* - * The common exch_done for all request may not be good - * if any request requires longer hold on exhange. XXX - */ - lport->tt.exch_done(sp); } /** diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 598795123211..25479cc7f170 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -68,14 +68,10 @@ static void fc_rport_enter_ready(struct fc_rport_priv *); static void fc_rport_enter_logo(struct fc_rport_priv *); static void fc_rport_enter_adisc(struct fc_rport_priv *); -static void fc_rport_recv_plogi_req(struct fc_lport *, - struct fc_seq *, struct fc_frame *); -static void fc_rport_recv_prli_req(struct fc_rport_priv *, - struct fc_seq *, struct fc_frame *); -static void fc_rport_recv_prlo_req(struct fc_rport_priv *, - struct fc_seq *, struct fc_frame *); -static void fc_rport_recv_logo_req(struct fc_lport *, - struct fc_seq *, struct fc_frame *); +static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_frame *); +static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *); +static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *); +static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *); static void fc_rport_timeout(struct work_struct *); static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *); static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *); @@ -736,11 +732,10 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata) /** * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode * @lport: The local port that received the PLOGI request - * @sp: The sequence that the PLOGI request was on * @rx_fp: The PLOGI request frame */ static void fc_rport_recv_flogi_req(struct fc_lport *lport, - struct fc_seq *sp, struct fc_frame *rx_fp) + struct fc_frame *rx_fp) { struct fc_disc *disc; struct fc_els_flogi *flp; @@ -749,7 
+744,6 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, struct fc_seq_els_data rjt_data; u32 sid; - rjt_data.fp = NULL; sid = fc_frame_sid(fp); FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n"); @@ -817,7 +811,6 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport, if (!fp) goto out; - sp = lport->tt.seq_start_next(sp); fc_flogi_fill(lport, fp); flp = fc_frame_payload_get(fp, sizeof(*flp)); flp->fl_cmd = ELS_LS_ACC; @@ -837,7 +830,7 @@ out: reject: mutex_unlock(&disc->disc_mutex); - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); fc_frame_free(rx_fp); } @@ -1296,13 +1289,12 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) /** * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests * @rdata: The remote port that sent the ADISC request - * @sp: The sequence the ADISC request was on * @in_fp: The ADISC request frame * * Locking Note: Called with the lport and rport locks held. */ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, - struct fc_seq *sp, struct fc_frame *in_fp) + struct fc_frame *in_fp) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; @@ -1313,10 +1305,9 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, adisc = fc_frame_payload_get(in_fp, sizeof(*adisc)); if (!adisc) { - rjt_data.fp = NULL; rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); goto drop; } @@ -1335,14 +1326,13 @@ drop: /** * fc_rport_recv_rls_req() - Handle received Read Link Status request * @rdata: The remote port that sent the RLS request - * @sp: The sequence that the RLS was on * @rx_fp: The PRLI request frame * * Locking Note: The rport lock is expected to be held before calling * this function. */ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, - struct fc_seq *sp, struct fc_frame *rx_fp) + struct fc_frame *rx_fp) { struct fc_lport *lport = rdata->local_port; @@ -1393,8 +1383,7 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, goto out; out_rjt: - rjt_data.fp = NULL; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); out: fc_frame_free(rx_fp); } @@ -1402,7 +1391,6 @@ out: /** * fc_rport_recv_els_req() - Handler for validated ELS requests * @lport: The local port that received the ELS request - * @sp: The sequence that the ELS request was on * @fp: The ELS request frame * * Handle incoming ELS requests that require port login. @@ -1410,16 +1398,11 @@ out: * * Locking Note: Called with the lport lock held. 
*/ -static void fc_rport_recv_els_req(struct fc_lport *lport, - struct fc_seq *sp, struct fc_frame *fp) +static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_rport_priv *rdata; struct fc_seq_els_data els_data; - els_data.fp = NULL; - els_data.reason = ELS_RJT_UNAB; - els_data.explan = ELS_EXPL_PLOGI_REQD; - mutex_lock(&lport->disc.disc_mutex); rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp)); if (!rdata) { @@ -1442,24 +1425,24 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, switch (fc_frame_payload_op(fp)) { case ELS_PRLI: - fc_rport_recv_prli_req(rdata, sp, fp); + fc_rport_recv_prli_req(rdata, fp); break; case ELS_PRLO: - fc_rport_recv_prlo_req(rdata, sp, fp); + fc_rport_recv_prlo_req(rdata, fp); break; case ELS_ADISC: - fc_rport_recv_adisc_req(rdata, sp, fp); + fc_rport_recv_adisc_req(rdata, fp); break; case ELS_RRQ: - els_data.fp = fp; - lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data); + lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL); + fc_frame_free(fp); break; case ELS_REC: - els_data.fp = fp; - lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); + lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL); + fc_frame_free(fp); break; case ELS_RLS: - fc_rport_recv_rls_req(rdata, sp, fp); + fc_rport_recv_rls_req(rdata, fp); break; default: fc_frame_free(fp); /* can't happen */ @@ -1470,20 +1453,20 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, return; reject: - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); + els_data.reason = ELS_RJT_UNAB; + els_data.explan = ELS_EXPL_PLOGI_REQD; + lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); fc_frame_free(fp); } /** * fc_rport_recv_req() - Handler for requests - * @sp: The sequence the request was on - * @fp: The request frame * @lport: The local port that received the request + * @fp: The request frame * * Locking Note: Called with the lport lock held. */ -void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, - struct fc_lport *lport) +void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_seq_els_data els_data; @@ -1495,13 +1478,13 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, */ switch (fc_frame_payload_op(fp)) { case ELS_FLOGI: - fc_rport_recv_flogi_req(lport, sp, fp); + fc_rport_recv_flogi_req(lport, fp); break; case ELS_PLOGI: - fc_rport_recv_plogi_req(lport, sp, fp); + fc_rport_recv_plogi_req(lport, fp); break; case ELS_LOGO: - fc_rport_recv_logo_req(lport, sp, fp); + fc_rport_recv_logo_req(lport, fp); break; case ELS_PRLI: case ELS_PRLO: @@ -1509,14 +1492,13 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, case ELS_RRQ: case ELS_REC: case ELS_RLS: - fc_rport_recv_els_req(lport, sp, fp); + fc_rport_recv_els_req(lport, fp); break; default: - fc_frame_free(fp); - els_data.fp = NULL; els_data.reason = ELS_RJT_UNSUP; els_data.explan = ELS_EXPL_NONE; - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); + lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data); + fc_frame_free(fp); break; } } @@ -1524,13 +1506,12 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, /** * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests * @lport: The local port that received the PLOGI request - * @sp: The sequence that the PLOGI request was on * @rx_fp: The PLOGI request frame * * Locking Note: The rport lock is held before calling this function. 
*/ static void fc_rport_recv_plogi_req(struct fc_lport *lport, - struct fc_seq *sp, struct fc_frame *rx_fp) + struct fc_frame *rx_fp) { struct fc_disc *disc; struct fc_rport_priv *rdata; @@ -1539,7 +1520,6 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, struct fc_seq_els_data rjt_data; u32 sid; - rjt_data.fp = NULL; sid = fc_frame_sid(fp); FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n"); @@ -1635,21 +1615,20 @@ out: return; reject: - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data); fc_frame_free(fp); } /** * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests * @rdata: The remote port that sent the PRLI request - * @sp: The sequence that the PRLI was on * @rx_fp: The PRLI request frame * * Locking Note: The rport lock is exected to be held before calling * this function. */ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, - struct fc_seq *sp, struct fc_frame *rx_fp) + struct fc_frame *rx_fp) { struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; @@ -1666,7 +1645,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, u32 fcp_parm; u32 roles = FC_RPORT_ROLE_UNKNOWN; - rjt_data.fp = NULL; FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", fc_rport_state(rdata)); @@ -1759,7 +1737,7 @@ reject_len: rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; reject: - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); drop: fc_frame_free(rx_fp); } @@ -1767,18 +1745,15 @@ drop: /** * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests * @rdata: The remote port that sent the PRLO request - * @sp: The sequence that the PRLO was on * @rx_fp: The PRLO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. 
*/ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, - struct fc_seq *sp, struct fc_frame *rx_fp) { struct fc_lport *lport = rdata->local_port; - struct fc_exch *ep; struct fc_frame *fp; struct { struct fc_els_prlo prlo; @@ -1790,8 +1765,6 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, unsigned int plen; struct fc_seq_els_data rjt_data; - rjt_data.fp = NULL; - FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", fc_rport_state(rdata)); @@ -1814,8 +1787,6 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, goto reject; } - sp = lport->tt.seq_start_next(sp); - WARN_ON(!sp); pp = fc_frame_payload_get(fp, len); WARN_ON(!pp); memset(pp, 0, len); @@ -1829,17 +1800,15 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, fc_rport_enter_delete(rdata, RPORT_EV_LOGO); - ep = fc_seq_exch(sp); - fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, - FC_TYPE_ELS, FC_FCTL_RESP, 0); - lport->tt.seq_send(lport, sp, fp); + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); + lport->tt.frame_send(lport, fp); goto drop; reject_len: rjt_data.reason = ELS_RJT_PROT; rjt_data.explan = ELS_EXPL_INV_LEN; reject: - lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); + lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data); drop: fc_frame_free(rx_fp); } @@ -1847,20 +1816,17 @@ drop: /** * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests * @lport: The local port that received the LOGO request - * @sp: The sequence that the LOGO request was on * @fp: The LOGO request frame * * Locking Note: The rport lock is exected to be held before calling * this function. */ -static void fc_rport_recv_logo_req(struct fc_lport *lport, - struct fc_seq *sp, - struct fc_frame *fp) +static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_rport_priv *rdata; u32 sid; - lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); + lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); sid = fc_frame_sid(fp); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 605f1d7861a7..14be49b44e84 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -249,14 +249,12 @@ struct fcoe_dev_stats { /** * struct fc_seq_els_data - ELS data used for passing ELS specific responses - * @fp: The ELS frame * @reason: The reason for rejection * @explan: The explaination of the rejection * * Mainly used by the exchange manager layer. */ struct fc_seq_els_data { - struct fc_frame *fp; enum fc_els_rjt_reason reason; enum fc_els_rjt_explan explan; }; @@ -519,12 +517,11 @@ struct libfc_function_template { struct fc_frame *); /* - * Send an ELS response using infomation from a previous - * exchange and sequence. + * Send an ELS response using infomation from the received frame. * * STATUS: OPTIONAL */ - void (*seq_els_rsp_send)(struct fc_seq *, enum fc_els_cmd, + void (*seq_els_rsp_send)(struct fc_frame *, enum fc_els_cmd, struct fc_seq_els_data *); /* @@ -583,8 +580,7 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - void (*lport_recv)(struct fc_lport *, struct fc_seq *, - struct fc_frame *); + void (*lport_recv)(struct fc_lport *, struct fc_frame *); /* * Reset the local port. @@ -646,8 +642,7 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, - struct fc_lport *); + void (*rport_recv_req)(struct fc_lport *, struct fc_frame *); /* * lookup an rport by it's port ID. 
@@ -693,8 +688,7 @@ struct libfc_function_template { * * STATUS: OPTIONAL */ - void (*disc_recv_req)(struct fc_seq *, struct fc_frame *, - struct fc_lport *); + void (*disc_recv_req)(struct fc_lport *, struct fc_frame *); /* * Start discovery for a local port. -- cgit v1.2.3 From c01be6dcb2b5cce4feaf48035be6395e5cd7d47c Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 22 Jul 2010 16:59:49 +0530 Subject: [SCSI] iscsi_transport: wait on session in error handler path wait for session to come online in eh_device_reset_handler and eh_target_reset_handler Signed-off-by: Mike Christie Signed-off-by: Vikas Chaudhary Signed-off-by: Ravi Anand Signed-off-by: James Bottomley --- drivers/scsi/qla4xxx/ql4_os.c | 11 ++++++++++- drivers/scsi/scsi_transport_iscsi.c | 32 ++++++++++++++++++++++++++++++++ include/scsi/scsi_transport_iscsi.h | 2 ++ 3 files changed, 44 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 821384147a41..5529b2a39741 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -2020,6 +2020,11 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) if (!ddb_entry) return ret; + ret = iscsi_block_scsi_eh(cmd); + if (ret) + return ret; + ret = FAILED; + ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no, cmd->device->channel, cmd->device->id, cmd->device->lun); @@ -2072,11 +2077,15 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) { struct scsi_qla_host *ha = to_qla_host(cmd->device->host); struct ddb_entry *ddb_entry = cmd->device->hostdata; - int stat; + int stat, ret; if (!ddb_entry) return FAILED; + ret = iscsi_block_scsi_eh(cmd); + if (ret) + return ret; + starget_printk(KERN_INFO, scsi_target(cmd->device), "WARM TARGET RESET ISSUED.\n"); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index d4b96623aa59..e84026def1f4 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -30,6 +30,7 @@ #include #include #include +#include #define ISCSI_SESSION_ATTRS 23 #define ISCSI_CONN_ATTRS 13 @@ -534,6 +535,37 @@ static void iscsi_scan_session(struct work_struct *work) atomic_dec(&ihost->nr_scans); } +/** + * iscsi_block_scsi_eh - block scsi eh until session state has transistioned + * cmd: scsi cmd passed to scsi eh handler + * + * If the session is down this function will wait for the recovery + * timer to fire or for the session to be logged back in. If the + * recovery timer fires then FAST_IO_FAIL is returned. The caller + * should pass this error value to the scsi eh. 
+ */ +int iscsi_block_scsi_eh(struct scsi_cmnd *cmd) +{ + struct iscsi_cls_session *session = + starget_to_session(scsi_target(cmd->device)); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&session->lock, flags); + while (session->state != ISCSI_SESSION_LOGGED_IN) { + if (session->state == ISCSI_SESSION_FREE) { + ret = FAST_IO_FAIL; + break; + } + spin_unlock_irqrestore(&session->lock, flags); + msleep(1000); + spin_lock_irqsave(&session->lock, flags); + } + spin_unlock_irqrestore(&session->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(iscsi_block_scsi_eh); + static void session_recovery_timedout(struct work_struct *work) { struct iscsi_cls_session *session = diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 349c7f30720d..7fff94b3b2a8 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -32,6 +32,7 @@ struct scsi_transport_template; struct iscsi_transport; struct iscsi_endpoint; struct Scsi_Host; +struct scsi_cmnd; struct iscsi_cls_conn; struct iscsi_conn; struct iscsi_task; @@ -255,5 +256,6 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time); extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size); extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep); extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle); +extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd); #endif -- cgit v1.2.3 From df64d3caab8db6ae17dacd229a03d7689a10c432 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Tue, 27 Jul 2010 15:51:13 -0500 Subject: [SCSI] Unify SAM_ and SAM_STAT_ macros We have two separate definitions for identical constants with nearly the same name. One comes from the generic headers in scsi.h; the other is an enum in libsas.h ... it's causing confusion about which one is correct (fortunately they both are). Fix this by eliminating the libsas.h duplicate Signed-off-by: James Bottomley --- drivers/scsi/aic94xx/aic94xx_task.c | 2 +- drivers/scsi/libsas/sas_ata.c | 12 ++++++------ drivers/scsi/libsas/sas_expander.c | 2 +- drivers/scsi/libsas/sas_scsi_host.c | 4 ++-- drivers/scsi/libsas/sas_task.c | 6 +++--- drivers/scsi/mvsas/mv_sas.c | 16 ++++++++-------- drivers/scsi/pm8001/pm8001_hwi.c | 14 +++++++------- drivers/scsi/pm8001/pm8001_sas.c | 4 ++-- include/scsi/libsas.h | 11 +---------- 9 files changed, 31 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index 75d20f72501f..532d212b6b2c 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -223,7 +223,7 @@ Again: switch (opcode) { case TC_NO_ERROR: ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAM_GOOD; + ts->stat = SAM_STAT_GOOD; break; case TC_UNDERRUN: ts->resp = SAS_TASK_COMPLETE; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 8c496b56556c..042153cbbde1 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -71,7 +71,7 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts) case SAS_SG_ERR: return AC_ERR_INVALID; - case SAM_CHECK_COND: + case SAM_STAT_CHECK_CONDITION: case SAS_OPEN_TO: case SAS_OPEN_REJECT: SAS_DPRINTK("%s: Saw error %d. 
What to do?\n", @@ -107,7 +107,7 @@ static void sas_ata_task_done(struct sas_task *task) sas_ha = dev->port->ha; spin_lock_irqsave(dev->sata_dev.ap->lock, flags); - if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_GOOD) { + if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) { ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf); qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command); dev->sata_dev.sstatus = resp->sstatus; @@ -511,12 +511,12 @@ static int sas_execute_task(struct sas_task *task, void *buffer, int size, goto ex_err; } } - if (task->task_status.stat == SAM_BUSY || - task->task_status.stat == SAM_TASK_SET_FULL || + if (task->task_status.stat == SAM_STAT_BUSY || + task->task_status.stat == SAM_STAT_TASK_SET_FULL || task->task_status.stat == SAS_QUEUE_FULL) { SAS_DPRINTK("task: q busy, sleeping...\n"); schedule_timeout_interruptible(HZ); - } else if (task->task_status.stat == SAM_CHECK_COND) { + } else if (task->task_status.stat == SAM_STAT_CHECK_CONDITION) { struct scsi_sense_hdr shdr; if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size, @@ -549,7 +549,7 @@ static int sas_execute_task(struct sas_task *task, void *buffer, int size, shdr.asc, shdr.ascq); } } else if (task->task_status.resp != SAS_TASK_COMPLETE || - task->task_status.stat != SAM_GOOD) { + task->task_status.stat != SAM_STAT_GOOD) { SAS_DPRINTK("task finished with resp:0x%x, " "stat:0x%x\n", task->task_status.resp, diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index c65af02dcfe8..83dd5070a15c 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -107,7 +107,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size, } } if (task->task_status.resp == SAS_TASK_COMPLETE && - task->task_status.stat == SAM_GOOD) { + task->task_status.stat == SAM_STAT_GOOD) { res = 0; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index a7890c6d878e..f0cfba9a1fc8 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -113,10 +113,10 @@ static void sas_scsi_task_done(struct sas_task *task) case SAS_ABORTED_TASK: hs = DID_ABORT; break; - case SAM_CHECK_COND: + case SAM_STAT_CHECK_CONDITION: memcpy(sc->sense_buffer, ts->buf, min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); - stat = SAM_CHECK_COND; + stat = SAM_STAT_CHECK_CONDITION; break; default: stat = ts->stat; diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c index 594524d5bfa1..b13a3346894c 100644 --- a/drivers/scsi/libsas/sas_task.c +++ b/drivers/scsi/libsas/sas_task.c @@ -15,13 +15,13 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task, else if (iu->datapres == 1) tstat->stat = iu->resp_data[3]; else if (iu->datapres == 2) { - tstat->stat = SAM_CHECK_COND; + tstat->stat = SAM_STAT_CHECK_CONDITION; tstat->buf_valid_size = min_t(int, SAS_STATUS_BUF_SIZE, be32_to_cpu(iu->sense_data_len)); memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size); - if (iu->status != SAM_CHECK_COND) + if (iu->status != SAM_STAT_CHECK_CONDITION) dev_printk(KERN_WARNING, dev, "dev %llx sent sense data, but " "stat(%x) is not CHECK CONDITION\n", @@ -30,7 +30,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task, } else /* when datapres contains corrupt/unknown value... 
*/ - tstat->stat = SAM_CHECK_COND; + tstat->stat = SAM_STAT_CHECK_CONDITION; } EXPORT_SYMBOL_GPL(sas_ssp_task_response); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index cab924239862..adedaa916ecb 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -1483,7 +1483,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, } if (task->task_status.resp == SAS_TASK_COMPLETE && - task->task_status.stat == SAM_GOOD) { + task->task_status.stat == SAM_STAT_GOOD) { res = TMF_RESP_FUNC_COMPLETE; break; } @@ -1758,7 +1758,7 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, struct mvs_device *mvi_dev = task->dev->lldd_dev; struct task_status_struct *tstat = &task->task_status; struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; - int stat = SAM_GOOD; + int stat = SAM_STAT_GOOD; resp->frame_len = sizeof(struct dev_to_host_fis); @@ -1790,13 +1790,13 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, MVS_CHIP_DISP->command_active(mvi, slot_idx); - stat = SAM_CHECK_COND; + stat = SAM_STAT_CHECK_CONDITION; switch (task->task_proto) { case SAS_PROTOCOL_SSP: stat = SAS_ABORTED_TASK; break; case SAS_PROTOCOL_SMP: - stat = SAM_CHECK_COND; + stat = SAM_STAT_CHECK_CONDITION; break; case SAS_PROTOCOL_SATA: @@ -1881,7 +1881,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) case SAS_PROTOCOL_SSP: /* hw says status == 0, datapres == 0 */ if (rx_desc & RXQ_GOOD) { - tstat->stat = SAM_GOOD; + tstat->stat = SAM_STAT_GOOD; tstat->resp = SAS_TASK_COMPLETE; } /* response frame present */ @@ -1890,12 +1890,12 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) sizeof(struct mvs_err_info); sas_ssp_task_response(mvi->dev, task, iu); } else - tstat->stat = SAM_CHECK_COND; + tstat->stat = SAM_STAT_CHECK_CONDITION; break; case SAS_PROTOCOL_SMP: { struct scatterlist *sg_resp = &task->smp_task.smp_resp; - tstat->stat = SAM_GOOD; + tstat->stat = SAM_STAT_GOOD; to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); memcpy(to + sg_resp->offset, slot->response + sizeof(struct mvs_err_info), @@ -1912,7 +1912,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) } default: - tstat->stat = SAM_CHECK_COND; + tstat->stat = SAM_STAT_CHECK_CONDITION; break; } if (!slot->port->port_attached) { diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 5ff8261c5d67..356ad268de6d 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1480,7 +1480,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) ",param = %d \n", param)); if (param == 0) { ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAM_GOOD; + ts->stat = SAM_STAT_GOOD; } else { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_PROTO_RESPONSE; @@ -1909,7 +1909,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); if (param == 0) { ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAM_GOOD; + ts->stat = SAM_STAT_GOOD; } else { u8 len; ts->resp = SAS_TASK_COMPLETE; @@ -2450,7 +2450,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAM_GOOD; + ts->stat = SAM_STAT_GOOD; if (pm8001_dev) pm8001_dev->running_req--; break; @@ -2479,19 +2479,19 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) PM8001_IO_DBG(pm8001_ha, 
pm8001_printk("IO_ERROR_HW_TIMEOUT\n")); ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAM_BUSY; + ts->stat = SAM_STAT_BUSY; break; case IO_XFER_ERROR_BREAK: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_BREAK\n")); ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAM_BUSY; + ts->stat = SAM_STAT_BUSY; break; case IO_XFER_ERROR_PHY_NOT_READY: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAM_BUSY; + ts->stat = SAM_STAT_BUSY; break; case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: PM8001_IO_DBG(pm8001_ha, @@ -3260,7 +3260,7 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) case IO_SUCCESS: PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAM_GOOD; + ts->stat = SAM_STAT_GOOD; break; case IO_NOT_VALID: PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n")); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index cd02ceaf67ff..6ae059ebb4bb 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -763,7 +763,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, } if (task->task_status.resp == SAS_TASK_COMPLETE && - task->task_status.stat == SAM_GOOD) { + task->task_status.stat == SAM_STAT_GOOD) { res = TMF_RESP_FUNC_COMPLETE; break; } @@ -853,7 +853,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, } if (task->task_status.resp == SAS_TASK_COMPLETE && - task->task_status.stat == SAM_GOOD) { + task->task_status.stat == SAM_STAT_GOOD) { res = TMF_RESP_FUNC_COMPLETE; break; diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 3b586859669c..d06e13be717b 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -422,16 +422,7 @@ enum service_response { }; enum exec_status { - SAM_GOOD = 0, - SAM_CHECK_COND = 2, - SAM_COND_MET = 4, - SAM_BUSY = 8, - SAM_INTERMEDIATE = 0x10, - SAM_IM_COND_MET = 0x12, - SAM_RESV_CONFLICT= 0x14, - SAM_TASK_SET_FULL= 0x28, - SAM_ACA_ACTIVE = 0x30, - SAM_TASK_ABORTED = 0x40, + /* The SAM_STAT_.. codes fit in the lower 6 bits */ SAS_DEV_NO_RESPONSE = 0x80, SAS_DATA_UNDERRUN, -- cgit v1.2.3 From bc4f24014de58f045f169742701a6598884d93db Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Thu, 17 Jun 2010 10:41:42 -0400 Subject: [SCSI] implement runtime Power Management This patch (as1398b) adds runtime PM support to the SCSI layer. Only the machanism is provided; use of it is up to the various high-level drivers, and the patch doesn't change any of them. Except for sg -- the patch expicitly prevents a device from being runtime-suspended while its sg device file is open. The implementation is simplistic. In general, hosts and targets are automatically suspended when all their children are asleep, but for them the runtime-suspend code doesn't actually do anything. (A host's runtime PM status is propagated up the device tree, though, so a runtime-PM-aware lower-level driver could power down the host adapter hardware at the appropriate times.) There are comments indicating where a transport class might be notified or some other hooks added. LUNs are runtime-suspended by calling the drivers' existing suspend handlers (and likewise for runtime-resume). Somewhat arbitrarily, the implementation delays for 100 ms before suspending an eligible LUN. This is because there typically are occasions during bootup when the same device file is opened and closed several times in quick succession. 
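As a rough sketch of the opt-in described below (illustrative only; "my_disk" is a
hypothetical driver name, and only the scsi_autopm_get_device()/scsi_autopm_put_device()
helpers introduced by this patch are assumed), a high-level driver would do roughly:

#include <scsi/scsi_device.h>

static int my_disk_probe(struct device *dev)
{
	struct scsi_device *sdp = to_scsi_device(dev);

	/* ... normal probe work ... */

	/* Drop the reference the SCSI core took at registration time so
	 * this LUN becomes eligible for runtime suspend.
	 */
	scsi_autopm_put_device(sdp);
	return 0;
}

static int my_disk_remove(struct device *dev)
{
	struct scsi_device *sdp = to_scsi_device(dev);

	/* Restore the original usage count before tearing down. */
	scsi_autopm_get_device(sdp);

	/* ... normal remove work ... */
	return 0;
}
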
The way this all works is that the SCSI core increments a device's PM-usage count when it is registered. If a high-level driver does nothing then the device will not be eligible for runtime-suspend because of the elevated usage count. If a high-level driver wants to use runtime PM then it can call scsi_autopm_put_device() in its probe routine to decrement the usage count and scsi_autopm_get_device() in its remove routine to restore the original count. Hosts, targets, and LUNs are not suspended while they are being probed or removed, or while the error handler is running. In fact, a fairly large part of the patch consists of code to make sure that things aren't suspended at such times. [jejb: fix up compile issues in PM config variations] Signed-off-by: Alan Stern Signed-off-by: James Bottomley --- drivers/scsi/hosts.c | 10 ++++- drivers/scsi/scsi_error.c | 16 ++++++- drivers/scsi/scsi_pm.c | 110 +++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/scsi_priv.h | 14 +++++- drivers/scsi/scsi_scan.c | 24 ++++++++-- drivers/scsi/scsi_sysfs.c | 20 ++++++++- drivers/scsi/sg.c | 10 ++++- include/scsi/scsi_device.h | 8 ++++ 8 files changed, 201 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index a2b1414da288..8a8f803439e1 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -156,6 +157,7 @@ EXPORT_SYMBOL(scsi_host_set_state); void scsi_remove_host(struct Scsi_Host *shost) { unsigned long flags; + mutex_lock(&shost->scan_mutex); spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_set_state(shost, SHOST_CANCEL)) @@ -165,6 +167,8 @@ void scsi_remove_host(struct Scsi_Host *shost) return; } spin_unlock_irqrestore(shost->host_lock, flags); + + scsi_autopm_get_host(shost); scsi_forget_host(shost); mutex_unlock(&shost->scan_mutex); scsi_proc_host_rm(shost); @@ -216,12 +220,14 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, shost->shost_gendev.parent = dev ? dev : &platform_bus; shost->dma_dev = dma_dev; - device_enable_async_suspend(&shost->shost_gendev); - error = device_add(&shost->shost_gendev); if (error) goto out; + pm_runtime_set_active(&shost->shost_gendev); + pm_runtime_enable(&shost->shost_gendev); + device_enable_async_suspend(&shost->shost_gendev); + scsi_host_set_state(shost, SHOST_RUNNING); get_device(shost->shost_gendev.parent); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index c60cffbefa3c..2bf98469dc4c 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1775,6 +1775,14 @@ int scsi_error_handler(void *data) * what we need to do to get it up and online again (if we can). * If we fail, we end up taking the thing offline. */ + if (scsi_autopm_get_host(shost) != 0) { + SCSI_LOG_ERROR_RECOVERY(1, + printk(KERN_ERR "Error handler scsi_eh_%d " + "unable to autoresume\n", + shost->host_no)); + continue; + } + if (shost->transportt->eh_strategy_handler) shost->transportt->eh_strategy_handler(shost); else @@ -1788,6 +1796,7 @@ int scsi_error_handler(void *data) * which are still online. 
*/ scsi_restart_operations(shost); + scsi_autopm_put_host(shost); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); @@ -1885,12 +1894,16 @@ scsi_reset_provider_done_command(struct scsi_cmnd *scmd) int scsi_reset_provider(struct scsi_device *dev, int flag) { - struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL); + struct scsi_cmnd *scmd; struct Scsi_Host *shost = dev->host; struct request req; unsigned long flags; int rtn; + if (scsi_autopm_get_host(shost) < 0) + return FAILED; + + scmd = scsi_get_command(dev, GFP_KERNEL); blk_rq_init(NULL, &req); scmd->request = &req; @@ -1947,6 +1960,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag) scsi_run_host_queues(shost); scsi_next_command(scmd); + scsi_autopm_put_host(shost); return rtn; } EXPORT_SYMBOL(scsi_reset_provider); diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index cd83758ce0a2..d70e91ae60af 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -59,6 +59,12 @@ static int scsi_bus_resume_common(struct device *dev) if (scsi_is_sdev_device(dev)) err = scsi_dev_type_resume(dev); + + if (err == 0) { + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } return err; } @@ -86,6 +92,107 @@ static int scsi_bus_poweroff(struct device *dev) #endif /* CONFIG_PM_SLEEP */ +#ifdef CONFIG_PM_RUNTIME + +static int scsi_runtime_suspend(struct device *dev) +{ + int err = 0; + + dev_dbg(dev, "scsi_runtime_suspend\n"); + if (scsi_is_sdev_device(dev)) { + err = scsi_dev_type_suspend(dev, PMSG_AUTO_SUSPEND); + if (err == -EAGAIN) + pm_schedule_suspend(dev, jiffies_to_msecs( + round_jiffies_up_relative(HZ/10))); + } + + /* Insert hooks here for targets, hosts, and transport classes */ + + return err; +} + +static int scsi_runtime_resume(struct device *dev) +{ + int err = 0; + + dev_dbg(dev, "scsi_runtime_resume\n"); + if (scsi_is_sdev_device(dev)) + err = scsi_dev_type_resume(dev); + + /* Insert hooks here for targets, hosts, and transport classes */ + + return err; +} + +static int scsi_runtime_idle(struct device *dev) +{ + int err; + + dev_dbg(dev, "scsi_runtime_idle\n"); + + /* Insert hooks here for targets, hosts, and transport classes */ + + if (scsi_is_sdev_device(dev)) + err = pm_schedule_suspend(dev, 100); + else + err = pm_runtime_suspend(dev); + return err; +} + +int scsi_autopm_get_device(struct scsi_device *sdev) +{ + int err; + + err = pm_runtime_get_sync(&sdev->sdev_gendev); + if (err < 0) + pm_runtime_put_sync(&sdev->sdev_gendev); + else if (err > 0) + err = 0; + return err; +} +EXPORT_SYMBOL_GPL(scsi_autopm_get_device); + +void scsi_autopm_put_device(struct scsi_device *sdev) +{ + pm_runtime_put_sync(&sdev->sdev_gendev); +} +EXPORT_SYMBOL_GPL(scsi_autopm_put_device); + +void scsi_autopm_get_target(struct scsi_target *starget) +{ + pm_runtime_get_sync(&starget->dev); +} + +void scsi_autopm_put_target(struct scsi_target *starget) +{ + pm_runtime_put_sync(&starget->dev); +} + +int scsi_autopm_get_host(struct Scsi_Host *shost) +{ + int err; + + err = pm_runtime_get_sync(&shost->shost_gendev); + if (err < 0) + pm_runtime_put_sync(&shost->shost_gendev); + else if (err > 0) + err = 0; + return err; +} + +void scsi_autopm_put_host(struct Scsi_Host *shost) +{ + pm_runtime_put_sync(&shost->shost_gendev); +} + +#else + +#define scsi_runtime_suspend NULL +#define scsi_runtime_resume NULL +#define scsi_runtime_idle NULL + +#endif /* CONFIG_PM_RUNTIME */ + const struct dev_pm_ops scsi_bus_pm_ops = { .suspend = scsi_bus_suspend, .resume = 
scsi_bus_resume_common, @@ -93,4 +200,7 @@ const struct dev_pm_ops scsi_bus_pm_ops = { .thaw = scsi_bus_resume_common, .poweroff = scsi_bus_poweroff, .restore = scsi_bus_resume_common, + .runtime_suspend = scsi_runtime_suspend, + .runtime_resume = scsi_runtime_resume, + .runtime_idle = scsi_runtime_idle, }; diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index dddacc732550..026295e2c539 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -7,6 +7,7 @@ struct request_queue; struct request; struct scsi_cmnd; struct scsi_device; +struct scsi_target; struct scsi_host_template; struct Scsi_Host; struct scsi_nl_hdr; @@ -147,9 +148,20 @@ static inline void scsi_netlink_exit(void) {} /* scsi_pm.c */ #ifdef CONFIG_PM_OPS extern const struct dev_pm_ops scsi_bus_pm_ops; -#else +#else /* CONFIG_PM_OPS */ #define scsi_bus_pm_ops (*NULL) #endif +#ifdef CONFIG_PM_RUNTIME +extern void scsi_autopm_get_target(struct scsi_target *); +extern void scsi_autopm_put_target(struct scsi_target *); +extern int scsi_autopm_get_host(struct Scsi_Host *); +extern void scsi_autopm_put_host(struct Scsi_Host *); +#else +static inline void scsi_autopm_get_target(struct scsi_target *t) {} +static inline void scsi_autopm_put_target(struct scsi_target *t) {} +static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; } +static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} +#endif /* CONFIG_PM_RUNTIME */ /* * internal scsi timeout functions: for use by mid-layer and transport diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 1c027a97d8b9..3d0a1e6e9c48 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1513,14 +1513,18 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, starget = scsi_alloc_target(parent, channel, id); if (!starget) return ERR_PTR(-ENOMEM); + scsi_autopm_get_target(starget); mutex_lock(&shost->scan_mutex); if (!shost->async_scan) scsi_complete_async_scans(); - if (scsi_host_scan_allowed(shost)) + if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); + scsi_autopm_put_host(shost); + } mutex_unlock(&shost->scan_mutex); + scsi_autopm_put_target(starget); scsi_target_reap(starget); put_device(&starget->dev); @@ -1574,6 +1578,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, starget = scsi_alloc_target(parent, channel, id); if (!starget) return; + scsi_autopm_get_target(starget); if (lun != SCAN_WILD_CARD) { /* @@ -1599,6 +1604,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, } out_reap: + scsi_autopm_put_target(starget); /* now determine if the target has any children at all * and if not, nuke it */ scsi_target_reap(starget); @@ -1633,8 +1639,10 @@ void scsi_scan_target(struct device *parent, unsigned int channel, if (!shost->async_scan) scsi_complete_async_scans(); - if (scsi_host_scan_allowed(shost)) + if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { __scsi_scan_target(parent, channel, id, lun, rescan); + scsi_autopm_put_host(shost); + } mutex_unlock(&shost->scan_mutex); } EXPORT_SYMBOL(scsi_scan_target); @@ -1686,7 +1694,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, if (!shost->async_scan) scsi_complete_async_scans(); - if (scsi_host_scan_allowed(shost)) { + if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { if (channel == SCAN_WILD_CARD) for (channel = 0; 
channel <= shost->max_channel; channel++) @@ -1694,6 +1702,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, rescan); else scsi_scan_channel(shost, channel, id, lun, rescan); + scsi_autopm_put_host(shost); } mutex_unlock(&shost->scan_mutex); @@ -1831,8 +1840,11 @@ static void do_scsi_scan_host(struct Scsi_Host *shost) static int do_scan_async(void *_data) { struct async_scan_data *data = _data; - do_scsi_scan_host(data->shost); + struct Scsi_Host *shost = data->shost; + + do_scsi_scan_host(shost); scsi_finish_async_scan(data); + scsi_autopm_put_host(shost); return 0; } @@ -1847,16 +1859,20 @@ void scsi_scan_host(struct Scsi_Host *shost) if (strncmp(scsi_scan_type, "none", 4) == 0) return; + if (scsi_autopm_get_host(shost) < 0) + return; data = scsi_prep_async_scan(shost); if (!data) { do_scsi_scan_host(shost); + scsi_autopm_put_host(shost); return; } p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); if (IS_ERR(p)) do_scan_async(data); + /* scsi_autopm_put_host(shost) is called in do_scan_async() */ } EXPORT_SYMBOL(scsi_scan_host); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 5f85f8e831f3..562fb3bce261 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -802,8 +803,6 @@ static int scsi_target_add(struct scsi_target *starget) if (starget->state != STARGET_CREATED) return 0; - device_enable_async_suspend(&starget->dev); - error = device_add(&starget->dev); if (error) { dev_err(&starget->dev, "target device_add failed, error %d\n", error); @@ -812,6 +811,10 @@ static int scsi_target_add(struct scsi_target *starget) transport_add_device(&starget->dev); starget->state = STARGET_RUNNING; + pm_runtime_set_active(&starget->dev); + pm_runtime_enable(&starget->dev); + device_enable_async_suspend(&starget->dev); + return 0; } @@ -841,7 +844,20 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) return error; transport_configure_device(&starget->dev); + device_enable_async_suspend(&sdev->sdev_gendev); + scsi_autopm_get_target(starget); + pm_runtime_set_active(&sdev->sdev_gendev); + pm_runtime_forbid(&sdev->sdev_gendev); + pm_runtime_enable(&sdev->sdev_gendev); + scsi_autopm_put_target(starget); + + /* The following call will keep sdev active indefinitely, until + * its driver does a corresponding scsi_autopm_pm_device(). Only + * drivers supporting autosuspend will do this. 
+ */ + scsi_autopm_get_device(sdev); + error = device_add(&sdev->sdev_gendev); if (error) { printk(KERN_INFO "error 1\n"); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index d4549092400c..2968c6b83ddb 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -245,6 +245,10 @@ sg_open(struct inode *inode, struct file *filp) if (retval) goto sg_put; + retval = scsi_autopm_get_device(sdp->device); + if (retval) + goto sdp_put; + if (!((flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) { retval = -ENXIO; @@ -302,8 +306,11 @@ sg_open(struct inode *inode, struct file *filp) } retval = 0; error_out: - if (retval) + if (retval) { + scsi_autopm_put_device(sdp->device); +sdp_put: scsi_device_put(sdp->device); + } sg_put: if (sdp) sg_put_dev(sdp); @@ -327,6 +334,7 @@ sg_release(struct inode *inode, struct file *filp) sdp->exclude = 0; wake_up_interruptible(&sdp->o_excl_wait); + scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); return 0; } diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index d80b6dbed1ca..50cb34ffef11 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -381,6 +381,14 @@ extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, struct scsi_sense_hdr *, int timeout, int retries, int *resid); +#ifdef CONFIG_PM_RUNTIME +extern int scsi_autopm_get_device(struct scsi_device *); +extern void scsi_autopm_put_device(struct scsi_device *); +#else +static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; } +static inline void scsi_autopm_put_device(struct scsi_device *d) {} +#endif /* CONFIG_PM_RUNTIME */ + static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) { return device_reprobe(&sdev->sdev_gendev); -- cgit v1.2.3 From 7d14831e21060fbfbfe8453460ac19205f4ce1c2 Mon Sep 17 00:00:00 2001 From: Anuj Aggarwal Date: Mon, 12 Jul 2010 17:54:06 +0530 Subject: regulator: tps6507x: allow driver to use DEFDCDC{2,3}_HIGH register Acked-by: Mark Brown In TPS6507x, depending on the status of DEFDCDC{2,3} pin either DEFDCDC{2,3}_LOW or DEFDCDC{2,3}_HIGH register needs to be read or programmed to change the output voltage. The current driver assumes DEFDCDC{2,3} pins are always tied low and thus operates only on DEFDCDC{2,3}_LOW register. This need not always be the case (as is found on OMAP-L138 EVM). Unfortunately, software cannot read the status of DEFDCDC{2,3} pins. So, this information is passed through platform data depending on how the board is wired. Signed-off-by: Anuj Aggarwal Signed-off-by: Sekhar Nori Signed-off-by: Liam Girdwood --- drivers/regulator/tps6507x-regulator.c | 36 +++++++++++++++++++++++++++------- include/linux/regulator/tps6507x.h | 32 ++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 7 deletions(-) create mode 100644 include/linux/regulator/tps6507x.h (limited to 'include') diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index 14b4576281c5..8152d65220f5 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -101,9 +102,12 @@ struct tps_info { unsigned max_uV; u8 table_len; const u16 *table; + + /* Does DCDC high or the low register defines output voltage? 
*/ + bool defdcdc_default; }; -static const struct tps_info tps6507x_pmic_regs[] = { +static struct tps_info tps6507x_pmic_regs[] = { { .name = "VDCDC1", .min_uV = 725000, @@ -145,7 +149,7 @@ struct tps6507x_pmic { struct regulator_desc desc[TPS6507X_NUM_REGULATOR]; struct tps6507x_dev *mfd; struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR]; - const struct tps_info *info[TPS6507X_NUM_REGULATOR]; + struct tps_info *info[TPS6507X_NUM_REGULATOR]; struct mutex io_lock; }; static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg) @@ -341,10 +345,16 @@ static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev) reg = TPS6507X_REG_DEFDCDC1; break; case TPS6507X_DCDC_2: - reg = TPS6507X_REG_DEFDCDC2_LOW; + if (tps->info[dcdc]->defdcdc_default) + reg = TPS6507X_REG_DEFDCDC2_HIGH; + else + reg = TPS6507X_REG_DEFDCDC2_LOW; break; case TPS6507X_DCDC_3: - reg = TPS6507X_REG_DEFDCDC3_LOW; + if (tps->info[dcdc]->defdcdc_default) + reg = TPS6507X_REG_DEFDCDC3_HIGH; + else + reg = TPS6507X_REG_DEFDCDC3_LOW; break; default: return -EINVAL; @@ -370,10 +380,16 @@ static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev, reg = TPS6507X_REG_DEFDCDC1; break; case TPS6507X_DCDC_2: - reg = TPS6507X_REG_DEFDCDC2_LOW; + if (tps->info[dcdc]->defdcdc_default) + reg = TPS6507X_REG_DEFDCDC2_HIGH; + else + reg = TPS6507X_REG_DEFDCDC2_LOW; break; case TPS6507X_DCDC_3: - reg = TPS6507X_REG_DEFDCDC3_LOW; + if (tps->info[dcdc]->defdcdc_default) + reg = TPS6507X_REG_DEFDCDC3_HIGH; + else + reg = TPS6507X_REG_DEFDCDC3_LOW; break; default: return -EINVAL; @@ -532,7 +548,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev) { struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); static int desc_id; - const struct tps_info *info = &tps6507x_pmic_regs[0]; + struct tps_info *info = &tps6507x_pmic_regs[0]; struct regulator_init_data *init_data; struct regulator_dev *rdev; struct tps6507x_pmic *tps; @@ -569,6 +585,12 @@ int tps6507x_pmic_probe(struct platform_device *pdev) for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) { /* Register the regulators */ tps->info[i] = info; + if (init_data->driver_data) { + struct tps6507x_reg_platform_data *data = + init_data->driver_data; + tps->info[i]->defdcdc_default = data->defdcdc_default; + } + tps->desc[i].name = info->name; tps->desc[i].id = desc_id++; tps->desc[i].n_voltages = num_voltages[i]; diff --git a/include/linux/regulator/tps6507x.h b/include/linux/regulator/tps6507x.h new file mode 100644 index 000000000000..4892f591bab1 --- /dev/null +++ b/include/linux/regulator/tps6507x.h @@ -0,0 +1,32 @@ +/* + * tps6507x.h -- Voltage regulation for the Texas Instruments TPS6507X + * + * Copyright (C) 2010 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef REGULATOR_TPS6507X +#define REGULATOR_TPS6507X + +/** + * tps6507x_reg_platform_data - platform data for tps6507x + * @defdcdc_default: Defines whether DCDC high or the low register controls + * output voltage by default. Valid for DCDC2 and DCDC3 outputs only. + */ +struct tps6507x_reg_platform_data { + bool defdcdc_default; +}; + +#endif -- cgit v1.2.3 From bb8f563c848faa113059973f68c24a3bb6a9585e Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Wed, 21 Jul 2010 12:53:57 +0100 Subject: ARM: 6243/1: mmci: pass power_mode to the translate_vdd callback Platforms may have some external power control which need to be controlled from board specific code. Rename the translate_vdd() callback to vdd_handler() and pass it the power mode. Acked-by: Linus Walleij Signed-off-by: Rabin Vincent Signed-off-by: Russell King --- drivers/mmc/host/mmci.c | 13 +++---------- include/linux/amba/mmci.h | 10 ++++++---- 2 files changed, 9 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 3eaa0e9373cd..7ae3eeeefc29 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -493,16 +493,9 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) /* This implicitly enables the regulator */ mmc_regulator_set_ocr(host->vcc, ios->vdd); #endif - /* - * The translate_vdd function is not used if you have - * an external regulator, or your design is really weird. - * Using it would mean sending in power control BOTH using - * a regulator AND the 4 MMCIPWR bits. If we don't have - * a regulator, we might have some other platform specific - * power control behind this translate function. - */ - if (!host->vcc && host->plat->translate_vdd) - pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd); + if (host->plat->vdd_handler) + pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd, + ios->power_mode); /* The ST version does not have this, fall through to POWER_ON */ if (host->hw_designer != AMBA_VENDOR_ST) { pwr |= MCI_PWR_UP; diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index 7e466fe72025..ca84ce70d5d5 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -15,9 +15,10 @@ * @ocr_mask: available voltages on the 4 pins from the block, this * is ignored if a regulator is used, see the MMC_VDD_* masks in * mmc/host.h - * @translate_vdd: a callback function to translate a MMC_VDD_* - * mask into a value to be binary or:ed and written into the - * MMCIPWR register of the block + * @vdd_handler: a callback function to translate a MMC_VDD_* + * mask into a value to be binary (or set some other custom bits + * in MMCIPWR) or:ed and written into the MMCIPWR register of the + * block. May also control external power based on the power_mode. 
* @status: if no GPIO read function was given to the block in * gpio_wp (below) this function will be called to determine * whether a card is present in the MMC slot or not @@ -29,7 +30,8 @@ struct mmci_platform_data { unsigned int f_max; unsigned int ocr_mask; - u32 (*translate_vdd)(struct device *, unsigned int); + u32 (*vdd_handler)(struct device *, unsigned int vdd, + unsigned char power_mode); unsigned int (*status)(struct device *); int gpio_wp; int gpio_cd; -- cgit v1.2.3 From de09a9771a5346029f4d11e4ac886be7f9bfdd75 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jul 2010 12:45:49 +0100 Subject: CRED: Fix get_task_cred() and task_state() to not resurrect dead credentials It's possible for get_task_cred() as it currently stands to 'corrupt' a set of credentials by incrementing their usage count after their replacement by the task being accessed. What happens is that get_task_cred() can race with commit_creds(): TASK_1 TASK_2 RCU_CLEANER -->get_task_cred(TASK_2) rcu_read_lock() __cred = __task_cred(TASK_2) -->commit_creds() old_cred = TASK_2->real_cred TASK_2->real_cred = ... put_cred(old_cred) call_rcu(old_cred) [__cred->usage == 0] get_cred(__cred) [__cred->usage == 1] rcu_read_unlock() -->put_cred_rcu() [__cred->usage == 1] panic() However, since a tasks credentials are generally not changed very often, we can reasonably make use of a loop involving reading the creds pointer and using atomic_inc_not_zero() to attempt to increment it if it hasn't already hit zero. If successful, we can safely return the credentials in the knowledge that, even if the task we're accessing has released them, they haven't gone to the RCU cleanup code. We then change task_state() in procfs to use get_task_cred() rather than calling get_cred() on the result of __task_cred(), as that suffers from the same problem. Without this change, a BUG_ON in __put_cred() or in put_cred_rcu() can be tripped when it is noticed that the usage count is not zero as it ought to be, for example: kernel BUG at kernel/cred.c:168! 
invalid opcode: 0000 [#1] SMP last sysfs file: /sys/kernel/mm/ksm/run CPU 0 Pid: 2436, comm: master Not tainted 2.6.33.3-85.fc13.x86_64 #1 0HR330/OptiPlex 745 RIP: 0010:[] [] __put_cred+0xc/0x45 RSP: 0018:ffff88019e7e9eb8 EFLAGS: 00010202 RAX: 0000000000000001 RBX: ffff880161514480 RCX: 00000000ffffffff RDX: 00000000ffffffff RSI: ffff880140c690c0 RDI: ffff880140c690c0 RBP: ffff88019e7e9eb8 R08: 00000000000000d0 R09: 0000000000000000 R10: 0000000000000001 R11: 0000000000000040 R12: ffff880140c690c0 R13: ffff88019e77aea0 R14: 00007fff336b0a5c R15: 0000000000000001 FS: 00007f12f50d97c0(0000) GS:ffff880007400000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f8f461bc000 CR3: 00000001b26ce000 CR4: 00000000000006f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process master (pid: 2436, threadinfo ffff88019e7e8000, task ffff88019e77aea0) Stack: ffff88019e7e9ec8 ffffffff810698cd ffff88019e7e9ef8 ffffffff81069b45 <0> ffff880161514180 ffff880161514480 ffff880161514180 0000000000000000 <0> ffff88019e7e9f28 ffffffff8106aace 0000000000000001 0000000000000246 Call Trace: [] put_cred+0x13/0x15 [] commit_creds+0x16b/0x175 [] set_current_groups+0x47/0x4e [] sys_setgroups+0xf6/0x105 [] system_call_fastpath+0x16/0x1b Code: 48 8d 71 ff e8 7e 4e 15 00 85 c0 78 0b 8b 75 ec 48 89 df e8 ef 4a 15 00 48 83 c4 18 5b c9 c3 55 8b 07 8b 07 48 89 e5 85 c0 74 04 <0f> 0b eb fe 65 48 8b 04 25 00 cc 00 00 48 3b b8 58 04 00 00 75 RIP [] __put_cred+0xc/0x45 RSP ---[ end trace df391256a100ebdd ]--- Signed-off-by: David Howells Acked-by: Jiri Olsa Signed-off-by: Linus Torvalds --- fs/proc/array.c | 2 +- include/linux/cred.h | 21 +-------------------- kernel/cred.c | 25 +++++++++++++++++++++++++ 3 files changed, 27 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/fs/proc/array.c b/fs/proc/array.c index 9b58d38bc911..fff6572676ae 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -176,7 +176,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, if (tracer) tpid = task_pid_nr_ns(tracer, ns); } - cred = get_cred((struct cred *) __task_cred(p)); + cred = get_task_cred(p); seq_printf(m, "State:\t%s\n" "Tgid:\t%d\n" diff --git a/include/linux/cred.h b/include/linux/cred.h index 75c0fa881308..ce40cbc791e2 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -153,6 +153,7 @@ struct cred { extern void __put_cred(struct cred *); extern void exit_creds(struct task_struct *); extern int copy_creds(struct task_struct *, unsigned long); +extern const struct cred *get_task_cred(struct task_struct *); extern struct cred *cred_alloc_blank(void); extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); @@ -281,26 +282,6 @@ static inline void put_cred(const struct cred *_cred) #define __task_cred(task) \ ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held()))) -/** - * get_task_cred - Get another task's objective credentials - * @task: The task to query - * - * Get the objective credentials of a task, pinning them so that they can't go - * away. Accessing a task's credentials directly is not permitted. - * - * The caller must make sure task doesn't go away, either by holding a ref on - * task or by holding tasklist_lock to prevent it from being unlinked. 
- */ -#define get_task_cred(task) \ -({ \ - struct cred *__cred; \ - rcu_read_lock(); \ - __cred = (struct cred *) __task_cred((task)); \ - get_cred(__cred); \ - rcu_read_unlock(); \ - __cred; \ -}) - /** * get_current_cred - Get the current task's subjective credentials * diff --git a/kernel/cred.c b/kernel/cred.c index a2d5504fbcc2..60bc8b1e32e6 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -209,6 +209,31 @@ void exit_creds(struct task_struct *tsk) } } +/** + * get_task_cred - Get another task's objective credentials + * @task: The task to query + * + * Get the objective credentials of a task, pinning them so that they can't go + * away. Accessing a task's credentials directly is not permitted. + * + * The caller must also make sure task doesn't get deleted, either by holding a + * ref on task or by holding tasklist_lock to prevent it from being unlinked. + */ +const struct cred *get_task_cred(struct task_struct *task) +{ + const struct cred *cred; + + rcu_read_lock(); + + do { + cred = __task_cred((task)); + BUG_ON(!cred); + } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage)); + + rcu_read_unlock(); + return cred; +} + /* * Allocate blank credentials, such that the credentials can be filled in at a * later date without risk of ENOMEM. -- cgit v1.2.3 From 8f92054e7ca1d3a3ae50fb42d2253ac8730d9b2a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jul 2010 12:45:55 +0100 Subject: CRED: Fix __task_cred()'s lockdep check and banner comment Fix __task_cred()'s lockdep check by removing the following validation condition: lockdep_tasklist_lock_is_held() as commit_creds() does not take the tasklist_lock, and nor do most of the functions that call it, so this check is pointless and it can prevent detection of the RCU lock not being held if the tasklist_lock is held. Instead, add the following validation condition: task->exit_state >= 0 to permit the access if the target task is dead and therefore unable to change its own credentials. Fix __task_cred()'s comment to: (1) discard the bit that says that the caller must prevent the target task from being deleted. That shouldn't need saying. (2) Add a comment indicating the result of __task_cred() should not be passed directly to get_cred(), but rather than get_task_cred() should be used instead. Also put a note into the documentation to enforce this point there too. Signed-off-by: David Howells Acked-by: Jiri Olsa Cc: Paul E. McKenney Signed-off-by: Linus Torvalds --- Documentation/credentials.txt | 3 +++ include/linux/cred.h | 15 ++++++++++----- include/linux/sched.h | 1 + 3 files changed, 14 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/Documentation/credentials.txt b/Documentation/credentials.txt index a2db35287003..995baf379c07 100644 --- a/Documentation/credentials.txt +++ b/Documentation/credentials.txt @@ -417,6 +417,9 @@ reference on them using: This does all the RCU magic inside of it. The caller must call put_cred() on the credentials so obtained when they're finished with. + [*] Note: The result of __task_cred() should not be passed directly to + get_cred() as this may race with commit_cred(). 
+ There are a couple of convenience functions to access bits of another task's credentials, hiding the RCU magic from the caller: diff --git a/include/linux/cred.h b/include/linux/cred.h index ce40cbc791e2..4d2c39573f36 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -274,13 +274,18 @@ static inline void put_cred(const struct cred *_cred) * @task: The task to query * * Access the objective credentials of a task. The caller must hold the RCU - * readlock. + * readlock or the task must be dead and unable to change its own credentials. * - * The caller must make sure task doesn't go away, either by holding a ref on - * task or by holding tasklist_lock to prevent it from being unlinked. + * The result of this function should not be passed directly to get_cred(); + * rather get_task_cred() should be used instead. */ -#define __task_cred(task) \ - ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held()))) +#define __task_cred(task) \ + ({ \ + const struct task_struct *__t = (task); \ + rcu_dereference_check(__t->real_cred, \ + rcu_read_lock_held() || \ + task_is_dead(__t)); \ + }) /** * get_current_cred - Get the current task's subjective credentials diff --git a/include/linux/sched.h b/include/linux/sched.h index 747fcaedddb7..0478888c6899 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -214,6 +214,7 @@ extern char ___assert_task_state[1 - 2*!!( #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +#define task_is_dead(task) ((task)->exit_state != 0) #define task_is_stopped_or_traced(task) \ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) #define task_contributes_to_load(task) \ -- cgit v1.2.3 From b608b283a962caaa280756bc8563016a71712acf Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 30 Jul 2010 15:31:54 -0400 Subject: NFS: kswapd must not block in nfs_release_page See https://bugzilla.kernel.org/show_bug.cgi?id=16056 If other processes are blocked waiting for kswapd to free up some memory so that they can make progress, then we cannot allow kswapd to block on those processes. 
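For illustration only, a condensed sketch (not part of this patch) of the pattern the change applies in nfs_release_page() below: when ->releasepage() runs in kswapd's context, the COMMIT is issued asynchronously (how = 0) instead of FLUSH_SYNC, so kswapd never sleeps on an RPC that may itself be stalled waiting for memory to be reclaimed. The helpers are the NFS ones used in the diff; the function name and the simplified return path are made up for the example.

    static int example_release_page(struct page *page, gfp_t gfp)
    {
    	struct address_space *mapping = page->mapping;

    	/* Only do I/O if gfp is a superset of GFP_KERNEL */
    	if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
    		/* Don't let kswapd block on an OOM-prone RPC */
    		int how = current_is_kswapd() ? 0 : FLUSH_SYNC;

    		nfs_commit_inode(mapping->host, how);
    	}

    	/* A page with private state is still not freeable */
    	return !PagePrivate(page);
    }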
Signed-off-by: Trond Myklebust Cc: stable@kernel.org --- fs/nfs/file.c | 13 +++++++++++-- fs/nfs/write.c | 4 ++-- include/linux/nfs_fs.h | 1 + 3 files changed, 14 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 36a5e74f51b4..f036153d9f50 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -493,11 +494,19 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset) */ static int nfs_release_page(struct page *page, gfp_t gfp) { + struct address_space *mapping = page->mapping; + dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); /* Only do I/O if gfp is a superset of GFP_KERNEL */ - if ((gfp & GFP_KERNEL) == GFP_KERNEL) - nfs_wb_page(page->mapping->host, page); + if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) { + int how = FLUSH_SYNC; + + /* Don't let kswapd deadlock waiting for OOM RPC calls */ + if (current_is_kswapd()) + how = 0; + nfs_commit_inode(mapping->host, how); + } /* If PagePrivate() is set, then the page is not freeable */ if (PagePrivate(page)) return 0; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 91679e2631ee..0a6c65a1f9d7 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1379,7 +1379,7 @@ static const struct rpc_call_ops nfs_commit_ops = { .rpc_release = nfs_commit_release, }; -static int nfs_commit_inode(struct inode *inode, int how) +int nfs_commit_inode(struct inode *inode, int how) { LIST_HEAD(head); int may_wait = how & FLUSH_SYNC; @@ -1443,7 +1443,7 @@ out_mark_dirty: return ret; } #else -static int nfs_commit_inode(struct inode *inode, int how) +int nfs_commit_inode(struct inode *inode, int how) { return 0; } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 77c2ae53431c..f6e2455f13d1 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -493,6 +493,7 @@ extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page* page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) +extern int nfs_commit_inode(struct inode *, int); extern struct nfs_write_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_write_data *wdata); #endif -- cgit v1.2.3 From 60347c194acec7ff1b4291ac8e62a5345244c2ee Mon Sep 17 00:00:00 2001 From: Samuli Konttila Date: Fri, 30 Jul 2010 09:02:43 -0700 Subject: Input: cy8ctmg110 - capacitive touchscreen support Add support for the cy8ctmg110 capacitive touchscreen used on some embedded devices. (Some clean up by Alan Cox) Signed-off-by: Alan Cox Signed-off-by: Dmitry Torokhov --- drivers/input/touchscreen/Kconfig | 14 ++ drivers/input/touchscreen/Makefile | 3 +- drivers/input/touchscreen/cy8ctmg110_ts.c | 363 ++++++++++++++++++++++++++++++ include/linux/input/cy8ctmg110_pdata.h | 10 + 4 files changed, 389 insertions(+), 1 deletion(-) create mode 100644 drivers/input/touchscreen/cy8ctmg110_ts.c create mode 100644 include/linux/input/cy8ctmg110_pdata.h (limited to 'include') diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 7bfcfdff6cf8..61f35184f76c 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -98,6 +98,20 @@ config TOUCHSCREEN_BITSY To compile this driver as a module, choose M here: the module will be called h3600_ts_input. 
+config TOUCHSCREEN_CY8CTMG110 + tristate "cy8ctmg110 touchscreen" + depends on I2C + depends on GPIOLIB + + help + Say Y here if you have a cy8ctmg110 capacitive touchscreen on + an AAVA device. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called cy8ctmg110_ts. + config TOUCHSCREEN_DA9034 tristate "Touchscreen support for Dialog Semiconductor DA9034" depends on PMIC_DA903X diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 779de0d9d413..bd6f30b4ff70 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -14,6 +14,8 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o +obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o +obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o @@ -41,7 +43,6 @@ obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o obj-$(CONFIG_TOUCHSCREEN_WM97XX) += wm97xx-ts.o -obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c new file mode 100644 index 000000000000..4eb7df0b7f87 --- /dev/null +++ b/drivers/input/touchscreen/cy8ctmg110_ts.c @@ -0,0 +1,363 @@ +/* + * Driver for cypress touch screen controller + * + * Copyright (c) 2009 Aava Mobile + * + * Some cleanups by Alan Cox + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CY8CTMG110_DRIVER_NAME "cy8ctmg110" + +/* Touch coordinates */ +#define CY8CTMG110_X_MIN 0 +#define CY8CTMG110_Y_MIN 0 +#define CY8CTMG110_X_MAX 759 +#define CY8CTMG110_Y_MAX 465 + + +/* cy8ctmg110 register definitions */ +#define CY8CTMG110_TOUCH_WAKEUP_TIME 0 +#define CY8CTMG110_TOUCH_SLEEP_TIME 2 +#define CY8CTMG110_TOUCH_X1 3 +#define CY8CTMG110_TOUCH_Y1 5 +#define CY8CTMG110_TOUCH_X2 7 +#define CY8CTMG110_TOUCH_Y2 9 +#define CY8CTMG110_FINGERS 11 +#define CY8CTMG110_GESTURE 12 +#define CY8CTMG110_REG_MAX 13 + + +/* + * The touch driver structure. + */ +struct cy8ctmg110 { + struct input_dev *input; + char phys[32]; + struct i2c_client *client; + int reset_pin; + int irq_pin; +}; + +/* + * cy8ctmg110_power is the routine that is called when touch hardware + * will powered off or on. 
+ */ +static void cy8ctmg110_power(struct cy8ctmg110 *ts, bool poweron) +{ + if (ts->reset_pin) + gpio_direction_output(ts->reset_pin, 1 - poweron); +} + +static int cy8ctmg110_write_regs(struct cy8ctmg110 *tsc, unsigned char reg, + unsigned char len, unsigned char *value) +{ + struct i2c_client *client = tsc->client; + unsigned int ret; + unsigned char i2c_data[6]; + + BUG_ON(len > 5); + + i2c_data[0] = reg; + memcpy(i2c_data + 1, value, len); + + ret = i2c_master_send(client, i2c_data, len + 1); + if (ret != 1) { + dev_err(&client->dev, "i2c write data cmd failed\n"); + return ret; + } + + return 0; +} + +static int cy8ctmg110_read_regs(struct cy8ctmg110 *tsc, + unsigned char *data, unsigned char len, unsigned char cmd) +{ + struct i2c_client *client = tsc->client; + unsigned int ret; + struct i2c_msg msg[2] = { + /* first write slave position to i2c devices */ + { client->addr, 0, 1, &cmd }, + /* Second read data from position */ + { client->addr, I2C_M_RD, len, data } + }; + + ret = i2c_transfer(client->adapter, msg, 2); + if (ret < 0) + return ret; + + return 0; +} + +static int cy8ctmg110_touch_pos(struct cy8ctmg110 *tsc) +{ + struct input_dev *input = tsc->input; + unsigned char reg_p[CY8CTMG110_REG_MAX]; + int x, y; + + memset(reg_p, 0, CY8CTMG110_REG_MAX); + + /* Reading coordinates */ + if (cy8ctmg110_read_regs(tsc, reg_p, 9, CY8CTMG110_TOUCH_X1) != 0) + return -EIO; + + y = reg_p[2] << 8 | reg_p[3]; + x = reg_p[0] << 8 | reg_p[1]; + + /* Number of touch */ + if (reg_p[8] == 0) { + input_report_key(input, BTN_TOUCH, 0); + } else { + input_report_key(input, BTN_TOUCH, 1); + input_report_abs(input, ABS_X, x); + input_report_abs(input, ABS_Y, y); + } + + input_sync(input); + + return 0; +} + +static int cy8ctmg110_set_sleepmode(struct cy8ctmg110 *ts, bool sleep) +{ + unsigned char reg_p[3]; + + if (sleep) { + reg_p[0] = 0x00; + reg_p[1] = 0xff; + reg_p[2] = 5; + } else { + reg_p[0] = 0x10; + reg_p[1] = 0xff; + reg_p[2] = 0; + } + + return cy8ctmg110_write_regs(ts, CY8CTMG110_TOUCH_WAKEUP_TIME, 3, reg_p); +} + +static irqreturn_t cy8ctmg110_irq_thread(int irq, void *dev_id) +{ + struct cy8ctmg110 *tsc = dev_id; + + cy8ctmg110_touch_pos(tsc); + + return IRQ_HANDLED; +} + +static int __devinit cy8ctmg110_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + const struct cy8ctmg110_pdata *pdata = client->dev.platform_data; + struct cy8ctmg110 *ts; + struct input_dev *input_dev; + int err; + + /* No pdata no way forward */ + if (pdata == NULL) { + dev_err(&client->dev, "no pdata\n"); + return -ENODEV; + } + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_WORD_DATA)) + return -EIO; + + ts = kzalloc(sizeof(struct cy8ctmg110), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!ts || !input_dev) { + err = -ENOMEM; + goto err_free_mem; + } + + ts->client = client; + ts->input = input_dev; + + snprintf(ts->phys, sizeof(ts->phys), + "%s/input0", dev_name(&client->dev)); + + input_dev->name = CY8CTMG110_DRIVER_NAME " Touchscreen"; + input_dev->phys = ts->phys; + input_dev->id.bustype = BUS_I2C; + input_dev->dev.parent = &client->dev; + + input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); + input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); + + input_set_abs_params(input_dev, ABS_X, + CY8CTMG110_X_MIN, CY8CTMG110_X_MAX, 0, 0); + input_set_abs_params(input_dev, ABS_Y, + CY8CTMG110_Y_MIN, CY8CTMG110_Y_MAX, 0, 0); + + if (ts->reset_pin) { + err = gpio_request(ts->reset_pin, NULL); + if (err) { + dev_err(&client->dev, + "Unable to 
request GPIO pin %d.\n", + ts->reset_pin); + goto err_free_mem; + } + } + + cy8ctmg110_power(ts, true); + cy8ctmg110_set_sleepmode(ts, false); + + err = gpio_request(ts->irq_pin, "touch_irq_key"); + if (err < 0) { + dev_err(&client->dev, + "Failed to request GPIO %d, error %d\n", + ts->irq_pin, err); + goto err_shutoff_device; + } + + err = gpio_direction_input(ts->irq_pin); + if (err < 0) { + dev_err(&client->dev, + "Failed to configure input direction for GPIO %d, error %d\n", + ts->irq_pin, err); + goto err_free_irq_gpio; + } + + client->irq = gpio_to_irq(ts->irq_pin); + if (client->irq < 0) { + err = client->irq; + dev_err(&client->dev, + "Unable to get irq number for GPIO %d, error %d\n", + ts->irq_pin, err); + goto err_free_irq_gpio; + } + + err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread, + IRQF_TRIGGER_RISING, "touch_reset_key", ts); + if (err < 0) { + dev_err(&client->dev, + "irq %d busy? error %d\n", client->irq, err); + goto err_free_irq_gpio; + } + + err = input_register_device(input_dev); + if (err) + goto err_free_irq; + + i2c_set_clientdata(client, ts); + device_init_wakeup(&client->dev, 1); + return 0; + +err_free_irq: + free_irq(client->irq, ts); +err_free_irq_gpio: + gpio_free(ts->irq_pin); +err_shutoff_device: + cy8ctmg110_set_sleepmode(ts, true); + cy8ctmg110_power(ts, false); + if (ts->reset_pin) + gpio_free(ts->reset_pin); +err_free_mem: + input_free_device(input_dev); + kfree(ts); + return err; +} + +#ifdef CONFIG_PM +static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg) +{ + struct cy8ctmg110 *ts = i2c_get_clientdata(client); + + if (device_may_wakeup(&client->dev)) + enable_irq_wake(client->irq); + else { + cy8ctmg110_set_sleepmode(ts, true); + cy8ctmg110_power(ts, false); + } + return 0; +} + +static int cy8ctmg110_resume(struct i2c_client *client) +{ + struct cy8ctmg110 *ts = i2c_get_clientdata(client); + + if (device_may_wakeup(&client->dev)) + disable_irq_wake(client->irq); + else { + cy8ctmg110_power(ts, true); + cy8ctmg110_set_sleepmode(ts, false); + } + return 0; +} +#endif + +static int __devexit cy8ctmg110_remove(struct i2c_client *client) +{ + struct cy8ctmg110 *ts = i2c_get_clientdata(client); + + cy8ctmg110_set_sleepmode(ts, true); + cy8ctmg110_power(ts, false); + + free_irq(client->irq, ts); + input_unregister_device(ts->input); + gpio_free(ts->irq_pin); + if (ts->reset_pin) + gpio_free(ts->reset_pin); + kfree(ts); + + return 0; +} + +static struct i2c_device_id cy8ctmg110_idtable[] = { + { CY8CTMG110_DRIVER_NAME, 1 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, cy8ctmg110_idtable); + +static struct i2c_driver cy8ctmg110_driver = { + .driver = { + .owner = THIS_MODULE, + .name = CY8CTMG110_DRIVER_NAME, + }, + .id_table = cy8ctmg110_idtable, + .probe = cy8ctmg110_probe, + .remove = __devexit_p(cy8ctmg110_remove), +#ifdef CONFIG_PM + .suspend = cy8ctmg110_suspend, + .resume = cy8ctmg110_resume, +#endif +}; + +static int __init cy8ctmg110_init(void) +{ + return i2c_add_driver(&cy8ctmg110_driver); +} + +static void __exit cy8ctmg110_exit(void) +{ + i2c_del_driver(&cy8ctmg110_driver); +} + +module_init(cy8ctmg110_init); +module_exit(cy8ctmg110_exit); + +MODULE_AUTHOR("Samuli Konttila "); +MODULE_DESCRIPTION("cy8ctmg110 TouchScreen Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h new file mode 100644 index 000000000000..09522cb59910 --- /dev/null +++ b/include/linux/input/cy8ctmg110_pdata.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_CY8CTMG110_PDATA_H 
+#define _LINUX_CY8CTMG110_PDATA_H + +struct cy8ctmg110_pdata +{ + int reset_pin; /* Reset pin is wired to this GPIO (optional) */ + int irq_pin; /* IRQ pin is wired to this GPIO */ +}; + +#endif -- cgit v1.2.3 From e6cc11707661770ca2bd4db4b0256d28f48e7541 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:14:28 +0200 Subject: padata: Rename padata_alloc functions We rename padata_alloc to padata_alloc_possible because this function allocates a padata_instance and uses the cpu_possible mask for parallel and serial workers. Also we rename __padata_alloc to padata_alloc to avoid to export underlined functions. Underlined functions are considered to be private to padata. Users are updated accordingly. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/pcrypt.c | 2 +- include/linux/padata.h | 9 +++++---- kernel/padata.c | 24 ++++++++++++------------ 3 files changed, 18 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 794c172b99f7..55460839624e 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -457,7 +457,7 @@ static int __pcrypt_init_instance(struct pcrypt_instance *pcrypt, if (!pcrypt->wq) goto err; - pcrypt->pinst = padata_alloc(pcrypt->wq); + pcrypt->pinst = padata_alloc_possible(pcrypt->wq); if (!pcrypt->pinst) goto err_destroy_workqueue; diff --git a/include/linux/padata.h b/include/linux/padata.h index 293ad46ffced..71dfc9d1f856 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -165,10 +165,11 @@ struct padata_instance { #define PADATA_INVALID 4 }; -extern struct padata_instance *padata_alloc(struct workqueue_struct *wq); -extern struct padata_instance *__padata_alloc(struct workqueue_struct *wq, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask); +extern struct padata_instance *padata_alloc_possible( + struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask); extern void padata_free(struct padata_instance *pinst); extern int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu); diff --git a/kernel/padata.c b/kernel/padata.c index 7f895e2b4efb..12860bce6b78 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -1060,29 +1060,29 @@ static struct kobj_type padata_attr_type = { }; /** - * padata_alloc - Allocate and initialize padata instance. - * Use default cpumask(cpu_possible_mask) - * for serial and parallel workes. + * padata_alloc_possible - Allocate and initialize padata instance. + * Use the cpu_possible_mask for serial and + * parallel workers. * * @wq: workqueue to use for the allocated padata instance */ -struct padata_instance *padata_alloc(struct workqueue_struct *wq) +struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) { - return __padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); + return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); } -EXPORT_SYMBOL(padata_alloc); +EXPORT_SYMBOL(padata_alloc_possible); /** - * __padata_alloc - allocate and initialize a padata instance - * and specify cpumasks for serial and parallel workers. + * padata_alloc - allocate and initialize a padata instance and specify + * cpumasks for serial and parallel workers. 
* * @wq: workqueue to use for the allocated padata instance * @pcpumask: cpumask that will be used for padata parallelization * @cbcpumask: cpumask that will be used for padata serialization */ -struct padata_instance *__padata_alloc(struct workqueue_struct *wq, - const struct cpumask *pcpumask, - const struct cpumask *cbcpumask) +struct padata_instance *padata_alloc(struct workqueue_struct *wq, + const struct cpumask *pcpumask, + const struct cpumask *cbcpumask) { struct padata_instance *pinst; struct parallel_data *pd = NULL; @@ -1138,7 +1138,7 @@ err_free_inst: err: return NULL; } -EXPORT_SYMBOL(__padata_alloc); +EXPORT_SYMBOL(padata_alloc); /** * padata_free - free a padata instance -- cgit v1.2.3 From 65ff577e6b6e482ee9de3569e058edebdc02f069 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:15:06 +0200 Subject: padata: Rearrange set_cpumask functions padata_set_cpumask needs to be protected by a lock. We make __padata_set_cpumasks unlocked and static. So this function can be used by the exported and locked padata_set_cpumask and padata_set_cpumasks functions. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- include/linux/padata.h | 6 +-- kernel/padata.c | 117 ++++++++++++++++++++++++++++--------------------- 2 files changed, 70 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/include/linux/padata.h b/include/linux/padata.h index 71dfc9d1f856..bb0fc5dd0bbb 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -178,9 +178,9 @@ extern int padata_get_cpumask(struct padata_instance *pinst, int cpumask_type, struct cpumask *out_mask); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int __padata_set_cpumasks(struct padata_instance *pinst, - cpumask_var_t pcpumask, - cpumask_var_t cbcpumask); +extern int padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, + cpumask_var_t cbcpumask); extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask); extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask); extern int padata_start(struct padata_instance *pinst); diff --git a/kernel/padata.c b/kernel/padata.c index 12860bce6b78..4987203770bc 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -623,6 +623,66 @@ int padata_get_cpumask(struct padata_instance *pinst, } EXPORT_SYMBOL(padata_get_cpumask); +static int __padata_set_cpumasks(struct padata_instance *pinst, + cpumask_var_t pcpumask, + cpumask_var_t cbcpumask) +{ + int valid; + struct parallel_data *pd; + + valid = padata_validate_cpumask(pinst, pcpumask); + if (!valid) { + __padata_stop(pinst); + goto out_replace; + } + + valid = padata_validate_cpumask(pinst, cbcpumask); + if (!valid) + __padata_stop(pinst); + +out_replace: + pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); + if (!pd) + return -ENOMEM; + + cpumask_copy(pinst->cpumask.pcpu, pcpumask); + cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); + + padata_replace(pinst, pd); + + if (valid) + __padata_start(pinst); + + return 0; +} + +/** + * padata_set_cpumasks - Set both parallel and serial cpumasks. The first + * one is used by parallel workers and the second one + * by the wokers doing serialization. 
+ * + * @pinst: padata instance + * @pcpumask: the cpumask to use for parallel workers + * @cbcpumask: the cpumsak to use for serial workers + */ +int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, + cpumask_var_t cbcpumask) +{ + int err; + + mutex_lock(&pinst->lock); + get_online_cpus(); + + err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask); + + put_online_cpus(); + mutex_unlock(&pinst->lock); + + return err; + +} +EXPORT_SYMBOL(padata_set_cpumasks); + /** * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value * equivalent to @cpumask. @@ -636,6 +696,10 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask) { struct cpumask *serial_mask, *parallel_mask; + int err = -EINVAL; + + mutex_lock(&pinst->lock); + get_online_cpus(); switch (cpumask_type) { case PADATA_CPU_PARALLEL: @@ -647,65 +711,18 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, serial_mask = cpumask; break; default: - return -EINVAL; + goto out; } - return __padata_set_cpumasks(pinst, parallel_mask, serial_mask); -} -EXPORT_SYMBOL(padata_set_cpumask); - -/** - * __padata_set_cpumasks - Set both parallel and serial cpumasks. The first - * one is used by parallel workers and the second one - * by the wokers doing serialization. - * - * @pinst: padata instance - * @pcpumask: the cpumask to use for parallel workers - * @cbcpumask: the cpumsak to use for serial workers - */ -int __padata_set_cpumasks(struct padata_instance *pinst, - cpumask_var_t pcpumask, cpumask_var_t cbcpumask) -{ - int valid; - int err = 0; - struct parallel_data *pd = NULL; - - mutex_lock(&pinst->lock); - get_online_cpus(); - - valid = padata_validate_cpumask(pinst, pcpumask); - if (!valid) { - __padata_stop(pinst); - goto out_replace; - } - - valid = padata_validate_cpumask(pinst, cbcpumask); - if (!valid) - __padata_stop(pinst); - -out_replace: - pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); - if (!pd) { - err = -ENOMEM; - goto out; - } - - cpumask_copy(pinst->cpumask.pcpu, pcpumask); - cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); - - padata_replace(pinst, pd); - - if (valid) - __padata_start(pinst); + err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); out: put_online_cpus(); mutex_unlock(&pinst->lock); return err; - } -EXPORT_SYMBOL(__padata_set_cpumasks); +EXPORT_SYMBOL(padata_set_cpumask); static int __padata_add_cpu(struct padata_instance *pinst, int cpu) { -- cgit v1.2.3 From c635696c7c0fbc720698dbec34bb83e53df6a967 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:15:50 +0200 Subject: padata: Pass the padata cpumasks to the cpumask_change_notifier chain We pass a pointer to the new padata cpumasks to the cpumask_change_notifier chain. So users can access the cpumasks without the need of an extra padata_get_cpumask function. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- include/linux/padata.h | 40 +++++++++++++++++++++------------------- kernel/padata.c | 3 ++- 2 files changed, 23 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/linux/padata.h b/include/linux/padata.h index bb0fc5dd0bbb..43db792f44dd 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -98,6 +98,16 @@ struct padata_parallel_queue { int cpu_index; }; +/** + * struct padata_cpumask - The cpumasks for the parallel/serial workers + * + * @pcpu: cpumask for the parallel workers. + * @cbcpu: cpumask for the serial (callback) workers. 
+ */ +struct padata_cpumask { + cpumask_var_t pcpu; + cpumask_var_t cbcpu; +}; /** * struct parallel_data - Internal control structure, covers everything @@ -110,8 +120,7 @@ struct padata_parallel_queue { * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. * @max_seq_nr: Maximal used sequence number. - * @cpumask: Contains two cpumasks: pcpu and cbcpu for - * parallel and serial workers respectively. + * @cpumask: The cpumasks in use for parallel and serial workers. * @lock: Reorder lock. * @processed: Number of already processed objects. * @timer: Reorder timer. @@ -120,17 +129,14 @@ struct parallel_data { struct padata_instance *pinst; struct padata_parallel_queue *pqueue; struct padata_serial_queue *squeue; - atomic_t seq_nr; - atomic_t reorder_objects; - atomic_t refcnt; - unsigned int max_seq_nr; - struct { - cpumask_var_t pcpu; - cpumask_var_t cbcpu; - } cpumask; - spinlock_t lock ____cacheline_aligned; - unsigned int processed; - struct timer_list timer; + atomic_t seq_nr; + atomic_t reorder_objects; + atomic_t refcnt; + unsigned int max_seq_nr; + struct padata_cpumask cpumask; + spinlock_t lock ____cacheline_aligned; + unsigned int processed; + struct timer_list timer; }; /** @@ -139,8 +145,7 @@ struct parallel_data { * @cpu_notifier: cpu hotplug notifier. * @wq: The workqueue in use. * @pd: The internal control structure. - * @cpumask: User supplied cpumask. Contains two cpumasks: pcpu and - * cbcpu for parallel and serial works respectivly. + * @cpumask: User supplied cpumasks for parallel and serial works. * @cpumask_change_notifier: Notifiers chain for user-defined notify * callbacks that will be called when either @pcpu or @cbcpu * or both cpumasks change. @@ -152,10 +157,7 @@ struct padata_instance { struct notifier_block cpu_notifier; struct workqueue_struct *wq; struct parallel_data *pd; - struct { - cpumask_var_t pcpu; - cpumask_var_t cbcpu; - } cpumask; + struct padata_cpumask cpumask; struct blocking_notifier_head cpumask_change_notifier; struct kobject kobj; struct mutex lock; diff --git a/kernel/padata.c b/kernel/padata.c index 4987203770bc..1c8c1d1d301d 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -538,7 +538,8 @@ static void padata_replace(struct padata_instance *pinst, if (notification_mask) blocking_notifier_call_chain(&pinst->cpumask_change_notifier, - notification_mask, pinst); + notification_mask, + &pd_new->cpumask); pinst->flags &= ~PADATA_RESET; } -- cgit v1.2.3 From 0500e9b3f11ce84fc6ee48a3e29909145e58ba48 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 27 Jul 2010 07:19:27 +0200 Subject: padata: Remove padata_get_cpumask A function that copies the padata cpumasks to a user buffer is a bit error prone. The cpumask can change any time so we can't be sure to have the right cpumask when using this function. A user who is interested in the padata cpumasks should register to the padata cpumask notifier chain instead. Users of padata_get_cpumask are already updated, so we can remove it. 
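For illustration only (not part of this patch), a minimal sketch of the replacement: a padata user registers on the cpumask notifier chain and is handed the new masks directly when they change. It assumes the padata_register_cpumask_notifier() helper is available to users; since the previous patch in this series the notifier data is a struct padata_cpumask *. Names prefixed my_ are invented for the example.

    #include <linux/padata.h>
    #include <linux/notifier.h>

    static int my_cpumask_change(struct notifier_block *nb,
    			     unsigned long mask_type, void *data)
    {
    	struct padata_cpumask *cpumask = data;

    	/* mask_type carries PADATA_CPU_PARALLEL, PADATA_CPU_SERIAL or both */
    	if (mask_type & PADATA_CPU_PARALLEL)
    		pr_info("padata: %u parallel worker cpus\n",
    			cpumask_weight(cpumask->pcpu));
    	if (mask_type & PADATA_CPU_SERIAL)
    		pr_info("padata: %u serial worker cpus\n",
    			cpumask_weight(cpumask->cbcpu));

    	return NOTIFY_OK;
    }

    static struct notifier_block my_cpumask_nb = {
    	.notifier_call = my_cpumask_change,
    };

    /* after padata_alloc_possible(wq): */
    /* padata_register_cpumask_notifier(pinst, &my_cpumask_nb); */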
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- include/linux/padata.h | 2 -- kernel/padata.c | 35 ----------------------------------- 2 files changed, 37 deletions(-) (limited to 'include') diff --git a/include/linux/padata.h b/include/linux/padata.h index 43db792f44dd..bdcd1e9eacea 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -176,8 +176,6 @@ extern void padata_free(struct padata_instance *pinst); extern int padata_do_parallel(struct padata_instance *pinst, struct padata_priv *padata, int cb_cpu); extern void padata_do_serial(struct padata_priv *padata); -extern int padata_get_cpumask(struct padata_instance *pinst, - int cpumask_type, struct cpumask *out_mask); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); extern int padata_set_cpumasks(struct padata_instance *pinst, diff --git a/kernel/padata.c b/kernel/padata.c index 1c8c1d1d301d..fd4679266ede 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -589,41 +589,6 @@ static bool padata_validate_cpumask(struct padata_instance *pinst, return true; } -/** - * padata_get_cpumask: Fetch serial or parallel cpumask from the - * given padata instance and copy it to @out_mask - * - * @pinst: A pointer to padata instance - * @cpumask_type: Specifies which cpumask will be copied. - * Possible values are PADATA_CPU_SERIAL *or* PADATA_CPU_PARALLEL - * corresponding to serial and parallel cpumask respectively. - * @out_mask: A pointer to cpumask structure where selected - * cpumask will be copied. - */ -int padata_get_cpumask(struct padata_instance *pinst, - int cpumask_type, struct cpumask *out_mask) -{ - struct parallel_data *pd; - int ret = 0; - - rcu_read_lock_bh(); - pd = rcu_dereference(pinst->pd); - switch (cpumask_type) { - case PADATA_CPU_SERIAL: - cpumask_copy(out_mask, pd->cpumask.cbcpu); - break; - case PADATA_CPU_PARALLEL: - cpumask_copy(out_mask, pd->cpumask.pcpu); - break; - default: - ret = -EINVAL; - } - - rcu_read_unlock_bh(); - return ret; -} -EXPORT_SYMBOL(padata_get_cpumask); - static int __padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, cpumask_var_t cbcpumask) -- cgit v1.2.3 From 7cfe249475fdd82ad3c2767a9b906cc775dab868 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 15 Jul 2010 10:47:14 +0100 Subject: ARM: AMBA: Add pclk support to AMBA bus infrastructure Some platforms gate the pclk (APB - the bus - clock) to the peripherals for power saving, along with the functional clock. When devices are accessed without pclk enabled, the kernel will oops. This gives them two options: 1. Leave all clocks on all the time. 2. Attempt to gate pclk along with the functional clock. (With some hardware, pclk and the functional clock are gated by a single bit in a register.) (1) has the disadvantage that it causes increased power usage, which is bad news for battery operated devices. (2) can lead to kernel oops if registers are accessed without the functional clock being enabled. So, introduce the apb_pclk signal in such a way existing drivers don't need to be updated. Essentially, this means we guarantee that: 1. pclk will be enabled whenever the driver is bound to a device - from probe() to remove() time. 2. pclk will also be enabled when reading the primecell IDs from the device. In order to allow drivers to be incrementally updated to achieve greater power savings, we provide two additional calls to allow drivers to manage the pclk - amba_pclk_enable()/amba_pclk_disable(). 
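For illustration only, a minimal sketch of how a driver might use the two new helpers for incremental power savings, on top of the probe()-to-remove() guarantee provided by the bus code. The macros come from the hunk in include/linux/amba/bus.h below; the idle/resume function names are made up for the example.

    /* Gate the APB clock while the device is idle between transfers. */
    static void my_driver_idle(struct amba_device *adev)
    {
    	/* No-op if the platform does not provide an "apb_pclk" clock */
    	amba_pclk_disable(adev);
    }

    static int my_driver_resume(struct amba_device *adev)
    {
    	/* Returns 0 when there is no pclk to manage */
    	return amba_pclk_enable(adev);
    }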
Signed-off-by: Russell King --- drivers/amba/bus.c | 88 +++++++++++++++++++++++++++++++++++++----------- include/linux/amba/bus.h | 11 ++++++ 2 files changed, 80 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index f60b2b6a0931..d31590e7011b 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -122,6 +122,31 @@ static int __init amba_init(void) postcore_initcall(amba_init); +static int amba_get_enable_pclk(struct amba_device *pcdev) +{ + struct clk *pclk = clk_get(&pcdev->dev, "apb_pclk"); + int ret; + + pcdev->pclk = pclk; + + if (IS_ERR(pclk)) + return PTR_ERR(pclk); + + ret = clk_enable(pclk); + if (ret) + clk_put(pclk); + + return ret; +} + +static void amba_put_disable_pclk(struct amba_device *pcdev) +{ + struct clk *pclk = pcdev->pclk; + + clk_disable(pclk); + clk_put(pclk); +} + /* * These are the device model conversion veneers; they convert the * device model structures to our more specific structures. @@ -130,17 +155,33 @@ static int amba_probe(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *pcdrv = to_amba_driver(dev->driver); - struct amba_id *id; + struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev); + int ret; - id = amba_lookup(pcdrv->id_table, pcdev); + do { + ret = amba_get_enable_pclk(pcdev); + if (ret) + break; + + ret = pcdrv->probe(pcdev, id); + if (ret == 0) + break; - return pcdrv->probe(pcdev, id); + amba_put_disable_pclk(pcdev); + } while (0); + + return ret; } static int amba_remove(struct device *dev) { + struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *drv = to_amba_driver(dev->driver); - return drv->remove(to_amba_device(dev)); + int ret = drv->remove(pcdev); + + amba_put_disable_pclk(pcdev); + + return ret; } static void amba_shutdown(struct device *dev) @@ -203,7 +244,6 @@ static void amba_device_release(struct device *dev) */ int amba_device_register(struct amba_device *dev, struct resource *parent) { - u32 pid, cid; u32 size; void __iomem *tmp; int i, ret; @@ -241,25 +281,35 @@ int amba_device_register(struct amba_device *dev, struct resource *parent) goto err_release; } - /* - * Read pid and cid based on size of resource - * they are located at end of region - */ - for (pid = 0, i = 0; i < 4; i++) - pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8); - for (cid = 0, i = 0; i < 4; i++) - cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8); + ret = amba_get_enable_pclk(dev); + if (ret == 0) { + u32 pid, cid; - iounmap(tmp); + /* + * Read pid and cid based on size of resource + * they are located at end of region + */ + for (pid = 0, i = 0; i < 4; i++) + pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << + (i * 8); + for (cid = 0, i = 0; i < 4; i++) + cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << + (i * 8); - if (cid == 0xb105f00d) - dev->periphid = pid; + amba_put_disable_pclk(dev); - if (!dev->periphid) { - ret = -ENODEV; - goto err_release; + if (cid == 0xb105f00d) + dev->periphid = pid; + + if (!dev->periphid) + ret = -ENODEV; } + iounmap(tmp); + + if (ret) + goto err_release; + ret = device_add(&dev->dev); if (ret) goto err_release; diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 8b1038607831..b0c174012436 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -14,14 +14,19 @@ #ifndef ASMARM_AMBA_H #define ASMARM_AMBA_H +#include #include +#include #include #define AMBA_NR_IRQS 2 +struct clk; + struct amba_device { struct device dev; struct resource res; + struct 
clk *pclk; u64 dma_mask; unsigned int periphid; unsigned int irq[AMBA_NR_IRQS]; @@ -59,6 +64,12 @@ struct amba_device *amba_find_device(const char *, struct device *, unsigned int int amba_request_regions(struct amba_device *, const char *); void amba_release_regions(struct amba_device *); +#define amba_pclk_enable(d) \ + (IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk)) + +#define amba_pclk_disable(d) \ + do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) + #define amba_config(d) (((d)->periphid >> 24) & 0xff) #define amba_rev(d) (((d)->periphid >> 20) & 0x0f) #define amba_manf(d) (((d)->periphid >> 12) & 0xff) -- cgit v1.2.3 From bf998156d24bcb127318ad5bf531ac3bdfcd6449 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 31 May 2010 14:28:19 +0800 Subject: KVM: Avoid killing userspace through guest SRAO MCE on unmapped pages In common cases, guest SRAO MCE will cause corresponding poisoned page be un-mapped and SIGBUS be sent to QEMU-KVM, then QEMU-KVM will relay the MCE to guest OS. But it is reported that if the poisoned page is accessed in guest after unmapping and before MCE is relayed to guest OS, userspace will be killed. The reason is as follows. Because poisoned page has been un-mapped, guest access will cause guest exit and kvm_mmu_page_fault will be called. kvm_mmu_page_fault can not get the poisoned page for fault address, so kernel and user space MMIO processing is tried in turn. In user MMIO processing, poisoned page is accessed again, then userspace is killed by force_sig_info. To fix the bug, kvm_mmu_page_fault send HWPOISON signal to QEMU-KVM and do not try kernel and user space MMIO processing for poisoned page. [xiao: fix warning introduced by avi] Reported-by: Max Asbock Signed-off-by: Huang Ying Signed-off-by: Xiao Guangrong Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 34 ++++++++++++++++++++++++++-------- arch/x86/kvm/paging_tmpl.h | 7 ++----- include/linux/kvm_host.h | 1 + include/linux/mm.h | 8 ++++++++ mm/memory-failure.c | 30 ++++++++++++++++++++++++++++++ virt/kvm/kvm_main.c | 30 ++++++++++++++++++++++++++++-- 6 files changed, 95 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index b1ed0a1a5913..b666d8d106a9 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -1960,6 +1961,27 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, return pt_write; } +static void kvm_send_hwpoison_signal(struct kvm *kvm, gfn_t gfn) +{ + char buf[1]; + void __user *hva; + int r; + + /* Touch the page, so send SIGBUS */ + hva = (void __user *)gfn_to_hva(kvm, gfn); + r = copy_from_user(buf, hva, 1); +} + +static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn) +{ + kvm_release_pfn_clean(pfn); + if (is_hwpoison_pfn(pfn)) { + kvm_send_hwpoison_signal(kvm, gfn); + return 0; + } + return 1; +} + static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) { int r; @@ -1983,10 +2005,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) pfn = gfn_to_pfn(vcpu->kvm, gfn); /* mmio */ - if (is_error_pfn(pfn)) { - kvm_release_pfn_clean(pfn); - return 1; - } + if (is_error_pfn(pfn)) + return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu, mmu_seq)) @@ -2198,10 +2218,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, mmu_seq = vcpu->kvm->mmu_notifier_seq; 
smp_rmb(); pfn = gfn_to_pfn(vcpu->kvm, gfn); - if (is_error_pfn(pfn)) { - kvm_release_pfn_clean(pfn); - return 1; - } + if (is_error_pfn(pfn)) + return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu, mmu_seq)) goto out_unlock; diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 2331bdc2b549..c7f27779c998 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -431,11 +431,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, pfn = gfn_to_pfn(vcpu->kvm, walker.gfn); /* mmio */ - if (is_error_pfn(pfn)) { - pgprintk("gfn %lx is mmio\n", walker.gfn); - kvm_release_pfn_clean(pfn); - return 1; - } + if (is_error_pfn(pfn)) + return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn); spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu, mmu_seq)) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7cb116afa1cd..a0e019769f5d 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -266,6 +266,7 @@ extern pfn_t bad_pfn; int is_error_page(struct page *page); int is_error_pfn(pfn_t pfn); +int is_hwpoison_pfn(pfn_t pfn); int kvm_is_error_hva(unsigned long addr); int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, diff --git a/include/linux/mm.h b/include/linux/mm.h index a2b48041b910..7a9ab7db1975 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1465,6 +1465,14 @@ extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p, int access); extern atomic_long_t mce_bad_pages; extern int soft_offline_page(struct page *page, int flags); +#ifdef CONFIG_MEMORY_FAILURE +int is_hwpoison_address(unsigned long addr); +#else +static inline int is_hwpoison_address(unsigned long addr) +{ + return 0; +} +#endif extern void dump_page(struct page *page); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 620b0b461593..378b0f61fd3c 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -45,6 +45,7 @@ #include #include #include +#include #include "internal.h" int sysctl_memory_failure_early_kill __read_mostly = 0; @@ -1296,3 +1297,32 @@ done: /* keep elevated page count for bad page */ return ret; } + +int is_hwpoison_address(unsigned long addr) +{ + pgd_t *pgdp; + pud_t pud, *pudp; + pmd_t pmd, *pmdp; + pte_t pte, *ptep; + swp_entry_t entry; + + pgdp = pgd_offset(current->mm, addr); + if (!pgd_present(*pgdp)) + return 0; + pudp = pud_offset(pgdp, addr); + pud = *pudp; + if (!pud_present(pud) || pud_large(pud)) + return 0; + pmdp = pmd_offset(pudp, addr); + pmd = *pmdp; + if (!pmd_present(pmd) || pmd_large(pmd)) + return 0; + ptep = pte_offset_map(pmdp, addr); + pte = *ptep; + pte_unmap(ptep); + if (!is_swap_pte(pte)) + return 0; + entry = pte_to_swp_entry(pte); + return is_hwpoison_entry(entry); +} +EXPORT_SYMBOL_GPL(is_hwpoison_address); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index f032806a212f..187aa8d984a7 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -92,6 +92,9 @@ static bool kvm_rebooting; static bool largepages_enabled = true; +struct page *hwpoison_page; +pfn_t hwpoison_pfn; + inline int kvm_is_mmio_pfn(pfn_t pfn) { if (pfn_valid(pfn)) { @@ -810,16 +813,22 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages); int is_error_page(struct page *page) { - return page == bad_page; + return page == bad_page || page == hwpoison_page; } EXPORT_SYMBOL_GPL(is_error_page); int is_error_pfn(pfn_t pfn) { - return pfn == bad_pfn; + return pfn == bad_pfn || pfn == 
hwpoison_pfn; } EXPORT_SYMBOL_GPL(is_error_pfn); +int is_hwpoison_pfn(pfn_t pfn) +{ + return pfn == hwpoison_pfn; +} +EXPORT_SYMBOL_GPL(is_hwpoison_pfn); + static inline unsigned long bad_hva(void) { return PAGE_OFFSET; @@ -945,6 +954,11 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr) if (unlikely(npages != 1)) { struct vm_area_struct *vma; + if (is_hwpoison_address(addr)) { + get_page(hwpoison_page); + return page_to_pfn(hwpoison_page); + } + down_read(¤t->mm->mmap_sem); vma = find_vma(current->mm, addr); @@ -2197,6 +2211,15 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, bad_pfn = page_to_pfn(bad_page); + hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + + if (hwpoison_page == NULL) { + r = -ENOMEM; + goto out_free_0; + } + + hwpoison_pfn = page_to_pfn(hwpoison_page); + if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { r = -ENOMEM; goto out_free_0; @@ -2269,6 +2292,8 @@ out_free_1: out_free_0a: free_cpumask_var(cpus_hardware_enabled); out_free_0: + if (hwpoison_page) + __free_page(hwpoison_page); __free_page(bad_page); out: kvm_arch_exit(); @@ -2290,6 +2315,7 @@ void kvm_exit(void) kvm_arch_hardware_unsetup(); kvm_arch_exit(); free_cpumask_var(cpus_hardware_enabled); + __free_page(hwpoison_page); __free_page(bad_page); } EXPORT_SYMBOL_GPL(kvm_exit); -- cgit v1.2.3 From d94e1dc9af60e3431a586c3edfbe42d8a0d3932b Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 3 May 2010 16:54:48 +0300 Subject: KVM: Get rid of KVM_REQ_KICK KVM_REQ_KICK poisons vcpu->requests by having a bit set during normal operation. This causes the fast path check for a clear vcpu->requests to fail all the time, triggering tons of atomic operations. Fix by replacing KVM_REQ_KICK with a vcpu->guest_mode atomic. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 17 ++++++++++------- include/linux/kvm_host.h | 1 + 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 63c87adcec48..fc5611b4007f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4604,13 +4604,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (vcpu->fpu_active) kvm_load_guest_fpu(vcpu); - local_irq_disable(); + atomic_set(&vcpu->guest_mode, 1); + smp_wmb(); - clear_bit(KVM_REQ_KICK, &vcpu->requests); - smp_mb__after_clear_bit(); + local_irq_disable(); - if (vcpu->requests || need_resched() || signal_pending(current)) { - set_bit(KVM_REQ_KICK, &vcpu->requests); + if (!atomic_read(&vcpu->guest_mode) || vcpu->requests + || need_resched() || signal_pending(current)) { + atomic_set(&vcpu->guest_mode, 0); + smp_wmb(); local_irq_enable(); preempt_enable(); r = 1; @@ -4655,7 +4657,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (hw_breakpoint_active()) hw_breakpoint_restore(); - set_bit(KVM_REQ_KICK, &vcpu->requests); + atomic_set(&vcpu->guest_mode, 0); + smp_wmb(); local_irq_enable(); ++vcpu->stat.exits; @@ -5580,7 +5583,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) me = get_cpu(); if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) - if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests)) + if (atomic_xchg(&vcpu->guest_mode, 0)) smp_send_reschedule(cpu); put_cpu(); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a0e019769f5d..2c62319727ef 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -81,6 +81,7 @@ struct kvm_vcpu { int vcpu_id; struct mutex mutex; int cpu; + atomic_t guest_mode; struct kvm_run *run; unsigned long requests; unsigned long 
guest_debug; -- cgit v1.2.3 From 2acf923e38fb6a4ce0c57115decbb38d334902ac Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Thu, 10 Jun 2010 11:27:12 +0800 Subject: KVM: VMX: Enable XSAVE/XRSTOR for guest This patch enable guest to use XSAVE/XRSTOR instructions. We assume that host_xcr0 would use all possible bits that OS supported. And we loaded xcr0 in the same way we handled fpu - do it as late as we can. Signed-off-by: Dexuan Cui Signed-off-by: Sheng Yang Reviewed-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 2 + arch/x86/include/asm/vmx.h | 1 + arch/x86/kvm/kvm_cache_regs.h | 6 ++ arch/x86/kvm/vmx.c | 13 ++++ arch/x86/kvm/x86.c | 130 +++++++++++++++++++++++++++++++++++++--- include/linux/kvm_host.h | 2 +- 6 files changed, 146 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0cd0f2923af5..91631b8b2090 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -302,6 +302,7 @@ struct kvm_vcpu_arch { } update_pte; struct fpu guest_fpu; + u64 xcr0; gva_t mmio_fault_cr2; struct kvm_pio_request pio; @@ -605,6 +606,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); +int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 96a5886d384e..9f0cbd987d50 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -267,6 +267,7 @@ enum vmcs_field { #define EXIT_REASON_EPT_VIOLATION 48 #define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_WBINVD 54 +#define EXIT_REASON_XSETBV 55 /* * Interruption-information format diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index d2a98f8f9af5..6491ac8e755b 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -71,4 +71,10 @@ static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu) return kvm_read_cr4_bits(vcpu, ~0UL); } +static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu) +{ + return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u) + | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32); +} + #endif diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 26ba61d6af8c..864a1b6d155a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -37,6 +37,8 @@ #include #include #include +#include +#include #include "trace.h" @@ -3390,6 +3392,16 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu) return 1; } +static int handle_xsetbv(struct kvm_vcpu *vcpu) +{ + u64 new_bv = kvm_read_edx_eax(vcpu); + u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); + + if (kvm_set_xcr(vcpu, index, new_bv) == 0) + skip_emulated_instruction(vcpu); + return 1; +} + static int handle_apic_access(struct kvm_vcpu *vcpu) { return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE; @@ -3668,6 +3680,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_WBINVD] = handle_wbinvd, + [EXIT_REASON_XSETBV] = handle_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, 
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b08c0052e332..b5e644701cc1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -65,6 +65,7 @@ (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \ + | X86_CR4_OSXSAVE \ | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)) #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) @@ -150,6 +151,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; +u64 __read_mostly host_xcr0; + +static inline u32 bit(int bitno) +{ + return 1 << (bitno & 31); +} + static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; @@ -474,6 +482,61 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) } EXPORT_SYMBOL_GPL(kvm_lmsw); +int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) +{ + u64 xcr0; + + /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ + if (index != XCR_XFEATURE_ENABLED_MASK) + return 1; + xcr0 = xcr; + if (kvm_x86_ops->get_cpl(vcpu) != 0) + return 1; + if (!(xcr0 & XSTATE_FP)) + return 1; + if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) + return 1; + if (xcr0 & ~host_xcr0) + return 1; + vcpu->arch.xcr0 = xcr0; + vcpu->guest_xcr0_loaded = 0; + return 0; +} + +int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) +{ + if (__kvm_set_xcr(vcpu, index, xcr)) { + kvm_inject_gp(vcpu, 0); + return 1; + } + return 0; +} +EXPORT_SYMBOL_GPL(kvm_set_xcr); + +static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 1, 0); + return best && (best->ecx & bit(X86_FEATURE_XSAVE)); +} + +static void update_cpuid(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 1, 0); + if (!best) + return; + + /* Update OSXSAVE bit */ + if (cpu_has_xsave && best->function == 0x1) { + best->ecx &= ~(bit(X86_FEATURE_OSXSAVE)); + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) + best->ecx |= bit(X86_FEATURE_OSXSAVE); + } +} + int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); @@ -482,6 +545,9 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (cr4 & CR4_RESERVED_BITS) return 1; + if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) + return 1; + if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; @@ -498,6 +564,9 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if ((cr4 ^ old_cr4) & pdptr_bits) kvm_mmu_reset_context(vcpu); + if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) + update_cpuid(vcpu); + return 0; } @@ -666,11 +735,6 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) } EXPORT_SYMBOL_GPL(kvm_get_dr); -static inline u32 bit(int bitno) -{ - return 1 << (bitno & 31); -} - /* * List of msr numbers which we expose to userspace through KVM_GET_MSRS * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. 
@@ -1814,6 +1878,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, r = 0; kvm_apic_set_version(vcpu); kvm_x86_ops->cpuid_update(vcpu); + update_cpuid(vcpu); out_free: vfree(cpuid_entries); @@ -1837,6 +1902,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, vcpu->arch.cpuid_nent = cpuid->nent; kvm_apic_set_version(vcpu); kvm_x86_ops->cpuid_update(vcpu); + update_cpuid(vcpu); return 0; out: @@ -1917,7 +1983,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ | 0 /* Reserved, DCA */ | F(XMM4_1) | F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | - 0 /* Reserved, XSAVE, OSXSAVE */; + 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */; /* cpuid 0x80000001.ecx */ const u32 kvm_supported_word6_x86_features = F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ | @@ -1932,7 +1998,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, switch (function) { case 0: - entry->eax = min(entry->eax, (u32)0xb); + entry->eax = min(entry->eax, (u32)0xd); break; case 1: entry->edx &= kvm_supported_word0_x86_features; @@ -1990,6 +2056,20 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, } break; } + case 0xd: { + int i; + + entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + for (i = 1; *nent < maxnent; ++i) { + if (entry[i - 1].eax == 0 && i != 2) + break; + do_cpuid_1_ent(&entry[i], function, i); + entry[i].flags |= + KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + ++*nent; + } + break; + } case KVM_CPUID_SIGNATURE: { char signature[12] = "KVMKVMKVM\0\0"; u32 *sigptr = (u32 *)signature; @@ -4125,6 +4205,9 @@ int kvm_arch_init(void *opaque) perf_register_guest_info_callbacks(&kvm_guest_cbs); + if (cpu_has_xsave) + host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); + return 0; out: @@ -4523,6 +4606,25 @@ static void inject_pending_event(struct kvm_vcpu *vcpu) } } +static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) +{ + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && + !vcpu->guest_xcr0_loaded) { + /* kvm_set_xcr() also depends on this */ + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); + vcpu->guest_xcr0_loaded = 1; + } +} + +static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) +{ + if (vcpu->guest_xcr0_loaded) { + if (vcpu->arch.xcr0 != host_xcr0) + xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); + vcpu->guest_xcr0_loaded = 0; + } +} + static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; @@ -4568,6 +4670,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) kvm_x86_ops->prepare_guest_switch(vcpu); if (vcpu->fpu_active) kvm_load_guest_fpu(vcpu); + kvm_load_guest_xcr0(vcpu); atomic_set(&vcpu->guest_mode, 1); smp_wmb(); @@ -5124,6 +5227,11 @@ int fx_init(struct kvm_vcpu *vcpu) fpu_finit(&vcpu->arch.guest_fpu); + /* + * Ensure guest xcr0 is valid for loading + */ + vcpu->arch.xcr0 = XSTATE_FP; + vcpu->arch.cr0 |= X86_CR0_ET; return 0; @@ -5140,6 +5248,12 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) if (vcpu->guest_fpu_loaded) return; + /* + * Restore all possible states in the guest, + * and assume host would use all available bits. + * Guest xcr0 would be loaded later. 
+ */ + kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; unlazy_fpu(current); fpu_restore_checking(&vcpu->arch.guest_fpu); @@ -5148,6 +5262,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { + kvm_put_guest_xcr0(vcpu); + if (!vcpu->guest_fpu_loaded) return; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2c62319727ef..2d96555cd4ed 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -88,7 +88,7 @@ struct kvm_vcpu { int srcu_idx; int fpu_active; - int guest_fpu_loaded; + int guest_fpu_loaded, guest_xcr0_loaded; wait_queue_head_t wq; int sigset_active; sigset_t sigset; -- cgit v1.2.3 From 2d5b5a665508c60577c1088e0405850a965b6795 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Sun, 13 Jun 2010 17:29:39 +0800 Subject: KVM: x86: XSAVE/XRSTOR live migration support This patch enable save/restore of xsave state. Signed-off-by: Sheng Yang Signed-off-by: Marcelo Tosatti --- Documentation/kvm/api.txt | 74 +++++++++++++++++++++++ arch/x86/include/asm/kvm.h | 22 +++++++ arch/x86/include/asm/xsave.h | 7 ++- arch/x86/kvm/x86.c | 139 +++++++++++++++++++++++++++++++++++++++++++ include/linux/kvm.h | 12 ++++ 5 files changed, 252 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt index 159b4efe1b0e..ffba03f55bdf 100644 --- a/Documentation/kvm/api.txt +++ b/Documentation/kvm/api.txt @@ -922,6 +922,80 @@ Define which vcpu is the Bootstrap Processor (BSP). Values are the same as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default is vcpu 0. +4.41 KVM_GET_XSAVE + +Capability: KVM_CAP_XSAVE +Architectures: x86 +Type: vcpu ioctl +Parameters: struct kvm_xsave (out) +Returns: 0 on success, -1 on error + +struct kvm_xsave { + __u32 region[1024]; +}; + +This ioctl would copy current vcpu's xsave struct to the userspace. + +4.42 KVM_SET_XSAVE + +Capability: KVM_CAP_XSAVE +Architectures: x86 +Type: vcpu ioctl +Parameters: struct kvm_xsave (in) +Returns: 0 on success, -1 on error + +struct kvm_xsave { + __u32 region[1024]; +}; + +This ioctl would copy userspace's xsave struct to the kernel. + +4.43 KVM_GET_XCRS + +Capability: KVM_CAP_XCRS +Architectures: x86 +Type: vcpu ioctl +Parameters: struct kvm_xcrs (out) +Returns: 0 on success, -1 on error + +struct kvm_xcr { + __u32 xcr; + __u32 reserved; + __u64 value; +}; + +struct kvm_xcrs { + __u32 nr_xcrs; + __u32 flags; + struct kvm_xcr xcrs[KVM_MAX_XCRS]; + __u64 padding[16]; +}; + +This ioctl would copy current vcpu's xcrs to the userspace. + +4.44 KVM_SET_XCRS + +Capability: KVM_CAP_XCRS +Architectures: x86 +Type: vcpu ioctl +Parameters: struct kvm_xcrs (in) +Returns: 0 on success, -1 on error + +struct kvm_xcr { + __u32 xcr; + __u32 reserved; + __u64 value; +}; + +struct kvm_xcrs { + __u32 nr_xcrs; + __u32 flags; + struct kvm_xcr xcrs[KVM_MAX_XCRS]; + __u64 padding[16]; +}; + +This ioctl would set vcpu's xcr to the value userspace specified. + 5. The kvm_run structure Application code obtains a pointer to the kvm_run structure by diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index ff90055c7f0b..4d8dcbdfc120 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -22,6 +22,8 @@ #define __KVM_HAVE_XEN_HVM #define __KVM_HAVE_VCPU_EVENTS #define __KVM_HAVE_DEBUGREGS +#define __KVM_HAVE_XSAVE +#define __KVM_HAVE_XCRS /* Architectural interrupt line count. 
*/ #define KVM_NR_INTERRUPTS 256 @@ -299,4 +301,24 @@ struct kvm_debugregs { __u64 reserved[9]; }; +/* for KVM_CAP_XSAVE */ +struct kvm_xsave { + __u32 region[1024]; +}; + +#define KVM_MAX_XCRS 16 + +struct kvm_xcr { + __u32 xcr; + __u32 reserved; + __u64 value; +}; + +struct kvm_xcrs { + __u32 nr_xcrs; + __u32 flags; + struct kvm_xcr xcrs[KVM_MAX_XCRS]; + __u64 padding[16]; +}; + #endif /* _ASM_X86_KVM_H */ diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 29ee4e4c64cf..32c36668fa7b 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h @@ -13,8 +13,11 @@ #define FXSAVE_SIZE 512 -#define XSTATE_YMM_SIZE 256 -#define XSTATE_YMM_OFFSET (512 + 64) +#define XSAVE_HDR_SIZE 64 +#define XSAVE_HDR_OFFSET FXSAVE_SIZE + +#define XSAVE_YMM_SIZE 256 +#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) /* * These are the features that the OS can handle currently. diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 795999e1ac19..0c8dc9614e7d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1680,6 +1680,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: + case KVM_CAP_XSAVE: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -1703,6 +1704,9 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; + case KVM_CAP_XCRS: + r = cpu_has_xsave; + break; default: r = 0; break; @@ -2355,6 +2359,77 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, return 0; } +static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, + struct kvm_xsave *guest_xsave) +{ + if (cpu_has_xsave) + memcpy(guest_xsave->region, + &vcpu->arch.guest_fpu.state->xsave, + sizeof(struct xsave_struct)); + else { + memcpy(guest_xsave->region, + &vcpu->arch.guest_fpu.state->fxsave, + sizeof(struct i387_fxsave_struct)); + *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = + XSTATE_FPSSE; + } +} + +static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, + struct kvm_xsave *guest_xsave) +{ + u64 xstate_bv = + *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; + + if (cpu_has_xsave) + memcpy(&vcpu->arch.guest_fpu.state->xsave, + guest_xsave->region, sizeof(struct xsave_struct)); + else { + if (xstate_bv & ~XSTATE_FPSSE) + return -EINVAL; + memcpy(&vcpu->arch.guest_fpu.state->fxsave, + guest_xsave->region, sizeof(struct i387_fxsave_struct)); + } + return 0; +} + +static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, + struct kvm_xcrs *guest_xcrs) +{ + if (!cpu_has_xsave) { + guest_xcrs->nr_xcrs = 0; + return; + } + + guest_xcrs->nr_xcrs = 1; + guest_xcrs->flags = 0; + guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; + guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; +} + +static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, + struct kvm_xcrs *guest_xcrs) +{ + int i, r = 0; + + if (!cpu_has_xsave) + return -EINVAL; + + if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) + return -EINVAL; + + for (i = 0; i < guest_xcrs->nr_xcrs; i++) + /* Only support XCR0 currently */ + if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) { + r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, + guest_xcrs->xcrs[0].value); + break; + } + if (r) + r = -EINVAL; + return r; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -2556,6 +2631,70 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); break; } + 
case KVM_GET_XSAVE: { + struct kvm_xsave *xsave; + + xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); + r = -ENOMEM; + if (!xsave) + break; + + kvm_vcpu_ioctl_x86_get_xsave(vcpu, xsave); + + r = -EFAULT; + if (copy_to_user(argp, xsave, sizeof(struct kvm_xsave))) + break; + r = 0; + break; + } + case KVM_SET_XSAVE: { + struct kvm_xsave *xsave; + + xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); + r = -ENOMEM; + if (!xsave) + break; + + r = -EFAULT; + if (copy_from_user(xsave, argp, sizeof(struct kvm_xsave))) + break; + + r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, xsave); + break; + } + case KVM_GET_XCRS: { + struct kvm_xcrs *xcrs; + + xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); + r = -ENOMEM; + if (!xcrs) + break; + + kvm_vcpu_ioctl_x86_get_xcrs(vcpu, xcrs); + + r = -EFAULT; + if (copy_to_user(argp, xcrs, + sizeof(struct kvm_xcrs))) + break; + r = 0; + break; + } + case KVM_SET_XCRS: { + struct kvm_xcrs *xcrs; + + xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); + r = -ENOMEM; + if (!xcrs) + break; + + r = -EFAULT; + if (copy_from_user(xcrs, argp, + sizeof(struct kvm_xcrs))) + break; + + r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, xcrs); + break; + } default: r = -EINVAL; } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 23ea02253900..6fd40f540a8e 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -524,6 +524,12 @@ struct kvm_enable_cap { #define KVM_CAP_PPC_OSI 52 #define KVM_CAP_PPC_UNSET_IRQ 53 #define KVM_CAP_ENABLE_CAP 54 +#ifdef __KVM_HAVE_XSAVE +#define KVM_CAP_XSAVE 55 +#endif +#ifdef __KVM_HAVE_XCRS +#define KVM_CAP_XCRS 56 +#endif #ifdef KVM_CAP_IRQ_ROUTING @@ -714,6 +720,12 @@ struct kvm_clock_data { #define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs) #define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs) #define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap) +/* Available with KVM_CAP_XSAVE */ +#define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave) +#define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave) +/* Available with KVM_CAP_XCRS */ +#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs) +#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs) #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) -- cgit v1.2.3 From a1f4d39500ad8ed61825eff061debff42386ab5b Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 21 Jun 2010 11:44:20 +0300 Subject: KVM: Remove memory alias support As advertised in feature-removal-schedule.txt. Equivalent support is provided by overlapping memory regions. Signed-off-by: Avi Kivity --- Documentation/feature-removal-schedule.txt | 11 --- Documentation/kvm/api.txt | 12 +-- arch/ia64/kvm/kvm-ia64.c | 5 -- arch/powerpc/kvm/powerpc.c | 5 -- arch/s390/kvm/kvm-s390.c | 5 -- arch/x86/include/asm/kvm_host.h | 21 ----- arch/x86/kvm/mmu.c | 17 +--- arch/x86/kvm/paging_tmpl.h | 3 +- arch/x86/kvm/x86.c | 125 ----------------------------- arch/x86/kvm/x86.h | 7 -- include/linux/kvm.h | 1 + include/linux/kvm_host.h | 6 -- virt/kvm/kvm_main.c | 18 +---- 13 files changed, 11 insertions(+), 225 deletions(-) (limited to 'include') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 1571c0c83dba..ad1e90dd2780 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -538,17 +538,6 @@ Who: Jan Kiszka ---------------------------- -What: KVM memory aliases support -When: July 2010 -Why: Memory aliasing support is used for speeding up guest vga access - through the vga windows. 
- - Modern userspace no longer uses this feature, so it's just bitrotted - code and can be removed with no impact. -Who: Avi Kivity - ----------------------------- - What: xtime, wall_to_monotonic When: 2.6.36+ Files: kernel/time/timekeeping.c include/linux/time.h diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt index ffba03f55bdf..7e415943a11e 100644 --- a/Documentation/kvm/api.txt +++ b/Documentation/kvm/api.txt @@ -226,17 +226,7 @@ Type: vm ioctl Parameters: struct kvm_memory_alias (in) Returns: 0 (success), -1 (error) -struct kvm_memory_alias { - __u32 slot; /* this has a different namespace than memory slots */ - __u32 flags; - __u64 guest_phys_addr; - __u64 memory_size; - __u64 target_phys_addr; -}; - -Defines a guest physical address space region as an alias to another -region. Useful for aliased address, for example the VGA low memory -window. Should not be used with userspace memory. +This ioctl is obsolete and has been removed. 4.9 KVM_RUN diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 91760e80e268..bd510beb43af 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1946,11 +1946,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) return vcpu->arch.timer_fired; } -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) -{ - return gfn; -} - int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index b5ebdfbed20b..72a4ad86ee91 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -36,11 +36,6 @@ #define CREATE_TRACE_POINTS #include "trace.h" -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) -{ - return gfn; -} - int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) { return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 08a3b35d30be..4fe68650535c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -723,11 +723,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm) { } -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) -{ - return gfn; -} - static int __init kvm_s390_init(void) { int ret; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 2ec2e27a403e..a57cdeacc4d2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -69,8 +69,6 @@ #define IOPL_SHIFT 12 -#define KVM_ALIAS_SLOTS 4 - #define KVM_PERMILLE_MMU_PAGES 20 #define KVM_MIN_ALLOC_MMU_PAGES 64 #define KVM_MMU_HASH_SHIFT 10 @@ -362,24 +360,7 @@ struct kvm_vcpu_arch { u64 hv_vapic; }; -struct kvm_mem_alias { - gfn_t base_gfn; - unsigned long npages; - gfn_t target_gfn; -#define KVM_ALIAS_INVALID 1UL - unsigned long flags; -}; - -#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION - -struct kvm_mem_aliases { - struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; - int naliases; -}; - struct kvm_arch { - struct kvm_mem_aliases *aliases; - unsigned int n_free_mmu_pages; unsigned int n_requested_mmu_pages; unsigned int n_alloc_mmu_pages; @@ -655,8 +636,6 @@ void kvm_disable_tdp(void); int complete_pio(struct kvm_vcpu *vcpu); bool kvm_check_iopl(struct kvm_vcpu *vcpu); -struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); - static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) { struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8c2f580956d9..c5501bc10106 100644 --- a/arch/x86/kvm/mmu.c +++ 
b/arch/x86/kvm/mmu.c @@ -434,9 +434,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn) int *write_count; int i; - gfn = unalias_gfn(kvm, gfn); - - slot = gfn_to_memslot_unaliased(kvm, gfn); + slot = gfn_to_memslot(kvm, gfn); for (i = PT_DIRECTORY_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { write_count = slot_largepage_idx(gfn, slot, i); @@ -450,8 +448,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) int *write_count; int i; - gfn = unalias_gfn(kvm, gfn); - slot = gfn_to_memslot_unaliased(kvm, gfn); + slot = gfn_to_memslot(kvm, gfn); for (i = PT_DIRECTORY_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { write_count = slot_largepage_idx(gfn, slot, i); @@ -467,8 +464,7 @@ static int has_wrprotected_page(struct kvm *kvm, struct kvm_memory_slot *slot; int *largepage_idx; - gfn = unalias_gfn(kvm, gfn); - slot = gfn_to_memslot_unaliased(kvm, gfn); + slot = gfn_to_memslot(kvm, gfn); if (slot) { largepage_idx = slot_largepage_idx(gfn, slot, level); return *largepage_idx; @@ -521,7 +517,6 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) /* * Take gfn and return the reverse mapping to it. - * Note: gfn must be unaliased before this function get called */ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) @@ -561,7 +556,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) if (!is_rmap_spte(*spte)) return count; - gfn = unalias_gfn(vcpu->kvm, gfn); sp = page_header(__pa(spte)); kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); @@ -698,7 +692,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn) u64 *spte; int i, write_protected = 0; - gfn = unalias_gfn(kvm, gfn); rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL); spte = rmap_next(kvm, rmapp, NULL); @@ -885,7 +878,6 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) sp = page_header(__pa(spte)); - gfn = unalias_gfn(vcpu->kvm, gfn); rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); kvm_unmap_rmapp(vcpu->kvm, rmapp, 0); @@ -3510,8 +3502,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu) if (sp->unsync) continue; - gfn = unalias_gfn(vcpu->kvm, sp->gfn); - slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn); + slot = gfn_to_memslot(vcpu->kvm, sp->gfn); rmapp = &slot->rmap[gfn - slot->base_gfn]; spte = rmap_next(vcpu->kvm, rmapp, NULL); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 863920f649fb..a21a86ef9e20 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -576,7 +576,6 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, * Using the cached information from sp->gfns is safe because: * - The spte has a reference to the struct page, so the pfn for a given gfn * can't change unless all sptes pointing to it are nuked first. - * - Alias changes zap the entire shadow cache. 
*/ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, bool clear_unsync) @@ -611,7 +610,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, return -EINVAL; gfn = gpte_to_gfn(gpte); - if (unalias_gfn(vcpu->kvm, gfn) != sp->gfns[i] || + if (gfn != sp->gfns[i] || !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) { u64 nonpresent; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8e60b6c9c0b0..62596d373a49 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2740,115 +2740,6 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) return kvm->arch.n_alloc_mmu_pages; } -gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn) -{ - int i; - struct kvm_mem_alias *alias; - struct kvm_mem_aliases *aliases; - - aliases = kvm_aliases(kvm); - - for (i = 0; i < aliases->naliases; ++i) { - alias = &aliases->aliases[i]; - if (alias->flags & KVM_ALIAS_INVALID) - continue; - if (gfn >= alias->base_gfn - && gfn < alias->base_gfn + alias->npages) - return alias->target_gfn + gfn - alias->base_gfn; - } - return gfn; -} - -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) -{ - int i; - struct kvm_mem_alias *alias; - struct kvm_mem_aliases *aliases; - - aliases = kvm_aliases(kvm); - - for (i = 0; i < aliases->naliases; ++i) { - alias = &aliases->aliases[i]; - if (gfn >= alias->base_gfn - && gfn < alias->base_gfn + alias->npages) - return alias->target_gfn + gfn - alias->base_gfn; - } - return gfn; -} - -/* - * Set a new alias region. Aliases map a portion of physical memory into - * another portion. This is useful for memory windows, for example the PC - * VGA region. - */ -static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, - struct kvm_memory_alias *alias) -{ - int r, n; - struct kvm_mem_alias *p; - struct kvm_mem_aliases *aliases, *old_aliases; - - r = -EINVAL; - /* General sanity checks */ - if (alias->memory_size & (PAGE_SIZE - 1)) - goto out; - if (alias->guest_phys_addr & (PAGE_SIZE - 1)) - goto out; - if (alias->slot >= KVM_ALIAS_SLOTS) - goto out; - if (alias->guest_phys_addr + alias->memory_size - < alias->guest_phys_addr) - goto out; - if (alias->target_phys_addr + alias->memory_size - < alias->target_phys_addr) - goto out; - - r = -ENOMEM; - aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL); - if (!aliases) - goto out; - - mutex_lock(&kvm->slots_lock); - - /* invalidate any gfn reference in case of deletion/shrinking */ - memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases)); - aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID; - old_aliases = kvm->arch.aliases; - rcu_assign_pointer(kvm->arch.aliases, aliases); - synchronize_srcu_expedited(&kvm->srcu); - kvm_mmu_zap_all(kvm); - kfree(old_aliases); - - r = -ENOMEM; - aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL); - if (!aliases) - goto out_unlock; - - memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases)); - - p = &aliases->aliases[alias->slot]; - p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; - p->npages = alias->memory_size >> PAGE_SHIFT; - p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT; - p->flags &= ~(KVM_ALIAS_INVALID); - - for (n = KVM_ALIAS_SLOTS; n > 0; --n) - if (aliases->aliases[n - 1].npages) - break; - aliases->naliases = n; - - old_aliases = kvm->arch.aliases; - rcu_assign_pointer(kvm->arch.aliases, aliases); - synchronize_srcu_expedited(&kvm->srcu); - kfree(old_aliases); - r = 0; - -out_unlock: - mutex_unlock(&kvm->slots_lock); -out: - return r; -} - static int 
kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; @@ -3056,7 +2947,6 @@ long kvm_arch_vm_ioctl(struct file *filp, union { struct kvm_pit_state ps; struct kvm_pit_state2 ps2; - struct kvm_memory_alias alias; struct kvm_pit_config pit_config; } u; @@ -3101,14 +2991,6 @@ long kvm_arch_vm_ioctl(struct file *filp, case KVM_GET_NR_MMU_PAGES: r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); break; - case KVM_SET_MEMORY_ALIAS: - r = -EFAULT; - if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias))) - goto out; - r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias); - if (r) - goto out; - break; case KVM_CREATE_IRQCHIP: { struct kvm_pic *vpic; @@ -5559,12 +5441,6 @@ struct kvm *kvm_arch_create_vm(void) if (!kvm) return ERR_PTR(-ENOMEM); - kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL); - if (!kvm->arch.aliases) { - kfree(kvm); - return ERR_PTR(-ENOMEM); - } - INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); @@ -5622,7 +5498,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm) if (kvm->arch.ept_identity_pagetable) put_page(kvm->arch.ept_identity_pagetable); cleanup_srcu_struct(&kvm->srcu); - kfree(kvm->arch.aliases); kfree(kvm); } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index f4b54458285b..b7a404722d2b 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -65,13 +65,6 @@ static inline int is_paging(struct kvm_vcpu *vcpu) return kvm_read_cr0_bits(vcpu, X86_CR0_PG); } -static inline struct kvm_mem_aliases *kvm_aliases(struct kvm *kvm) -{ - return rcu_dereference_check(kvm->arch.aliases, - srcu_read_lock_held(&kvm->srcu) - || lockdep_is_held(&kvm->slots_lock)); -} - void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 6fd40f540a8e..636fc381c897 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -619,6 +619,7 @@ struct kvm_clock_data { */ #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) +/* KVM_SET_MEMORY_ALIAS is obsolete: */ #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2d96555cd4ed..240e460777bc 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -286,8 +286,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, int user_alloc); void kvm_disable_largepages(void); void kvm_arch_flush_shadow(struct kvm *kvm); -gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); -gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn); struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); @@ -564,10 +562,6 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se } #endif -#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION -#define unalias_gfn_instantiation unalias_gfn -#endif - #ifdef CONFIG_HAVE_KVM_IRQCHIP #define KVM_MAX_IRQ_ROUTES 1024 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 84a090644d9d..65417e3d8462 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -841,7 +841,7 @@ int kvm_is_error_hva(unsigned long addr) } EXPORT_SYMBOL_GPL(kvm_is_error_hva); -struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) +struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) { int i; struct kvm_memslots *slots = 
kvm_memslots(kvm); @@ -855,20 +855,13 @@ struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) } return NULL; } -EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased); - -struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) -{ - gfn = unalias_gfn(kvm, gfn); - return gfn_to_memslot_unaliased(kvm, gfn); -} +EXPORT_SYMBOL_GPL(gfn_to_memslot); int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) { int i; struct kvm_memslots *slots = kvm_memslots(kvm); - gfn = unalias_gfn_instantiation(kvm, gfn); for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { struct kvm_memory_slot *memslot = &slots->memslots[i]; @@ -913,7 +906,6 @@ int memslot_id(struct kvm *kvm, gfn_t gfn) struct kvm_memslots *slots = kvm_memslots(kvm); struct kvm_memory_slot *memslot = NULL; - gfn = unalias_gfn(kvm, gfn); for (i = 0; i < slots->nmemslots; ++i) { memslot = &slots->memslots[i]; @@ -934,8 +926,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; - gfn = unalias_gfn_instantiation(kvm, gfn); - slot = gfn_to_memslot_unaliased(kvm, gfn); + slot = gfn_to_memslot(kvm, gfn); if (!slot || slot->flags & KVM_MEMSLOT_INVALID) return bad_hva(); return gfn_to_hva_memslot(slot, gfn); @@ -1202,8 +1193,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *memslot; - gfn = unalias_gfn(kvm, gfn); - memslot = gfn_to_memslot_unaliased(kvm, gfn); + memslot = gfn_to_memslot(kvm, gfn); if (memslot && memslot->dirty_bitmap) { unsigned long rel_gfn = gfn - memslot->base_gfn; -- cgit v1.2.3 From a8eeb04a44dd6dc4c8158953d9bae48849c9a188 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 10 May 2010 12:34:53 +0300 Subject: KVM: Add mini-API for vcpu->requests Makes it a little more readable and hackable. Signed-off-by: Avi Kivity --- arch/x86/kvm/lapic.c | 2 +- arch/x86/kvm/mmu.c | 6 +++--- arch/x86/kvm/svm.c | 2 +- arch/x86/kvm/timer.c | 2 +- arch/x86/kvm/vmx.c | 2 +- arch/x86/kvm/x86.c | 27 +++++++++++++-------------- include/linux/kvm_host.h | 15 +++++++++++++++ virt/kvm/kvm_main.c | 4 ++-- 8 files changed, 37 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 49573c78c24b..77d8c0f4817d 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -534,7 +534,7 @@ static void __report_tpr_access(struct kvm_lapic *apic, bool write) struct kvm_vcpu *vcpu = apic->vcpu; struct kvm_run *run = vcpu->run; - set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests); + kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu); run->tpr_access.rip = kvm_rip_read(vcpu); run->tpr_access.is_write = write; } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index c5501bc10106..690a7fc58c17 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1378,7 +1378,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, mmu_page_add_parent_pte(vcpu, sp, parent_pte); if (sp->unsync_children) { - set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests); + kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); kvm_mmu_mark_parents_unsync(sp); } else if (sp->unsync) kvm_mmu_mark_parents_unsync(sp); @@ -2131,7 +2131,7 @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) int ret = 0; if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { - set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests); + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); ret = 1; } @@ -2329,7 +2329,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu) void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; - set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); 
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } static void paging_new_cr3(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f7a6fdcf8ef3..587b99d37d44 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1494,7 +1494,7 @@ static void svm_handle_mce(struct vcpu_svm *svm) */ pr_err("KVM: Guest triggered AMD Erratum 383\n"); - set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests); + kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); return; } diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c index 564548fbb3d6..e16a0dbe74d8 100644 --- a/arch/x86/kvm/timer.c +++ b/arch/x86/kvm/timer.c @@ -32,7 +32,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer) if (ktimer->reinject || !atomic_read(&ktimer->pending)) { atomic_inc(&ktimer->pending); /* FIXME: this code should not know anything about vcpus */ - set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); + kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); } if (waitqueue_active(q)) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 345a35470511..661c6e199b4a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -899,7 +899,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) unsigned long sysenter_esp; kvm_migrate_timers(vcpu); - set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); local_irq_disable(); list_add(&vmx->local_vcpus_link, &per_cpu(vcpus_on_cpu, cpu)); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9be6e4e5e8ee..7ef44107a14a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -296,7 +296,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, prev_nr = vcpu->arch.exception.nr; if (prev_nr == DF_VECTOR) { /* triple fault -> shutdown */ - set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests); + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } class1 = exception_class(prev_nr); @@ -948,7 +948,7 @@ static int kvm_request_guest_time_update(struct kvm_vcpu *v) if (!vcpu->time_page) return 0; - set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests); + kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v); return 1; } @@ -2253,7 +2253,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, printk(KERN_DEBUG "kvm: set_mce: " "injects mce exception while " "previous one is in progress!\n"); - set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests); + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } if (banks[1] & MCI_STATUS_VAL) @@ -4617,7 +4617,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->run->request_interrupt_window; if (vcpu->requests) - if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) + if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) kvm_mmu_unload(vcpu); r = kvm_mmu_reload(vcpu); @@ -4625,26 +4625,25 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) goto out; if (vcpu->requests) { - if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests)) + if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); - if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests)) + if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu)) kvm_write_guest_time(vcpu); - if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests)) + if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) kvm_mmu_sync_roots(vcpu); - if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_x86_ops->tlb_flush(vcpu); - if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS, - &vcpu->requests)) { + if 
(kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; goto out; } - if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) { + if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; r = 0; goto out; } - if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) { + if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } @@ -4773,7 +4772,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); kvm_vcpu_block(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) + if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { switch(vcpu->arch.mp_state) { case KVM_MP_STATE_HALTED: @@ -5255,7 +5254,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) vcpu->guest_fpu_loaded = 0; fpu_save_init(&vcpu->arch.guest_fpu); ++vcpu->stat.fpu_reload; - set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests); + kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); trace_kvm_fpu(0); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 240e460777bc..c8a9d628898e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -624,5 +624,20 @@ static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, #endif +static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +{ + set_bit(req, &vcpu->requests); +} + +static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu) +{ + return test_and_set_bit(req, &vcpu->requests); +} + +static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) +{ + return test_and_clear_bit(req, &vcpu->requests); +} + #endif diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 65417e3d8462..5bd2f34ba576 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -145,7 +145,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) raw_spin_lock(&kvm->requests_lock); me = smp_processor_id(); kvm_for_each_vcpu(i, vcpu, kvm) { - if (test_and_set_bit(req, &vcpu->requests)) + if (kvm_make_check_request(req, vcpu)) continue; cpu = vcpu->cpu; if (cpus != NULL && cpu != -1 && cpu != me) @@ -1212,7 +1212,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); if (kvm_arch_vcpu_runnable(vcpu)) { - set_bit(KVM_REQ_UNHALT, &vcpu->requests); + kvm_make_request(KVM_REQ_UNHALT, vcpu); break; } if (kvm_cpu_has_pending_timer(vcpu)) -- cgit v1.2.3 From 0719837c0832a7b305e42327caa7d330462360ea Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 10 May 2010 13:08:26 +0300 Subject: KVM: Reduce atomic operations on vcpu->requests Usually the vcpu->requests bitmap is sparse, so a test_and_clear_bit() for each request generates a large number of unneeded atomics if a bit is set. Replace with a separate test/clear sequence. This is safe since there is no clear_bit() outside the vcpu thread. 
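As a standalone illustration of the pattern described above, a minimal sketch in plain C (invented names such as demo_vcpu, and ordinary operations instead of the kernel's atomic bitops, so it models only the control flow):

#include <stdbool.h>

struct demo_vcpu {
	unsigned long requests;		/* one bit per pending request */
};

/* Producer side: any thread may raise a request (set_bit in the kernel). */
static void demo_make_request(int req, struct demo_vcpu *vcpu)
{
	vcpu->requests |= 1UL << req;
}

/*
 * Consumer side: only the vcpu thread ever clears bits, so it is safe to
 * test first and clear only when the bit was actually set.  In the common
 * case the bitmap is empty and no read-modify-write happens at all.
 */
static bool demo_check_request(int req, struct demo_vcpu *vcpu)
{
	if (!(vcpu->requests & (1UL << req)))
		return false;
	vcpu->requests &= ~(1UL << req);
	return true;
}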
Signed-off-by: Avi Kivity --- include/linux/kvm_host.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c8a9d628898e..e820eb579108 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -636,7 +636,12 @@ static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu) static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) { - return test_and_clear_bit(req, &vcpu->requests); + if (test_bit(req, &vcpu->requests)) { + clear_bit(req, &vcpu->requests); + return true; + } else { + return false; + } } #endif -- cgit v1.2.3 From e36d96f7cfaa71870c407131eb4fbd38ea285c01 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 21 Jun 2010 10:56:36 +0300 Subject: KVM: Keep slot ID in memory slot structure May be used for distinguishing between internal and user slots, or for sorting slots in size order. Signed-off-by: Avi Kivity --- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e820eb579108..e796326f3646 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -124,6 +124,7 @@ struct kvm_memory_slot { } *lpage_info[KVM_NR_PAGE_SIZES - 1]; unsigned long userspace_addr; int user_alloc; + int id; }; static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5bd2f34ba576..74f731920945 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -570,6 +570,7 @@ int __kvm_set_memory_region(struct kvm *kvm, new = old = *memslot; + new.id = mem->slot; new.base_gfn = base_gfn; new.npages = npages; new.flags = mem->flags; -- cgit v1.2.3 From 77a63f3d1e0a3e7ede8d10f569e8481b13ff47c5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 1 Aug 2010 13:40:40 -0400 Subject: NFS: Fix a typo in include/linux/nfs_fs.h nfs_commit_inode() needs to be defined irrespectively of whether or not we are supporting NFSv3 and NFSv4. Allow the compiler to optimise away code in the NFSv2-only case by converting it into an inlined stub function. 
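The general shape of the fix, sketched with a hypothetical config option and function name rather than the real NFS symbols (the actual change follows in the diff below):

struct inode;

#ifdef CONFIG_EXAMPLE_FEATURE
extern int example_commit_inode(struct inode *inode, int how);
#else
static inline int example_commit_inode(struct inode *inode, int how)
{
	return 0;	/* feature compiled out: nothing to commit */
}
#endif

Because the stub is a static inline returning a constant, callers need no #ifdef of their own and the compiler can drop the call sites entirely in the feature-less configuration.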
Reported-and-tested-by: Ingo Molnar Signed-off-by: Trond Myklebust Signed-off-by: Linus Torvalds --- fs/nfs/write.c | 5 ----- include/linux/nfs_fs.h | 6 ++++++ 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index bb72ad34d51d..9f81bdd91c55 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1454,11 +1454,6 @@ out_mark_dirty: return ret; } #else -int nfs_commit_inode(struct inode *inode, int how) -{ - return 0; -} - static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) { return 0; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f6e2455f13d1..bad4d121b16e 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -496,6 +496,12 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_write_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_write_data *wdata); +#else +static inline int +nfs_commit_inode(struct inode *inode, int how) +{ + return 0; +} #endif static inline int -- cgit v1.2.3 From 5689cc53fa9d09b5bf41b1b1a7c90bd6c112ab40 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 1 Jul 2010 16:00:12 +0200 Subject: KVM: Use u64 for frame data types For 32bit machines where the physical address width is larger than the virtual address width the frame number types in KVM may overflow. Fix this by changing them to u64. [sfr: fix build on 32-bit ppc] Signed-off-by: Joerg Roedel Signed-off-by: Stephen Rothwell Signed-off-by: Marcelo Tosatti --- arch/powerpc/kvm/44x_tlb.c | 3 ++- include/linux/kvm_types.h | 4 ++-- virt/kvm/iommu.c | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 812312542e50..9b9b5cdea840 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -316,7 +316,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, gfn = gpaddr >> PAGE_SHIFT; new_page = gfn_to_page(vcpu->kvm, gfn); if (is_error_page(new_page)) { - printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); + printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n", + (unsigned long long)gfn); kvm_release_page_clean(new_page); return; } diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index fb46efbeabec..7ac0d4eee430 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -32,11 +32,11 @@ typedef unsigned long gva_t; typedef u64 gpa_t; -typedef unsigned long gfn_t; +typedef u64 gfn_t; typedef unsigned long hva_t; typedef u64 hpa_t; -typedef unsigned long hfn_t; +typedef u64 hfn_t; typedef hfn_t pfn_t; diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index 779559552ce7..62a9caf0563c 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c @@ -108,7 +108,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) get_order(page_size), flags); if (r) { printk(KERN_ERR "kvm_iommu_map_address:" - "iommu failed to map pfn=%lx\n", pfn); + "iommu failed to map pfn=%llx\n", pfn); goto unmap_pages; } -- cgit v1.2.3 From edba23e51578f7cb6781461568489fc1825db4ac Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Wed, 7 Jul 2010 20:16:45 +0300 Subject: KVM: Return EFAULT from kvm ioctl when guest accesses bad area Currently if guest access address that belongs to memory slot but is not backed up by page or page is read only KVM treats it like MMIO access. Remove that capability. 
It was never part of the interface and should not be relied upon. Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 4 +++- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 28 ++++++++++++++++++++++++---- 3 files changed, 28 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d8d48329cb82..89d7a2cae53b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2078,7 +2078,9 @@ static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn) if (is_hwpoison_pfn(pfn)) { kvm_send_hwpoison_signal(kvm, gfn); return 0; - } + } else if (is_fault_pfn(pfn)) + return -EFAULT; + return 1; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e796326f3646..8055067b6bec 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -269,6 +269,7 @@ extern pfn_t bad_pfn; int is_error_page(struct page *page); int is_error_pfn(pfn_t pfn); int is_hwpoison_pfn(pfn_t pfn); +int is_fault_pfn(pfn_t pfn); int kvm_is_error_hva(unsigned long addr); int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 630d1224f187..b78b794c1039 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -96,6 +96,9 @@ static bool largepages_enabled = true; static struct page *hwpoison_page; static pfn_t hwpoison_pfn; +static struct page *fault_page; +static pfn_t fault_pfn; + inline int kvm_is_mmio_pfn(pfn_t pfn) { if (pfn_valid(pfn)) { @@ -815,13 +818,13 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages); int is_error_page(struct page *page) { - return page == bad_page || page == hwpoison_page; + return page == bad_page || page == hwpoison_page || page == fault_page; } EXPORT_SYMBOL_GPL(is_error_page); int is_error_pfn(pfn_t pfn) { - return pfn == bad_pfn || pfn == hwpoison_pfn; + return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn; } EXPORT_SYMBOL_GPL(is_error_pfn); @@ -831,6 +834,12 @@ int is_hwpoison_pfn(pfn_t pfn) } EXPORT_SYMBOL_GPL(is_hwpoison_pfn); +int is_fault_pfn(pfn_t pfn) +{ + return pfn == fault_pfn; +} +EXPORT_SYMBOL_GPL(is_fault_pfn); + static inline unsigned long bad_hva(void) { return PAGE_OFFSET; @@ -959,8 +968,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr) if (vma == NULL || addr < vma->vm_start || !(vma->vm_flags & VM_PFNMAP)) { up_read(¤t->mm->mmap_sem); - get_page(bad_page); - return page_to_pfn(bad_page); + get_page(fault_page); + return page_to_pfn(fault_page); } pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; @@ -2226,6 +2235,15 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, hwpoison_pfn = page_to_pfn(hwpoison_page); + fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + + if (fault_page == NULL) { + r = -ENOMEM; + goto out_free_0; + } + + fault_pfn = page_to_pfn(fault_page); + if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { r = -ENOMEM; goto out_free_0; @@ -2298,6 +2316,8 @@ out_free_1: out_free_0a: free_cpumask_var(cpus_hardware_enabled); out_free_0: + if (fault_page) + __free_page(fault_page); if (hwpoison_page) __free_page(hwpoison_page); __free_page(bad_page); -- cgit v1.2.3 From 4a994358b919c3b14de61be5e30d9edc9089ba3f Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Sun, 11 Jul 2010 15:32:23 +0300 Subject: KVM: Convert mask notifiers to use irqchip/pin instead of gsi Devices register mask notifier using gsi, but irqchip knows about irqchip/pin, so conversion from irqchip/pin to gsi should be done before 
looking for mask notifier to call. Signed-off-by: Gleb Natapov Signed-off-by: Marcelo Tosatti --- include/linux/kvm_host.h | 3 ++- virt/kvm/ioapic.c | 2 +- virt/kvm/irq_comm.c | 12 ++++++++---- 3 files changed, 11 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 8055067b6bec..c13cc48697aa 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -447,7 +447,8 @@ void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, struct kvm_irq_mask_notifier *kimn); void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, struct kvm_irq_mask_notifier *kimn); -void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); +void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, + bool mask); #ifdef __KVM_HAVE_IOAPIC void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic, diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 1149c60b198f..0b9df8303dcf 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -152,7 +152,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) update_handled_vectors(ioapic); mask_after = e->fields.mask; if (mask_before != mask_after) - kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); + kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG && ioapic->irr & (1 << index)) ioapic_service(ioapic, index); diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index 06cf61e729d2..369e38010ad5 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c @@ -279,15 +279,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, synchronize_rcu(); } -void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) +void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, + bool mask) { struct kvm_irq_mask_notifier *kimn; struct hlist_node *n; + int gsi; rcu_read_lock(); - hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link) - if (kimn->irq == irq) - kimn->func(kimn, mask); + gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; + if (gsi != -1) + hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link) + if (kimn->irq == gsi) + kimn->func(kimn, mask); rcu_read_unlock(); } -- cgit v1.2.3 From ea0d3ab239fba48d6e998b19c28d78f765963007 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 2 Jun 2010 13:24:43 +0900 Subject: LSM: Remove unused arguments from security_path_truncate(). When commit be6d3e56a6b9b3a4ee44a0685e39e595073c6f0d "introduce new LSM hooks where vfsmount is available." was proposed, regarding security_path_truncate(), only "struct file *" argument (which AppArmor wanted to use) was removed. But length and time_attrs arguments are not used by TOMOYO nor AppArmor. Thus, let's remove these arguments. 
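Sketched in isolation, the narrowed hook that a path-based module now implements (module and function names invented for the example; the real TOMOYO and capability changes are in the diff below):

struct path;

static int example_path_truncate(struct path *path)
{
	/* Decide purely from the object being truncated; returning 0 grants
	 * permission, per the usual LSM convention. */
	return 0;
}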
Signed-off-by: Tetsuo Handa Acked-by: Nick Piggin Signed-off-by: James Morris --- fs/namei.c | 3 +-- fs/open.c | 5 ++--- include/linux/security.h | 11 +++-------- security/capability.c | 3 +-- security/security.c | 5 ++--- security/tomoyo/tomoyo.c | 3 +-- 6 files changed, 10 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/fs/namei.c b/fs/namei.c index 868d0cb9d473..fe34c2b879f4 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1484,8 +1484,7 @@ static int handle_truncate(struct path *path) */ error = locks_verify_locked(inode); if (!error) - error = security_path_truncate(path, 0, - ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); + error = security_path_truncate(path); if (!error) { error = do_truncate(path->dentry, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, diff --git a/fs/open.c b/fs/open.c index 5463266db9e6..a54ed85209c1 100644 --- a/fs/open.c +++ b/fs/open.c @@ -110,7 +110,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) error = locks_verify_truncate(inode, NULL, length); if (!error) - error = security_path_truncate(&path, length, 0); + error = security_path_truncate(&path); if (!error) error = do_truncate(path.dentry, length, 0, NULL); @@ -165,8 +165,7 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small) error = locks_verify_truncate(inode, file, length); if (!error) - error = security_path_truncate(&file->f_path, length, - ATTR_MTIME|ATTR_CTIME); + error = security_path_truncate(&file->f_path); if (!error) error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file); out_putf: diff --git a/include/linux/security.h b/include/linux/security.h index 0c8819170463..723a93df756a 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -470,8 +470,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @path_truncate: * Check permission before truncating a file. * @path contains the path structure for the file. - * @length is the new length of the file. - * @time_attrs is the flags passed to do_truncate(). * Return 0 if permission is granted. * @inode_getattr: * Check permission before obtaining file attributes. 
@@ -1412,8 +1410,7 @@ struct security_operations { int (*path_rmdir) (struct path *dir, struct dentry *dentry); int (*path_mknod) (struct path *dir, struct dentry *dentry, int mode, unsigned int dev); - int (*path_truncate) (struct path *path, loff_t length, - unsigned int time_attrs); + int (*path_truncate) (struct path *path); int (*path_symlink) (struct path *dir, struct dentry *dentry, const char *old_name); int (*path_link) (struct dentry *old_dentry, struct path *new_dir, @@ -2806,8 +2803,7 @@ int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode); int security_path_rmdir(struct path *dir, struct dentry *dentry); int security_path_mknod(struct path *dir, struct dentry *dentry, int mode, unsigned int dev); -int security_path_truncate(struct path *path, loff_t length, - unsigned int time_attrs); +int security_path_truncate(struct path *path); int security_path_symlink(struct path *dir, struct dentry *dentry, const char *old_name); int security_path_link(struct dentry *old_dentry, struct path *new_dir, @@ -2841,8 +2837,7 @@ static inline int security_path_mknod(struct path *dir, struct dentry *dentry, return 0; } -static inline int security_path_truncate(struct path *path, loff_t length, - unsigned int time_attrs) +static inline int security_path_truncate(struct path *path) { return 0; } diff --git a/security/capability.c b/security/capability.c index 8168e3ecd5bf..4aeb699da1b3 100644 --- a/security/capability.c +++ b/security/capability.c @@ -268,8 +268,7 @@ static int cap_path_rename(struct path *old_path, struct dentry *old_dentry, return 0; } -static int cap_path_truncate(struct path *path, loff_t length, - unsigned int time_attrs) +static int cap_path_truncate(struct path *path) { return 0; } diff --git a/security/security.c b/security/security.c index 351942a4ca0e..e8c87b8601b4 100644 --- a/security/security.c +++ b/security/security.c @@ -417,12 +417,11 @@ int security_path_rename(struct path *old_dir, struct dentry *old_dentry, new_dentry); } -int security_path_truncate(struct path *path, loff_t length, - unsigned int time_attrs) +int security_path_truncate(struct path *path) { if (unlikely(IS_PRIVATE(path->dentry->d_inode))) return 0; - return security_ops->path_truncate(path, length, time_attrs); + return security_ops->path_truncate(path); } int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt, diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c index 57d442e7339b..7be732cadd47 100644 --- a/security/tomoyo/tomoyo.c +++ b/security/tomoyo/tomoyo.c @@ -93,8 +93,7 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm) return tomoyo_check_open_permission(domain, &bprm->file->f_path, O_RDONLY); } -static int tomoyo_path_truncate(struct path *path, loff_t length, - unsigned int time_attrs) +static int tomoyo_path_truncate(struct path *path) { return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path); } -- cgit v1.2.3 From af4f136056c984b0aa67feed7d3170b958370b2f Mon Sep 17 00:00:00 2001 From: Mimi Zohar Date: Thu, 1 Jul 2010 15:07:43 -0400 Subject: security: move LSM xattrnames to xattr.h Make the security extended attributes names global. Updated to move the remaining Smack xattrs. 
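To show how the consolidated definitions compose, a small userspace demonstration assuming the "security." prefix convention used by the patch (this is not kernel code; the prefix is redefined locally so the snippet stands alone):

#include <stdio.h>

#define XATTR_SECURITY_PREFIX	"security."
#define XATTR_SELINUX_SUFFIX	"selinux"
#define XATTR_NAME_SELINUX	XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX

int main(void)
{
	/* Prints "security.selinux": the full attribute name is built by
	 * simple string-literal concatenation of prefix and suffix. */
	printf("%s\n", XATTR_NAME_SELINUX);
	return 0;
}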
Signed-off-by: Mimi Zohar Acked-by: Serge Hallyn Signed-off-by: James Morris --- include/linux/capability.h | 3 --- include/linux/xattr.h | 14 ++++++++++++++ security/selinux/hooks.c | 3 --- security/smack/smack.h | 10 ---------- 4 files changed, 14 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/capability.h b/include/linux/capability.h index 39e5ff512fbe..90012b9ddbf3 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -49,9 +49,6 @@ typedef struct __user_cap_data_struct { } __user *cap_user_data_t; -#define XATTR_CAPS_SUFFIX "capability" -#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX - #define VFS_CAP_REVISION_MASK 0xFF000000 #define VFS_CAP_REVISION_SHIFT 24 #define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 0cfa1e9c4cc1..f1e5bde4b35a 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -33,6 +33,20 @@ #define XATTR_USER_PREFIX "user." #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1) +/* Security namespace */ +#define XATTR_SELINUX_SUFFIX "selinux" +#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX + +#define XATTR_SMACK_SUFFIX "SMACK64" +#define XATTR_SMACK_IPIN "SMACK64IPIN" +#define XATTR_SMACK_IPOUT "SMACK64IPOUT" +#define XATTR_NAME_SMACK XATTR_SECURITY_PREFIX XATTR_SMACK_SUFFIX +#define XATTR_NAME_SMACKIPIN XATTR_SECURITY_PREFIX XATTR_SMACK_IPIN +#define XATTR_NAME_SMACKIPOUT XATTR_SECURITY_PREFIX XATTR_SMACK_IPOUT + +#define XATTR_CAPS_SUFFIX "capability" +#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX + struct inode; struct dentry; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 0f524b7d102e..85338f0c0481 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -87,9 +87,6 @@ #include "netlabel.h" #include "audit.h" -#define XATTR_SELINUX_SUFFIX "selinux" -#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX - #define NUM_SEL_MNT_OPTS 5 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); diff --git a/security/smack/smack.h b/security/smack/smack.h index c6e9acae72e4..43ae747a5aa4 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -123,16 +123,6 @@ struct smack_known { #define SMK_FSHAT "smackfshat=" #define SMK_FSROOT "smackfsroot=" -/* - * xattr names - */ -#define XATTR_SMACK_SUFFIX "SMACK64" -#define XATTR_SMACK_IPIN "SMACK64IPIN" -#define XATTR_SMACK_IPOUT "SMACK64IPOUT" -#define XATTR_NAME_SMACK XATTR_SECURITY_PREFIX XATTR_SMACK_SUFFIX -#define XATTR_NAME_SMACKIPIN XATTR_SECURITY_PREFIX XATTR_SMACK_IPIN -#define XATTR_NAME_SMACKIPOUT XATTR_SECURITY_PREFIX XATTR_SMACK_IPOUT - #define SMACK_CIPSO_OPTION "-CIPSO" /* -- cgit v1.2.3 From 9cfcac810e8993fa7a5bfd24b1a21f1dbbb03a7b Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 23 Jul 2010 11:43:51 -0400 Subject: vfs: re-introduce MAY_CHDIR Currently MAY_ACCESS means that filesystems must check the permissions right then and not rely on cached results or the results of future operations on the object. This can be because of a call to sys_access() or because of a call to chdir() which needs to check search without relying on any future operations inside that dir. I plan to use MAY_ACCESS for other purposes in the security system, so I split the MAY_ACCESS and the MAY_CHDIR cases. Signed-off-by: Eric Paris Acked-by: Stephen D. 
Smalley Signed-off-by: James Morris --- fs/fuse/dir.c | 2 +- fs/nfs/dir.c | 2 +- fs/open.c | 6 +++--- include/linux/fs.h | 1 + 4 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 3cdc5f78a406..431be0795b6b 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1016,7 +1016,7 @@ static int fuse_permission(struct inode *inode, int mask) exist. So if permissions are revoked this won't be noticed immediately, only after the attribute timeout has expired */ - } else if (mask & MAY_ACCESS) { + } else if (mask & (MAY_ACCESS | MAY_CHDIR)) { err = fuse_access(inode, mask); } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) { if (!(inode->i_mode & S_IXUGO)) { diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index e60416d3f818..832e9e239324 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1953,7 +1953,7 @@ int nfs_permission(struct inode *inode, int mask) if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) goto out; /* Is this sys_access() ? */ - if (mask & MAY_ACCESS) + if (mask & (MAY_ACCESS | MAY_CHDIR)) goto force_lookup; switch (inode->i_mode & S_IFMT) { diff --git a/fs/open.c b/fs/open.c index a54ed85209c1..0d1fa3dc0efb 100644 --- a/fs/open.c +++ b/fs/open.c @@ -366,7 +366,7 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename) if (error) goto out; - error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS); + error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); if (error) goto dput_and_out; @@ -395,7 +395,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd) if (!S_ISDIR(inode->i_mode)) goto out_putf; - error = inode_permission(inode, MAY_EXEC | MAY_ACCESS); + error = inode_permission(inode, MAY_EXEC | MAY_CHDIR); if (!error) set_fs_pwd(current->fs, &file->f_path); out_putf: @@ -413,7 +413,7 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename) if (error) goto out; - error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS); + error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); if (error) goto dput_and_out; diff --git a/include/linux/fs.h b/include/linux/fs.h index 68ca1b0491af..7d94b72f0346 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -53,6 +53,7 @@ struct inodes_stat_t { #define MAY_APPEND 8 #define MAY_ACCESS 16 #define MAY_OPEN 32 +#define MAY_CHDIR 64 /* * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond -- cgit v1.2.3 From b782e0a68d17894d9a618ffea55b33639faa6bb4 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 23 Jul 2010 11:44:03 -0400 Subject: SELinux: special dontaudit for access checks Currently there are a number of applications (nautilus being the main one) which calls access() on files in order to determine how they should be displayed. It is normal and expected that nautilus will want to see if files are executable or if they are really read/write-able. access() should return the real permission. SELinux policy checks are done in access() and can result in lots of AVC denials as policy denies RWX on files which DAC allows. Currently SELinux must dontaudit actual attempts to read/write/execute a file in order to silence these messages (and not flood the logs.) But dontaudit rules like that can hide real attacks. This patch addes a new common file permission audit_access. This permission is special in that it is meaningless and should never show up in an allow rule. 
Instead the only place this permission has meaning is in a dontaudit rule like so: dontaudit nautilus_t sbin_t:file audit_access With such a rule if nautilus just checks access() we will still get denied and thus userspace will still get the correct answer but we will not log the denial. If nautilus attempted to actually perform one of the forbidden actions (rather than just querying access(2) about it) we would still log a denial. This type of dontaudit rule should be used sparingly, as it could be a method for an attacker to probe the system permissions without detection. Signed-off-by: Eric Paris Acked-by: Stephen D. Smalley Signed-off-by: James Morris --- include/linux/lsm_audit.h | 5 +++++ security/selinux/avc.c | 24 ++++++++++++++++++++++-- security/selinux/hooks.c | 20 +++++++++++++++----- security/selinux/include/classmap.h | 2 +- 4 files changed, 43 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 6907251d5200..788f0ab937aa 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -90,6 +90,11 @@ struct common_audit_data { u32 requested; u32 audited; u32 denied; + /* + * auditdeny is a bit tricky and unintuitive. See the + * comments in avc.c for it's meaning and usage. + */ + u32 auditdeny; struct av_decision *avd; int result; } selinux_audit_data; diff --git a/security/selinux/avc.c b/security/selinux/avc.c index 3662b0f15ec5..9da6420e2056 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -488,9 +488,29 @@ void avc_audit(u32 ssid, u32 tsid, struct common_audit_data stack_data; u32 denied, audited; denied = requested & ~avd->allowed; - if (denied) + if (denied) { audited = denied & avd->auditdeny; - else if (result) + /* + * a->selinux_audit_data.auditdeny is TRICKY! Setting a bit in + * this field means that ANY denials should NOT be audited if + * the policy contains an explicit dontaudit rule for that + * permission. Take notice that this is unrelated to the + * actual permissions that were denied. As an example lets + * assume: + * + * denied == READ + * avd.auditdeny & ACCESS == 0 (not set means explicit rule) + * selinux_audit_data.auditdeny & ACCESS == 1 + * + * We will NOT audit the denial even though the denied + * permission was READ and the auditdeny checks were for + * ACCESS + */ + if (a && + a->selinux_audit_data.auditdeny && + !(a->selinux_audit_data.auditdeny & avd->auditdeny)) + audited = 0; + } else if (result) audited = denied = requested; else audited = requested & avd->auditallow; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 0c98846f188d..650947a72a2b 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2644,16 +2644,26 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *na static int selinux_inode_permission(struct inode *inode, int mask) { const struct cred *cred = current_cred(); + struct common_audit_data ad; + u32 perms; + bool from_access; + from_access = mask & MAY_ACCESS; mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND); - if (!mask) { - /* No permission to check. Existence test. */ + /* No permission to check. Existence test. 
*/ + if (!mask) return 0; - } - return inode_has_perm(cred, inode, - file_mask_to_av(inode->i_mode, mask), NULL); + COMMON_AUDIT_DATA_INIT(&ad, FS); + ad.u.fs.inode = inode; + + if (from_access) + ad.selinux_audit_data.auditdeny |= FILE__AUDIT_ACCESS; + + perms = file_mask_to_av(inode->i_mode, mask); + + return inode_has_perm(cred, inode, perms, &ad); } static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 8b32e959bb2e..d64603e10dbe 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -2,7 +2,7 @@ "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append" #define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \ - "rename", "execute", "swapon", "quotaon", "mounton" + "rename", "execute", "swapon", "quotaon", "mounton", "audit_access" #define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \ "listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \ -- cgit v1.2.3 From 67012e8209df95a8290d135753ff5145431a666e Mon Sep 17 00:00:00 2001 From: John Johansen Date: Thu, 29 Jul 2010 14:47:58 -0700 Subject: AppArmor: basic auditing infrastructure. Update lsm_audit for AppArmor specific data, and add the core routines for AppArmor uses for auditing. Signed-off-by: John Johansen Signed-off-by: James Morris --- include/linux/lsm_audit.h | 27 +++++ security/apparmor/audit.c | 215 ++++++++++++++++++++++++++++++++++++++ security/apparmor/include/audit.h | 123 ++++++++++++++++++++++ 3 files changed, 365 insertions(+) create mode 100644 security/apparmor/audit.c create mode 100644 security/apparmor/include/audit.h (limited to 'include') diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 788f0ab937aa..112a55033352 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -98,6 +98,33 @@ struct common_audit_data { struct av_decision *avd; int result; } selinux_audit_data; +#endif +#ifdef CONFIG_SECURITY_APPARMOR + struct { + int error; + int op; + int type; + void *profile; + const char *name; + const char *info; + union { + void *target; + struct { + long pos; + void *target; + } iface; + struct { + int rlim; + unsigned long max; + } rlim; + struct { + const char *target; + u32 request; + u32 denied; + uid_t ouid; + } fs; + }; + } apparmor_audit_data; #endif }; /* these callback will be implemented by a specific LSM */ diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c new file mode 100644 index 000000000000..96502b22b268 --- /dev/null +++ b/security/apparmor/audit.c @@ -0,0 +1,215 @@ +/* + * AppArmor security module + * + * This file contains AppArmor auditing functions + * + * Copyright (C) 1998-2008 Novell/SUSE + * Copyright 2009-2010 Canonical Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ */ + +#include +#include + +#include "include/apparmor.h" +#include "include/audit.h" +#include "include/policy.h" + +const char *op_table[] = { + "null", + + "sysctl", + "capable", + + "unlink", + "mkdir", + "rmdir", + "mknod", + "truncate", + "link", + "symlink", + "rename_src", + "rename_dest", + "chmod", + "chown", + "getattr", + "open", + + "file_perm", + "file_lock", + "file_mmap", + "file_mprotect", + + "create", + "post_create", + "bind", + "connect", + "listen", + "accept", + "sendmsg", + "recvmsg", + "getsockname", + "getpeername", + "getsockopt", + "setsockopt", + "socket_shutdown", + + "ptrace", + + "exec", + "change_hat", + "change_profile", + "change_onexec", + + "setprocattr", + "setrlimit", + + "profile_replace", + "profile_load", + "profile_remove" +}; + +const char *audit_mode_names[] = { + "normal", + "quiet_denied", + "quiet", + "noquiet", + "all" +}; + +static char *aa_audit_type[] = { + "AUDIT", + "ALLOWED", + "DENIED", + "HINT", + "STATUS", + "ERROR", + "KILLED" +}; + +/* + * Currently AppArmor auditing is fed straight into the audit framework. + * + * TODO: + * netlink interface for complain mode + * user auditing, - send user auditing to netlink interface + * system control of whether user audit messages go to system log + */ + +/** + * audit_base - core AppArmor function. + * @ab: audit buffer to fill (NOT NULL) + * @ca: audit structure containing data to audit (NOT NULL) + * + * Record common AppArmor audit data from @sa + */ +static void audit_pre(struct audit_buffer *ab, void *ca) +{ + struct common_audit_data *sa = ca; + struct task_struct *tsk = sa->tsk ? sa->tsk : current; + + if (aa_g_audit_header) { + audit_log_format(ab, "apparmor="); + audit_log_string(ab, aa_audit_type[sa->aad.type]); + } + + if (sa->aad.op) { + audit_log_format(ab, " operation="); + audit_log_string(ab, op_table[sa->aad.op]); + } + + if (sa->aad.info) { + audit_log_format(ab, " info="); + audit_log_string(ab, sa->aad.info); + if (sa->aad.error) + audit_log_format(ab, " error=%d", sa->aad.error); + } + + if (sa->aad.profile) { + struct aa_profile *profile = sa->aad.profile; + pid_t pid; + rcu_read_lock(); + pid = tsk->real_parent->pid; + rcu_read_unlock(); + audit_log_format(ab, " parent=%d", pid); + if (profile->ns != root_ns) { + audit_log_format(ab, " namespace="); + audit_log_untrustedstring(ab, profile->ns->base.hname); + } + audit_log_format(ab, " profile="); + audit_log_untrustedstring(ab, profile->base.hname); + } + + if (sa->aad.name) { + audit_log_format(ab, " name="); + audit_log_untrustedstring(ab, sa->aad.name); + } +} + +/** + * aa_audit_msg - Log a message to the audit subsystem + * @sa: audit event structure (NOT NULL) + * @cb: optional callback fn for type specific fields (MAYBE NULL) + */ +void aa_audit_msg(int type, struct common_audit_data *sa, + void (*cb) (struct audit_buffer *, void *)) +{ + sa->aad.type = type; + sa->lsm_pre_audit = audit_pre; + sa->lsm_post_audit = cb; + common_lsm_audit(sa); +} + +/** + * aa_audit - Log a profile based audit event to the audit subsystem + * @type: audit type for the message + * @profile: profile to check against (NOT NULL) + * @gfp: allocation flags to use + * @sa: audit event (NOT NULL) + * @cb: optional callback fn for type specific fields (MAYBE NULL) + * + * Handle default message switching based off of audit mode flags + * + * Returns: error on failure + */ +int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, + struct common_audit_data *sa, + void (*cb) (struct audit_buffer *, void *)) +{ + BUG_ON(!profile); + 
+ if (type == AUDIT_APPARMOR_AUTO) { + if (likely(!sa->aad.error)) { + if (AUDIT_MODE(profile) != AUDIT_ALL) + return 0; + type = AUDIT_APPARMOR_AUDIT; + } else if (COMPLAIN_MODE(profile)) + type = AUDIT_APPARMOR_ALLOWED; + else + type = AUDIT_APPARMOR_DENIED; + } + if (AUDIT_MODE(profile) == AUDIT_QUIET || + (type == AUDIT_APPARMOR_DENIED && + AUDIT_MODE(profile) == AUDIT_QUIET)) + return sa->aad.error; + + if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED) + type = AUDIT_APPARMOR_KILL; + + if (!unconfined(profile)) + sa->aad.profile = profile; + + aa_audit_msg(type, sa, cb); + + if (sa->aad.type == AUDIT_APPARMOR_KILL) + (void)send_sig_info(SIGKILL, NULL, sa->tsk ? sa->tsk : current); + + if (sa->aad.type == AUDIT_APPARMOR_ALLOWED) + return complain_error(sa->aad.error); + + return sa->aad.error; +} diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h new file mode 100644 index 000000000000..1951786d32e9 --- /dev/null +++ b/security/apparmor/include/audit.h @@ -0,0 +1,123 @@ +/* + * AppArmor security module + * + * This file contains AppArmor auditing function definitions. + * + * Copyright (C) 1998-2008 Novell/SUSE + * Copyright 2009-2010 Canonical Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + */ + +#ifndef __AA_AUDIT_H +#define __AA_AUDIT_H + +#include +#include +#include +#include +#include + +#include "file.h" + +struct aa_profile; + +extern const char *audit_mode_names[]; +#define AUDIT_MAX_INDEX 5 + +#define AUDIT_APPARMOR_AUTO 0 /* auto choose audit message type */ + +enum audit_mode { + AUDIT_NORMAL, /* follow normal auditing of accesses */ + AUDIT_QUIET_DENIED, /* quiet all denied access messages */ + AUDIT_QUIET, /* quiet all messages */ + AUDIT_NOQUIET, /* do not quiet audit messages */ + AUDIT_ALL /* audit all accesses */ +}; + +enum audit_type { + AUDIT_APPARMOR_AUDIT, + AUDIT_APPARMOR_ALLOWED, + AUDIT_APPARMOR_DENIED, + AUDIT_APPARMOR_HINT, + AUDIT_APPARMOR_STATUS, + AUDIT_APPARMOR_ERROR, + AUDIT_APPARMOR_KILL +}; + +extern const char *op_table[]; +enum aa_ops { + OP_NULL, + + OP_SYSCTL, + OP_CAPABLE, + + OP_UNLINK, + OP_MKDIR, + OP_RMDIR, + OP_MKNOD, + OP_TRUNC, + OP_LINK, + OP_SYMLINK, + OP_RENAME_SRC, + OP_RENAME_DEST, + OP_CHMOD, + OP_CHOWN, + OP_GETATTR, + OP_OPEN, + + OP_FPERM, + OP_FLOCK, + OP_FMMAP, + OP_FMPROT, + + OP_CREATE, + OP_POST_CREATE, + OP_BIND, + OP_CONNECT, + OP_LISTEN, + OP_ACCEPT, + OP_SENDMSG, + OP_RECVMSG, + OP_GETSOCKNAME, + OP_GETPEERNAME, + OP_GETSOCKOPT, + OP_SETSOCKOPT, + OP_SOCK_SHUTDOWN, + + OP_PTRACE, + + OP_EXEC, + OP_CHANGE_HAT, + OP_CHANGE_PROFILE, + OP_CHANGE_ONEXEC, + + OP_SETPROCATTR, + OP_SETRLIMIT, + + OP_PROF_REPL, + OP_PROF_LOAD, + OP_PROF_RM, +}; + + +/* define a short hand for apparmor_audit_data portion of common_audit_data */ +#define aad apparmor_audit_data + +void aa_audit_msg(int type, struct common_audit_data *sa, + void (*cb) (struct audit_buffer *, void *)); +int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, + struct common_audit_data *sa, + void (*cb) (struct audit_buffer *, void *)); + +static inline int complain_error(int error) +{ + if (error == -EPERM || error == -EACCES) + return 0; + return error; +} + +#endif /* __AA_AUDIT_H */ -- cgit v1.2.3 From b126468e08d92aaeffa58ef04d70e417241dadc1 Mon Sep 17 00:00:00 2001 From: Fang Wenqi Date: Tue, 1 Jun 2010 02:43:06 +0000 Subject: virtio_9p.h needs 
Found with makes headers_check: include/linux/virtio_9p.h:15: found __[us]{8,16,32,64} type without #include Signed-off-by: Fang Wenqi Signed-off-by: Eric Van Hensbergen --- include/linux/virtio_9p.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h index 5cf11765146b..395c38a47adb 100644 --- a/include/linux/virtio_9p.h +++ b/include/linux/virtio_9p.h @@ -4,6 +4,7 @@ * compatible drivers/servers. */ #include #include +#include /* The feature bitmap for virtio 9P */ -- cgit v1.2.3 From 0204fe2a20da12ddae1b564712ceeebc55214f97 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Tue, 1 Jun 2010 17:30:35 -0300 Subject: V4L/DVB: IR: add RC6 keymap for Windows Media Center Ed. remotes This is the RC6 keymap for the Windows Media Center Edition remotes that come bundled with MCE/eHome Infrared Remote transceivers. Tested with 3 different variants of the remote, but its possible there are still some additional keys missing, but its simple enough to add them in later... This patch also adds an IR_TYPE_ALL convenience macro to make life easier for receivers that support all IR protocols. v2: fix an erroneous comment that referred to imon devices Signed-off-by: Jarod Wilson Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/keymaps/Makefile | 1 + drivers/media/IR/keymaps/rc-rc6-mce.c | 105 ++++++++++++++++++++++++++++++++++ include/media/rc-map.h | 4 ++ 3 files changed, 110 insertions(+) create mode 100644 drivers/media/IR/keymaps/rc-rc6-mce.c (limited to 'include') diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile index aea649fbcf5a..c3def729d759 100644 --- a/drivers/media/IR/keymaps/Makefile +++ b/drivers/media/IR/keymaps/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-pv951.o \ rc-rc5-hauppauge-new.o \ rc-rc5-tv.o \ + rc-rc6-mce.o \ rc-real-audio-220-32-keys.o \ rc-tbs-nec.o \ rc-terratec-cinergy-xs.o \ diff --git a/drivers/media/IR/keymaps/rc-rc6-mce.c b/drivers/media/IR/keymaps/rc-rc6-mce.c new file mode 100644 index 000000000000..c6726a8039be --- /dev/null +++ b/drivers/media/IR/keymaps/rc-rc6-mce.c @@ -0,0 +1,105 @@ +/* rc-rc6-mce.c - Keytable for Windows Media Center RC-6 remotes for use + * with the Media Center Edition eHome Infrared Transceiver. + * + * Copyright (c) 2010 by Jarod Wilson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include + +static struct ir_scancode rc6_mce[] = { + { 0x800f0415, KEY_REWIND }, + { 0x800f0414, KEY_FASTFORWARD }, + { 0x800f041b, KEY_PREVIOUS }, + { 0x800f041a, KEY_NEXT }, + + { 0x800f0416, KEY_PLAY }, + { 0x800f0418, KEY_PAUSE }, + { 0x800f0419, KEY_STOP }, + { 0x800f0417, KEY_RECORD }, + + { 0x800f041e, KEY_UP }, + { 0x800f041f, KEY_DOWN }, + { 0x800f0420, KEY_LEFT }, + { 0x800f0421, KEY_RIGHT }, + + { 0x800f040b, KEY_ENTER }, + { 0x800f0422, KEY_OK }, + { 0x800f0423, KEY_EXIT }, + { 0x800f040a, KEY_DELETE }, + + { 0x800f040e, KEY_MUTE }, + { 0x800f0410, KEY_VOLUMEUP }, + { 0x800f0411, KEY_VOLUMEDOWN }, + { 0x800f0412, KEY_CHANNELUP }, + { 0x800f0413, KEY_CHANNELDOWN }, + + { 0x800f0401, KEY_NUMERIC_1 }, + { 0x800f0402, KEY_NUMERIC_2 }, + { 0x800f0403, KEY_NUMERIC_3 }, + { 0x800f0404, KEY_NUMERIC_4 }, + { 0x800f0405, KEY_NUMERIC_5 }, + { 0x800f0406, KEY_NUMERIC_6 }, + { 0x800f0407, KEY_NUMERIC_7 }, + { 0x800f0408, KEY_NUMERIC_8 }, + { 0x800f0409, KEY_NUMERIC_9 }, + { 0x800f0400, KEY_NUMERIC_0 }, + + { 0x800f041d, KEY_NUMERIC_STAR }, + { 0x800f041c, KEY_NUMERIC_POUND }, + + { 0x800f0446, KEY_TV }, + { 0x800f0447, KEY_AUDIO }, /* My Music */ + { 0x800f0448, KEY_PVR }, /* RecordedTV */ + { 0x800f0449, KEY_CAMERA }, + { 0x800f044a, KEY_VIDEO }, + { 0x800f0424, KEY_DVD }, + { 0x800f0425, KEY_TUNER }, /* LiveTV */ + { 0x800f0450, KEY_RADIO }, + + { 0x800f044c, KEY_LANGUAGE }, + { 0x800f0427, KEY_ZOOM }, /* Aspect */ + + { 0x800f045b, KEY_RED }, + { 0x800f045c, KEY_GREEN }, + { 0x800f045d, KEY_YELLOW }, + { 0x800f045e, KEY_BLUE }, + + { 0x800f040f, KEY_INFO }, + { 0x800f0426, KEY_EPG }, /* Guide */ + { 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */ + { 0x800f044d, KEY_TITLE }, + + { 0x800f040c, KEY_POWER }, + { 0x800f040d, KEY_PROG1 }, /* Windows MCE button */ + +}; + +static struct rc_keymap rc6_mce_map = { + .map = { + .scan = rc6_mce, + .size = ARRAY_SIZE(rc6_mce), + .ir_type = IR_TYPE_RC6, + .name = RC_MAP_RC6_MCE, + } +}; + +static int __init init_rc_map_rc6_mce(void) +{ + return ir_register_map(&rc6_mce_map); +} + +static void __exit exit_rc_map_rc6_mce(void) +{ + ir_unregister_map(&rc6_mce_map); +} + +module_init(init_rc_map_rc6_mce) +module_exit(exit_rc_map_rc6_mce) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jarod Wilson "); diff --git a/include/media/rc-map.h b/include/media/rc-map.h index c78e99a435b6..36ee280d42a9 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -19,6 +19,9 @@ #define IR_TYPE_SONY (1 << 4) /* Sony12/15/20 protocol */ #define IR_TYPE_OTHER (1u << 31) +#define IR_TYPE_ALL (IR_TYPE_RC5 | IR_TYPE_NEC | IR_TYPE_RC6 | \ + IR_TYPE_JVC | IR_TYPE_SONY | IR_TYPE_OTHER) + struct ir_scancode { u32 scancode; u32 keycode; @@ -107,6 +110,7 @@ void rc_map_init(void); #define RC_MAP_PV951 "rc-pv951" #define RC_MAP_RC5_HAUPPAUGE_NEW "rc-rc5-hauppauge-new" #define RC_MAP_RC5_TV "rc-rc5-tv" +#define RC_MAP_RC6_MCE "rc-rc6-mce" #define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys" #define RC_MAP_TBS_NEC "rc-tbs-nec" #define RC_MAP_TERRATEC_CINERGY_XS "rc-terratec-cinergy-xs" -- cgit v1.2.3 From 0dc50942d6f23989ffb3024aa2271941ec44aea8 Mon Sep 17 00:00:00 2001 From: David Härdeman Date: Mon, 7 Jun 2010 16:32:33 -0300 Subject: V4L/DVB: ir-core: partially convert ir-kbd-i2c.c to not use ir-functions.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Partially convert drivers/media/video/ir-kbd-i2c.c to not use ir-functions.c Signed-off-by: David Härdeman Signed-off-by: Mauro Carvalho Chehab --- 
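Not part of the patch, for illustration: the conversion replaces the ir_input_keydown()/ir_input_nokey() pair with a single ir_keydown() call, so the driver no longer reports an explicit "no key" event. A sketch of a polling routine using the new call, mirroring the hunk below (the get_key callback and its types are assumptions here):

/*
 * Sketch only: report a decoded scancode through ir-core.
 * ir_keydown() is the call this patch switches to; the surrounding
 * helper is hypothetical.
 */
static void example_ir_poll(struct IR_i2c *ir)
{
        u32 ir_key, ir_raw;

        if (ir->get_key(ir, &ir_key, &ir_raw) > 0)
                ir_keydown(ir->input, ir_key, 0);
}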
drivers/media/video/ir-kbd-i2c.c | 14 ++++---------- include/media/ir-kbd-i2c.h | 2 +- 2 files changed, 5 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c index 29d439742653..27ae8bbfb477 100644 --- a/drivers/media/video/ir-kbd-i2c.c +++ b/drivers/media/video/ir-kbd-i2c.c @@ -47,7 +47,7 @@ #include #include -#include +#include #include /* ----------------------------------------------------------------------- */ @@ -272,11 +272,8 @@ static void ir_key_poll(struct IR_i2c *ir) return; } - if (0 == rc) { - ir_input_nokey(ir->input, &ir->ir); - } else { - ir_input_keydown(ir->input, &ir->ir, ir_key); - } + if (rc) + ir_keydown(ir->input, ir_key, 0); } static void ir_work(struct work_struct *work) @@ -439,10 +436,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) dev_name(&client->dev)); /* init + register input device */ - err = ir_input_init(input_dev, &ir->ir, ir_type); - if (err < 0) - goto err_out_free; - + ir->ir_type = ir_type; input_dev->id.bustype = BUS_I2C; input_dev->name = ir->name; input_dev->phys = ir->phys; diff --git a/include/media/ir-kbd-i2c.h b/include/media/ir-kbd-i2c.h index 0506e45c9a4f..5e96d7a430be 100644 --- a/include/media/ir-kbd-i2c.h +++ b/include/media/ir-kbd-i2c.h @@ -11,7 +11,7 @@ struct IR_i2c { struct i2c_client *c; struct input_dev *input; struct ir_input_state ir; - + u64 ir_type; /* Used to avoid fast repeating */ unsigned char old; -- cgit v1.2.3 From 9b7c54d926284c5277cff3ef3cfe29f26568306a Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Wed, 16 Jun 2010 17:55:25 -0300 Subject: V4L/DVB: IR: add tx callbacks to ir-core Signed-off-by: Jarod Wilson Signed-off-by: Mauro Carvalho Chehab --- include/media/ir-core.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/media/ir-core.h b/include/media/ir-core.h index ad1303f20e00..9b957af21588 100644 --- a/include/media/ir-core.h +++ b/include/media/ir-core.h @@ -47,15 +47,21 @@ enum rc_driver_type { * is opened. * @close: callback to allow drivers to disable polling/irq when IR input device * is opened. + * @s_tx_mask: set transmitter mask (for devices with multiple tx outputs) + * @s_tx_carrier: set transmit carrier frequency + * @tx_ir: transmit IR */ struct ir_dev_props { enum rc_driver_type driver_type; unsigned long allowed_protos; u32 scanmask; - void *priv; + void *priv; int (*change_protocol)(void *priv, u64 ir_type); int (*open)(void *priv); void (*close)(void *priv); + int (*s_tx_mask)(void *priv, u32 mask); + int (*s_tx_carrier)(void *priv, u32 carrier); + int (*tx_ir)(void *priv, const char *buf, u32 n); }; struct ir_input_dev { -- cgit v1.2.3 From f6a20eb1a2d35660240cd1eb8dc2bd6504a0c6c5 Mon Sep 17 00:00:00 2001 From: Klaus Schmidinger Date: Thu, 1 Jul 2010 01:37:34 -0300 Subject: V4L/DVB: Add FE_CAN_TURBO_FEC Some (North American) providers use a non-standard mode called "8psk turbo fec". Since there is no flag in the driver that would allow an application to determine whether a particular device can handle "turbo fec", the attached patch introduces FE_CAN_TURBO_FEC. Since there is no flag in the SI data that would indicate that a transponder uses "turbo fec", VDR will assume that all 8psk transponders on DVB-S use "turbo fec". 
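Not part of the patch, just for illustration: userspace can detect the new capability through the existing FE_GET_INFO ioctl and only assume turbo FEC when the flag is reported (the device path below is a placeholder):

/* Illustration only: probe a frontend for FE_CAN_TURBO_FEC. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
        struct dvb_frontend_info info;
        int fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY); /* placeholder */

        if (fd < 0 || ioctl(fd, FE_GET_INFO, &info) < 0) {
                perror("FE_GET_INFO");
                return 1;
        }
        printf("turbo FEC %ssupported\n",
               (info.caps & FE_CAN_TURBO_FEC) ? "" : "not ");
        return 0;
}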
Tested-by: Derek Kelly Signed-off-by: Klaus Schmidinger Signed-off-by: Douglas Schilling Landgraf Signed-off-by: Mauro Carvalho Chehab --- drivers/media/dvb/dvb-usb/gp8psk-fe.c | 2 +- include/linux/dvb/frontend.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/media/dvb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb/dvb-usb/gp8psk-fe.c index 7a7f1b2b681c..dbdb5347b2a8 100644 --- a/drivers/media/dvb/dvb-usb/gp8psk-fe.c +++ b/drivers/media/dvb/dvb-usb/gp8psk-fe.c @@ -349,7 +349,7 @@ static struct dvb_frontend_ops gp8psk_fe_ops = { * FE_CAN_QAM_16 is for compatibility * (Myth incorrectly detects Turbo-QPSK as plain QAM-16) */ - FE_CAN_QPSK | FE_CAN_QAM_16 + FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_TURBO_FEC }, .release = gp8psk_fe_release, diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h index b6cb5425cde3..493a2bf85f62 100644 --- a/include/linux/dvb/frontend.h +++ b/include/linux/dvb/frontend.h @@ -62,6 +62,7 @@ typedef enum fe_caps { FE_CAN_8VSB = 0x200000, FE_CAN_16VSB = 0x400000, FE_HAS_EXTENDED_CAPS = 0x800000, /* We need more bitspace for newer APIs, indicate this. */ + FE_CAN_TURBO_FEC = 0x8000000, /* frontend supports "turbo fec modulation" */ FE_CAN_2G_MODULATION = 0x10000000, /* frontend supports "2nd generation modulation" (DVB-S2) */ FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */ FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */ -- cgit v1.2.3 From 1ece36097d0170a41fc129b8b1823a36ec2fb5c6 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Sat, 3 Jul 2010 18:06:13 -0300 Subject: V4L/DVB: Increment DVB API version A new flag were added at the Frontend capabilities. Increment API minor revision. Signed-off-by: Mauro Carvalho Chehab --- include/linux/dvb/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/dvb/version.h b/include/linux/dvb/version.h index 540b0583d9fb..5a7546c12688 100644 --- a/include/linux/dvb/version.h +++ b/include/linux/dvb/version.h @@ -24,6 +24,6 @@ #define _DVBVERSION_H_ #define DVB_API_VERSION 5 -#define DVB_API_VERSION_MINOR 1 +#define DVB_API_VERSION_MINOR 2 #endif /*_DVBVERSION_H_*/ -- cgit v1.2.3 From 4a62a5ab59742331a4e17ccaa894968d40ed9b16 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Sat, 3 Jul 2010 01:06:57 -0300 Subject: V4L/DVB: IR: add lirc device interface v2: currently unused ioctls are included, but #if 0'd out Signed-off-by: Jarod Wilson Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/Kconfig | 11 + drivers/media/IR/Makefile | 1 + drivers/media/IR/lirc_dev.c | 761 ++++++++++++++++++++++++++++++++++++++++++++ drivers/media/IR/lirc_dev.h | 226 +++++++++++++ include/media/lirc.h | 163 ++++++++++ 5 files changed, 1162 insertions(+) create mode 100644 drivers/media/IR/lirc_dev.c create mode 100644 drivers/media/IR/lirc_dev.h create mode 100644 include/media/lirc.h (limited to 'include') diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig index 797a6c36d984..5d2c37dcf11a 100644 --- a/drivers/media/IR/Kconfig +++ b/drivers/media/IR/Kconfig @@ -8,6 +8,17 @@ config VIDEO_IR depends on IR_CORE default IR_CORE +config LIRC + tristate + default y + + ---help--- + Enable this option to build the Linux Infrared Remote + Control (LIRC) core device interface driver. 
The LIRC + interface passes raw IR to and from userspace, where the + LIRC daemon handles protocol decoding for IR reception ann + encoding for IR transmitting (aka "blasting"). + source "drivers/media/IR/keymaps/Kconfig" config IR_NEC_DECODER diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile index b43fe36d88b2..3ba00bb8bea0 100644 --- a/drivers/media/IR/Makefile +++ b/drivers/media/IR/Makefile @@ -5,6 +5,7 @@ obj-y += keymaps/ obj-$(CONFIG_IR_CORE) += ir-core.o obj-$(CONFIG_VIDEO_IR) += ir-common.o +obj-$(CONFIG_LIRC) += lirc_dev.o obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o diff --git a/drivers/media/IR/lirc_dev.c b/drivers/media/IR/lirc_dev.c new file mode 100644 index 000000000000..9e141d51df91 --- /dev/null +++ b/drivers/media/IR/lirc_dev.c @@ -0,0 +1,761 @@ +/* + * LIRC base driver + * + * by Artur Lipowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "lirc_dev.h" + +static int debug; + +#define IRCTL_DEV_NAME "BaseRemoteCtl" +#define NOPLUG -1 +#define LOGHEAD "lirc_dev (%s[%d]): " + +static dev_t lirc_base_dev; + +struct irctl { + struct lirc_driver d; + int attached; + int open; + + struct mutex irctl_lock; + struct lirc_buffer *buf; + unsigned int chunk_size; + + struct task_struct *task; + long jiffies_to_wait; + + struct cdev cdev; +}; + +static DEFINE_MUTEX(lirc_dev_lock); + +static struct irctl *irctls[MAX_IRCTL_DEVICES]; + +/* Only used for sysfs but defined to void otherwise */ +static struct class *lirc_class; + +/* helper function + * initializes the irctl structure + */ +static void init_irctl(struct irctl *ir) +{ + dev_dbg(ir->d.dev, LOGHEAD "initializing irctl\n", + ir->d.name, ir->d.minor); + mutex_init(&ir->irctl_lock); + ir->d.minor = NOPLUG; +} + +static void cleanup(struct irctl *ir) +{ + dev_dbg(ir->d.dev, LOGHEAD "cleaning up\n", ir->d.name, ir->d.minor); + + device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor)); + + if (ir->buf != ir->d.rbuf) { + lirc_buffer_free(ir->buf); + kfree(ir->buf); + } + ir->buf = NULL; +} + +/* helper function + * reads key codes from driver and puts them into buffer + * returns 0 on success + */ +static int add_to_buf(struct irctl *ir) +{ + if (ir->d.add_to_buf) { + int res = -ENODATA; + int got_data = 0; + + /* + * service the device as long as it is returning + * data and we have space + */ +get_data: + res = ir->d.add_to_buf(ir->d.data, ir->buf); + if (res == 0) { + got_data++; + goto get_data; + } + + if (res == -ENODEV) + kthread_stop(ir->task); + + return got_data ? 
0 : res; + } + + return 0; +} + +/* main function of the polling thread + */ +static int lirc_thread(void *irctl) +{ + struct irctl *ir = irctl; + + dev_dbg(ir->d.dev, LOGHEAD "poll thread started\n", + ir->d.name, ir->d.minor); + + do { + if (ir->open) { + if (ir->jiffies_to_wait) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(ir->jiffies_to_wait); + } + if (kthread_should_stop()) + break; + if (!add_to_buf(ir)) + wake_up_interruptible(&ir->buf->wait_poll); + } else { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + } while (!kthread_should_stop()); + + dev_dbg(ir->d.dev, LOGHEAD "poll thread ended\n", + ir->d.name, ir->d.minor); + + return 0; +} + + +static struct file_operations fops = { + .owner = THIS_MODULE, + .read = lirc_dev_fop_read, + .write = lirc_dev_fop_write, + .poll = lirc_dev_fop_poll, + .ioctl = lirc_dev_fop_ioctl, + .open = lirc_dev_fop_open, + .release = lirc_dev_fop_close, +}; + +static int lirc_cdev_add(struct irctl *ir) +{ + int retval; + struct lirc_driver *d = &ir->d; + + if (d->fops) { + cdev_init(&ir->cdev, d->fops); + ir->cdev.owner = d->owner; + } else { + cdev_init(&ir->cdev, &fops); + ir->cdev.owner = THIS_MODULE; + } + kobject_set_name(&ir->cdev.kobj, "lirc%d", d->minor); + + retval = cdev_add(&ir->cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1); + if (retval) + kobject_put(&ir->cdev.kobj); + + return retval; +} + +int lirc_register_driver(struct lirc_driver *d) +{ + struct irctl *ir; + int minor; + int bytes_in_key; + unsigned int chunk_size; + unsigned int buffer_size; + int err; + + if (!d) { + printk(KERN_ERR "lirc_dev: lirc_register_driver: " + "driver pointer must be not NULL!\n"); + err = -EBADRQC; + goto out; + } + + if (MAX_IRCTL_DEVICES <= d->minor) { + dev_err(d->dev, "lirc_dev: lirc_register_driver: " + "\"minor\" must be between 0 and %d (%d)!\n", + MAX_IRCTL_DEVICES-1, d->minor); + err = -EBADRQC; + goto out; + } + + if (1 > d->code_length || (BUFLEN * 8) < d->code_length) { + dev_err(d->dev, "lirc_dev: lirc_register_driver: " + "code length in bits for minor (%d) " + "must be less than %d!\n", + d->minor, BUFLEN * 8); + err = -EBADRQC; + goto out; + } + + dev_dbg(d->dev, "lirc_dev: lirc_register_driver: sample_rate: %d\n", + d->sample_rate); + if (d->sample_rate) { + if (2 > d->sample_rate || HZ < d->sample_rate) { + dev_err(d->dev, "lirc_dev: lirc_register_driver: " + "sample_rate must be between 2 and %d!\n", HZ); + err = -EBADRQC; + goto out; + } + if (!d->add_to_buf) { + dev_err(d->dev, "lirc_dev: lirc_register_driver: " + "add_to_buf cannot be NULL when " + "sample_rate is set\n"); + err = -EBADRQC; + goto out; + } + } else if (!(d->fops && d->fops->read) && !d->rbuf) { + dev_err(d->dev, "lirc_dev: lirc_register_driver: " + "fops->read and rbuf cannot all be NULL!\n"); + err = -EBADRQC; + goto out; + } else if (!d->rbuf) { + if (!(d->fops && d->fops->read && d->fops->poll && + d->fops->ioctl)) { + dev_err(d->dev, "lirc_dev: lirc_register_driver: " + "neither read, poll nor ioctl can be NULL!\n"); + err = -EBADRQC; + goto out; + } + } + + mutex_lock(&lirc_dev_lock); + + minor = d->minor; + + if (minor < 0) { + /* find first free slot for driver */ + for (minor = 0; minor < MAX_IRCTL_DEVICES; minor++) + if (!irctls[minor]) + break; + if (MAX_IRCTL_DEVICES == minor) { + dev_err(d->dev, "lirc_dev: lirc_register_driver: " + "no free slots for drivers!\n"); + err = -ENOMEM; + goto out_lock; + } + } else if (irctls[minor]) { + dev_err(d->dev, "lirc_dev: lirc_register_driver: " + "minor (%d) just registered!\n", minor); 
+ err = -EBUSY; + goto out_lock; + } + + ir = kzalloc(sizeof(struct irctl), GFP_KERNEL); + if (!ir) { + err = -ENOMEM; + goto out_lock; + } + init_irctl(ir); + irctls[minor] = ir; + d->minor = minor; + + if (d->sample_rate) { + ir->jiffies_to_wait = HZ / d->sample_rate; + } else { + /* it means - wait for external event in task queue */ + ir->jiffies_to_wait = 0; + } + + /* some safety check 8-) */ + d->name[sizeof(d->name)-1] = '\0'; + + bytes_in_key = BITS_TO_LONGS(d->code_length) + + (d->code_length % 8 ? 1 : 0); + buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key; + chunk_size = d->chunk_size ? d->chunk_size : bytes_in_key; + + if (d->rbuf) { + ir->buf = d->rbuf; + } else { + ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL); + if (!ir->buf) { + err = -ENOMEM; + goto out_lock; + } + err = lirc_buffer_init(ir->buf, chunk_size, buffer_size); + if (err) { + kfree(ir->buf); + goto out_lock; + } + } + ir->chunk_size = ir->buf->chunk_size; + + if (d->features == 0) + d->features = LIRC_CAN_REC_LIRCCODE; + + ir->d = *d; + ir->d.minor = minor; + + device_create(lirc_class, ir->d.dev, + MKDEV(MAJOR(lirc_base_dev), ir->d.minor), NULL, + "lirc%u", ir->d.minor); + + if (d->sample_rate) { + /* try to fire up polling thread */ + ir->task = kthread_run(lirc_thread, (void *)ir, "lirc_dev"); + if (IS_ERR(ir->task)) { + dev_err(d->dev, "lirc_dev: lirc_register_driver: " + "cannot run poll thread for minor = %d\n", + d->minor); + err = -ECHILD; + goto out_sysfs; + } + } + + err = lirc_cdev_add(ir); + if (err) + goto out_sysfs; + + ir->attached = 1; + mutex_unlock(&lirc_dev_lock); + + dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n", + ir->d.name, ir->d.minor); + return minor; + +out_sysfs: + device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor)); +out_lock: + mutex_unlock(&lirc_dev_lock); +out: + return err; +} +EXPORT_SYMBOL(lirc_register_driver); + +int lirc_unregister_driver(int minor) +{ + struct irctl *ir; + + if (minor < 0 || minor >= MAX_IRCTL_DEVICES) { + printk(KERN_ERR "lirc_dev: lirc_unregister_driver: " + "\"minor (%d)\" must be between 0 and %d!\n", + minor, MAX_IRCTL_DEVICES-1); + return -EBADRQC; + } + + ir = irctls[minor]; + + mutex_lock(&lirc_dev_lock); + + if (ir->d.minor != minor) { + printk(KERN_ERR "lirc_dev: lirc_unregister_driver: " + "minor (%d) device not registered!", minor); + mutex_unlock(&lirc_dev_lock); + return -ENOENT; + } + + /* end up polling thread */ + if (ir->task) + kthread_stop(ir->task); + + dev_dbg(ir->d.dev, "lirc_dev: driver %s unregistered from minor = %d\n", + ir->d.name, ir->d.minor); + + ir->attached = 0; + if (ir->open) { + dev_dbg(ir->d.dev, LOGHEAD "releasing opened driver\n", + ir->d.name, ir->d.minor); + wake_up_interruptible(&ir->buf->wait_poll); + mutex_lock(&ir->irctl_lock); + ir->d.set_use_dec(ir->d.data); + module_put(ir->d.owner); + mutex_unlock(&ir->irctl_lock); + cdev_del(&ir->cdev); + } else { + cleanup(ir); + cdev_del(&ir->cdev); + kfree(ir); + irctls[minor] = NULL; + } + + mutex_unlock(&lirc_dev_lock); + + return 0; +} +EXPORT_SYMBOL(lirc_unregister_driver); + +int lirc_dev_fop_open(struct inode *inode, struct file *file) +{ + struct irctl *ir; + int retval = 0; + + if (iminor(inode) >= MAX_IRCTL_DEVICES) { + printk(KERN_WARNING "lirc_dev [%d]: open result = -ENODEV\n", + iminor(inode)); + return -ENODEV; + } + + if (mutex_lock_interruptible(&lirc_dev_lock)) + return -ERESTARTSYS; + + ir = irctls[iminor(inode)]; + if (!ir) { + retval = -ENODEV; + goto error; + } + + 
dev_dbg(ir->d.dev, LOGHEAD "open called\n", ir->d.name, ir->d.minor); + + if (ir->d.minor == NOPLUG) { + retval = -ENODEV; + goto error; + } + + if (ir->open) { + retval = -EBUSY; + goto error; + } + + if (try_module_get(ir->d.owner)) { + ++ir->open; + retval = ir->d.set_use_inc(ir->d.data); + + if (retval) { + module_put(ir->d.owner); + --ir->open; + } else { + lirc_buffer_clear(ir->buf); + } + if (ir->task) + wake_up_process(ir->task); + } + +error: + if (ir) + dev_dbg(ir->d.dev, LOGHEAD "open result = %d\n", + ir->d.name, ir->d.minor, retval); + + mutex_unlock(&lirc_dev_lock); + + return retval; +} +EXPORT_SYMBOL(lirc_dev_fop_open); + +int lirc_dev_fop_close(struct inode *inode, struct file *file) +{ + struct irctl *ir = irctls[iminor(inode)]; + + dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor); + + WARN_ON(mutex_lock_killable(&lirc_dev_lock)); + + --ir->open; + if (ir->attached) { + ir->d.set_use_dec(ir->d.data); + module_put(ir->d.owner); + } else { + cleanup(ir); + irctls[ir->d.minor] = NULL; + kfree(ir); + } + + mutex_unlock(&lirc_dev_lock); + + return 0; +} +EXPORT_SYMBOL(lirc_dev_fop_close); + +unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait) +{ + struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; + unsigned int ret; + + dev_dbg(ir->d.dev, LOGHEAD "poll called\n", ir->d.name, ir->d.minor); + + if (!ir->attached) { + mutex_unlock(&ir->irctl_lock); + return POLLERR; + } + + poll_wait(file, &ir->buf->wait_poll, wait); + + if (ir->buf) + if (lirc_buffer_empty(ir->buf)) + ret = 0; + else + ret = POLLIN | POLLRDNORM; + else + ret = POLLERR; + + dev_dbg(ir->d.dev, LOGHEAD "poll result = %d\n", + ir->d.name, ir->d.minor, ret); + + return ret; +} +EXPORT_SYMBOL(lirc_dev_fop_poll); + +int lirc_dev_fop_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + unsigned long mode; + int result = 0; + struct irctl *ir = irctls[iminor(inode)]; + + dev_dbg(ir->d.dev, LOGHEAD "ioctl called (0x%x)\n", + ir->d.name, ir->d.minor, cmd); + + if (ir->d.minor == NOPLUG || !ir->attached) { + dev_dbg(ir->d.dev, LOGHEAD "ioctl result = -ENODEV\n", + ir->d.name, ir->d.minor); + return -ENODEV; + } + + mutex_lock(&ir->irctl_lock); + + switch (cmd) { + case LIRC_GET_FEATURES: + result = put_user(ir->d.features, (unsigned long *)arg); + break; + case LIRC_GET_REC_MODE: + if (!(ir->d.features & LIRC_CAN_REC_MASK)) { + result = -ENOSYS; + break; + } + + result = put_user(LIRC_REC2MODE + (ir->d.features & LIRC_CAN_REC_MASK), + (unsigned long *)arg); + break; + case LIRC_SET_REC_MODE: + if (!(ir->d.features & LIRC_CAN_REC_MASK)) { + result = -ENOSYS; + break; + } + + result = get_user(mode, (unsigned long *)arg); + if (!result && !(LIRC_MODE2REC(mode) & ir->d.features)) + result = -EINVAL; + /* + * FIXME: We should actually set the mode somehow but + * for now, lirc_serial doesn't support mode changing either + */ + break; + case LIRC_GET_LENGTH: + result = put_user(ir->d.code_length, (unsigned long *)arg); + break; + case LIRC_GET_MIN_TIMEOUT: + if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) || + ir->d.min_timeout == 0) { + result = -ENOSYS; + break; + } + + result = put_user(ir->d.min_timeout, (unsigned long *)arg); + break; + case LIRC_GET_MAX_TIMEOUT: + if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) || + ir->d.max_timeout == 0) { + result = -ENOSYS; + break; + } + + result = put_user(ir->d.max_timeout, (unsigned long *)arg); + break; + default: + result = -EINVAL; + } + + dev_dbg(ir->d.dev, LOGHEAD "ioctl result = %d\n", + 
ir->d.name, ir->d.minor, result); + + mutex_unlock(&ir->irctl_lock); + + return result; +} +EXPORT_SYMBOL(lirc_dev_fop_ioctl); + +ssize_t lirc_dev_fop_read(struct file *file, + char *buffer, + size_t length, + loff_t *ppos) +{ + struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; + unsigned char buf[ir->chunk_size]; + int ret = 0, written = 0; + DECLARE_WAITQUEUE(wait, current); + + dev_dbg(ir->d.dev, LOGHEAD "read called\n", ir->d.name, ir->d.minor); + + if (mutex_lock_interruptible(&ir->irctl_lock)) + return -ERESTARTSYS; + if (!ir->attached) { + mutex_unlock(&ir->irctl_lock); + return -ENODEV; + } + + if (length % ir->chunk_size) { + dev_dbg(ir->d.dev, LOGHEAD "read result = -EINVAL\n", + ir->d.name, ir->d.minor); + mutex_unlock(&ir->irctl_lock); + return -EINVAL; + } + + /* + * we add ourselves to the task queue before buffer check + * to avoid losing scan code (in case when queue is awaken somewhere + * between while condition checking and scheduling) + */ + add_wait_queue(&ir->buf->wait_poll, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + /* + * while we didn't provide 'length' bytes, device is opened in blocking + * mode and 'copy_to_user' is happy, wait for data. + */ + while (written < length && ret == 0) { + if (lirc_buffer_empty(ir->buf)) { + /* According to the read(2) man page, 'written' can be + * returned as less than 'length', instead of blocking + * again, returning -EWOULDBLOCK, or returning + * -ERESTARTSYS */ + if (written) + break; + if (file->f_flags & O_NONBLOCK) { + ret = -EWOULDBLOCK; + break; + } + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + + mutex_unlock(&ir->irctl_lock); + schedule(); + set_current_state(TASK_INTERRUPTIBLE); + + if (mutex_lock_interruptible(&ir->irctl_lock)) { + ret = -ERESTARTSYS; + break; + } + + if (!ir->attached) { + ret = -ENODEV; + break; + } + } else { + lirc_buffer_read(ir->buf, buf); + ret = copy_to_user((void *)buffer+written, buf, + ir->buf->chunk_size); + written += ir->buf->chunk_size; + } + } + + remove_wait_queue(&ir->buf->wait_poll, &wait); + set_current_state(TASK_RUNNING); + mutex_unlock(&ir->irctl_lock); + + dev_dbg(ir->d.dev, LOGHEAD "read result = %s (%d)\n", + ir->d.name, ir->d.minor, ret ? "-EFAULT" : "OK", ret); + + return ret ? 
ret : written; +} +EXPORT_SYMBOL(lirc_dev_fop_read); + +void *lirc_get_pdata(struct file *file) +{ + void *data = NULL; + + if (file && file->f_dentry && file->f_dentry->d_inode && + file->f_dentry->d_inode->i_rdev) { + struct irctl *ir; + ir = irctls[iminor(file->f_dentry->d_inode)]; + data = ir->d.data; + } + + return data; +} +EXPORT_SYMBOL(lirc_get_pdata); + + +ssize_t lirc_dev_fop_write(struct file *file, const char *buffer, + size_t length, loff_t *ppos) +{ + struct irctl *ir = irctls[iminor(file->f_dentry->d_inode)]; + + dev_dbg(ir->d.dev, LOGHEAD "write called\n", ir->d.name, ir->d.minor); + + if (!ir->attached) + return -ENODEV; + + return -EINVAL; +} +EXPORT_SYMBOL(lirc_dev_fop_write); + + +static int __init lirc_dev_init(void) +{ + int retval; + + lirc_class = class_create(THIS_MODULE, "lirc"); + if (IS_ERR(lirc_class)) { + retval = PTR_ERR(lirc_class); + printk(KERN_ERR "lirc_dev: class_create failed\n"); + goto error; + } + + retval = alloc_chrdev_region(&lirc_base_dev, 0, MAX_IRCTL_DEVICES, + IRCTL_DEV_NAME); + if (retval) { + class_destroy(lirc_class); + printk(KERN_ERR "lirc_dev: alloc_chrdev_region failed\n"); + goto error; + } + + + printk(KERN_INFO "lirc_dev: IR Remote Control driver registered, " + "major %d \n", MAJOR(lirc_base_dev)); + +error: + return retval; +} + + + +static void __exit lirc_dev_exit(void) +{ + class_destroy(lirc_class); + unregister_chrdev_region(lirc_base_dev, MAX_IRCTL_DEVICES); + printk(KERN_INFO "lirc_dev: module unloaded\n"); +} + +module_init(lirc_dev_init); +module_exit(lirc_dev_exit); + +MODULE_DESCRIPTION("LIRC base driver module"); +MODULE_AUTHOR("Artur Lipowski"); +MODULE_LICENSE("GPL"); + +module_param(debug, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Enable debugging messages"); diff --git a/drivers/media/IR/lirc_dev.h b/drivers/media/IR/lirc_dev.h new file mode 100644 index 000000000000..4afd96a38a43 --- /dev/null +++ b/drivers/media/IR/lirc_dev.h @@ -0,0 +1,226 @@ +/* + * LIRC base driver + * + * by Artur Lipowski + * This code is licensed under GNU GPL + * + */ + +#ifndef _LINUX_LIRC_DEV_H +#define _LINUX_LIRC_DEV_H + +#define MAX_IRCTL_DEVICES 4 +#define BUFLEN 16 + +#define mod(n, div) ((n) % (div)) + +#include +#include +#include +#include +#include +#include + +struct lirc_buffer { + wait_queue_head_t wait_poll; + spinlock_t fifo_lock; + unsigned int chunk_size; + unsigned int size; /* in chunks */ + /* Using chunks instead of bytes pretends to simplify boundary checking + * And should allow for some performance fine tunning later */ + struct kfifo fifo; + u8 fifo_initialized; +}; + +static inline void lirc_buffer_clear(struct lirc_buffer *buf) +{ + unsigned long flags; + + if (buf->fifo_initialized) { + spin_lock_irqsave(&buf->fifo_lock, flags); + kfifo_reset(&buf->fifo); + spin_unlock_irqrestore(&buf->fifo_lock, flags); + } else + WARN(1, "calling %s on an uninitialized lirc_buffer\n", + __func__); +} + +static inline int lirc_buffer_init(struct lirc_buffer *buf, + unsigned int chunk_size, + unsigned int size) +{ + int ret; + + init_waitqueue_head(&buf->wait_poll); + spin_lock_init(&buf->fifo_lock); + buf->chunk_size = chunk_size; + buf->size = size; + ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL); + if (ret == 0) + buf->fifo_initialized = 1; + + return ret; +} + +static inline void lirc_buffer_free(struct lirc_buffer *buf) +{ + if (buf->fifo_initialized) { + kfifo_free(&buf->fifo); + buf->fifo_initialized = 0; + } else + WARN(1, "calling %s on an uninitialized lirc_buffer\n", + __func__); +} + +static 
inline int lirc_buffer_len(struct lirc_buffer *buf) +{ + int len; + unsigned long flags; + + spin_lock_irqsave(&buf->fifo_lock, flags); + len = kfifo_len(&buf->fifo); + spin_unlock_irqrestore(&buf->fifo_lock, flags); + + return len; +} + +static inline int lirc_buffer_full(struct lirc_buffer *buf) +{ + return lirc_buffer_len(buf) == buf->size * buf->chunk_size; +} + +static inline int lirc_buffer_empty(struct lirc_buffer *buf) +{ + return !lirc_buffer_len(buf); +} + +static inline int lirc_buffer_available(struct lirc_buffer *buf) +{ + return buf->size - (lirc_buffer_len(buf) / buf->chunk_size); +} + +static inline unsigned int lirc_buffer_read(struct lirc_buffer *buf, + unsigned char *dest) +{ + unsigned int ret = 0; + + if (lirc_buffer_len(buf) >= buf->chunk_size) + ret = kfifo_out_locked(&buf->fifo, dest, buf->chunk_size, + &buf->fifo_lock); + return ret; + +} + +static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf, + unsigned char *orig) +{ + unsigned int ret; + + ret = kfifo_in_locked(&buf->fifo, orig, buf->chunk_size, + &buf->fifo_lock); + + return ret; +} + +struct lirc_driver { + char name[40]; + int minor; + unsigned long code_length; + unsigned int buffer_size; /* in chunks holding one code each */ + int sample_rate; + unsigned long features; + + unsigned int chunk_size; + + void *data; + int min_timeout; + int max_timeout; + int (*add_to_buf) (void *data, struct lirc_buffer *buf); + struct lirc_buffer *rbuf; + int (*set_use_inc) (void *data); + void (*set_use_dec) (void *data); + struct file_operations *fops; + struct device *dev; + struct module *owner; +}; + +/* name: + * this string will be used for logs + * + * minor: + * indicates minor device (/dev/lirc) number for registered driver + * if caller fills it with negative value, then the first free minor + * number will be used (if available) + * + * code_length: + * length of the remote control key code expressed in bits + * + * sample_rate: + * + * data: + * it may point to any driver data and this pointer will be passed to + * all callback functions + * + * add_to_buf: + * add_to_buf will be called after specified period of the time or + * triggered by the external event, this behavior depends on value of + * the sample_rate this function will be called in user context. This + * routine should return 0 if data was added to the buffer and + * -ENODATA if none was available. This should add some number of bits + * evenly divisible by code_length to the buffer + * + * rbuf: + * if not NULL, it will be used as a read buffer, you will have to + * write to the buffer by other means, like irq's (see also + * lirc_serial.c). + * + * set_use_inc: + * set_use_inc will be called after device is opened + * + * set_use_dec: + * set_use_dec will be called after device is closed + * + * fops: + * file_operations for drivers which don't fit the current driver model. + * + * Some ioctl's can be directly handled by lirc_dev if the driver's + * ioctl function is NULL or if it returns -ENOIOCTLCMD (see also + * lirc_serial.c). 
+ * + * owner: + * the module owning this struct + * + */ + + +/* following functions can be called ONLY from user context + * + * returns negative value on error or minor number + * of the registered device if success + * contents of the structure pointed by p is copied + */ +extern int lirc_register_driver(struct lirc_driver *d); + +/* returns negative value on error or 0 if success +*/ +extern int lirc_unregister_driver(int minor); + +/* Returns the private data stored in the lirc_driver + * associated with the given device file pointer. + */ +void *lirc_get_pdata(struct file *file); + +/* default file operations + * used by drivers if they override only some operations + */ +int lirc_dev_fop_open(struct inode *inode, struct file *file); +int lirc_dev_fop_close(struct inode *inode, struct file *file); +unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait); +int lirc_dev_fop_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg); +ssize_t lirc_dev_fop_read(struct file *file, char *buffer, size_t length, + loff_t *ppos); +ssize_t lirc_dev_fop_write(struct file *file, const char *buffer, size_t length, + loff_t *ppos); + +#endif diff --git a/include/media/lirc.h b/include/media/lirc.h new file mode 100644 index 000000000000..8dffd4f47bf6 --- /dev/null +++ b/include/media/lirc.h @@ -0,0 +1,163 @@ +/* + * lirc.h - linux infrared remote control header file + * last modified 2010/06/03 by Jarod Wilson + */ + +#ifndef _LINUX_LIRC_H +#define _LINUX_LIRC_H + +#include +#include + +#define PULSE_BIT 0x01000000 +#define PULSE_MASK 0x00FFFFFF + +#define LIRC_MODE2_SPACE 0x00000000 +#define LIRC_MODE2_PULSE 0x01000000 +#define LIRC_MODE2_FREQUENCY 0x02000000 +#define LIRC_MODE2_TIMEOUT 0x03000000 + +#define LIRC_VALUE_MASK 0x00FFFFFF +#define LIRC_MODE2_MASK 0xFF000000 + +#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) +#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) +#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) +#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) + +#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK) +#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK) + +#define LIRC_IS_SPACE(val) (LIRC_MODE2(val) == LIRC_MODE2_SPACE) +#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE) +#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) +#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) + +/*** lirc compatible hardware features ***/ + +#define LIRC_MODE2SEND(x) (x) +#define LIRC_SEND2MODE(x) (x) +#define LIRC_MODE2REC(x) ((x) << 16) +#define LIRC_REC2MODE(x) ((x) >> 16) + +#define LIRC_MODE_RAW 0x00000001 +#define LIRC_MODE_PULSE 0x00000002 +#define LIRC_MODE_MODE2 0x00000004 +#define LIRC_MODE_LIRCCODE 0x00000010 + + +#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) +#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) +#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) +#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) + +#define LIRC_CAN_SEND_MASK 0x0000003f + +#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 +#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 +#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 + +#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) +#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) +#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) +#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) + +#define LIRC_CAN_REC_MASK 
LIRC_MODE2REC(LIRC_CAN_SEND_MASK) + +#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) +#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) + +#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 +#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 +#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 +#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000 +#define LIRC_CAN_SET_REC_FILTER 0x08000000 + +#define LIRC_CAN_MEASURE_CARRIER 0x02000000 + +#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) +#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) + +#define LIRC_CAN_NOTIFY_DECODE 0x01000000 + +/*** IOCTL commands for lirc driver ***/ + +#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) + +#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32) +#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32) +#define LIRC_GET_SEND_CARRIER _IOR('i', 0x00000003, __u32) +#define LIRC_GET_REC_CARRIER _IOR('i', 0x00000004, __u32) +#define LIRC_GET_SEND_DUTY_CYCLE _IOR('i', 0x00000005, __u32) +#define LIRC_GET_REC_DUTY_CYCLE _IOR('i', 0x00000006, __u32) +#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32) + +#define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, __u32) +#define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, __u32) + +#if 0 /* these ioctls are not used at the moment */ +#define LIRC_GET_MIN_FILTER_PULSE _IOR('i', 0x0000000a, __u32) +#define LIRC_GET_MAX_FILTER_PULSE _IOR('i', 0x0000000b, __u32) +#define LIRC_GET_MIN_FILTER_SPACE _IOR('i', 0x0000000c, __u32) +#define LIRC_GET_MAX_FILTER_SPACE _IOR('i', 0x0000000d, __u32) +#endif + +/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ +#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32) + +#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32) +#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32) +/* Note: these can reset the according pulse_width */ +#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, __u32) +#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32) +#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32) +#define LIRC_SET_REC_DUTY_CYCLE _IOW('i', 0x00000016, __u32) +#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32) + +/* + * when a timeout != 0 is set the driver will send a + * LIRC_MODE2_TIMEOUT data packet, otherwise LIRC_MODE2_TIMEOUT is + * never sent, timeout is disabled by default + */ +#define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, __u32) + +#if 0 /* these ioctls are not used at the moment */ +/* + * pulses shorter than this are filtered out by hardware (software + * emulation in lirc_dev?) + */ +#define LIRC_SET_REC_FILTER_PULSE _IOW('i', 0x00000019, __u32) +/* + * spaces shorter than this are filtered out by hardware (software + * emulation in lirc_dev?) 
+ */ +#define LIRC_SET_REC_FILTER_SPACE _IOW('i', 0x0000001a, __u32) +/* + * if filter cannot be set independantly for pulse/space, this should + * be used + */ +#define LIRC_SET_REC_FILTER _IOW('i', 0x0000001b, __u32) +#endif + +/* + * to set a range use + * LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE with the + * lower bound first and later + * LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER with the upper bound + */ + +#define LIRC_SET_REC_DUTY_CYCLE_RANGE _IOW('i', 0x0000001e, __u32) +#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32) + +#define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) + +#if 0 /* these ioctls are not used at the moment */ +/* + * from the next key press on the driver will send + * LIRC_MODE2_FREQUENCY packets + */ +#define LIRC_MEASURE_CARRIER_ENABLE _IO('i', 0x00000021) +#define LIRC_MEASURE_CARRIER_DISABLE _IO('i', 0x00000022) +#endif + +#endif -- cgit v1.2.3 From ca4146985db7cbb97816e9b961b8db79e63d9e86 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Sat, 3 Jul 2010 01:07:53 -0300 Subject: V4L/DVB: IR: add ir-core to lirc userspace decoder bridge driver v2: copy of buffer data from userspace done inside this plugin/driver, keeping the actual drivers minimal, and more flexible in what we can deliver to them later on (they may be fed from within kernelspace later on, by an in-kernel IR encoder). Signed-off-by: Jarod Wilson Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/Kconfig | 10 ++ drivers/media/IR/Makefile | 1 + drivers/media/IR/ir-core-priv.h | 13 ++ drivers/media/IR/ir-lirc-codec.c | 284 +++++++++++++++++++++++++++++++++++++++ drivers/media/IR/ir-raw-event.c | 1 + drivers/media/IR/ir-sysfs.c | 1 + include/media/rc-map.h | 4 +- 7 files changed, 313 insertions(+), 1 deletion(-) create mode 100644 drivers/media/IR/ir-lirc-codec.c (limited to 'include') diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig index 5d2c37dcf11a..40094c007acf 100644 --- a/drivers/media/IR/Kconfig +++ b/drivers/media/IR/Kconfig @@ -69,6 +69,16 @@ config IR_SONY_DECODER Enable this option if you have an infrared remote control which uses the Sony protocol, and you need software decoding support. +config IR_LIRC_CODEC + tristate "Enable IR to LIRC bridge" + depends on IR_CORE + depends on LIRC + default y + + ---help--- + Enable this option to pass raw IR to and from userspace via + the LIRC interface. 
+ config IR_IMON tristate "SoundGraph iMON Receiver and Display" depends on USB_ARCH_HAS_HCD diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile index 3ba00bb8bea0..2ae4f3abfdbd 100644 --- a/drivers/media/IR/Makefile +++ b/drivers/media/IR/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o +obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o # stand-alone IR receivers/transmitters obj-$(CONFIG_IR_IMON) += imon.o diff --git a/drivers/media/IR/ir-core-priv.h b/drivers/media/IR/ir-core-priv.h index 0a82b22d3828..babd52061bc3 100644 --- a/drivers/media/IR/ir-core-priv.h +++ b/drivers/media/IR/ir-core-priv.h @@ -73,6 +73,11 @@ struct ir_raw_event_ctrl { bool first; bool toggle; } jvc; + struct lirc_codec { + struct ir_input_dev *ir_dev; + struct lirc_driver *drv; + int lircdata; + } lirc; }; /* macros for IR decoders */ @@ -164,4 +169,12 @@ void ir_raw_init(void); #define load_sony_decode() 0 #endif +/* from ir-lirc-codec.c */ +#ifdef CONFIG_IR_LIRC_CODEC_MODULE +#define load_lirc_codec() request_module("ir-lirc-codec") +#else +#define load_lirc_codec() 0 +#endif + + #endif /* _IR_RAW_EVENT */ diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c new file mode 100644 index 000000000000..aff31d1b13d5 --- /dev/null +++ b/drivers/media/IR/ir-lirc-codec.c @@ -0,0 +1,284 @@ +/* ir-lirc-codec.c - ir-core to classic lirc interface bridge + * + * Copyright (C) 2010 by Jarod Wilson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include "ir-core-priv.h" +#include "lirc_dev.h" + +#define LIRCBUF_SIZE 256 + +/** + * ir_lirc_decode() - Send raw IR data to lirc_dev to be relayed to the + * lircd userspace daemon for decoding. + * @input_dev: the struct input_dev descriptor of the device + * @duration: the struct ir_raw_event descriptor of the pulse/space + * + * This function returns -EINVAL if the lirc interfaces aren't wired up. 
+ */ +static int ir_lirc_decode(struct input_dev *input_dev, struct ir_raw_event ev) +{ + struct ir_input_dev *ir_dev = input_get_drvdata(input_dev); + + if (!(ir_dev->raw->enabled_protocols & IR_TYPE_LIRC)) + return 0; + + if (!ir_dev->raw->lirc.drv || !ir_dev->raw->lirc.drv->rbuf) + return -EINVAL; + + IR_dprintk(2, "LIRC data transfer started (%uus %s)\n", + TO_US(ev.duration), TO_STR(ev.pulse)); + + ir_dev->raw->lirc.lircdata += ev.duration / 1000; + if (ev.pulse) + ir_dev->raw->lirc.lircdata |= PULSE_BIT; + + lirc_buffer_write(ir_dev->raw->lirc.drv->rbuf, + (unsigned char *) &ir_dev->raw->lirc.lircdata); + wake_up(&ir_dev->raw->lirc.drv->rbuf->wait_poll); + + ir_dev->raw->lirc.lircdata = 0; + + return 0; +} + +static ssize_t ir_lirc_transmit_ir(struct file *file, const char *buf, + size_t n, loff_t *ppos) +{ + struct lirc_codec *lirc; + struct ir_input_dev *ir_dev; + int *txbuf; /* buffer with values to transmit */ + int ret = 0, count; + + lirc = lirc_get_pdata(file); + if (!lirc) + return -EFAULT; + + if (n % sizeof(int)) + return -EINVAL; + + count = n / sizeof(int); + if (count > LIRCBUF_SIZE || count % 2 == 0) + return -EINVAL; + + txbuf = kzalloc(sizeof(int) * LIRCBUF_SIZE, GFP_KERNEL); + if (!txbuf) + return -ENOMEM; + + if (copy_from_user(txbuf, buf, n)) { + ret = -EFAULT; + goto out; + } + + ir_dev = lirc->ir_dev; + if (!ir_dev) { + ret = -EFAULT; + goto out; + } + + if (ir_dev->props && ir_dev->props->tx_ir) + ret = ir_dev->props->tx_ir(ir_dev->props->priv, txbuf, (u32)n); + +out: + kfree(txbuf); + return ret; +} + +static int ir_lirc_ioctl(struct inode *node, struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct lirc_codec *lirc; + struct ir_input_dev *ir_dev; + int ret = 0; + void *drv_data; + unsigned long val; + + lirc = lirc_get_pdata(filep); + if (!lirc) + return -EFAULT; + + ir_dev = lirc->ir_dev; + if (!ir_dev || !ir_dev->props || !ir_dev->props->priv) + return -EFAULT; + + drv_data = ir_dev->props->priv; + + switch (cmd) { + case LIRC_SET_TRANSMITTER_MASK: + ret = get_user(val, (unsigned long *)arg); + if (ret) + return ret; + + if (ir_dev->props && ir_dev->props->s_tx_mask) + ret = ir_dev->props->s_tx_mask(drv_data, (u32)val); + else + return -EINVAL; + break; + + case LIRC_SET_SEND_CARRIER: + ret = get_user(val, (unsigned long *)arg); + if (ret) + return ret; + + if (ir_dev->props && ir_dev->props->s_tx_carrier) + ir_dev->props->s_tx_carrier(drv_data, (u32)val); + else + return -EINVAL; + break; + + case LIRC_GET_SEND_MODE: + val = LIRC_CAN_SEND_PULSE & LIRC_CAN_SEND_MASK; + ret = put_user(val, (unsigned long *)arg); + break; + + case LIRC_SET_SEND_MODE: + ret = get_user(val, (unsigned long *)arg); + if (ret) + return ret; + + if (val != (LIRC_MODE_PULSE & LIRC_CAN_SEND_MASK)) + return -EINVAL; + break; + + default: + return lirc_dev_fop_ioctl(node, filep, cmd, arg); + } + + return ret; +} + +static int ir_lirc_open(void *data) +{ + return 0; +} + +static void ir_lirc_close(void *data) +{ + return; +} + +static struct file_operations lirc_fops = { + .owner = THIS_MODULE, + .write = ir_lirc_transmit_ir, + .ioctl = ir_lirc_ioctl, + .read = lirc_dev_fop_read, + .poll = lirc_dev_fop_poll, + .open = lirc_dev_fop_open, + .release = lirc_dev_fop_close, +}; + +static int ir_lirc_register(struct input_dev *input_dev) +{ + struct ir_input_dev *ir_dev = input_get_drvdata(input_dev); + struct lirc_driver *drv; + struct lirc_buffer *rbuf; + int rc = -ENOMEM; + unsigned long features; + + drv = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); + if (!drv) + 
return rc; + + rbuf = kzalloc(sizeof(struct lirc_buffer), GFP_KERNEL); + if (!drv) + goto rbuf_alloc_failed; + + rc = lirc_buffer_init(rbuf, sizeof(int), LIRCBUF_SIZE); + if (rc) + goto rbuf_init_failed; + + features = LIRC_CAN_REC_MODE2; + if (ir_dev->props->tx_ir) { + features |= LIRC_CAN_SEND_PULSE; + if (ir_dev->props->s_tx_mask) + features |= LIRC_CAN_SET_TRANSMITTER_MASK; + if (ir_dev->props->s_tx_carrier) + features |= LIRC_CAN_SET_SEND_CARRIER; + } + + snprintf(drv->name, sizeof(drv->name), "ir-lirc-codec (%s)", + ir_dev->driver_name); + drv->minor = -1; + drv->features = features; + drv->data = &ir_dev->raw->lirc; + drv->rbuf = rbuf; + drv->set_use_inc = &ir_lirc_open; + drv->set_use_dec = &ir_lirc_close; + drv->code_length = sizeof(struct ir_raw_event) * 8; + drv->fops = &lirc_fops; + drv->dev = &ir_dev->dev; + drv->owner = THIS_MODULE; + + drv->minor = lirc_register_driver(drv); + if (drv->minor < 0) { + rc = -ENODEV; + goto lirc_register_failed; + } + + ir_dev->raw->lirc.drv = drv; + ir_dev->raw->lirc.ir_dev = ir_dev; + ir_dev->raw->lirc.lircdata = PULSE_MASK; + + return 0; + +lirc_register_failed: +rbuf_init_failed: + kfree(rbuf); +rbuf_alloc_failed: + kfree(drv); + + return rc; +} + +static int ir_lirc_unregister(struct input_dev *input_dev) +{ + struct ir_input_dev *ir_dev = input_get_drvdata(input_dev); + struct lirc_codec *lirc = &ir_dev->raw->lirc; + + lirc_unregister_driver(lirc->drv->minor); + lirc_buffer_free(lirc->drv->rbuf); + kfree(lirc->drv); + + return 0; +} + +static struct ir_raw_handler lirc_handler = { + .protocols = IR_TYPE_LIRC, + .decode = ir_lirc_decode, + .raw_register = ir_lirc_register, + .raw_unregister = ir_lirc_unregister, +}; + +static int __init ir_lirc_codec_init(void) +{ + ir_raw_handler_register(&lirc_handler); + + printk(KERN_INFO "IR LIRC bridge handler initialized\n"); + return 0; +} + +static void __exit ir_lirc_codec_exit(void) +{ + ir_raw_handler_unregister(&lirc_handler); +} + +module_init(ir_lirc_codec_init); +module_exit(ir_lirc_codec_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jarod Wilson "); +MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); +MODULE_DESCRIPTION("LIRC IR handler bridge"); diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c index 5f98ab823057..6f192ef31db1 100644 --- a/drivers/media/IR/ir-raw-event.c +++ b/drivers/media/IR/ir-raw-event.c @@ -253,6 +253,7 @@ static void init_decoders(struct work_struct *work) load_rc6_decode(); load_jvc_decode(); load_sony_decode(); + load_lirc_codec(); /* If needed, we may later add some init code. 
In this case, it is needed to change the CONFIG_MODULE test at ir-core.h diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c index f176fbff9488..6273047e915b 100644 --- a/drivers/media/IR/ir-sysfs.c +++ b/drivers/media/IR/ir-sysfs.c @@ -43,6 +43,7 @@ static struct { { IR_TYPE_RC6, "rc-6" }, { IR_TYPE_JVC, "jvc" }, { IR_TYPE_SONY, "sony" }, + { IR_TYPE_LIRC, "lirc" }, }; #define PROTO_NONE "none" diff --git a/include/media/rc-map.h b/include/media/rc-map.h index 36ee280d42a9..f982144685e7 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -17,10 +17,12 @@ #define IR_TYPE_RC6 (1 << 2) /* Philips RC6 protocol */ #define IR_TYPE_JVC (1 << 3) /* JVC protocol */ #define IR_TYPE_SONY (1 << 4) /* Sony12/15/20 protocol */ +#define IR_TYPE_LIRC (1 << 30) /* Pass raw IR to lirc userspace */ #define IR_TYPE_OTHER (1u << 31) #define IR_TYPE_ALL (IR_TYPE_RC5 | IR_TYPE_NEC | IR_TYPE_RC6 | \ - IR_TYPE_JVC | IR_TYPE_SONY | IR_TYPE_OTHER) + IR_TYPE_JVC | IR_TYPE_SONY | IR_TYPE_LIRC | \ + IR_TYPE_OTHER) struct ir_scancode { u32 scancode; -- cgit v1.2.3 From 30eb1be718a4753dd1912eb35af4cdaa25cefea9 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Fri, 2 Jul 2010 00:38:09 -0300 Subject: V4L/DVB: IR TX: incoming IR buffer now an int pointer incoming IR buffer now an int pointer, and not fed from userspace Signed-off-by: Jarod Wilson Signed-off-by: Mauro Carvalho Chehab --- include/media/ir-core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/media/ir-core.h b/include/media/ir-core.h index 9b957af21588..513e60dd1010 100644 --- a/include/media/ir-core.h +++ b/include/media/ir-core.h @@ -61,7 +61,7 @@ struct ir_dev_props { void (*close)(void *priv); int (*s_tx_mask)(void *priv, u32 mask); int (*s_tx_carrier)(void *priv, u32 carrier); - int (*tx_ir)(void *priv, const char *buf, u32 n); + int (*tx_ir)(void *priv, int *txbuf, u32 n); }; struct ir_input_dev { -- cgit v1.2.3 From 15f135d0cfc1ce762889bb804549da4081087597 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Sat, 3 Jul 2010 01:08:52 -0300 Subject: V4L/DVB: IR: add empty lirc pseudo-keymap Signed-off-by: Jarod Wilson Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/keymaps/Makefile | 1 + drivers/media/IR/keymaps/rc-lirc.c | 41 ++++++++++++++++++++++++++++++++++++++ include/media/rc-map.h | 1 + 3 files changed, 43 insertions(+) create mode 100644 drivers/media/IR/keymaps/rc-lirc.c (limited to 'include') diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile index c3def729d759..86d3d1f2eaa9 100644 --- a/drivers/media/IR/keymaps/Makefile +++ b/drivers/media/IR/keymaps/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-kaiomy.o \ rc-kworld-315u.o \ rc-kworld-plus-tv-analog.o \ + rc-lirc.o \ rc-manli.o \ rc-msi-tvanywhere.o \ rc-msi-tvanywhere-plus.o \ diff --git a/drivers/media/IR/keymaps/rc-lirc.c b/drivers/media/IR/keymaps/rc-lirc.c new file mode 100644 index 000000000000..43fcf9035082 --- /dev/null +++ b/drivers/media/IR/keymaps/rc-lirc.c @@ -0,0 +1,41 @@ +/* rc-lirc.c - Empty dummy keytable, for use when its preferred to pass + * all raw IR data to the lirc userspace decoder. + * + * Copyright (c) 2010 by Jarod Wilson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include + +static struct ir_scancode lirc[] = { + { }, +}; + +static struct rc_keymap lirc_map = { + .map = { + .scan = lirc, + .size = ARRAY_SIZE(lirc), + .ir_type = IR_TYPE_LIRC, + .name = RC_MAP_LIRC, + } +}; + +static int __init init_rc_map_lirc(void) +{ + return ir_register_map(&lirc_map); +} + +static void __exit exit_rc_map_lirc(void) +{ + ir_unregister_map(&lirc_map); +} + +module_init(init_rc_map_lirc) +module_exit(exit_rc_map_lirc) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jarod Wilson "); diff --git a/include/media/rc-map.h b/include/media/rc-map.h index f982144685e7..a329858c4b42 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -92,6 +92,7 @@ void rc_map_init(void); #define RC_MAP_KAIOMY "rc-kaiomy" #define RC_MAP_KWORLD_315U "rc-kworld-315u" #define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog" +#define RC_MAP_LIRC "rc-lirc" #define RC_MAP_MANLI "rc-manli" #define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus" #define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere" -- cgit v1.2.3 From 33c38283f03d8ea0358229fc03c1beebe67aed0e Mon Sep 17 00:00:00 2001 From: Pawel Osciak Date: Tue, 11 May 2010 10:36:28 -0300 Subject: V4L/DVB: videobuf: rename videobuf_alloc to videobuf_alloc_vb These functions allocate videobuf_buffer structures only. Renaming in order to prevent confusion with functions allocating actual video buffer memory. Rename the functions in videobuf-core.h videobuf-dma-sg.c as well. Signed-off-by: Pawel Osciak Signed-off-by: Laurent Pinchart Signed-off-by: Kyungmin Park Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/videobuf-core.c | 14 +++++++------- drivers/media/video/videobuf-dma-contig.c | 4 ++-- drivers/media/video/videobuf-dma-sg.c | 6 +++--- drivers/media/video/videobuf-vmalloc.c | 4 ++-- include/media/videobuf-core.h | 4 ++-- 5 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c index 7d3378437ded..4d5658387955 100644 --- a/drivers/media/video/videobuf-core.c +++ b/drivers/media/video/videobuf-core.c @@ -52,18 +52,18 @@ MODULE_LICENSE("GPL"); #define CALL(q, f, arg...) \ ((q->int_ops->f) ? 
q->int_ops->f(arg) : 0) -struct videobuf_buffer *videobuf_alloc(struct videobuf_queue *q) +struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q) { struct videobuf_buffer *vb; BUG_ON(q->msize < sizeof(*vb)); - if (!q->int_ops || !q->int_ops->alloc) { + if (!q->int_ops || !q->int_ops->alloc_vb) { printk(KERN_ERR "No specific ops defined!\n"); BUG(); } - vb = q->int_ops->alloc(q->msize); + vb = q->int_ops->alloc_vb(q->msize); if (NULL != vb) { init_waitqueue_head(&vb->done); vb->magic = MAGIC_BUFFER; @@ -71,7 +71,7 @@ struct videobuf_buffer *videobuf_alloc(struct videobuf_queue *q) return vb; } -EXPORT_SYMBOL_GPL(videobuf_alloc); +EXPORT_SYMBOL_GPL(videobuf_alloc_vb); #define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\ vb->state != VIDEOBUF_QUEUED) @@ -359,7 +359,7 @@ int __videobuf_mmap_setup(struct videobuf_queue *q, /* Allocate and initialize buffers */ for (i = 0; i < bcount; i++) { - q->bufs[i] = videobuf_alloc(q); + q->bufs[i] = videobuf_alloc_vb(q); if (NULL == q->bufs[i]) break; @@ -766,7 +766,7 @@ static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q, MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); /* setup stuff */ - q->read_buf = videobuf_alloc(q); + q->read_buf = videobuf_alloc_vb(q); if (NULL == q->read_buf) return -ENOMEM; @@ -871,7 +871,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q, if (NULL == q->read_buf) { /* need to capture a new frame */ retval = -ENOMEM; - q->read_buf = videobuf_alloc(q); + q->read_buf = videobuf_alloc_vb(q); dprintk(1, "video alloc=0x%p\n", q->read_buf); if (NULL == q->read_buf) diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c index 74730c624cfc..98e292c35185 100644 --- a/drivers/media/video/videobuf-dma-contig.c +++ b/drivers/media/video/videobuf-dma-contig.c @@ -190,7 +190,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem, return ret; } -static struct videobuf_buffer *__videobuf_alloc(size_t size) +static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) { struct videobuf_dma_contig_memory *mem; struct videobuf_buffer *vb; @@ -338,7 +338,7 @@ error: static struct videobuf_qtype_ops qops = { .magic = MAGIC_QTYPE_OPS, - .alloc = __videobuf_alloc, + .alloc_vb = __videobuf_alloc_vb, .iolock = __videobuf_iolock, .mmap_mapper = __videobuf_mmap_mapper, .vaddr = __videobuf_to_vaddr, diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c index 8359e6badd36..a9b109178578 100644 --- a/drivers/media/video/videobuf-dma-sg.c +++ b/drivers/media/video/videobuf-dma-sg.c @@ -428,7 +428,7 @@ static const struct vm_operations_struct videobuf_vm_ops = { struct videobuf_dma_sg_memory */ -static struct videobuf_buffer *__videobuf_alloc(size_t size) +static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) { struct videobuf_dma_sg_memory *mem; struct videobuf_buffer *vb; @@ -638,7 +638,7 @@ done: static struct videobuf_qtype_ops sg_ops = { .magic = MAGIC_QTYPE_OPS, - .alloc = __videobuf_alloc, + .alloc_vb = __videobuf_alloc_vb, .iolock = __videobuf_iolock, .sync = __videobuf_sync, .mmap_mapper = __videobuf_mmap_mapper, @@ -654,7 +654,7 @@ void *videobuf_sg_alloc(size_t size) q.msize = size; - return videobuf_alloc(&q); + return videobuf_alloc_vb(&q); } EXPORT_SYMBOL_GPL(videobuf_sg_alloc); diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c index 583728f4c221..cf5be6bfd745 100644 --- a/drivers/media/video/videobuf-vmalloc.c +++ 
b/drivers/media/video/videobuf-vmalloc.c @@ -135,7 +135,7 @@ static const struct vm_operations_struct videobuf_vm_ops = { struct videobuf_dma_sg_memory */ -static struct videobuf_buffer *__videobuf_alloc(size_t size) +static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) { struct videobuf_vmalloc_memory *mem; struct videobuf_buffer *vb; @@ -293,7 +293,7 @@ error: static struct videobuf_qtype_ops qops = { .magic = MAGIC_QTYPE_OPS, - .alloc = __videobuf_alloc, + .alloc_vb = __videobuf_alloc_vb, .iolock = __videobuf_iolock, .mmap_mapper = __videobuf_mmap_mapper, .vaddr = videobuf_to_vmalloc, diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h index f91a736c133d..a157cd166e6e 100644 --- a/include/media/videobuf-core.h +++ b/include/media/videobuf-core.h @@ -127,7 +127,7 @@ struct videobuf_queue_ops { struct videobuf_qtype_ops { u32 magic; - struct videobuf_buffer *(*alloc)(size_t size); + struct videobuf_buffer *(*alloc_vb)(size_t size); void *(*vaddr) (struct videobuf_buffer *buf); int (*iolock) (struct videobuf_queue *q, struct videobuf_buffer *vb, @@ -173,7 +173,7 @@ int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr); int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb, struct v4l2_framebuffer *fbuf); -struct videobuf_buffer *videobuf_alloc(struct videobuf_queue *q); +struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q); /* Used on videobuf-dvb */ void *videobuf_queue_to_vaddr(struct videobuf_queue *q, -- cgit v1.2.3 From 952684035a91334dbe33b15063514cab5e7c6907 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 11 May 2010 10:36:30 -0300 Subject: V4L/DVB: videobuf: Remove the videobuf_sg_dma_map/unmap functions Instead of creating dirty wrappers around videobuf_dma_map/unmap that create a dummy videobuf_queue structure, modify videobuf_dma_map/unmap to take a device pointer argument and use it directly. The videobuf_sg_dma_map/unmap then become unused and can be removed. 
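For illustration only (not part of the patch), a driver call site written against the reworked API passes the underlying struct device instead of the queue. A minimal sketch follows; the variables q and vb and the surrounding driver context are assumed, and error handling is trimmed:

	/* Map a buffer for DMA and unmap it again with the new calls.
	 * 'q' is the driver's struct videobuf_queue and 'vb' one of its
	 * struct videobuf_buffer entries; both names are placeholders.
	 */
	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
	int err;

	err = videobuf_dma_map(q->dev, dma);	/* was videobuf_dma_map(q, dma) */
	if (err)
		return err;

	/* ... hand dma->sglist to the hardware ... */

	videobuf_dma_unmap(q->dev, dma);	/* was videobuf_dma_unmap(q, dma) */
	videobuf_dma_free(dma);

videobuf_dma_free() is unchanged by this patch and is shown only to round out the buffer life cycle; the conversions below follow the same pattern at each existing call site.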
Signed-off-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab --- drivers/media/common/saa7146_fops.c | 2 +- drivers/media/video/bt8xx/bttv-risc.c | 2 +- drivers/media/video/cx23885/cx23885-core.c | 2 +- drivers/media/video/cx88/cx88-alsa.c | 4 ++-- drivers/media/video/cx88/cx88-core.c | 2 +- drivers/media/video/omap24xxcam.c | 2 +- drivers/media/video/pxa_camera.c | 2 +- drivers/media/video/saa7134/saa7134-alsa.c | 10 +++++----- drivers/media/video/saa7134/saa7134-core.c | 2 +- drivers/media/video/videobuf-dma-sg.c | 32 +++++------------------------- drivers/staging/cx25821/cx25821-alsa.c | 4 ++-- drivers/staging/cx25821/cx25821-core.c | 2 +- include/media/videobuf-dma-sg.h | 20 +++++++++++-------- 13 files changed, 34 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c index 7364b9642d00..4da2a54cb8bd 100644 --- a/drivers/media/common/saa7146_fops.c +++ b/drivers/media/common/saa7146_fops.c @@ -57,7 +57,7 @@ void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q, BUG_ON(in_interrupt()); videobuf_waiton(&buf->vb,0,0); - videobuf_dma_unmap(q, dma); + videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); buf->vb.state = VIDEOBUF_NEEDS_INIT; } diff --git a/drivers/media/video/bt8xx/bttv-risc.c b/drivers/media/video/bt8xx/bttv-risc.c index c24b1c100e13..0fa9f39f37a3 100644 --- a/drivers/media/video/bt8xx/bttv-risc.c +++ b/drivers/media/video/bt8xx/bttv-risc.c @@ -583,7 +583,7 @@ bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf BUG_ON(in_interrupt()); videobuf_waiton(&buf->vb,0,0); - videobuf_dma_unmap(q, dma); + videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); btcx_riscmem_free(btv->c.pci,&buf->bottom); btcx_riscmem_free(btv->c.pci,&buf->top); diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c index 0dde57e96d30..161ae7316c95 100644 --- a/drivers/media/video/cx23885/cx23885-core.c +++ b/drivers/media/video/cx23885/cx23885-core.c @@ -1142,7 +1142,7 @@ void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf) BUG_ON(in_interrupt()); videobuf_waiton(&buf->vb, 0, 0); - videobuf_dma_unmap(q, dma); + videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc); buf->vb.state = VIDEOBUF_NEEDS_INIT; diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c index 33082c96745e..07fe905f6575 100644 --- a/drivers/media/video/cx88/cx88-alsa.c +++ b/drivers/media/video/cx88/cx88-alsa.c @@ -283,7 +283,7 @@ static int dsp_buffer_free(snd_cx88_card_t *chip) BUG_ON(!chip->dma_size); dprintk(2,"Freeing buffer\n"); - videobuf_sg_dma_unmap(&chip->pci->dev, chip->dma_risc); + videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc); videobuf_dma_free(chip->dma_risc); btcx_riscmem_free(chip->pci,&chip->buf->risc); kfree(chip->buf); @@ -409,7 +409,7 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream, if (ret < 0) goto error; - ret = videobuf_sg_dma_map(&chip->pci->dev, dma); + ret = videobuf_dma_map(&chip->pci->dev, dma); if (ret < 0) goto error; diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c index 8b21457111b1..85eb266fb351 100644 --- a/drivers/media/video/cx88/cx88-core.c +++ b/drivers/media/video/cx88/cx88-core.c @@ -218,7 +218,7 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf) BUG_ON(in_interrupt()); videobuf_waiton(&buf->vb,0,0); - 
videobuf_dma_unmap(q, dma); + videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc); buf->vb.state = VIDEOBUF_NEEDS_INIT; diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c index f85b2ed8a2d8..926a5aa6f7f8 100644 --- a/drivers/media/video/omap24xxcam.c +++ b/drivers/media/video/omap24xxcam.c @@ -426,7 +426,7 @@ static void omap24xxcam_vbq_release(struct videobuf_queue *vbq, dma->direction); dma->direction = DMA_NONE; } else { - videobuf_dma_unmap(vbq, videobuf_to_dma(vb)); + videobuf_dma_unmap(vbq->dev, videobuf_to_dma(vb)); videobuf_dma_free(videobuf_to_dma(vb)); } diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index fb242f6cfb1f..5835acf7fa7a 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -276,7 +276,7 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf) * longer in STATE_QUEUED or STATE_ACTIVE */ videobuf_waiton(&buf->vb, 0, 0); - videobuf_dma_unmap(vq, dma); + videobuf_dma_unmap(vq->dev, dma); videobuf_dma_free(dma); for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) { diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c index d3bd82ad010a..5bca2abb31e6 100644 --- a/drivers/media/video/saa7134/saa7134-alsa.c +++ b/drivers/media/video/saa7134/saa7134-alsa.c @@ -630,7 +630,7 @@ static int snd_card_saa7134_hw_params(struct snd_pcm_substream * substream, /* release the old buffer */ if (substream->runtime->dma_area) { saa7134_pgtable_free(dev->pci, &dev->dmasound.pt); - videobuf_sg_dma_unmap(&dev->pci->dev, &dev->dmasound.dma); + videobuf_dma_unmap(&dev->pci->dev, &dev->dmasound.dma); dsp_buffer_free(dev); substream->runtime->dma_area = NULL; } @@ -646,12 +646,12 @@ static int snd_card_saa7134_hw_params(struct snd_pcm_substream * substream, return err; } - if (0 != (err = videobuf_sg_dma_map(&dev->pci->dev, &dev->dmasound.dma))) { + if (0 != (err = videobuf_dma_map(&dev->pci->dev, &dev->dmasound.dma))) { dsp_buffer_free(dev); return err; } if (0 != (err = saa7134_pgtable_alloc(dev->pci,&dev->dmasound.pt))) { - videobuf_sg_dma_unmap(&dev->pci->dev, &dev->dmasound.dma); + videobuf_dma_unmap(&dev->pci->dev, &dev->dmasound.dma); dsp_buffer_free(dev); return err; } @@ -660,7 +660,7 @@ static int snd_card_saa7134_hw_params(struct snd_pcm_substream * substream, dev->dmasound.dma.sglen, 0))) { saa7134_pgtable_free(dev->pci, &dev->dmasound.pt); - videobuf_sg_dma_unmap(&dev->pci->dev, &dev->dmasound.dma); + videobuf_dma_unmap(&dev->pci->dev, &dev->dmasound.dma); dsp_buffer_free(dev); return err; } @@ -696,7 +696,7 @@ static int snd_card_saa7134_hw_free(struct snd_pcm_substream * substream) if (substream->runtime->dma_area) { saa7134_pgtable_free(dev->pci, &dev->dmasound.pt); - videobuf_sg_dma_unmap(&dev->pci->dev, &dev->dmasound.dma); + videobuf_dma_unmap(&dev->pci->dev, &dev->dmasound.dma); dsp_buffer_free(dev); substream->runtime->dma_area = NULL; } diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c index 90f231881297..40bc635e8a3f 100644 --- a/drivers/media/video/saa7134/saa7134-core.c +++ b/drivers/media/video/saa7134/saa7134-core.c @@ -256,7 +256,7 @@ void saa7134_dma_free(struct videobuf_queue *q,struct saa7134_buf *buf) BUG_ON(in_interrupt()); videobuf_waiton(&buf->vb,0,0); - videobuf_dma_unmap(q, dma); + videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); buf->vb.state = VIDEOBUF_NEEDS_INIT; } diff --git 
a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c index a9b109178578..17b1f89e8133 100644 --- a/drivers/media/video/videobuf-dma-sg.c +++ b/drivers/media/video/videobuf-dma-sg.c @@ -235,7 +235,7 @@ int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction, } EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay); -int videobuf_dma_map(struct videobuf_queue *q, struct videobuf_dmabuf *dma) +int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma) { MAGIC_CHECK(dma->magic, MAGIC_DMABUF); BUG_ON(0 == dma->nr_pages); @@ -263,7 +263,7 @@ int videobuf_dma_map(struct videobuf_queue *q, struct videobuf_dmabuf *dma) return -ENOMEM; } if (!dma->bus_addr) { - dma->sglen = dma_map_sg(q->dev, dma->sglist, + dma->sglen = dma_map_sg(dev, dma->sglist, dma->nr_pages, dma->direction); if (0 == dma->sglen) { printk(KERN_WARNING @@ -279,14 +279,14 @@ int videobuf_dma_map(struct videobuf_queue *q, struct videobuf_dmabuf *dma) } EXPORT_SYMBOL_GPL(videobuf_dma_map); -int videobuf_dma_unmap(struct videobuf_queue *q, struct videobuf_dmabuf *dma) +int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma) { MAGIC_CHECK(dma->magic, MAGIC_DMABUF); if (!dma->sglen) return 0; - dma_unmap_sg(q->dev, dma->sglist, dma->sglen, dma->direction); + dma_unmap_sg(dev, dma->sglist, dma->sglen, dma->direction); vfree(dma->sglist); dma->sglist = NULL; @@ -322,28 +322,6 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free); /* --------------------------------------------------------------------- */ -int videobuf_sg_dma_map(struct device *dev, struct videobuf_dmabuf *dma) -{ - struct videobuf_queue q; - - q.dev = dev; - - return videobuf_dma_map(&q, dma); -} -EXPORT_SYMBOL_GPL(videobuf_sg_dma_map); - -int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma) -{ - struct videobuf_queue q; - - q.dev = dev; - - return videobuf_dma_unmap(&q, dma); -} -EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap); - -/* --------------------------------------------------------------------- */ - static void videobuf_vm_open(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; @@ -520,7 +498,7 @@ static int __videobuf_iolock(struct videobuf_queue *q, default: BUG(); } - err = videobuf_dma_map(q, &mem->dma); + err = videobuf_dma_map(q->dev, &mem->dma); if (0 != err) return err; diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/staging/cx25821/cx25821-alsa.c index 1798975a69bd..4ce8790b05ec 100644 --- a/drivers/staging/cx25821/cx25821-alsa.c +++ b/drivers/staging/cx25821/cx25821-alsa.c @@ -331,7 +331,7 @@ static int dsp_buffer_free(struct cx25821_audio_dev *chip) BUG_ON(!chip->dma_size); dprintk(2, "Freeing buffer\n"); - videobuf_sg_dma_unmap(&chip->pci->dev, chip->dma_risc); + videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc); videobuf_dma_free(chip->dma_risc); btcx_riscmem_free(chip->pci, &chip->buf->risc); kfree(chip->buf); @@ -470,7 +470,7 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream, if (ret < 0) goto error; - ret = videobuf_sg_dma_map(&chip->pci->dev, dma); + ret = videobuf_dma_map(&chip->pci->dev, dma); if (ret < 0) goto error; diff --git a/drivers/staging/cx25821/cx25821-core.c b/drivers/staging/cx25821/cx25821-core.c index be44195783d4..c487c19256b9 100644 --- a/drivers/staging/cx25821/cx25821-core.c +++ b/drivers/staging/cx25821/cx25821-core.c @@ -1320,7 +1320,7 @@ void cx25821_free_buffer(struct videobuf_queue *q, struct cx25821_buffer *buf) BUG_ON(in_interrupt()); videobuf_waiton(&buf->vb, 0, 0); - 
videobuf_dma_unmap(q, dma); + videobuf_dma_unmap(q->dev, dma); videobuf_dma_free(dma); btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc); buf->vb.state = VIDEOBUF_NEEDS_INIT; diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index a195f3b9c00a..80130100e450 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h @@ -87,6 +87,16 @@ struct videobuf_dma_sg_memory { struct videobuf_dmabuf dma; }; +/* + * Scatter-gather DMA buffer API. + * + * These functions provide a simple way to create a page list and a + * scatter-gather list from a kernel, userspace of physical address and map the + * memory for DMA operation. + * + * Despite the name, this is totally unrelated to videobuf, except that + * videobuf-dma-sg uses the same API internally. + */ void videobuf_dma_init(struct videobuf_dmabuf *dma); int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction, unsigned long data, unsigned long size); @@ -96,8 +106,8 @@ int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction, dma_addr_t addr, int nr_pages); int videobuf_dma_free(struct videobuf_dmabuf *dma); -int videobuf_dma_map(struct videobuf_queue *q, struct videobuf_dmabuf *dma); -int videobuf_dma_unmap(struct videobuf_queue *q, struct videobuf_dmabuf *dma); +int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma); +int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma); struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf); void *videobuf_sg_alloc(size_t size); @@ -111,11 +121,5 @@ void videobuf_queue_sg_init(struct videobuf_queue *q, unsigned int msize, void *priv); -/*FIXME: these variants are used only on *-alsa code, where videobuf is - * used without queue - */ -int videobuf_sg_dma_map(struct device *dev, struct videobuf_dmabuf *dma); -int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma); - #endif /* _VIDEOBUF_DMA_SG_H */ -- cgit v1.2.3 From 7181772d8915e6025ee4f2f6c5b16064689646f0 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 11 May 2010 10:36:32 -0300 Subject: V4L/DVB: videobuf: Don't export videobuf_(vmalloc|pages)_to_sg Those functions are only called inside videobuf-dma-sg.c, make them static. Signed-off-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/videobuf-dma-sg.c | 18 ++++++++++++++---- include/media/videobuf-dma-sg.h | 17 ----------------- 2 files changed, 14 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c index 17b1f89e8133..8924e51408c1 100644 --- a/drivers/media/video/videobuf-dma-sg.c +++ b/drivers/media/video/videobuf-dma-sg.c @@ -57,7 +57,13 @@ MODULE_LICENSE("GPL"); /* --------------------------------------------------------------------- */ -struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages) +/* + * Return a scatterlist for some page-aligned vmalloc()'ed memory + * block (NULL on errors). Memory for the scatterlist is allocated + * using kmalloc. The caller must free the memory. + */ +static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt, + int nr_pages) { struct scatterlist *sglist; struct page *pg; @@ -81,10 +87,14 @@ err: vfree(sglist); return NULL; } -EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg); -struct scatterlist *videobuf_pages_to_sg(struct page **pages, int nr_pages, - int offset) +/* + * Return a scatterlist for a an array of userpages (NULL on errors). 
+ * Memory for the scatterlist is allocated using kmalloc. The caller + * must free the memory. + */ +static struct scatterlist *videobuf_pages_to_sg(struct page **pages, + int nr_pages, int offset) { struct scatterlist *sglist; int i; diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index 80130100e450..913860e9c845 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h @@ -24,23 +24,6 @@ /* --------------------------------------------------------------------- */ -/* - * Return a scatterlist for some page-aligned vmalloc()'ed memory - * block (NULL on errors). Memory for the scatterlist is allocated - * using kmalloc. The caller must free the memory. - */ -struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages); - -/* - * Return a scatterlist for a an array of userpages (NULL on errors). - * Memory for the scatterlist is allocated using kmalloc. The caller - * must free the memory. - */ -struct scatterlist *videobuf_pages_to_sg(struct page **pages, int nr_pages, - int offset); - -/* --------------------------------------------------------------------- */ - /* * A small set of helper functions to manage buffers (both userland * and kernel) for DMA. -- cgit v1.2.3 From 959794ddc05ab6fbcd458bc093e7f0b92633d052 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 11 May 2010 10:36:33 -0300 Subject: V4L/DVB: videobuf: Remove videobuf_mapping start and end fields The fields are assigned but never used, remove them. Signed-off-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/videobuf-dma-contig.c | 2 -- drivers/media/video/videobuf-dma-sg.c | 2 -- drivers/media/video/videobuf-vmalloc.c | 2 -- include/media/videobuf-core.h | 2 -- 4 files changed, 8 deletions(-) (limited to 'include') diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c index 98e292c35185..372b87efcd05 100644 --- a/drivers/media/video/videobuf-dma-contig.c +++ b/drivers/media/video/videobuf-dma-contig.c @@ -280,8 +280,6 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, return -ENOMEM; buf->map = map; - map->start = vma->vm_start; - map->end = vma->vm_end; map->q = q; buf->baddr = vma->vm_start; diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c index 8924e51408c1..2d64040594b8 100644 --- a/drivers/media/video/videobuf-dma-sg.c +++ b/drivers/media/video/videobuf-dma-sg.c @@ -608,8 +608,6 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, } map->count = 1; - map->start = vma->vm_start; - map->end = vma->vm_end; map->q = q; vma->vm_ops = &videobuf_vm_ops; vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c index cf5be6bfd745..f0d7cb8d4c72 100644 --- a/drivers/media/video/videobuf-vmalloc.c +++ b/drivers/media/video/videobuf-vmalloc.c @@ -245,8 +245,6 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, return -ENOMEM; buf->map = map; - map->start = vma->vm_start; - map->end = vma->vm_end; map->q = q; buf->baddr = vma->vm_start; diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h index a157cd166e6e..f2c41cebf453 100644 --- a/include/media/videobuf-core.h +++ b/include/media/videobuf-core.h @@ -54,8 +54,6 @@ struct videobuf_queue; struct videobuf_mapping { unsigned int count; - unsigned long start; - unsigned long end; struct videobuf_queue *q; }; -- cgit v1.2.3 From 
bb6dbe74806a17bcec8396c57ca7fd9a889e3b27 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 11 May 2010 10:36:34 -0300 Subject: V4L/DVB: videobuf: Rename vmalloc fields to vaddr The videobuf_dmabuf and videobuf_vmalloc_memory fields have a vmalloc field to store the kernel virtual address of vmalloc'ed buffers. Rename the field to vaddr. Signed-off-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/cx88/cx88-alsa.c | 2 +- drivers/media/video/saa7134/saa7134-alsa.c | 2 +- drivers/media/video/videobuf-dma-sg.c | 18 +++++++++--------- drivers/media/video/videobuf-vmalloc.c | 30 +++++++++++++++--------------- drivers/staging/cx25821/cx25821-alsa.c | 2 +- include/media/videobuf-dma-sg.h | 2 +- include/media/videobuf-vmalloc.h | 2 +- 7 files changed, 29 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c index ba499d04de4d..9209d5b87e04 100644 --- a/drivers/media/video/cx88/cx88-alsa.c +++ b/drivers/media/video/cx88/cx88-alsa.c @@ -426,7 +426,7 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream, chip->buf = buf; chip->dma_risc = dma; - substream->runtime->dma_area = chip->dma_risc->vmalloc; + substream->runtime->dma_area = chip->dma_risc->vaddr; substream->runtime->dma_bytes = chip->dma_size; substream->runtime->dma_addr = 0; return 0; diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c index 5bca2abb31e6..68b7e8d10ded 100644 --- a/drivers/media/video/saa7134/saa7134-alsa.c +++ b/drivers/media/video/saa7134/saa7134-alsa.c @@ -669,7 +669,7 @@ static int snd_card_saa7134_hw_params(struct snd_pcm_substream * substream, byte, but it doesn't work. So I allocate the DMA using the V4L functions, and force ALSA to use that as the DMA area */ - substream->runtime->dma_area = dev->dmasound.dma.vmalloc; + substream->runtime->dma_area = dev->dmasound.dma.vaddr; substream->runtime->dma_bytes = dev->dmasound.bufsize; substream->runtime->dma_addr = 0; diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c index 2d64040594b8..06f9a9c2a39a 100644 --- a/drivers/media/video/videobuf-dma-sg.c +++ b/drivers/media/video/videobuf-dma-sg.c @@ -211,17 +211,17 @@ int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction, dprintk(1, "init kernel [%d pages]\n", nr_pages); dma->direction = direction; - dma->vmalloc = vmalloc_32(nr_pages << PAGE_SHIFT); - if (NULL == dma->vmalloc) { + dma->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT); + if (NULL == dma->vaddr) { dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages); return -ENOMEM; } dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n", - (unsigned long)dma->vmalloc, + (unsigned long)dma->vaddr, nr_pages << PAGE_SHIFT); - memset(dma->vmalloc, 0, nr_pages << PAGE_SHIFT); + memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT); dma->nr_pages = nr_pages; return 0; @@ -254,8 +254,8 @@ int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma) dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages, dma->offset); } - if (dma->vmalloc) { - dma->sglist = videobuf_vmalloc_to_sg(dma->vmalloc, + if (dma->vaddr) { + dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr, dma->nr_pages); } if (dma->bus_addr) { @@ -319,8 +319,8 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma) dma->pages = NULL; } - vfree(dma->vmalloc); - dma->vmalloc = NULL; + vfree(dma->vaddr); + dma->vaddr = NULL; if (dma->bus_addr) dma->bus_addr = 0; @@ -444,7 +444,7 @@ 
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf) MAGIC_CHECK(mem->magic, MAGIC_SG_MEM); - return mem->dma.vmalloc; + return mem->dma.vaddr; } static int __videobuf_iolock(struct videobuf_queue *q, diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c index f0d7cb8d4c72..e7fe31d54f07 100644 --- a/drivers/media/video/videobuf-vmalloc.c +++ b/drivers/media/video/videobuf-vmalloc.c @@ -102,10 +102,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma) called with IRQ's disabled */ dprintk(1, "%s: buf[%d] freeing (%p)\n", - __func__, i, mem->vmalloc); + __func__, i, mem->vaddr); - vfree(mem->vmalloc); - mem->vmalloc = NULL; + vfree(mem->vaddr); + mem->vaddr = NULL; } q->bufs[i]->map = NULL; @@ -170,7 +170,7 @@ static int __videobuf_iolock(struct videobuf_queue *q, dprintk(1, "%s memory method MMAP\n", __func__); /* All handling should be done by __videobuf_mmap_mapper() */ - if (!mem->vmalloc) { + if (!mem->vaddr) { printk(KERN_ERR "memory is not alloced/mmapped.\n"); return -EINVAL; } @@ -189,13 +189,13 @@ static int __videobuf_iolock(struct videobuf_queue *q, * read() method. */ - mem->vmalloc = vmalloc_user(pages); - if (!mem->vmalloc) { + mem->vaddr = vmalloc_user(pages); + if (!mem->vaddr) { printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); return -ENOMEM; } dprintk(1, "vmalloc is at addr %p (%d pages)\n", - mem->vmalloc, pages); + mem->vaddr, pages); #if 0 int rc; @@ -254,18 +254,18 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM); pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); - mem->vmalloc = vmalloc_user(pages); - if (!mem->vmalloc) { + mem->vaddr = vmalloc_user(pages); + if (!mem->vaddr) { printk(KERN_ERR "vmalloc (%d pages) failed\n", pages); goto error; } - dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vmalloc, pages); + dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages); /* Try to remap memory */ - retval = remap_vmalloc_range(vma, mem->vmalloc, 0); + retval = remap_vmalloc_range(vma, mem->vaddr, 0); if (retval < 0) { printk(KERN_ERR "mmap: remap failed with error %d. 
", retval); - vfree(mem->vmalloc); + vfree(mem->vaddr); goto error; } @@ -317,7 +317,7 @@ void *videobuf_to_vmalloc(struct videobuf_buffer *buf) BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM); - return mem->vmalloc; + return mem->vaddr; } EXPORT_SYMBOL_GPL(videobuf_to_vmalloc); @@ -339,8 +339,8 @@ void videobuf_vmalloc_free(struct videobuf_buffer *buf) MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM); - vfree(mem->vmalloc); - mem->vmalloc = NULL; + vfree(mem->vaddr); + mem->vaddr = NULL; return; } diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/staging/cx25821/cx25821-alsa.c index 0771a6a313ad..a43b18816fa5 100644 --- a/drivers/staging/cx25821/cx25821-alsa.c +++ b/drivers/staging/cx25821/cx25821-alsa.c @@ -492,7 +492,7 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream, chip->buf = buf; chip->dma_risc = dma; - substream->runtime->dma_area = chip->dma_risc->vmalloc; + substream->runtime->dma_area = chip->dma_risc->vaddr; substream->runtime->dma_bytes = chip->dma_size; substream->runtime->dma_addr = 0; diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index 913860e9c845..97e07f46a0fa 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h @@ -51,7 +51,7 @@ struct videobuf_dmabuf { struct page **pages; /* for kernel buffers */ - void *vmalloc; + void *vaddr; /* for overlay buffers (pci-pci dma) */ dma_addr_t bus_addr; diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h index 851eb1a2ff2a..e19403c18dae 100644 --- a/include/media/videobuf-vmalloc.h +++ b/include/media/videobuf-vmalloc.h @@ -22,7 +22,7 @@ struct videobuf_vmalloc_memory { u32 magic; - void *vmalloc; + void *vaddr; /* remap_vmalloc_range seems to need to run * after mmap() on some cases */ -- cgit v1.2.3 From 1b4e21c4f62eae6bdcb3e7bfdfc52171a24f3689 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 17 Jun 2010 11:11:51 -0300 Subject: V4L/DVB: uvcvideo: Define control information bits using macros Use the macros instead of hardcoding numerical constants for the controls information bitfield. Signed-off-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/uvc/uvc_ctrl.c | 12 ++++++------ include/linux/usb/video.h | 7 +++++++ 2 files changed, 13 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c index bd72100a21dd..fa06cf512ecf 100644 --- a/drivers/media/video/uvc/uvc_ctrl.c +++ b/drivers/media/video/uvc/uvc_ctrl.c @@ -1324,9 +1324,8 @@ static void uvc_ctrl_add_ctrl(struct uvc_device *dev, /* Check if the device control information and length match * the user supplied information. 
*/ - __u32 flags; __le16 size; - __u8 inf; + __u8 _info; ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id, dev->intfnum, info->selector, (__u8 *)&size, 2); @@ -1345,7 +1344,7 @@ static void uvc_ctrl_add_ctrl(struct uvc_device *dev, } ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, - dev->intfnum, info->selector, &inf, 1); + dev->intfnum, info->selector, &_info, 1); if (ret < 0) { uvc_trace(UVC_TRACE_CONTROL, "GET_INFO failed on control %pUl/%u (%d).\n", @@ -1353,9 +1352,10 @@ static void uvc_ctrl_add_ctrl(struct uvc_device *dev, return; } - flags = info->flags; - if (((flags & UVC_CONTROL_GET_CUR) && !(inf & (1 << 0))) || - ((flags & UVC_CONTROL_SET_CUR) && !(inf & (1 << 1)))) { + if (((info->flags & UVC_CONTROL_GET_CUR) && + !(_info & UVC_CONTROL_CAP_GET)) || + ((info->flags & UVC_CONTROL_SET_CUR) && + !(_info & UVC_CONTROL_CAP_SET))) { uvc_trace(UVC_TRACE_CONTROL, "Control %pUl/%u flags " "don't match supported operations.\n", info->entity, info->selector); diff --git a/include/linux/usb/video.h b/include/linux/usb/video.h index be436d9ee479..2d5b7fc6a265 100644 --- a/include/linux/usb/video.h +++ b/include/linux/usb/video.h @@ -160,5 +160,12 @@ #define UVC_STATUS_TYPE_CONTROL 1 #define UVC_STATUS_TYPE_STREAMING 2 +/* 4.1.2. Control Capabilities */ +#define UVC_CONTROL_CAP_GET (1 << 0) +#define UVC_CONTROL_CAP_SET (1 << 1) +#define UVC_CONTROL_CAP_DISABLED (1 << 2) +#define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3) +#define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4) + #endif /* __LINUX_USB_VIDEO_H */ -- cgit v1.2.3 From 7751bdb3a095ad32dd4fcff3443cf8dd4cb1e748 Mon Sep 17 00:00:00 2001 From: Sripathi Kodi Date: Fri, 4 Jun 2010 13:41:26 +0000 Subject: 9p: readdir implementation for 9p2000.L This patch implements the kernel part of readdir() implementation for 9p2000.L Change from V3: Instead of inode, server now sends qids for each dirent SYNOPSIS size[4] Treaddir tag[2] fid[4] offset[8] count[4] size[4] Rreaddir tag[2] count[4] data[count] DESCRIPTION The readdir request asks the server to read the directory specified by 'fid' at an offset specified by 'offset' and return as many dirent structures as possible that fit into count bytes. Each dirent structure is laid out as follows. qid.type[1] the type of the file (directory, etc.), represented as a bit vector corresponding to the high 8 bits of the file's mode word. qid.vers[4] version number for given path qid.path[8] the file server's unique identification for the file offset[8] offset into the next dirent. type[1] type of this directory entry. name[256] name of this directory entry. This patch adds v9fs_dir_readdir_dotl() as the readdir() call for 9p2000.L. This function sends P9_TREADDIR command to the server. In response the server sends a buffer filled with dirent structures. This is different from the existing v9fs_dir_readdir() call which receives stat structures from the server. This results in significant speedup of readdir() on large directories. For example, doing 'ls >/dev/null' on a directory with 10000 files on my laptop takes 1.088 seconds with the existing code, but only takes 0.339 seconds with the new readdir. 
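As an illustration of how the new calls fit together (a sketch, not part of the patch itself), a consumer of the Rreaddir payload reads the returned byte buffer one dirent record at a time. In the fragment below, fid, buf, buflen and offset stand in for state that v9fs_dir_readdir_dotl() keeps in its struct p9_rdir, and error handling is omitted:

	/* Fetch one Rreaddir buffer and walk the packed dirent records.
	 * p9_client_readdir() returns the number of payload bytes received,
	 * p9dirent_read() returns how many bytes one record consumed.
	 */
	struct p9_dirent dent;
	int n, head = 0;

	n = p9_client_readdir(fid, buf, buflen, offset);
	while (head < n) {
		head += p9dirent_read(buf + head, n - head, &dent,
				      fid->clnt->proto_version);
		pr_debug("dirent %s type %d at offset %llu\n", dent.d_name,
			 dent.d_type, (unsigned long long)dent.d_off);
		offset = dent.d_off;	/* where the next Treaddir resumes */
	}

The same loop, with filldir() in place of pr_debug(), is what the v9fs_dir_readdir_dotl() added by this patch implements.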
Signed-off-by: Sripathi Kodi Reviewed-by: Aneesh Kumar K.V Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_dir.c | 134 ++++++++++++++++++++++++++++++++++++++++++------ include/net/9p/9p.h | 17 ++++++ include/net/9p/client.h | 18 +++++++ net/9p/client.c | 47 +++++++++++++++++ net/9p/protocol.c | 27 ++++++++++ 5 files changed, 227 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index 36d961f342af..16c8a2a98c1b 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -87,29 +87,19 @@ static void p9stat_init(struct p9_wstat *stbuf) } /** - * v9fs_dir_readdir - read a directory + * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir * @filp: opened file structure - * @dirent: directory structure ??? - * @filldir: function to populate directory structure ??? + * @buflen: Length in bytes of buffer to allocate * */ -static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int v9fs_alloc_rdir_buf(struct file *filp, int buflen) { - int over; - struct p9_wstat st; - int err = 0; - struct p9_fid *fid; - int buflen; - int reclen = 0; struct p9_rdir *rdir; + struct p9_fid *fid; + int err = 0; - P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); fid = filp->private_data; - - buflen = fid->clnt->msize - P9_IOHDRSZ; - - /* allocate rdir on demand */ if (!fid->rdir) { rdir = kmalloc(sizeof(struct p9_rdir) + buflen, GFP_KERNEL); @@ -128,6 +118,36 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) spin_unlock(&filp->f_dentry->d_lock); kfree(rdir); } +exit: + return err; +} + +/** + * v9fs_dir_readdir - read a directory + * @filp: opened file structure + * @dirent: directory structure ??? + * @filldir: function to populate directory structure ??? 
+ * + */ + +static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) +{ + int over; + struct p9_wstat st; + int err = 0; + struct p9_fid *fid; + int buflen; + int reclen = 0; + struct p9_rdir *rdir; + + P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); + fid = filp->private_data; + + buflen = fid->clnt->msize - P9_IOHDRSZ; + + err = v9fs_alloc_rdir_buf(filp, buflen); + if (err) + goto exit; rdir = (struct p9_rdir *) fid->rdir; err = mutex_lock_interruptible(&rdir->mutex); @@ -176,6 +196,88 @@ exit: return err; } +/** + * v9fs_dir_readdir_dotl - read a directory + * @filp: opened file structure + * @dirent: buffer to fill dirent structures + * @filldir: function to populate dirent structures + * + */ +static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent, + filldir_t filldir) +{ + int over; + int err = 0; + struct p9_fid *fid; + int buflen; + struct p9_rdir *rdir; + struct p9_dirent curdirent; + u64 oldoffset = 0; + + P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); + fid = filp->private_data; + + buflen = fid->clnt->msize - P9_READDIRHDRSZ; + + err = v9fs_alloc_rdir_buf(filp, buflen); + if (err) + goto exit; + rdir = (struct p9_rdir *) fid->rdir; + + err = mutex_lock_interruptible(&rdir->mutex); + if (err) + return err; + + while (err == 0) { + if (rdir->tail == rdir->head) { + err = p9_client_readdir(fid, rdir->buf, buflen, + filp->f_pos); + if (err <= 0) + goto unlock_and_exit; + + rdir->head = 0; + rdir->tail = err; + } + + while (rdir->head < rdir->tail) { + + err = p9dirent_read(rdir->buf + rdir->head, + buflen - rdir->head, &curdirent, + fid->clnt->proto_version); + if (err < 0) { + P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err); + err = -EIO; + goto unlock_and_exit; + } + + /* d_off in dirent structure tracks the offset into + * the next dirent in the dir. However, filldir() + * expects offset into the current dirent. Hence + * while calling filldir send the offset from the + * previous dirent structure. 
+ */ + over = filldir(dirent, curdirent.d_name, + strlen(curdirent.d_name), + oldoffset, v9fs_qid2ino(&curdirent.qid), + curdirent.d_type); + oldoffset = curdirent.d_off; + + if (over) { + err = 0; + goto unlock_and_exit; + } + + filp->f_pos = curdirent.d_off; + rdir->head += err; + } + } + +unlock_and_exit: + mutex_unlock(&rdir->mutex); +exit: + return err; +} + /** * v9fs_dir_release - close a directory @@ -207,7 +309,7 @@ const struct file_operations v9fs_dir_operations = { const struct file_operations v9fs_dir_operations_dotl = { .read = generic_read_dir, .llseek = generic_file_llseek, - .readdir = v9fs_dir_readdir, + .readdir = v9fs_dir_readdir_dotl, .open = v9fs_file_open, .release = v9fs_dir_release, }; diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 156c26bb8bd7..f1b0b310265d 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -133,6 +133,8 @@ enum p9_msg_t { P9_RSTATFS, P9_TRENAME = 20, P9_RRENAME, + P9_TREADDIR = 40, + P9_RREADDIR, P9_TVERSION = 100, P9_RVERSION, P9_TAUTH = 102, @@ -275,6 +277,9 @@ enum p9_qid_t { /* ample room for Twrite/Rread header */ #define P9_IOHDRSZ 24 +/* Room for readdir header */ +#define P9_READDIRHDRSZ 24 + /** * struct p9_str - length prefixed string type * @len: length of the string @@ -485,6 +490,18 @@ struct p9_rwrite { u32 count; }; +struct p9_treaddir { + u32 fid; + u64 offset; + u32 count; +}; + +struct p9_rreaddir { + u32 count; + u8 *data; +}; + + struct p9_tclunk { u32 fid; }; diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 7dd3ed85c782..2ec93685e6db 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -195,6 +195,21 @@ struct p9_fid { struct list_head dlist; /* list of all fids attached to a dentry */ }; +/** + * struct p9_dirent - directory entry structure + * @qid: The p9 server qid for this dirent + * @d_off: offset to the next dirent + * @d_type: type of file + * @d_name: file name + */ + +struct p9_dirent { + struct p9_qid qid; + u64 d_off; + unsigned char d_type; + char d_name[256]; +}; + int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb); int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name); int p9_client_version(struct p9_client *); @@ -217,6 +232,9 @@ int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, u32 count); int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, u64 offset, u32 count); +int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset); +int p9dirent_read(char *buf, int len, struct p9_dirent *dirent, + int proto_version); struct p9_wstat *p9_client_stat(struct p9_fid *fid); int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst); diff --git a/net/9p/client.c b/net/9p/client.c index 37c8da07a80b..a80357483a47 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1432,3 +1432,50 @@ error: } EXPORT_SYMBOL(p9_client_rename); +int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) +{ + int err, rsize, total; + struct p9_client *clnt; + struct p9_req_t *req; + char *dataptr; + + P9_DPRINTK(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", + fid->fid, (long long unsigned) offset, count); + + err = 0; + clnt = fid->clnt; + total = 0; + + rsize = fid->iounit; + if (!rsize || rsize > clnt->msize-P9_READDIRHDRSZ) + rsize = clnt->msize - P9_READDIRHDRSZ; + + if (count < rsize) + rsize = count; + + req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid, offset, rsize); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto 
error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr); + if (err) { + p9pdu_dump(1, req->rc); + goto free_and_error; + } + + P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count); + + if (data) + memmove(data, dataptr, count); + + p9_free_req(clnt, req); + return count; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_readdir); diff --git a/net/9p/protocol.c b/net/9p/protocol.c index 149f82160130..b645c8263538 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c @@ -580,3 +580,30 @@ void p9pdu_reset(struct p9_fcall *pdu) pdu->offset = 0; pdu->size = 0; } + +int p9dirent_read(char *buf, int len, struct p9_dirent *dirent, + int proto_version) +{ + struct p9_fcall fake_pdu; + int ret; + char *nameptr; + + fake_pdu.size = len; + fake_pdu.capacity = len; + fake_pdu.sdata = buf; + fake_pdu.offset = 0; + + ret = p9pdu_readf(&fake_pdu, proto_version, "Qqbs", &dirent->qid, + &dirent->d_off, &dirent->d_type, &nameptr); + if (ret) { + P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret); + p9pdu_dump(1, &fake_pdu); + goto out; + } + + strcpy(dirent->d_name, nameptr); + +out: + return fake_pdu.offset; +} +EXPORT_SYMBOL(p9dirent_read); -- cgit v1.2.3 From f085312204f384a0277a66c3c48ba8f9edcd58f2 Mon Sep 17 00:00:00 2001 From: Sripathi Kodi Date: Mon, 12 Jul 2010 20:07:23 +0530 Subject: 9p: getattr client implementation for 9P2000.L protocol. SYNOPSIS size[4] Tgetattr tag[2] fid[4] request_mask[8] size[4] Rgetattr tag[2] lstat[n] DESCRIPTION The getattr transaction inquires about the file identified by fid. request_mask is a bit mask that specifies which fields of the stat structure is the client interested in. The reply will contain a machine-independent directory entry, laid out as follows: st_result_mask[8] Bit mask that indicates which fields in the stat structure have been populated by the server qid.type[1] the type of the file (directory, etc.), represented as a bit vector corresponding to the high 8 bits of the file's mode word. 
qid.vers[4] version number for given path qid.path[8] the file server's unique identification for the file st_mode[4] Permission and flags st_uid[4] User id of owner st_gid[4] Group ID of owner st_nlink[8] Number of hard links st_rdev[8] Device ID (if special file) st_size[8] Size, in bytes st_blksize[8] Block size for file system IO st_blocks[8] Number of file system blocks allocated st_atime_sec[8] Time of last access, seconds st_atime_nsec[8] Time of last access, nanoseconds st_mtime_sec[8] Time of last modification, seconds st_mtime_nsec[8] Time of last modification, nanoseconds st_ctime_sec[8] Time of last status change, seconds st_ctime_nsec[8] Time of last status change, nanoseconds st_btime_sec[8] Time of creation (birth) of file, seconds st_btime_nsec[8] Time of creation (birth) of file, nanoseconds st_gen[8] Inode generation st_data_version[8] Data version number request_mask and result_mask bit masks contain the following bits #define P9_STATS_MODE 0x00000001ULL #define P9_STATS_NLINK 0x00000002ULL #define P9_STATS_UID 0x00000004ULL #define P9_STATS_GID 0x00000008ULL #define P9_STATS_RDEV 0x00000010ULL #define P9_STATS_ATIME 0x00000020ULL #define P9_STATS_MTIME 0x00000040ULL #define P9_STATS_CTIME 0x00000080ULL #define P9_STATS_INO 0x00000100ULL #define P9_STATS_SIZE 0x00000200ULL #define P9_STATS_BLOCKS 0x00000400ULL #define P9_STATS_BTIME 0x00000800ULL #define P9_STATS_GEN 0x00001000ULL #define P9_STATS_DATA_VERSION 0x00002000ULL #define P9_STATS_BASIC 0x000007ffULL #define P9_STATS_ALL 0x00003fffULL This patch implements the client side of getattr implementation for 9P2000.L. It introduces a new structure p9_stat_dotl for getting Linux stat information along with QID. The data layout is similar to stat structure in Linux user space with the following major differences: inode (st_ino) is not part of data. Instead qid is. device (st_dev) is not part of data because this doesn't make sense on the client. All time variables are 64 bit wide on the wire. The kernel seems to use 32 bit variables for these variables. However, some of the architectures have used 64 bit variables and glibc exposes 64 bit variables to user space on some architectures. Hence to be on the safer side we have made these 64 bit in the protocol. Refer to the comments in include/asm-generic/stat.h There are some additional fields: st_btime_sec, st_btime_nsec, st_gen, st_data_version apart from the bitmask, st_result_mask. The bit mask is filled by the server to indicate which stat fields have been populated by the server. Currently there is no clean way for the server to obtain these additional fields, so it sends back just the basic fields. 
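For illustration only, a minimal sketch of how a caller holding a fid uses the new interface (error handling abbreviated; the returned structure is kmalloc'ed and the caller must free it):

	struct p9_stat_dotl *st;

	st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
	if (IS_ERR(st))
		return PTR_ERR(st);

	/* Only trust fields the server marked as valid in st_result_mask. */
	if (st->st_result_mask & P9_STATS_SIZE)
		P9_DPRINTK(P9_DEBUG_VFS, "size %llu\n",
			   (unsigned long long)st->st_size);

	kfree(st);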
Signed-off-by: Sripathi Kodi Signed-off-by: Eric Van Hensbegren --- fs/9p/v9fs_vfs.h | 1 + fs/9p/vfs_inode.c | 177 +++++++++++++++++++++++++++++++++++++++++++----- fs/9p/vfs_super.c | 43 +++++++----- include/net/9p/9p.h | 44 ++++++++++++ include/net/9p/client.h | 3 + net/9p/client.c | 59 ++++++++++++++++ net/9p/protocol.c | 28 ++++++++ 7 files changed, 321 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index 32ef4009d030..f47c6bbb01b3 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -55,6 +55,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode); void v9fs_clear_inode(struct inode *inode); ino_t v9fs_qid2ino(struct p9_qid *qid); void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); +void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *); int v9fs_dir_release(struct inode *inode, struct file *filp); int v9fs_file_open(struct inode *inode, struct file *file); void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat); diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 4331b3b5ee1c..afcb8d889382 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -396,23 +396,14 @@ void v9fs_clear_inode(struct inode *inode) #endif } -/** - * v9fs_inode_from_fid - populate an inode by issuing a attribute request - * @v9ses: session information - * @fid: fid to issue attribute request for - * @sb: superblock on which to create inode - * - */ - static struct inode * -v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, +v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb) { int err, umode; - struct inode *ret; + struct inode *ret = NULL; struct p9_wstat *st; - ret = NULL; st = p9_client_stat(fid); if (IS_ERR(st)) return ERR_CAST(st); @@ -433,15 +424,62 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, #endif p9stat_free(st); kfree(st); - return ret; - error: p9stat_free(st); kfree(st); return ERR_PTR(err); } +static struct inode * +v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb) +{ + struct inode *ret = NULL; + int err; + struct p9_stat_dotl *st; + + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); + if (IS_ERR(st)) + return ERR_CAST(st); + + ret = v9fs_get_inode(sb, st->st_mode); + if (IS_ERR(ret)) { + err = PTR_ERR(ret); + goto error; + } + + v9fs_stat2inode_dotl(st, ret); + ret->i_ino = v9fs_qid2ino(&st->qid); +#ifdef CONFIG_9P_FSCACHE + v9fs_vcookie_set_qid(ret, &st->qid); + v9fs_cache_inode_get_cookie(ret); +#endif + kfree(st); + return ret; +error: + kfree(st); + return ERR_PTR(err); +} + +/** + * v9fs_inode_from_fid - Helper routine to populate an inode by + * issuing a attribute request + * @v9ses: session information + * @fid: fid to issue attribute request for + * @sb: superblock on which to create inode + * + */ +static inline struct inode * +v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb) +{ + if (v9fs_proto_dotl(v9ses)) + return v9fs_inode_dotl(v9ses, fid, sb); + else + return v9fs_inode(v9ses, fid, sb); +} + /** * v9fs_remove - helper function to remove files and directories * @dir: directory inode that is being deleted @@ -853,6 +891,42 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, return 0; } +static int +v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + int err; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct 
p9_stat_dotl *st; + + P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry); + err = -EPERM; + v9ses = v9fs_inode2v9ses(dentry->d_inode); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) + return simple_getattr(mnt, dentry, stat); + + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + /* Ask for all the fields in stat structure. Server will return + * whatever it supports + */ + + st = p9_client_getattr_dotl(fid, P9_STATS_ALL); + if (IS_ERR(st)) + return PTR_ERR(st); + + v9fs_stat2inode_dotl(st, dentry->d_inode); + generic_fillattr(dentry->d_inode, stat); + /* Change block size to what the server returned */ + stat->blksize = st->st_blksize; + + kfree(st); + return 0; +} + /** * v9fs_vfs_setattr - set file metadata * @dentry: file whose metadata to set @@ -979,6 +1053,77 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9; } +/** + * v9fs_stat2inode_dotl - populate an inode structure with stat info + * @stat: stat structure + * @inode: inode to populate + * @sb: superblock of filesystem + * + */ + +void +v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) +{ + + if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { + inode->i_atime.tv_sec = stat->st_atime_sec; + inode->i_atime.tv_nsec = stat->st_atime_nsec; + inode->i_mtime.tv_sec = stat->st_mtime_sec; + inode->i_mtime.tv_nsec = stat->st_mtime_nsec; + inode->i_ctime.tv_sec = stat->st_ctime_sec; + inode->i_ctime.tv_nsec = stat->st_ctime_nsec; + inode->i_uid = stat->st_uid; + inode->i_gid = stat->st_gid; + inode->i_nlink = stat->st_nlink; + inode->i_mode = stat->st_mode; + inode->i_rdev = new_decode_dev(stat->st_rdev); + + if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) + init_special_inode(inode, inode->i_mode, inode->i_rdev); + + i_size_write(inode, stat->st_size); + inode->i_blocks = stat->st_blocks; + } else { + if (stat->st_result_mask & P9_STATS_ATIME) { + inode->i_atime.tv_sec = stat->st_atime_sec; + inode->i_atime.tv_nsec = stat->st_atime_nsec; + } + if (stat->st_result_mask & P9_STATS_MTIME) { + inode->i_mtime.tv_sec = stat->st_mtime_sec; + inode->i_mtime.tv_nsec = stat->st_mtime_nsec; + } + if (stat->st_result_mask & P9_STATS_CTIME) { + inode->i_ctime.tv_sec = stat->st_ctime_sec; + inode->i_ctime.tv_nsec = stat->st_ctime_nsec; + } + if (stat->st_result_mask & P9_STATS_UID) + inode->i_uid = stat->st_uid; + if (stat->st_result_mask & P9_STATS_GID) + inode->i_gid = stat->st_gid; + if (stat->st_result_mask & P9_STATS_NLINK) + inode->i_nlink = stat->st_nlink; + if (stat->st_result_mask & P9_STATS_MODE) { + inode->i_mode = stat->st_mode; + if ((S_ISBLK(inode->i_mode)) || + (S_ISCHR(inode->i_mode))) + init_special_inode(inode, inode->i_mode, + inode->i_rdev); + } + if (stat->st_result_mask & P9_STATS_RDEV) + inode->i_rdev = new_decode_dev(stat->st_rdev); + if (stat->st_result_mask & P9_STATS_SIZE) + i_size_write(inode, stat->st_size); + if (stat->st_result_mask & P9_STATS_BLOCKS) + inode->i_blocks = stat->st_blocks; + } + if (stat->st_result_mask & P9_STATS_GEN) + inode->i_generation = stat->st_gen; + + /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION + * because the inode structure does not have fields for them. 
+ */ +} + /** * v9fs_qid2ino - convert qid into inode number * @qid: qid to hash @@ -1254,7 +1399,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotl = { .rmdir = v9fs_vfs_rmdir, .mknod = v9fs_vfs_mknod, .rename = v9fs_vfs_rename, - .getattr = v9fs_vfs_getattr, + .getattr = v9fs_vfs_getattr_dotl, .setattr = v9fs_vfs_setattr, }; @@ -1276,7 +1421,7 @@ static const struct inode_operations v9fs_file_inode_operations = { }; static const struct inode_operations v9fs_file_inode_operations_dotl = { - .getattr = v9fs_vfs_getattr, + .getattr = v9fs_vfs_getattr_dotl, .setattr = v9fs_vfs_setattr, }; @@ -1292,6 +1437,6 @@ static const struct inode_operations v9fs_symlink_inode_operations_dotl = { .readlink = generic_readlink, .follow_link = v9fs_vfs_follow_link, .put_link = v9fs_vfs_put_link, - .getattr = v9fs_vfs_getattr, + .getattr = v9fs_vfs_getattr_dotl, .setattr = v9fs_vfs_setattr, }; diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index be74d020436e..3623f692b448 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -107,7 +107,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, struct inode *inode = NULL; struct dentry *root = NULL; struct v9fs_session_info *v9ses = NULL; - struct p9_wstat *st = NULL; int mode = S_IRWXUGO | S_ISVTX; struct p9_fid *fid; int retval = 0; @@ -124,16 +123,10 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, goto close_session; } - st = p9_client_stat(fid); - if (IS_ERR(st)) { - retval = PTR_ERR(st); - goto clunk_fid; - } - sb = sget(fs_type, NULL, v9fs_set_super, v9ses); if (IS_ERR(sb)) { retval = PTR_ERR(sb); - goto free_stat; + goto clunk_fid; } v9fs_fill_super(sb, v9ses, flags, data); @@ -151,22 +144,38 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, } sb->s_root = root; - root->d_inode->i_ino = v9fs_qid2ino(&st->qid); - v9fs_stat2inode(st, root->d_inode, sb); + if (v9fs_proto_dotl(v9ses)) { + struct p9_stat_dotl *st = NULL; + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); + if (IS_ERR(st)) { + retval = PTR_ERR(st); + goto clunk_fid; + } + + v9fs_stat2inode_dotl(st, root->d_inode); + kfree(st); + } else { + struct p9_wstat *st = NULL; + st = p9_client_stat(fid); + if (IS_ERR(st)) { + retval = PTR_ERR(st); + goto clunk_fid; + } + + root->d_inode->i_ino = v9fs_qid2ino(&st->qid); + v9fs_stat2inode(st, root->d_inode, sb); + + p9stat_free(st); + kfree(st); + } v9fs_fid_add(root, fid); - p9stat_free(st); - kfree(st); P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); simple_set_mnt(mnt, sb); return 0; -free_stat: - p9stat_free(st); - kfree(st); - clunk_fid: p9_client_clunk(fid); @@ -176,8 +185,6 @@ close_session: return retval; release_sb: - p9stat_free(st); - kfree(st); deactivate_locked_super(sb); return retval; } diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index f1b0b310265d..ab12e1c9cc7e 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -133,6 +133,8 @@ enum p9_msg_t { P9_RSTATFS, P9_TRENAME = 20, P9_RRENAME, + P9_TGETATTR = 24, + P9_RGETATTR, P9_TREADDIR = 40, P9_RREADDIR, P9_TVERSION = 100, @@ -362,6 +364,48 @@ struct p9_wstat { u32 n_muid; /* 9p2000.u extensions */ }; +struct p9_stat_dotl { + u64 st_result_mask; + struct p9_qid qid; + u32 st_mode; + u32 st_uid; + u32 st_gid; + u64 st_nlink; + u64 st_rdev; + u64 st_size; + u64 st_blksize; + u64 st_blocks; + u64 st_atime_sec; + u64 st_atime_nsec; + u64 st_mtime_sec; + u64 st_mtime_nsec; + u64 st_ctime_sec; + u64 st_ctime_nsec; + u64 st_btime_sec; + u64 st_btime_nsec; + u64 st_gen; + u64 
st_data_version; +}; + +#define P9_STATS_MODE 0x00000001ULL +#define P9_STATS_NLINK 0x00000002ULL +#define P9_STATS_UID 0x00000004ULL +#define P9_STATS_GID 0x00000008ULL +#define P9_STATS_RDEV 0x00000010ULL +#define P9_STATS_ATIME 0x00000020ULL +#define P9_STATS_MTIME 0x00000040ULL +#define P9_STATS_CTIME 0x00000080ULL +#define P9_STATS_INO 0x00000100ULL +#define P9_STATS_SIZE 0x00000200ULL +#define P9_STATS_BLOCKS 0x00000400ULL + +#define P9_STATS_BTIME 0x00000800ULL +#define P9_STATS_GEN 0x00001000ULL +#define P9_STATS_DATA_VERSION 0x00002000ULL + +#define P9_STATS_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */ +#define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */ + /* Structures for Protocol Operations */ struct p9_tstatfs { u32 fid; diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 2ec93685e6db..6462eec435bc 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -238,6 +238,9 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent, struct p9_wstat *p9_client_stat(struct p9_fid *fid); int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst); +struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, + u64 request_mask); + struct p9_req_t *p9_tag_lookup(struct p9_client *, u16); void p9_client_cb(struct p9_client *c, struct p9_req_t *req); diff --git a/net/9p/client.c b/net/9p/client.c index 4ff068e98f76..5e97118da3bf 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1303,6 +1303,65 @@ error: } EXPORT_SYMBOL(p9_client_stat); +struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, + u64 request_mask) +{ + int err; + struct p9_client *clnt; + struct p9_stat_dotl *ret = kmalloc(sizeof(struct p9_stat_dotl), + GFP_KERNEL); + struct p9_req_t *req; + + P9_DPRINTK(P9_DEBUG_9P, ">>> TGETATTR fid %d, request_mask %lld\n", + fid->fid, request_mask); + + if (!ret) + return ERR_PTR(-ENOMEM); + + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TGETATTR, "dq", fid->fid, request_mask); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret); + if (err) { + p9pdu_dump(1, req->rc); + p9_free_req(clnt, req); + goto error; + } + + P9_DPRINTK(P9_DEBUG_9P, + "<<< RGETATTR st_result_mask=%lld\n" + "<<< qid=%x.%llx.%x\n" + "<<< st_mode=%8.8x st_nlink=%llu\n" + "<<< st_uid=%d st_gid=%d\n" + "<<< st_rdev=%llx st_size=%llx st_blksize=%llu st_blocks=%llu\n" + "<<< st_atime_sec=%lld st_atime_nsec=%lld\n" + "<<< st_mtime_sec=%lld st_mtime_nsec=%lld\n" + "<<< st_ctime_sec=%lld st_ctime_nsec=%lld\n" + "<<< st_btime_sec=%lld st_btime_nsec=%lld\n" + "<<< st_gen=%lld st_data_version=%lld", + ret->st_result_mask, ret->qid.type, ret->qid.path, + ret->qid.version, ret->st_mode, ret->st_nlink, ret->st_uid, + ret->st_gid, ret->st_rdev, ret->st_size, ret->st_blksize, + ret->st_blocks, ret->st_atime_sec, ret->st_atime_nsec, + ret->st_mtime_sec, ret->st_mtime_nsec, ret->st_ctime_sec, + ret->st_ctime_nsec, ret->st_btime_sec, ret->st_btime_nsec, + ret->st_gen, ret->st_data_version); + + p9_free_req(clnt, req); + return ret; + +error: + kfree(ret); + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_getattr_dotl); + static int p9_client_statsize(struct p9_wstat *wst, int proto_version) { int ret; diff --git a/net/9p/protocol.c b/net/9p/protocol.c index b645c8263538..3e4f77695891 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c @@ -141,6 +141,7 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size) D - data blob (int32_t size followed by 
void *, results are not freed) T - array of strings (int16_t count, followed by strings) R - array of qids (int16_t count, followed by qids) + A - stat for 9p2000.L (p9_stat_dotl) ? - if optional = 1, continue parsing */ @@ -340,6 +341,33 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, } } break; + case 'A': { + struct p9_stat_dotl *stbuf = + va_arg(ap, struct p9_stat_dotl *); + + memset(stbuf, 0, sizeof(struct p9_stat_dotl)); + errcode = + p9pdu_readf(pdu, proto_version, + "qQdddqqqqqqqqqqqqqqq", + &stbuf->st_result_mask, + &stbuf->qid, + &stbuf->st_mode, + &stbuf->st_uid, &stbuf->st_gid, + &stbuf->st_nlink, + &stbuf->st_rdev, &stbuf->st_size, + &stbuf->st_blksize, &stbuf->st_blocks, + &stbuf->st_atime_sec, + &stbuf->st_atime_nsec, + &stbuf->st_mtime_sec, + &stbuf->st_mtime_nsec, + &stbuf->st_ctime_sec, + &stbuf->st_ctime_nsec, + &stbuf->st_btime_sec, + &stbuf->st_btime_nsec, + &stbuf->st_gen, + &stbuf->st_data_version); + } + break; case '?': if ((proto_version != p9_proto_2000u) && (proto_version != p9_proto_2000L)) -- cgit v1.2.3 From 87d7845aa0b157a62448dd3e339856f28befe1f4 Mon Sep 17 00:00:00 2001 From: Sripathi Kodi Date: Fri, 18 Jun 2010 11:50:10 +0530 Subject: 9p: Implement client side of setattr for 9P2000.L protocol. SYNOPSIS size[4] Tsetattr tag[2] attr[n] size[4] Rsetattr tag[2] DESCRIPTION The setattr command changes some of the file status information. attr resembles the iattr structure used in Linux kernel. It specifies which status parameter is to be changed and to what value. It is laid out as follows: valid[4] specifies which status information is to be changed. Possible values are: ATTR_MODE (1 << 0) ATTR_UID (1 << 1) ATTR_GID (1 << 2) ATTR_SIZE (1 << 3) ATTR_ATIME (1 << 4) ATTR_MTIME (1 << 5) ATTR_ATIME_SET (1 << 7) ATTR_MTIME_SET (1 << 8) The last two bits represent whether the time information is being sent by the client's user space. In the absense of these bits the server always uses server's time. mode[4] File permission bits uid[4] Owner id of file gid[4] Group id of the file size[8] File size atime_sec[8] Time of last file access, seconds atime_nsec[8] Time of last file access, nanoseconds mtime_sec[8] Time of last file modification, seconds mtime_nsec[8] Time of last file modification, nanoseconds Explanation of the patches: -------------------------- *) The kernel just copies relevent contents of iattr structure to p9_iattr_dotl structure and passes it down to the client. The only check it has is calling inode_change_ok() *) The p9_iattr_dotl structure does not have ctime and ia_file parameters because I don't think these are needed in our case. The client user space can request updating just ctime by calling chown(fd, -1, -1). This is handled on server side without a need for putting ctime on the wire. *) The server currently supports changing mode, time, ownership and size of the file. *) 9P RFC says "Either all the changes in wstat request happen, or none of them does: if the request succeeds, all changes were made; if it fails, none were." I have not done anything to implement this specifically because I don't see a reason. 
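As a minimal usage sketch (assuming a valid fid is at hand), truncating a file to zero length with the new call looks roughly like this; ATTR_SIZE is the (1 << 3) bit from the table above:

	struct p9_iattr_dotl p9attr;
	int retval;

	memset(&p9attr, 0, sizeof(p9attr));
	p9attr.valid = ATTR_SIZE;
	p9attr.size = 0;

	retval = p9_client_setattr(fid, &p9attr);
	if (retval < 0)
		return retval;	/* server rejected the change */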
Signed-off-by: Sripathi Kodi Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++--- include/net/9p/9p.h | 28 ++++++++++++++++++++++++++++ include/net/9p/client.h | 1 + net/9p/client.c | 30 ++++++++++++++++++++++++++++++ net/9p/protocol.c | 17 +++++++++++++++++ 5 files changed, 122 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index afcb8d889382..a90324f4546a 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -976,6 +976,49 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) return retval; } +/** + * v9fs_vfs_setattr_dotl - set file metadata + * @dentry: file whose metadata to set + * @iattr: metadata assignment structure + * + */ + +static int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) +{ + int retval; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_iattr_dotl p9attr; + + P9_DPRINTK(P9_DEBUG_VFS, "\n"); + + retval = inode_change_ok(dentry->d_inode, iattr); + if (retval) + return retval; + + p9attr.valid = iattr->ia_valid; + p9attr.mode = iattr->ia_mode; + p9attr.uid = iattr->ia_uid; + p9attr.gid = iattr->ia_gid; + p9attr.size = iattr->ia_size; + p9attr.atime_sec = iattr->ia_atime.tv_sec; + p9attr.atime_nsec = iattr->ia_atime.tv_nsec; + p9attr.mtime_sec = iattr->ia_mtime.tv_sec; + p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; + + retval = -EPERM; + v9ses = v9fs_inode2v9ses(dentry->d_inode); + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + retval = p9_client_setattr(fid, &p9attr); + if (retval >= 0) + retval = inode_setattr(dentry->d_inode, iattr); + + return retval; +} + /** * v9fs_stat2inode - populate an inode structure with mistat info * @stat: Plan 9 metadata (mistat) structure @@ -1400,7 +1443,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotl = { .mknod = v9fs_vfs_mknod, .rename = v9fs_vfs_rename, .getattr = v9fs_vfs_getattr_dotl, - .setattr = v9fs_vfs_setattr, + .setattr = v9fs_vfs_setattr_dotl, }; static const struct inode_operations v9fs_dir_inode_operations = { @@ -1422,7 +1465,7 @@ static const struct inode_operations v9fs_file_inode_operations = { static const struct inode_operations v9fs_file_inode_operations_dotl = { .getattr = v9fs_vfs_getattr_dotl, - .setattr = v9fs_vfs_setattr, + .setattr = v9fs_vfs_setattr_dotl, }; static const struct inode_operations v9fs_symlink_inode_operations = { @@ -1438,5 +1481,5 @@ static const struct inode_operations v9fs_symlink_inode_operations_dotl = { .follow_link = v9fs_vfs_follow_link, .put_link = v9fs_vfs_put_link, .getattr = v9fs_vfs_getattr_dotl, - .setattr = v9fs_vfs_setattr, + .setattr = v9fs_vfs_setattr_dotl, }; diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index ab12e1c9cc7e..7f64d72f6c61 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -135,6 +135,8 @@ enum p9_msg_t { P9_RRENAME, P9_TGETATTR = 24, P9_RGETATTR, + P9_TSETATTR = 26, + P9_RSETATTR, P9_TREADDIR = 40, P9_RREADDIR, P9_TVERSION = 100, @@ -406,6 +408,32 @@ struct p9_stat_dotl { #define P9_STATS_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */ #define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */ +/** + * struct p9_iattr_dotl - P9 inode attribute for setattr + * @valid: bitfield specifying which fields are valid + * same as in struct iattr + * @mode: File permission bits + * @uid: user id of owner + * @gid: group id + * @size: File size + * @atime_sec: Last 
access time, seconds + * @atime_nsec: Last access time, nanoseconds + * @mtime_sec: Last modification time, seconds + * @mtime_nsec: Last modification time, nanoseconds + */ + +struct p9_iattr_dotl { + u32 valid; + u32 mode; + u32 uid; + u32 gid; + u64 size; + u64 atime_sec; + u64 atime_nsec; + u64 mtime_sec; + u64 mtime_nsec; +}; + /* Structures for Protocol Operations */ struct p9_tstatfs { u32 fid; diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 6462eec435bc..afdc385152f6 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -237,6 +237,7 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent, int proto_version); struct p9_wstat *p9_client_stat(struct p9_fid *fid); int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst); +int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr); struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, u64 request_mask); diff --git a/net/9p/client.c b/net/9p/client.c index 5e97118da3bf..b2f70ec889c2 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1426,6 +1426,36 @@ error: } EXPORT_SYMBOL(p9_client_wstat); +int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + P9_DPRINTK(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid); + P9_DPRINTK(P9_DEBUG_9P, + " valid=%x mode=%x uid=%d gid=%d size=%lld\n" + " atime_sec=%lld atime_nsec=%lld\n" + " mtime_sec=%lld mtime_nsec=%lld\n", + p9attr->valid, p9attr->mode, p9attr->uid, p9attr->gid, + p9attr->size, p9attr->atime_sec, p9attr->atime_nsec, + p9attr->mtime_sec, p9attr->mtime_nsec); + + req = p9_client_rpc(clnt, P9_TSETATTR, "dI", fid->fid, p9attr); + + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + P9_DPRINTK(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid); + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_setattr); + int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb) { int err; diff --git a/net/9p/protocol.c b/net/9p/protocol.c index 3e4f77695891..3acd3afb20c8 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c @@ -516,6 +516,23 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, } } break; + case 'I':{ + struct p9_iattr_dotl *p9attr = va_arg(ap, + struct p9_iattr_dotl *); + + errcode = p9pdu_writef(pdu, proto_version, + "ddddqqqqq", + p9attr->valid, + p9attr->mode, + p9attr->uid, + p9attr->gid, + p9attr->size, + p9attr->atime_sec, + p9attr->atime_nsec, + p9attr->mtime_sec, + p9attr->mtime_nsec); + } + break; case '?': if ((proto_version != p9_proto_2000u) && (proto_version != p9_proto_2000L)) -- cgit v1.2.3 From 652df9a7fd03cb47a3f663f0c08a2bd086505e9b Mon Sep 17 00:00:00 2001 From: "Venkateswararao Jujjuri (JV)" Date: Thu, 3 Jun 2010 15:16:59 -0700 Subject: 9p: Define and implement TLINK for 9P2000.L This patch adds a helper function to get the dentry from inode and uses it in creating a Hardlink SYNOPSIS size[4] Tlink tag[2] dfid[4] oldfid[4] newpath[s] size[4] Rlink tag[2] DESCRIPTION Create a link 'newpath' in directory pointed by dfid linking to oldfid path. [sripathik@in.ibm.com : p9_client_link should not free req structure if p9_client_rpc has returned an error.] 
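A minimal usage sketch (assuming the caller already holds dfid for the target directory and oldfid for the existing file, with the new name taken from a dentry):

	char *name = (char *) dentry->d_name.name;

	err = p9_client_link(dfid, oldfid, name);
	if (err < 0)
		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);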
Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- include/net/9p/9p.h | 2 ++ include/net/9p/client.h | 1 + net/9p/client.c | 19 +++++++++++++++++++ 3 files changed, 22 insertions(+) (limited to 'include') diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 7f64d72f6c61..5985c0f83db3 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -139,6 +139,8 @@ enum p9_msg_t { P9_RSETATTR, P9_TREADDIR = 40, P9_RREADDIR, + P9_TLINK = 70, + P9_RLINK, P9_TVERSION = 100, P9_RVERSION, P9_TAUTH = 102, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index afdc385152f6..e36f11650e99 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -226,6 +226,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, int p9_client_open(struct p9_fid *fid, int mode); int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, char *extension); +int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, char *newname); int p9_client_clunk(struct p9_fid *fid); int p9_client_remove(struct p9_fid *fid); int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, diff --git a/net/9p/client.c b/net/9p/client.c index b2f70ec889c2..ad1c4489ab4d 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1095,6 +1095,25 @@ error: } EXPORT_SYMBOL(p9_client_fcreate); +int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname) +{ + struct p9_client *clnt; + struct p9_req_t *req; + + P9_DPRINTK(P9_DEBUG_9P, ">>> TLINK dfid %d oldfid %d newname %s\n", + dfid->fid, oldfid->fid, newname); + clnt = dfid->clnt; + req = p9_client_rpc(clnt, P9_TLINK, "dds", dfid->fid, oldfid->fid, + newname); + if (IS_ERR(req)) + return PTR_ERR(req); + + P9_DPRINTK(P9_DEBUG_9P, "<<< RLINK\n"); + p9_free_req(clnt, req); + return 0; +} +EXPORT_SYMBOL(p9_client_link); + int p9_client_clunk(struct p9_fid *fid) { int err; -- cgit v1.2.3 From 50cc42ff3d7bc48a436c5a0413459ca7841b505f Mon Sep 17 00:00:00 2001 From: "Venkateswararao Jujjuri (JV)" Date: Wed, 9 Jun 2010 15:59:31 -0700 Subject: 9p: Define and implement TSYMLINK for 9P2000.L Create a symbolic link SYNOPSIS size[4] Tsymlink tag[2] fid[4] name[s] symtgt[s] gid[4] size[4] Rsymlink tag[2] qid[13] DESCRIPTION Create a symbolic link named 'name' pointing to 'symtgt'. gid represents the effective group id of the caller. The permissions of a symbolic link are irrelevant hence it is omitted from the protocol. 
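For illustration, a sketch of the client call (dfid, name, symname and gid are assumed to come from the VFS caller, as in v9fs_vfs_symlink_dotl below):

	struct p9_qid qid;

	err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
	if (err < 0)
		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);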
Signed-off-by: Venkateswararao Jujjuri Reviewed-by: Sripathi Kodi Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++-- include/net/9p/9p.h | 4 ++ include/net/9p/client.h | 2 + net/9p/client.c | 34 ++++++++++++++++ 4 files changed, 137 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index e6ece237241f..a7319364544b 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -1245,7 +1245,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen) if (IS_ERR(fid)) return PTR_ERR(fid); - if (!v9fs_proto_dotu(v9ses)) + if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) return -EBADF; st = p9_client_stat(fid); @@ -1350,6 +1350,99 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry, return 0; } +/** + * v9fs_vfs_symlink_dotl - helper function to create symlinks + * @dir: directory inode containing symlink + * @dentry: dentry for symlink + * @symname: symlink data + * + * See Also: 9P2000.L RFC for more information + * + */ + +static int +v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, + const char *symname) +{ + struct v9fs_session_info *v9ses; + struct p9_fid *dfid; + struct p9_fid *fid = NULL; + struct inode *inode; + struct p9_qid qid; + char *name; + int err; + gid_t gid; + + name = (char *) dentry->d_name.name; + P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n", + dir->i_ino, name, symname); + v9ses = v9fs_inode2v9ses(dir); + + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + return err; + } + + gid = v9fs_get_fsgid_for_create(dir); + + if (gid < 0) { + P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_egid failed %d\n", gid); + goto error; + } + + /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */ + err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid); + + if (err < 0) { + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err); + goto error; + } + + if (v9ses->cache) { + /* Now walk from the parent so we can get an unopened fid. */ + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + /* instantiate inode and assign the unopened fid to dentry */ + inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + dentry->d_op = &v9fs_cached_dentry_operations; + d_instantiate(dentry, inode); + err = v9fs_fid_add(dentry, fid); + if (err < 0) + goto error; + fid = NULL; + } else { + /* Not in cached mode. 
No need to populate inode with stat */ + inode = v9fs_get_inode(dir->i_sb, S_IFLNK); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + dentry->d_op = &v9fs_dentry_operations; + d_instantiate(dentry, inode); + } + +error: + if (fid) + p9_client_clunk(fid); + + return err; +} + /** * v9fs_vfs_symlink - helper function to create symlinks * @dir: directory inode containing symlink @@ -1527,7 +1620,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, .symlink = v9fs_vfs_symlink, - .link = v9fs_vfs_link_dotl, + .link = v9fs_vfs_link, .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, @@ -1540,8 +1633,8 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = { static const struct inode_operations v9fs_dir_inode_operations_dotl = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, - .symlink = v9fs_vfs_symlink, - .link = v9fs_vfs_link, + .link = v9fs_vfs_link_dotl, + .symlink = v9fs_vfs_symlink_dotl, .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 5985c0f83db3..44a6883d7144 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -88,6 +88,8 @@ do { \ * enum p9_msg_t - 9P message types * @P9_TSTATFS: file system status request * @P9_RSTATFS: file system status response + * @P9_TSYMLINK: make symlink request + * @P9_RSYMLINK: make symlink response * @P9_TRENAME: rename request * @P9_RRENAME: rename response * @P9_TVERSION: version handshake request @@ -131,6 +133,8 @@ do { \ enum p9_msg_t { P9_TSTATFS = 8, P9_RSTATFS, + P9_TSYMLINK = 16, + P9_RSYMLINK, P9_TRENAME = 20, P9_RRENAME, P9_TGETATTR = 24, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index e36f11650e99..2e039730920e 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -227,6 +227,8 @@ int p9_client_open(struct p9_fid *fid, int mode); int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, char *extension); int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, char *newname); +int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, gid_t gid, + struct p9_qid *qid); int p9_client_clunk(struct p9_fid *fid); int p9_client_remove(struct p9_fid *fid); int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, diff --git a/net/9p/client.c b/net/9p/client.c index ad1c4489ab4d..e37e64cb9394 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1095,6 +1095,40 @@ error: } EXPORT_SYMBOL(p9_client_fcreate); +int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid, + struct p9_qid *qid) +{ + int err = 0; + struct p9_client *clnt; + struct p9_req_t *req; + + P9_DPRINTK(P9_DEBUG_9P, ">>> TSYMLINK dfid %d name %s symtgt %s\n", + dfid->fid, name, symtgt); + clnt = dfid->clnt; + + req = p9_client_rpc(clnt, P9_TSYMLINK, "dssd", dfid->fid, name, symtgt, + gid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); + if (err) { + p9pdu_dump(1, req->rc); + goto free_and_error; + } + + P9_DPRINTK(P9_DEBUG_9P, "<<< RSYMLINK qid %x.%llx.%x\n", + qid->type, (unsigned long long)qid->path, qid->version); + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_symlink); + int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname) { struct p9_client *clnt; -- cgit v1.2.3 From 
4b43516ab19b748b48322937fd9307af17541c4d Mon Sep 17 00:00:00 2001 From: "M. Mohan Kumar" Date: Wed, 16 Jun 2010 14:27:01 +0530 Subject: 9p: Implement TMKNOD Synopsis size[4] Tmknod tag[2] fid[4] name[s] mode[4] major[4] minor[4] gid[4] size[4] Rmknod tag[2] qid[13] Description mknod asks the file server to create a device node with given major and minor number, mode and gid. The qid for the new device node is returned with the mknod reply message. [sripathik@in.ibm.com: Fix error handling code] Signed-off-by: M. Mohan Kumar Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 106 ++++++++++++++++++++++++++++++++++++++++++++++-- include/net/9p/9p.h | 4 ++ include/net/9p/client.h | 2 + net/9p/client.c | 31 ++++++++++++++ 4 files changed, 140 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index a7319364544b..4d9f45ec6126 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -302,7 +302,13 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode) case S_IFBLK: case S_IFCHR: case S_IFSOCK: - if (!v9fs_proto_dotu(v9ses)) { + if (v9fs_proto_dotl(v9ses)) { + inode->i_op = &v9fs_file_inode_operations_dotl; + inode->i_fop = &v9fs_file_operations_dotl; + } else if (v9fs_proto_dotu(v9ses)) { + inode->i_op = &v9fs_file_inode_operations; + inode->i_fop = &v9fs_file_operations; + } else { P9_DPRINTK(P9_DEBUG_ERROR, "special files without extended mode\n"); err = -EINVAL; @@ -1616,6 +1622,100 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) return retval; } +/** + * v9fs_vfs_mknod_dotl - create a special file + * @dir: inode destination for new link + * @dentry: dentry for file + * @mode: mode for creation + * @rdev: device associated with special file + * + */ +static int +v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int mode, + dev_t rdev) +{ + int err; + char *name; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL, *dfid = NULL; + struct inode *inode; + gid_t gid; + struct p9_qid qid; + struct dentry *dir_entry; + + P9_DPRINTK(P9_DEBUG_VFS, + " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino, + dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev)); + + if (!new_valid_dev(rdev)) + return -EINVAL; + + v9ses = v9fs_inode2v9ses(dir); + dir_dentry = v9fs_dentry_from_dir_inode(dir); + dfid = v9fs_fid_lookup(dir_entry); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + dfid = NULL; + goto error; + } + + gid = v9fs_get_fsgid_for_create(dir); + if (gid < 0) { + P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_fsgid_for_create failed\n"); + goto error; + } + + name = (char *) dentry->d_name.name; + + err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid); + if (err < 0) + goto error; + + /* instantiate inode and assign the unopened fid to the dentry */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + dentry->d_op = &v9fs_cached_dentry_operations; + d_instantiate(dentry, inode); + err = v9fs_fid_add(dentry, fid); + if (err < 0) + goto error; + fid = NULL; + } else { + /* + * Not in cached mode. 
No need to populate inode with stat. + * socket syscall returns a fd, so we need instantiate + */ + inode = v9fs_get_inode(dir->i_sb, mode); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + dentry->d_op = &v9fs_dentry_operations; + d_instantiate(dentry, inode); + } + +error: + if (fid) + p9_client_clunk(fid); + return err; +} + static const struct inode_operations v9fs_dir_inode_operations_dotu = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, @@ -1624,7 +1724,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = { .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, - .mknod = v9fs_vfs_mknod, + .mknod = v9fs_vfs_mknod_dotl, .rename = v9fs_vfs_rename, .getattr = v9fs_vfs_getattr, .setattr = v9fs_vfs_setattr, @@ -1638,7 +1738,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotl = { .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, - .mknod = v9fs_vfs_mknod, + .mknod = v9fs_vfs_mknod_dotl, .rename = v9fs_vfs_rename, .getattr = v9fs_vfs_getattr_dotl, .setattr = v9fs_vfs_setattr_dotl, diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 44a6883d7144..ff32091d8063 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -90,6 +90,8 @@ do { \ * @P9_RSTATFS: file system status response * @P9_TSYMLINK: make symlink request * @P9_RSYMLINK: make symlink response + * @P9_TMKNOD: create a special file object request + * @P9_RMKNOD: create a special file object response * @P9_TRENAME: rename request * @P9_RRENAME: rename response * @P9_TVERSION: version handshake request @@ -135,6 +137,8 @@ enum p9_msg_t { P9_RSTATFS, P9_TSYMLINK = 16, P9_RSYMLINK, + P9_TMKNOD = 18, + P9_RMKNOD, P9_TRENAME = 20, P9_RRENAME, P9_TGETATTR = 24, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 2e039730920e..6e70358c71d9 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -245,6 +245,8 @@ int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr); struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, u64 request_mask); +int p9_client_mknod_dotl(struct p9_fid *oldfid, char *name, int mode, + dev_t rdev, gid_t gid, struct p9_qid *); struct p9_req_t *p9_tag_lookup(struct p9_client *, u16); void p9_client_cb(struct p9_client *c, struct p9_req_t *req); diff --git a/net/9p/client.c b/net/9p/client.c index e37e64cb9394..cdfbd6740796 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1622,3 +1622,34 @@ error: return err; } EXPORT_SYMBOL(p9_client_readdir); + +int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode, + dev_t rdev, gid_t gid, struct p9_qid *qid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + P9_DPRINTK(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d " + "minor %d\n", fid->fid, name, mode, MAJOR(rdev), MINOR(rdev)); + req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddd", fid->fid, name, mode, + MAJOR(rdev), MINOR(rdev), gid); + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); + if (err) { + p9pdu_dump(1, req->rc); + goto error; + } + P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type, + (unsigned long long)qid->path, qid->version); + +error: + p9_free_req(clnt, req); + return err; + +} +EXPORT_SYMBOL(p9_client_mknod_dotl); -- cgit v1.2.3 From 01a622bd7409bb7af38e784cff814e5e723f7951 Mon Sep 17 00:00:00 2001 From: "M. 
Mohan Kumar" Date: Wed, 16 Jun 2010 14:27:22 +0530 Subject: 9p: Implement TMKDIR Implement TMKDIR as part of 2000.L Work Synopsis size[4] Tmkdir tag[2] fid[4] name[s] mode[4] gid[4] size[4] Rmkdir tag[2] qid[13] Description mkdir asks the file server to create a directory with given name, mode and gid. The qid for the new directory is returned with the mkdir reply message. Note: 72 is selected as the opcode for TMKDIR from the reserved list. Signed-off-by: M. Mohan Kumar Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++-- include/net/9p/9p.h | 4 +++ include/net/9p/client.h | 2 ++ net/9p/client.c | 31 ++++++++++++++++++ 4 files changed, 117 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 4d9f45ec6126..39dc79567322 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -731,6 +731,83 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) return err; } + +/** + * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory + * @dir: inode that is being unlinked + * @dentry: dentry that is being unlinked + * @mode: mode for new directory + * + */ + +static int v9fs_vfs_mkdir_dotl(struct inode *dir, struct dentry *dentry, + int mode) +{ + int err; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL, *dfid = NULL; + gid_t gid; + char *name; + struct inode *inode; + struct p9_qid qid; + struct dentry *dir_dentry; + + P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name); + err = 0; + v9ses = v9fs_inode2v9ses(dir); + + mode |= S_IFDIR; + dir_dentry = v9fs_dentry_from_dir_inode(dir); + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + dfid = NULL; + goto error; + } + + gid = v9fs_get_fsgid_for_create(dir); + if (gid < 0) { + P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_fsgid_for_create failed\n"); + goto error; + } + + name = (char *) dentry->d_name.name; + err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid); + if (err < 0) + goto error; + + /* instantiate inode and assign the unopened fid to the dentry */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + dentry->d_op = &v9fs_cached_dentry_operations; + d_instantiate(dentry, inode); + err = v9fs_fid_add(dentry, fid); + if (err < 0) + goto error; + fid = NULL; + } +error: + if (fid) + p9_client_clunk(fid); + return err; +} + /** * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode * @dir: inode that is being walked from @@ -1641,7 +1718,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int mode, struct inode *inode; gid_t gid; struct p9_qid qid; - struct dentry *dir_entry; + struct dentry *dir_dentry; P9_DPRINTK(P9_DEBUG_VFS, " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino, @@ -1652,7 +1729,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int mode, v9ses = v9fs_inode2v9ses(dir); dir_dentry = v9fs_dentry_from_dir_inode(dir); - dfid = v9fs_fid_lookup(dir_entry); + dfid = v9fs_fid_lookup(dir_dentry); if (IS_ERR(dfid)) { err = 
PTR_ERR(dfid); P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); @@ -1736,7 +1813,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotl = { .link = v9fs_vfs_link_dotl, .symlink = v9fs_vfs_symlink_dotl, .unlink = v9fs_vfs_unlink, - .mkdir = v9fs_vfs_mkdir, + .mkdir = v9fs_vfs_mkdir_dotl, .rmdir = v9fs_vfs_rmdir, .mknod = v9fs_vfs_mknod_dotl, .rename = v9fs_vfs_rename, diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index ff32091d8063..091b471d8f05 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -94,6 +94,8 @@ do { \ * @P9_RMKNOD: create a special file object response * @P9_TRENAME: rename request * @P9_RRENAME: rename response + * @P9_TMKDIR: create a directory request + * @P9_RMKDIR: create a directory response * @P9_TVERSION: version handshake request * @P9_RVERSION: version handshake response * @P9_TAUTH: request to establish authentication channel @@ -149,6 +151,8 @@ enum p9_msg_t { P9_RREADDIR, P9_TLINK = 70, P9_RLINK, + P9_TMKDIR = 72, + P9_RMKDIR, P9_TVERSION = 100, P9_RVERSION, P9_TAUTH = 102, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 6e70358c71d9..55d913a9b797 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -247,6 +247,8 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, int p9_client_mknod_dotl(struct p9_fid *oldfid, char *name, int mode, dev_t rdev, gid_t gid, struct p9_qid *); +int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode, + gid_t gid, struct p9_qid *); struct p9_req_t *p9_tag_lookup(struct p9_client *, u16); void p9_client_cb(struct p9_client *c, struct p9_req_t *req); diff --git a/net/9p/client.c b/net/9p/client.c index cdfbd6740796..a3bdd341f2ac 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1653,3 +1653,34 @@ error: } EXPORT_SYMBOL(p9_client_mknod_dotl); + +int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode, + gid_t gid, struct p9_qid *qid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + P9_DPRINTK(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n", + fid->fid, name, mode, gid); + req = p9_client_rpc(clnt, P9_TMKDIR, "dsdd", fid->fid, name, mode, + gid); + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); + if (err) { + p9pdu_dump(1, req->rc); + goto error; + } + P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type, + (unsigned long long)qid->path, qid->version); + +error: + p9_free_req(clnt, req); + return err; + +} +EXPORT_SYMBOL(p9_client_mkdir_dotl); -- cgit v1.2.3 From 5643135a28464e7c19d8d23a9e0804697a62c84b Mon Sep 17 00:00:00 2001 From: "Venkateswararao Jujjuri (JV)" Date: Thu, 17 Jun 2010 18:27:46 -0700 Subject: fs/9p: This patch implements TLCREATE for 9p2000.L protocol. SYNOPSIS size[4] Tlcreate tag[2] fid[4] name[s] flags[4] mode[4] gid[4] size[4] Rlcreate tag[2] qid[13] iounit[4] DESCRIPTION The Tlreate request asks the file server to create a new regular file with the name supplied, in the directory (dir) represented by fid. The mode argument specifies the permissions to use. New file is created with the uid if the fid and with supplied gid. The flags argument represent Linux access mode flags with which the caller is requesting to open the file with. Protocol allows all the Linux access modes but it is upto the server to allow/disallow any of these acess modes. If the server doesn't support any of the access mode, it is expected to return error. 
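A rough usage sketch (assuming ofid is a freshly cloned, unopened fid for the parent directory, as in v9fs_vfs_create_dotl below):

	struct p9_qid qid;

	err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
	if (err < 0)
		goto error;
	/* ofid now refers to the newly created, open file; qid identifies it */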
Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 114 +++++++++++++++++++++++++++++++++++++++++++++++- include/net/9p/9p.h | 4 ++ include/net/9p/client.h | 2 + net/9p/client.c | 44 +++++++++++++++++++ 4 files changed, 163 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 39dc79567322..2ac245902a4f 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -641,6 +641,118 @@ error: return ERR_PTR(err); } +/** + * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. + * @dir: directory inode that is being created + * @dentry: dentry that is being deleted + * @mode: create permissions + * @nd: path information + * + */ + +static int +v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *nd) +{ + int err = 0; + char *name = NULL; + gid_t gid; + int flags; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL; + struct p9_fid *dfid, *ofid; + struct file *filp; + struct p9_qid qid; + struct inode *inode; + + v9ses = v9fs_inode2v9ses(dir); + if (nd && nd->flags & LOOKUP_OPEN) + flags = nd->intent.open.flags - 1; + else + flags = O_RDWR; + + name = (char *) dentry->d_name.name; + P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x " + "mode:0x%x\n", name, flags, mode); + + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + return err; + } + + /* clone a fid to use for creation */ + ofid = p9_client_walk(dfid, 0, NULL, 1); + if (IS_ERR(ofid)) { + err = PTR_ERR(ofid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); + return err; + } + + gid = v9fs_get_fsgid_for_create(dir); + err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid); + if (err < 0) { + P9_DPRINTK(P9_DEBUG_VFS, + "p9_client_open_dotl failed in creat %d\n", + err); + goto error; + } + + /* No need to populate the inode if we are not opening the file AND + * not in cached mode. + */ + if (!v9ses->cache && !(nd && nd->flags & LOOKUP_OPEN)) { + /* Not in cached mode. No need to populate inode with stat */ + dentry->d_op = &v9fs_dentry_operations; + p9_client_clunk(ofid); + d_instantiate(dentry, NULL); + return 0; + } + + /* Now walk from the parent so we can get an unopened fid. 
*/ + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); + fid = NULL; + goto error; + } + + /* instantiate inode and assign the unopened fid to dentry */ + inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); + goto error; + } + dentry->d_op = &v9fs_cached_dentry_operations; + d_instantiate(dentry, inode); + err = v9fs_fid_add(dentry, fid); + if (err < 0) + goto error; + + /* if we are opening a file, assign the open fid to the file */ + if (nd && nd->flags & LOOKUP_OPEN) { + filp = lookup_instantiate_filp(nd, dentry, v9fs_open_created); + if (IS_ERR(filp)) { + p9_client_clunk(ofid); + return PTR_ERR(filp); + } + filp->private_data = ofid; + } else + p9_client_clunk(ofid); + + return 0; + +error: + if (ofid) + p9_client_clunk(ofid); + if (fid) + p9_client_clunk(fid); + return err; +} + /** * v9fs_vfs_create - VFS hook to create files * @dir: directory inode that is being created @@ -1808,7 +1920,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = { }; static const struct inode_operations v9fs_dir_inode_operations_dotl = { - .create = v9fs_vfs_create, + .create = v9fs_vfs_create_dotl, .lookup = v9fs_vfs_lookup, .link = v9fs_vfs_link_dotl, .symlink = v9fs_vfs_symlink_dotl, diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 091b471d8f05..06d111d61038 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -92,6 +92,8 @@ do { \ * @P9_RSYMLINK: make symlink response * @P9_TMKNOD: create a special file object request * @P9_RMKNOD: create a special file object response + * @P9_TLCREATE: prepare a handle for I/O on an new file for 9P2000.L + * @P9_RLCREATE: response with file access information for 9P2000.L * @P9_TRENAME: rename request * @P9_RRENAME: rename response * @P9_TMKDIR: create a directory request @@ -137,6 +139,8 @@ do { \ enum p9_msg_t { P9_TSTATFS = 8, P9_RSTATFS, + P9_TLCREATE = 14, + P9_RLCREATE, P9_TSYMLINK = 16, P9_RSYMLINK, P9_TMKNOD = 18, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 55d913a9b797..d755c0ed6750 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -229,6 +229,8 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, int p9_client_link(struct p9_fid *fid, struct p9_fid *oldfid, char *newname); int p9_client_symlink(struct p9_fid *fid, char *name, char *symname, gid_t gid, struct p9_qid *qid); +int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, + gid_t gid, struct p9_qid *qid); int p9_client_clunk(struct p9_fid *fid); int p9_client_remove(struct p9_fid *fid); int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, diff --git a/net/9p/client.c b/net/9p/client.c index a3bdd341f2ac..e580409b1052 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1050,6 +1050,50 @@ error: } EXPORT_SYMBOL(p9_client_open); +int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, + gid_t gid, struct p9_qid *qid) +{ + int err = 0; + struct p9_client *clnt; + struct p9_req_t *req; + int iounit; + + P9_DPRINTK(P9_DEBUG_9P, + ">>> TLCREATE fid %d name %s flags %d mode %d gid %d\n", + ofid->fid, name, flags, mode, gid); + clnt = ofid->clnt; + + if (ofid->mode != -1) + return -EINVAL; + + req = p9_client_rpc(clnt, P9_TLCREATE, "dsddd", ofid->fid, name, flags, + mode, gid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto 
error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit); + if (err) { + p9pdu_dump(1, req->rc); + goto free_and_error; + } + + P9_DPRINTK(P9_DEBUG_9P, "<<< RLCREATE qid %x.%llx.%x iounit %x\n", + qid->type, + (unsigned long long)qid->path, + qid->version, iounit); + + ofid->mode = mode; + ofid->iounit = iounit; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_create_dotl); + int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, char *extension) { -- cgit v1.2.3 From ef56547efa3c88609069e2a91f46e25c31dd536e Mon Sep 17 00:00:00 2001 From: "M. Mohan Kumar" Date: Tue, 22 Jun 2010 19:47:50 +0530 Subject: 9p: Implement LOPEN Implement 9p2000.L version of open(LOPEN) interface in 9p client. For LOPEN, no need to convert the flags to and from 9p mode to VFS mode. Synopsis: size[4] Tlopen tag[2] fid[4] mode[4] size[4] Rlopen tag[2] qid[13] iounit[4] [Fix mode bit format - jvrao@linux.vnet.ibm.com] Signed-off-by: M. Mohan Kumar Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbegren --- fs/9p/vfs_file.c | 13 +++++++++---- include/net/9p/9p.h | 2 ++ net/9p/client.c | 17 ++++++++++------- 3 files changed, 21 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 2d686ec322a0..e97c92bd6f16 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -59,9 +59,13 @@ int v9fs_file_open(struct inode *inode, struct file *file) struct p9_fid *fid; int omode; - P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p \n", inode, file); + P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file); v9ses = v9fs_inode2v9ses(inode); - omode = v9fs_uflags2omode(file->f_flags, v9fs_proto_dotu(v9ses)); + if (v9fs_proto_dotl(v9ses)) + omode = file->f_flags; + else + omode = v9fs_uflags2omode(file->f_flags, + v9fs_proto_dotu(v9ses)); fid = file->private_data; if (!fid) { fid = v9fs_fid_clone(file->f_path.dentry); @@ -73,11 +77,12 @@ int v9fs_file_open(struct inode *inode, struct file *file) p9_client_clunk(fid); return err; } - if (omode & P9_OTRUNC) { + if (file->f_flags & O_TRUNC) { i_size_write(inode, 0); inode->i_blocks = 0; } - if ((file->f_flags & O_APPEND) && (!v9fs_proto_dotu(v9ses))) + if ((file->f_flags & O_APPEND) && + (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses))) generic_file_llseek(file, 0, SEEK_END); } diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 06d111d61038..cf580a40e299 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -139,6 +139,8 @@ do { \ enum p9_msg_t { P9_TSTATFS = 8, P9_RSTATFS, + P9_TLOPEN = 12, + P9_RLOPEN, P9_TLCREATE = 14, P9_RLCREATE, P9_TSYMLINK = 16, diff --git a/net/9p/client.c b/net/9p/client.c index e580409b1052..c458e042d384 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1016,14 +1016,18 @@ int p9_client_open(struct p9_fid *fid, int mode) struct p9_qid qid; int iounit; - P9_DPRINTK(P9_DEBUG_9P, ">>> TOPEN fid %d mode %d\n", fid->fid, mode); - err = 0; clnt = fid->clnt; + P9_DPRINTK(P9_DEBUG_9P, ">>> %s fid %d mode %d\n", + p9_is_proto_dotl(clnt) ? 
"TLOPEN" : "TOPEN", fid->fid, mode); + err = 0; if (fid->mode != -1) return -EINVAL; - req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode); + if (p9_is_proto_dotl(clnt)) + req = p9_client_rpc(clnt, P9_TLOPEN, "dd", fid->fid, mode); + else + req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode); if (IS_ERR(req)) { err = PTR_ERR(req); goto error; @@ -1035,10 +1039,9 @@ int p9_client_open(struct p9_fid *fid, int mode) goto free_and_error; } - P9_DPRINTK(P9_DEBUG_9P, "<<< ROPEN qid %x.%llx.%x iounit %x\n", - qid.type, - (unsigned long long)qid.path, - qid.version, iounit); + P9_DPRINTK(P9_DEBUG_9P, "<<< %s qid %x.%llx.%x iounit %x\n", + p9_is_proto_dotl(clnt) ? "RLOPEN" : "ROPEN", qid.type, + (unsigned long long)qid.path, qid.version, iounit); fid->mode = mode; fid->iounit = iounit; -- cgit v1.2.3 From 0ef63f345c48afe5896c5cffcba57f0457d409b9 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 31 May 2010 13:22:45 +0530 Subject: net/9p: Implement attrwalk 9p call TXATTRWALK: Descend a ATTR namespace size[4] TXATTRWALK tag[2] fid[4] newfid[4] name[s] size[4] RXATTRWALK tag[2] size[8] txattrwalk gets a fid pointing to xattr. This fid can later be used to read the xattr value. If name is NULL the fid returned can be used to get the list of extended attribute associated to the file system object. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- include/net/9p/9p.h | 2 ++ include/net/9p/client.h | 1 + net/9p/client.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+) (limited to 'include') diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index cf580a40e299..6fabb5e559ba 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -153,6 +153,8 @@ enum p9_msg_t { P9_RGETATTR, P9_TSETATTR = 26, P9_RSETATTR, + P9_TXATTRWALK = 30, + P9_RXATTRWALK, P9_TREADDIR = 40, P9_RREADDIR, P9_TLINK = 70, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index d755c0ed6750..60398b1a3f75 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -260,5 +260,6 @@ void p9stat_free(struct p9_wstat *); int p9_is_proto_dotu(struct p9_client *clnt); int p9_is_proto_dotl(struct p9_client *clnt); +struct p9_fid *p9_client_xattrwalk(struct p9_fid *, const char *, u64 *); #endif /* NET_9P_CLIENT_H */ diff --git a/net/9p/client.c b/net/9p/client.c index c458e042d384..ec80ee71d453 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1622,6 +1622,56 @@ error: } EXPORT_SYMBOL(p9_client_rename); +/* + * An xattrwalk without @attr_name gives the fid for the lisxattr namespace + */ +struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid, + const char *attr_name, u64 *attr_size) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + struct p9_fid *attr_fid; + + err = 0; + clnt = file_fid->clnt; + attr_fid = p9_fid_create(clnt); + if (IS_ERR(attr_fid)) { + err = PTR_ERR(attr_fid); + attr_fid = NULL; + goto error; + } + P9_DPRINTK(P9_DEBUG_9P, + ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n", + file_fid->fid, attr_fid->fid, attr_name); + + req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds", + file_fid->fid, attr_fid->fid, attr_name); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size); + if (err) { + p9pdu_dump(1, req->rc); + p9_free_req(clnt, req); + goto clunk_fid; + } + p9_free_req(clnt, req); + P9_DPRINTK(P9_DEBUG_9P, "<<< RXATTRWALK fid %d size %llu\n", + attr_fid->fid, *attr_size); + return 
attr_fid; +clunk_fid: + p9_client_clunk(attr_fid); + attr_fid = NULL; +error: + if (attr_fid && (attr_fid != file_fid)) + p9_fid_destroy(attr_fid); + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(p9_client_xattrwalk); + int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) { int err, rsize, total; -- cgit v1.2.3 From eda25e46161527845572131b37706a458d9270ef Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 31 May 2010 13:22:50 +0530 Subject: net/9p: Implement TXATTRCREATE 9p call TXATTRCREATE: Prepare a fid for setting xattr value on a file system object. size[4] TXATTRCREATE tag[2] fid[4] name[s] attr_size[8] flags[4] size[4] RXATTRCREATE tag[2] txattrcreate gets a fid pointing to xattr. This fid can later be used to set the xattr value. flag value is derived from set Linux setxattr. The manpage says "The flags parameter can be used to refine the semantics of the operation. XATTR_CREATE specifies a pure create, which fails if the named attribute exists already. XATTR_REPLACE specifies a pure replace operation, which fails if the named attribute does not already exist. By default (no flags), the extended attribute will be created if need be, or will simply replace the value if the attribute exists." The actual setxattr operation happens when the fid is clunked. At that point the written byte count and the attr_size specified in TXATTRCREATE should be same otherwise an error will be returned. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- include/net/9p/9p.h | 2 ++ include/net/9p/client.h | 1 + net/9p/client.c | 25 +++++++++++++++++++++++++ 3 files changed, 28 insertions(+) (limited to 'include') diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 6fabb5e559ba..a8de812ccbc8 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -155,6 +155,8 @@ enum p9_msg_t { P9_RSETATTR, P9_TXATTRWALK = 30, P9_RXATTRWALK, + P9_TXATTRCREATE = 32, + P9_RXATTRCREATE, P9_TREADDIR = 40, P9_RREADDIR, P9_TLINK = 70, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 60398b1a3f75..d1aa2cfb30f0 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -261,5 +261,6 @@ void p9stat_free(struct p9_wstat *); int p9_is_proto_dotu(struct p9_client *clnt); int p9_is_proto_dotl(struct p9_client *clnt); struct p9_fid *p9_client_xattrwalk(struct p9_fid *, const char *, u64 *); +int p9_client_xattrcreate(struct p9_fid *, const char *, u64, int); #endif /* NET_9P_CLIENT_H */ diff --git a/net/9p/client.c b/net/9p/client.c index ec80ee71d453..43396acd714a 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1672,6 +1672,31 @@ error: } EXPORT_SYMBOL_GPL(p9_client_xattrwalk); +int p9_client_xattrcreate(struct p9_fid *fid, const char *name, + u64 attr_size, int flags) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + P9_DPRINTK(P9_DEBUG_9P, + ">>> TXATTRCREATE fid %d name %s size %lld flag %d\n", + fid->fid, name, (long long)attr_size, flags); + err = 0; + clnt = fid->clnt; + req = p9_client_rpc(clnt, P9_TXATTRCREATE, "dsqd", + fid->fid, name, attr_size, flags); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + P9_DPRINTK(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid); + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL_GPL(p9_client_xattrcreate); + int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) { int err, rsize, total; -- cgit v1.2.3 From 5690085e7ba7f3081c6ab6db3a3b543444ad8a21 Mon Sep 17 00:00:00 2001 From: 
Jarod Wilson Date: Fri, 16 Jul 2010 14:25:33 -0300 Subject: V4L/DVB: IR/lirc: make lirc userspace and staging modules buildable The lirc userspace needs all the current ioctls defined, and we need to put the header files in places out-of-tree and/or staging lirc drivers (which I plan to prep soon) can easily build with. I've actually tested this in a tree w/all the lirc drivers queued up to be submitted for staging. I'm also reasonably sure that Andy Walls is going to need most of the ioctls anyway for his cx23888 IR driver work. Signed-off-by: Jarod Wilson Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/ir-lirc-codec.c | 2 +- drivers/media/IR/lirc_dev.c | 2 +- drivers/media/IR/lirc_dev.h | 225 --------------------------------------- include/media/lirc.h | 34 +++--- include/media/lirc_dev.h | 225 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 245 insertions(+), 243 deletions(-) delete mode 100644 drivers/media/IR/lirc_dev.h create mode 100644 include/media/lirc_dev.h (limited to 'include') diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c index 178bc5baab78..afb1ada36c78 100644 --- a/drivers/media/IR/ir-lirc-codec.c +++ b/drivers/media/IR/ir-lirc-codec.c @@ -15,9 +15,9 @@ #include #include #include +#include #include #include "ir-core-priv.h" -#include "lirc_dev.h" #define LIRCBUF_SIZE 256 diff --git a/drivers/media/IR/lirc_dev.c b/drivers/media/IR/lirc_dev.c index c11b8f706258..899891bec352 100644 --- a/drivers/media/IR/lirc_dev.c +++ b/drivers/media/IR/lirc_dev.c @@ -37,7 +37,7 @@ #include #include -#include "lirc_dev.h" +#include static int debug; diff --git a/drivers/media/IR/lirc_dev.h b/drivers/media/IR/lirc_dev.h deleted file mode 100644 index b1f60663cb39..000000000000 --- a/drivers/media/IR/lirc_dev.h +++ /dev/null @@ -1,225 +0,0 @@ -/* - * LIRC base driver - * - * by Artur Lipowski - * This code is licensed under GNU GPL - * - */ - -#ifndef _LINUX_LIRC_DEV_H -#define _LINUX_LIRC_DEV_H - -#define MAX_IRCTL_DEVICES 4 -#define BUFLEN 16 - -#define mod(n, div) ((n) % (div)) - -#include -#include -#include -#include -#include -#include - -struct lirc_buffer { - wait_queue_head_t wait_poll; - spinlock_t fifo_lock; - unsigned int chunk_size; - unsigned int size; /* in chunks */ - /* Using chunks instead of bytes pretends to simplify boundary checking - * And should allow for some performance fine tunning later */ - struct kfifo fifo; - u8 fifo_initialized; -}; - -static inline void lirc_buffer_clear(struct lirc_buffer *buf) -{ - unsigned long flags; - - if (buf->fifo_initialized) { - spin_lock_irqsave(&buf->fifo_lock, flags); - kfifo_reset(&buf->fifo); - spin_unlock_irqrestore(&buf->fifo_lock, flags); - } else - WARN(1, "calling %s on an uninitialized lirc_buffer\n", - __func__); -} - -static inline int lirc_buffer_init(struct lirc_buffer *buf, - unsigned int chunk_size, - unsigned int size) -{ - int ret; - - init_waitqueue_head(&buf->wait_poll); - spin_lock_init(&buf->fifo_lock); - buf->chunk_size = chunk_size; - buf->size = size; - ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL); - if (ret == 0) - buf->fifo_initialized = 1; - - return ret; -} - -static inline void lirc_buffer_free(struct lirc_buffer *buf) -{ - if (buf->fifo_initialized) { - kfifo_free(&buf->fifo); - buf->fifo_initialized = 0; - } else - WARN(1, "calling %s on an uninitialized lirc_buffer\n", - __func__); -} - -static inline int lirc_buffer_len(struct lirc_buffer *buf) -{ - int len; - unsigned long flags; - - spin_lock_irqsave(&buf->fifo_lock, flags); - 
len = kfifo_len(&buf->fifo); - spin_unlock_irqrestore(&buf->fifo_lock, flags); - - return len; -} - -static inline int lirc_buffer_full(struct lirc_buffer *buf) -{ - return lirc_buffer_len(buf) == buf->size * buf->chunk_size; -} - -static inline int lirc_buffer_empty(struct lirc_buffer *buf) -{ - return !lirc_buffer_len(buf); -} - -static inline int lirc_buffer_available(struct lirc_buffer *buf) -{ - return buf->size - (lirc_buffer_len(buf) / buf->chunk_size); -} - -static inline unsigned int lirc_buffer_read(struct lirc_buffer *buf, - unsigned char *dest) -{ - unsigned int ret = 0; - - if (lirc_buffer_len(buf) >= buf->chunk_size) - ret = kfifo_out_locked(&buf->fifo, dest, buf->chunk_size, - &buf->fifo_lock); - return ret; - -} - -static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf, - unsigned char *orig) -{ - unsigned int ret; - - ret = kfifo_in_locked(&buf->fifo, orig, buf->chunk_size, - &buf->fifo_lock); - - return ret; -} - -struct lirc_driver { - char name[40]; - int minor; - unsigned long code_length; - unsigned int buffer_size; /* in chunks holding one code each */ - int sample_rate; - unsigned long features; - - unsigned int chunk_size; - - void *data; - int min_timeout; - int max_timeout; - int (*add_to_buf) (void *data, struct lirc_buffer *buf); - struct lirc_buffer *rbuf; - int (*set_use_inc) (void *data); - void (*set_use_dec) (void *data); - struct file_operations *fops; - struct device *dev; - struct module *owner; -}; - -/* name: - * this string will be used for logs - * - * minor: - * indicates minor device (/dev/lirc) number for registered driver - * if caller fills it with negative value, then the first free minor - * number will be used (if available) - * - * code_length: - * length of the remote control key code expressed in bits - * - * sample_rate: - * - * data: - * it may point to any driver data and this pointer will be passed to - * all callback functions - * - * add_to_buf: - * add_to_buf will be called after specified period of the time or - * triggered by the external event, this behavior depends on value of - * the sample_rate this function will be called in user context. This - * routine should return 0 if data was added to the buffer and - * -ENODATA if none was available. This should add some number of bits - * evenly divisible by code_length to the buffer - * - * rbuf: - * if not NULL, it will be used as a read buffer, you will have to - * write to the buffer by other means, like irq's (see also - * lirc_serial.c). - * - * set_use_inc: - * set_use_inc will be called after device is opened - * - * set_use_dec: - * set_use_dec will be called after device is closed - * - * fops: - * file_operations for drivers which don't fit the current driver model. - * - * Some ioctl's can be directly handled by lirc_dev if the driver's - * ioctl function is NULL or if it returns -ENOIOCTLCMD (see also - * lirc_serial.c). - * - * owner: - * the module owning this struct - * - */ - - -/* following functions can be called ONLY from user context - * - * returns negative value on error or minor number - * of the registered device if success - * contents of the structure pointed by p is copied - */ -extern int lirc_register_driver(struct lirc_driver *d); - -/* returns negative value on error or 0 if success -*/ -extern int lirc_unregister_driver(int minor); - -/* Returns the private data stored in the lirc_driver - * associated with the given device file pointer. 
- */ -void *lirc_get_pdata(struct file *file); - -/* default file operations - * used by drivers if they override only some operations - */ -int lirc_dev_fop_open(struct inode *inode, struct file *file); -int lirc_dev_fop_close(struct inode *inode, struct file *file); -unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait); -long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -ssize_t lirc_dev_fop_read(struct file *file, char *buffer, size_t length, - loff_t *ppos); -ssize_t lirc_dev_fop_write(struct file *file, const char *buffer, size_t length, - loff_t *ppos); - -#endif diff --git a/include/media/lirc.h b/include/media/lirc.h index 8dffd4f47bf6..42c467c50519 100644 --- a/include/media/lirc.h +++ b/include/media/lirc.h @@ -1,6 +1,6 @@ /* * lirc.h - linux infrared remote control header file - * last modified 2010/06/03 by Jarod Wilson + * last modified 2010/07/13 by Jarod Wilson */ #ifndef _LINUX_LIRC_H @@ -33,6 +33,9 @@ #define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) #define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) +/* used heavily by lirc userspace */ +#define lirc_t int + /*** lirc compatible hardware features ***/ #define LIRC_MODE2SEND(x) (x) @@ -95,12 +98,10 @@ #define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, __u32) #define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, __u32) -#if 0 /* these ioctls are not used at the moment */ #define LIRC_GET_MIN_FILTER_PULSE _IOR('i', 0x0000000a, __u32) #define LIRC_GET_MAX_FILTER_PULSE _IOR('i', 0x0000000b, __u32) #define LIRC_GET_MIN_FILTER_SPACE _IOR('i', 0x0000000c, __u32) #define LIRC_GET_MAX_FILTER_SPACE _IOR('i', 0x0000000d, __u32) -#endif /* code length in bits, currently only for LIRC_MODE_LIRCCODE */ #define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32) @@ -121,23 +122,30 @@ */ #define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, __u32) -#if 0 /* these ioctls are not used at the moment */ +/* 1 enables, 0 disables timeout reports in MODE2 */ +#define LIRC_SET_REC_TIMEOUT_REPORTS _IOW('i', 0x00000019, __u32) + /* * pulses shorter than this are filtered out by hardware (software * emulation in lirc_dev?) */ -#define LIRC_SET_REC_FILTER_PULSE _IOW('i', 0x00000019, __u32) +#define LIRC_SET_REC_FILTER_PULSE _IOW('i', 0x0000001a, __u32) /* * spaces shorter than this are filtered out by hardware (software * emulation in lirc_dev?) 
*/ -#define LIRC_SET_REC_FILTER_SPACE _IOW('i', 0x0000001a, __u32) +#define LIRC_SET_REC_FILTER_SPACE _IOW('i', 0x0000001b, __u32) /* * if filter cannot be set independantly for pulse/space, this should * be used */ -#define LIRC_SET_REC_FILTER _IOW('i', 0x0000001b, __u32) -#endif +#define LIRC_SET_REC_FILTER _IOW('i', 0x0000001c, __u32) + +/* + * if enabled from the next key press on the driver will send + * LIRC_MODE2_FREQUENCY packets + */ +#define LIRC_SET_MEASURE_CARRIER_MODE _IOW('i', 0x0000001d, __u32) /* * to set a range use @@ -151,13 +159,7 @@ #define LIRC_NOTIFY_DECODE _IO('i', 0x00000020) -#if 0 /* these ioctls are not used at the moment */ -/* - * from the next key press on the driver will send - * LIRC_MODE2_FREQUENCY packets - */ -#define LIRC_MEASURE_CARRIER_ENABLE _IO('i', 0x00000021) -#define LIRC_MEASURE_CARRIER_DISABLE _IO('i', 0x00000022) -#endif +#define LIRC_SETUP_START _IO('i', 0x00000021) +#define LIRC_SETUP_END _IO('i', 0x00000022) #endif diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h new file mode 100644 index 000000000000..b1f60663cb39 --- /dev/null +++ b/include/media/lirc_dev.h @@ -0,0 +1,225 @@ +/* + * LIRC base driver + * + * by Artur Lipowski + * This code is licensed under GNU GPL + * + */ + +#ifndef _LINUX_LIRC_DEV_H +#define _LINUX_LIRC_DEV_H + +#define MAX_IRCTL_DEVICES 4 +#define BUFLEN 16 + +#define mod(n, div) ((n) % (div)) + +#include +#include +#include +#include +#include +#include + +struct lirc_buffer { + wait_queue_head_t wait_poll; + spinlock_t fifo_lock; + unsigned int chunk_size; + unsigned int size; /* in chunks */ + /* Using chunks instead of bytes pretends to simplify boundary checking + * And should allow for some performance fine tunning later */ + struct kfifo fifo; + u8 fifo_initialized; +}; + +static inline void lirc_buffer_clear(struct lirc_buffer *buf) +{ + unsigned long flags; + + if (buf->fifo_initialized) { + spin_lock_irqsave(&buf->fifo_lock, flags); + kfifo_reset(&buf->fifo); + spin_unlock_irqrestore(&buf->fifo_lock, flags); + } else + WARN(1, "calling %s on an uninitialized lirc_buffer\n", + __func__); +} + +static inline int lirc_buffer_init(struct lirc_buffer *buf, + unsigned int chunk_size, + unsigned int size) +{ + int ret; + + init_waitqueue_head(&buf->wait_poll); + spin_lock_init(&buf->fifo_lock); + buf->chunk_size = chunk_size; + buf->size = size; + ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL); + if (ret == 0) + buf->fifo_initialized = 1; + + return ret; +} + +static inline void lirc_buffer_free(struct lirc_buffer *buf) +{ + if (buf->fifo_initialized) { + kfifo_free(&buf->fifo); + buf->fifo_initialized = 0; + } else + WARN(1, "calling %s on an uninitialized lirc_buffer\n", + __func__); +} + +static inline int lirc_buffer_len(struct lirc_buffer *buf) +{ + int len; + unsigned long flags; + + spin_lock_irqsave(&buf->fifo_lock, flags); + len = kfifo_len(&buf->fifo); + spin_unlock_irqrestore(&buf->fifo_lock, flags); + + return len; +} + +static inline int lirc_buffer_full(struct lirc_buffer *buf) +{ + return lirc_buffer_len(buf) == buf->size * buf->chunk_size; +} + +static inline int lirc_buffer_empty(struct lirc_buffer *buf) +{ + return !lirc_buffer_len(buf); +} + +static inline int lirc_buffer_available(struct lirc_buffer *buf) +{ + return buf->size - (lirc_buffer_len(buf) / buf->chunk_size); +} + +static inline unsigned int lirc_buffer_read(struct lirc_buffer *buf, + unsigned char *dest) +{ + unsigned int ret = 0; + + if (lirc_buffer_len(buf) >= buf->chunk_size) + ret = 
kfifo_out_locked(&buf->fifo, dest, buf->chunk_size, + &buf->fifo_lock); + return ret; + +} + +static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf, + unsigned char *orig) +{ + unsigned int ret; + + ret = kfifo_in_locked(&buf->fifo, orig, buf->chunk_size, + &buf->fifo_lock); + + return ret; +} + +struct lirc_driver { + char name[40]; + int minor; + unsigned long code_length; + unsigned int buffer_size; /* in chunks holding one code each */ + int sample_rate; + unsigned long features; + + unsigned int chunk_size; + + void *data; + int min_timeout; + int max_timeout; + int (*add_to_buf) (void *data, struct lirc_buffer *buf); + struct lirc_buffer *rbuf; + int (*set_use_inc) (void *data); + void (*set_use_dec) (void *data); + struct file_operations *fops; + struct device *dev; + struct module *owner; +}; + +/* name: + * this string will be used for logs + * + * minor: + * indicates minor device (/dev/lirc) number for registered driver + * if caller fills it with negative value, then the first free minor + * number will be used (if available) + * + * code_length: + * length of the remote control key code expressed in bits + * + * sample_rate: + * + * data: + * it may point to any driver data and this pointer will be passed to + * all callback functions + * + * add_to_buf: + * add_to_buf will be called after specified period of the time or + * triggered by the external event, this behavior depends on value of + * the sample_rate this function will be called in user context. This + * routine should return 0 if data was added to the buffer and + * -ENODATA if none was available. This should add some number of bits + * evenly divisible by code_length to the buffer + * + * rbuf: + * if not NULL, it will be used as a read buffer, you will have to + * write to the buffer by other means, like irq's (see also + * lirc_serial.c). + * + * set_use_inc: + * set_use_inc will be called after device is opened + * + * set_use_dec: + * set_use_dec will be called after device is closed + * + * fops: + * file_operations for drivers which don't fit the current driver model. + * + * Some ioctl's can be directly handled by lirc_dev if the driver's + * ioctl function is NULL or if it returns -ENOIOCTLCMD (see also + * lirc_serial.c). + * + * owner: + * the module owning this struct + * + */ + + +/* following functions can be called ONLY from user context + * + * returns negative value on error or minor number + * of the registered device if success + * contents of the structure pointed by p is copied + */ +extern int lirc_register_driver(struct lirc_driver *d); + +/* returns negative value on error or 0 if success +*/ +extern int lirc_unregister_driver(int minor); + +/* Returns the private data stored in the lirc_driver + * associated with the given device file pointer. 
+ */ +void *lirc_get_pdata(struct file *file); + +/* default file operations + * used by drivers if they override only some operations + */ +int lirc_dev_fop_open(struct inode *inode, struct file *file); +int lirc_dev_fop_close(struct inode *inode, struct file *file); +unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait); +long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +ssize_t lirc_dev_fop_read(struct file *file, char *buffer, size_t length, + loff_t *ppos); +ssize_t lirc_dev_fop_write(struct file *file, const char *buffer, size_t length, + loff_t *ppos); + +#endif -- cgit v1.2.3 From bbafc0cb6c52c40647f561854db5fbac4d608186 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 10 Jul 2010 15:03:20 -0300 Subject: V4L/DVB: uvc: Move constants and structures definitions to linux/usb/video.h The UVC host and gadget drivers both define constants and structures in private header files. Move all those definitions to linux/usb/video.h where they can be shared by the two drivers (and be available for userspace applications). Signed-off-by: Laurent Pinchart Signed-off-by: Greg Kroah-Hartman Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/uvc/uvcvideo.h | 19 -- drivers/usb/gadget/f_uvc.c | 16 +- drivers/usb/gadget/f_uvc.h | 352 +------------------------------- drivers/usb/gadget/uvc.h | 36 ---- drivers/usb/gadget/webcam.c | 24 +-- include/linux/usb/video.h | 397 +++++++++++++++++++++++++++++++++++++ 6 files changed, 418 insertions(+), 426 deletions(-) (limited to 'include') diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index 47b20e7e3786..ac272456fbfd 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h @@ -196,25 +196,6 @@ struct uvc_device; /* TODO: Put the most frequently accessed fields at the beginning of * structures to maximize cache efficiency. 
*/ -struct uvc_streaming_control { - __u16 bmHint; - __u8 bFormatIndex; - __u8 bFrameIndex; - __u32 dwFrameInterval; - __u16 wKeyFrameRate; - __u16 wPFrameRate; - __u16 wCompQuality; - __u16 wCompWindowSize; - __u16 wDelay; - __u32 dwMaxVideoFrameSize; - __u32 dwMaxPayloadTransferSize; - __u32 dwClockFrequency; - __u8 bmFramingInfo; - __u8 bPreferedVersion; - __u8 bMinVersion; - __u8 bMaxVersion; -}; - struct uvc_control_info { struct list_head list; struct list_head mappings; diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c index dbe6db0184fd..be446b7e7eaa 100644 --- a/drivers/usb/gadget/f_uvc.c +++ b/drivers/usb/gadget/f_uvc.c @@ -61,12 +61,12 @@ static struct usb_gadget_strings *uvc_function_strings[] = { #define UVC_INTF_VIDEO_STREAMING 1 static struct usb_interface_assoc_descriptor uvc_iad __initdata = { - .bLength = USB_DT_INTERFACE_ASSOCIATION_SIZE, + .bLength = sizeof(uvc_iad), .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, .bFirstInterface = 0, .bInterfaceCount = 2, .bFunctionClass = USB_CLASS_VIDEO, - .bFunctionSubClass = 0x03, + .bFunctionSubClass = UVC_SC_VIDEO_INTERFACE_COLLECTION, .bFunctionProtocol = 0x00, .iFunction = 0, }; @@ -78,7 +78,7 @@ static struct usb_interface_descriptor uvc_control_intf __initdata = { .bAlternateSetting = 0, .bNumEndpoints = 1, .bInterfaceClass = USB_CLASS_VIDEO, - .bInterfaceSubClass = 0x01, + .bInterfaceSubClass = UVC_SC_VIDEOCONTROL, .bInterfaceProtocol = 0x00, .iInterface = 0, }; @@ -106,7 +106,7 @@ static struct usb_interface_descriptor uvc_streaming_intf_alt0 __initdata = { .bAlternateSetting = 0, .bNumEndpoints = 0, .bInterfaceClass = USB_CLASS_VIDEO, - .bInterfaceSubClass = 0x02, + .bInterfaceSubClass = UVC_SC_VIDEOSTREAMING, .bInterfaceProtocol = 0x00, .iInterface = 0, }; @@ -118,7 +118,7 @@ static struct usb_interface_descriptor uvc_streaming_intf_alt1 __initdata = { .bAlternateSetting = 1, .bNumEndpoints = 1, .bInterfaceClass = USB_CLASS_VIDEO, - .bInterfaceSubClass = 0x02, + .bInterfaceSubClass = UVC_SC_VIDEOSTREAMING, .bInterfaceProtocol = 0x00, .iInterface = 0, }; @@ -603,15 +603,15 @@ uvc_bind_config(struct usb_configuration *c, /* Validate the descriptors. 
*/ if (control == NULL || control[0] == NULL || - control[0]->bDescriptorSubType != UVC_DT_HEADER) + control[0]->bDescriptorSubType != UVC_VC_HEADER) goto error; if (fs_streaming == NULL || fs_streaming[0] == NULL || - fs_streaming[0]->bDescriptorSubType != UVC_DT_INPUT_HEADER) + fs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER) goto error; if (hs_streaming == NULL || hs_streaming[0] == NULL || - hs_streaming[0]->bDescriptorSubType != UVC_DT_INPUT_HEADER) + hs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER) goto error; uvc->desc.control = control; diff --git a/drivers/usb/gadget/f_uvc.h b/drivers/usb/gadget/f_uvc.h index 8a5db7c4fe7c..e18a6636c283 100644 --- a/drivers/usb/gadget/f_uvc.h +++ b/drivers/usb/gadget/f_uvc.h @@ -15,357 +15,7 @@ #define _F_UVC_H_ #include - -#define USB_CLASS_VIDEO_CONTROL 1 -#define USB_CLASS_VIDEO_STREAMING 2 - -struct uvc_descriptor_header { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; -} __attribute__ ((packed)); - -struct uvc_header_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u16 bcdUVC; - __u16 wTotalLength; - __u32 dwClockFrequency; - __u8 bInCollection; - __u8 baInterfaceNr[]; -} __attribute__((__packed__)); - -#define UVC_HEADER_DESCRIPTOR(n) uvc_header_descriptor_##n - -#define DECLARE_UVC_HEADER_DESCRIPTOR(n) \ -struct UVC_HEADER_DESCRIPTOR(n) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u16 bcdUVC; \ - __u16 wTotalLength; \ - __u32 dwClockFrequency; \ - __u8 bInCollection; \ - __u8 baInterfaceNr[n]; \ -} __attribute__ ((packed)) - -struct uvc_input_terminal_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bTerminalID; - __u16 wTerminalType; - __u8 bAssocTerminal; - __u8 iTerminal; -} __attribute__((__packed__)); - -struct uvc_output_terminal_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bTerminalID; - __u16 wTerminalType; - __u8 bAssocTerminal; - __u8 bSourceID; - __u8 iTerminal; -} __attribute__((__packed__)); - -struct uvc_camera_terminal_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bTerminalID; - __u16 wTerminalType; - __u8 bAssocTerminal; - __u8 iTerminal; - __u16 wObjectiveFocalLengthMin; - __u16 wObjectiveFocalLengthMax; - __u16 wOcularFocalLength; - __u8 bControlSize; - __u8 bmControls[3]; -} __attribute__((__packed__)); - -struct uvc_selector_unit_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bUnitID; - __u8 bNrInPins; - __u8 baSourceID[0]; - __u8 iSelector; -} __attribute__((__packed__)); - -#define UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ - uvc_selector_unit_descriptor_##n - -#define DECLARE_UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ -struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bUnitID; \ - __u8 bNrInPins; \ - __u8 baSourceID[n]; \ - __u8 iSelector; \ -} __attribute__ ((packed)) - -struct uvc_processing_unit_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bUnitID; - __u8 bSourceID; - __u16 wMaxMultiplier; - __u8 bControlSize; - __u8 bmControls[2]; - __u8 iProcessing; -} __attribute__((__packed__)); - -struct uvc_extension_unit_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bUnitID; - __u8 guidExtensionCode[16]; - __u8 bNumControls; - __u8 bNrInPins; - __u8 baSourceID[0]; - __u8 bControlSize; - __u8 bmControls[0]; - __u8 
iExtension; -} __attribute__((__packed__)); - -#define UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ - uvc_extension_unit_descriptor_##p_##n - -#define DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ -struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bUnitID; \ - __u8 guidExtensionCode[16]; \ - __u8 bNumControls; \ - __u8 bNrInPins; \ - __u8 baSourceID[p]; \ - __u8 bControlSize; \ - __u8 bmControls[n]; \ - __u8 iExtension; \ -} __attribute__ ((packed)) - -struct uvc_control_endpoint_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u16 wMaxTransferSize; -} __attribute__((__packed__)); - -#define UVC_DT_HEADER 1 -#define UVC_DT_INPUT_TERMINAL 2 -#define UVC_DT_OUTPUT_TERMINAL 3 -#define UVC_DT_SELECTOR_UNIT 4 -#define UVC_DT_PROCESSING_UNIT 5 -#define UVC_DT_EXTENSION_UNIT 6 - -#define UVC_DT_HEADER_SIZE(n) (12+(n)) -#define UVC_DT_INPUT_TERMINAL_SIZE 8 -#define UVC_DT_OUTPUT_TERMINAL_SIZE 9 -#define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n)) -#define UVC_DT_SELECTOR_UNIT_SIZE(n) (6+(n)) -#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n)) -#define UVC_DT_EXTENSION_UNIT_SIZE(p,n) (24+(p)+(n)) -#define UVC_DT_CONTROL_ENDPOINT_SIZE 5 - -struct uvc_input_header_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bNumFormats; - __u16 wTotalLength; - __u8 bEndpointAddress; - __u8 bmInfo; - __u8 bTerminalLink; - __u8 bStillCaptureMethod; - __u8 bTriggerSupport; - __u8 bTriggerUsage; - __u8 bControlSize; - __u8 bmaControls[]; -} __attribute__((__packed__)); - -#define UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ - uvc_input_header_descriptor_##n_##p - -#define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ -struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bNumFormats; \ - __u16 wTotalLength; \ - __u8 bEndpointAddress; \ - __u8 bmInfo; \ - __u8 bTerminalLink; \ - __u8 bStillCaptureMethod; \ - __u8 bTriggerSupport; \ - __u8 bTriggerUsage; \ - __u8 bControlSize; \ - __u8 bmaControls[p][n]; \ -} __attribute__ ((packed)) - -struct uvc_output_header_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bNumFormats; - __u16 wTotalLength; - __u8 bEndpointAddress; - __u8 bTerminalLink; - __u8 bControlSize; - __u8 bmaControls[]; -} __attribute__((__packed__)); - -#define UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ - uvc_output_header_descriptor_##n_##p - -#define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ -struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bNumFormats; \ - __u16 wTotalLength; \ - __u8 bEndpointAddress; \ - __u8 bTerminalLink; \ - __u8 bControlSize; \ - __u8 bmaControls[p][n]; \ -} __attribute__ ((packed)) - -struct uvc_format_uncompressed { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bFormatIndex; - __u8 bNumFrameDescriptors; - __u8 guidFormat[16]; - __u8 bBitsPerPixel; - __u8 bDefaultFrameIndex; - __u8 bAspectRatioX; - __u8 bAspectRatioY; - __u8 bmInterfaceFlags; - __u8 bCopyProtect; -} __attribute__((__packed__)); - -struct uvc_frame_uncompressed { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bFrameIndex; - __u8 bmCapabilities; - __u16 wWidth; - __u16 wHeight; - __u32 dwMinBitRate; - __u32 dwMaxBitRate; - __u32 dwMaxVideoFrameBufferSize; - __u32 dwDefaultFrameInterval; - __u8 bFrameIntervalType; - __u32 dwFrameInterval[]; -} 
__attribute__((__packed__)); - -#define UVC_FRAME_UNCOMPRESSED(n) \ - uvc_frame_uncompressed_##n - -#define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \ -struct UVC_FRAME_UNCOMPRESSED(n) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bFrameIndex; \ - __u8 bmCapabilities; \ - __u16 wWidth; \ - __u16 wHeight; \ - __u32 dwMinBitRate; \ - __u32 dwMaxBitRate; \ - __u32 dwMaxVideoFrameBufferSize; \ - __u32 dwDefaultFrameInterval; \ - __u8 bFrameIntervalType; \ - __u32 dwFrameInterval[n]; \ -} __attribute__ ((packed)) - -struct uvc_format_mjpeg { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bFormatIndex; - __u8 bNumFrameDescriptors; - __u8 bmFlags; - __u8 bDefaultFrameIndex; - __u8 bAspectRatioX; - __u8 bAspectRatioY; - __u8 bmInterfaceFlags; - __u8 bCopyProtect; -} __attribute__((__packed__)); - -struct uvc_frame_mjpeg { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bFrameIndex; - __u8 bmCapabilities; - __u16 wWidth; - __u16 wHeight; - __u32 dwMinBitRate; - __u32 dwMaxBitRate; - __u32 dwMaxVideoFrameBufferSize; - __u32 dwDefaultFrameInterval; - __u8 bFrameIntervalType; - __u32 dwFrameInterval[]; -} __attribute__((__packed__)); - -#define UVC_FRAME_MJPEG(n) \ - uvc_frame_mjpeg_##n - -#define DECLARE_UVC_FRAME_MJPEG(n) \ -struct UVC_FRAME_MJPEG(n) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bFrameIndex; \ - __u8 bmCapabilities; \ - __u16 wWidth; \ - __u16 wHeight; \ - __u32 dwMinBitRate; \ - __u32 dwMaxBitRate; \ - __u32 dwMaxVideoFrameBufferSize; \ - __u32 dwDefaultFrameInterval; \ - __u8 bFrameIntervalType; \ - __u32 dwFrameInterval[n]; \ -} __attribute__ ((packed)) - -struct uvc_color_matching_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bColorPrimaries; - __u8 bTransferCharacteristics; - __u8 bMatrixCoefficients; -} __attribute__((__packed__)); - -#define UVC_DT_INPUT_HEADER 1 -#define UVC_DT_OUTPUT_HEADER 2 -#define UVC_DT_FORMAT_UNCOMPRESSED 4 -#define UVC_DT_FRAME_UNCOMPRESSED 5 -#define UVC_DT_FORMAT_MJPEG 6 -#define UVC_DT_FRAME_MJPEG 7 -#define UVC_DT_COLOR_MATCHING 13 - -#define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p)) -#define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p)) -#define UVC_DT_FORMAT_UNCOMPRESSED_SIZE 27 -#define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n)) -#define UVC_DT_FORMAT_MJPEG_SIZE 11 -#define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n)) -#define UVC_DT_COLOR_MATCHING_SIZE 6 +#include extern int uvc_bind_config(struct usb_configuration *c, const struct uvc_descriptor_header * const *control, diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h index e92454cddd7d..5b7919460fd2 100644 --- a/drivers/usb/gadget/uvc.h +++ b/drivers/usb/gadget/uvc.h @@ -47,39 +47,6 @@ struct uvc_event #define UVC_INTF_CONTROL 0 #define UVC_INTF_STREAMING 1 -/* ------------------------------------------------------------------------ - * UVC constants & structures - */ - -/* Values for bmHeaderInfo (Video and Still Image Payload Headers, 2.4.3.3) */ -#define UVC_STREAM_EOH (1 << 7) -#define UVC_STREAM_ERR (1 << 6) -#define UVC_STREAM_STI (1 << 5) -#define UVC_STREAM_RES (1 << 4) -#define UVC_STREAM_SCR (1 << 3) -#define UVC_STREAM_PTS (1 << 2) -#define UVC_STREAM_EOF (1 << 1) -#define UVC_STREAM_FID (1 << 0) - -struct uvc_streaming_control { - __u16 bmHint; - __u8 bFormatIndex; - __u8 bFrameIndex; - __u32 dwFrameInterval; - __u16 wKeyFrameRate; - __u16 wPFrameRate; - __u16 wCompQuality; - __u16 wCompWindowSize; - __u16 
wDelay; - __u32 dwMaxVideoFrameSize; - __u32 dwMaxPayloadTransferSize; - __u32 dwClockFrequency; - __u8 bmFramingInfo; - __u8 bPreferedVersion; - __u8 bMinVersion; - __u8 bMaxVersion; -} __attribute__((__packed__)); - /* ------------------------------------------------------------------------ * Debugging, printing and logging */ @@ -137,9 +104,6 @@ extern unsigned int uvc_gadget_trace_param; #define UVC_MAX_REQUEST_SIZE 64 #define UVC_MAX_EVENTS 4 -#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8 -#define USB_CLASS_MISC 0xef - /* ------------------------------------------------------------------------ * Structures */ diff --git a/drivers/usb/gadget/webcam.c b/drivers/usb/gadget/webcam.c index f5f3030cc416..288d21155abe 100644 --- a/drivers/usb/gadget/webcam.c +++ b/drivers/usb/gadget/webcam.c @@ -90,7 +90,7 @@ DECLARE_UVC_HEADER_DESCRIPTOR(1); static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = { .bLength = UVC_DT_HEADER_SIZE(1), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_HEADER, + .bDescriptorSubType = UVC_VC_HEADER, .bcdUVC = cpu_to_le16(0x0100), .wTotalLength = 0, /* dynamic */ .dwClockFrequency = cpu_to_le32(48000000), @@ -101,7 +101,7 @@ static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = { static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = { .bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_INPUT_TERMINAL, + .bDescriptorSubType = UVC_VC_INPUT_TERMINAL, .bTerminalID = 1, .wTerminalType = cpu_to_le16(0x0201), .bAssocTerminal = 0, @@ -118,7 +118,7 @@ static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = { static const struct uvc_processing_unit_descriptor uvc_processing = { .bLength = UVC_DT_PROCESSING_UNIT_SIZE(2), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_PROCESSING_UNIT, + .bDescriptorSubType = UVC_VC_PROCESSING_UNIT, .bUnitID = 2, .bSourceID = 1, .wMaxMultiplier = cpu_to_le16(16*1024), @@ -131,7 +131,7 @@ static const struct uvc_processing_unit_descriptor uvc_processing = { static const struct uvc_output_terminal_descriptor uvc_output_terminal = { .bLength = UVC_DT_OUTPUT_TERMINAL_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_OUTPUT_TERMINAL, + .bDescriptorSubType = UVC_VC_OUTPUT_TERMINAL, .bTerminalID = 3, .wTerminalType = cpu_to_le16(0x0101), .bAssocTerminal = 0, @@ -144,7 +144,7 @@ DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(1, 2); static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = { .bLength = UVC_DT_INPUT_HEADER_SIZE(1, 2), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_INPUT_HEADER, + .bDescriptorSubType = UVC_VS_INPUT_HEADER, .bNumFormats = 2, .wTotalLength = 0, /* dynamic */ .bEndpointAddress = 0, /* dynamic */ @@ -161,7 +161,7 @@ static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = { static const struct uvc_format_uncompressed uvc_format_yuv = { .bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_FORMAT_UNCOMPRESSED, + .bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED, .bFormatIndex = 1, .bNumFrameDescriptors = 2, .guidFormat = @@ -181,7 +181,7 @@ DECLARE_UVC_FRAME_UNCOMPRESSED(3); static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = { .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_FRAME_UNCOMPRESSED, + .bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED, 
.bFrameIndex = 1, .bmCapabilities = 0, .wWidth = cpu_to_le16(640), @@ -199,7 +199,7 @@ static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = { static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = { .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_FRAME_UNCOMPRESSED, + .bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED, .bFrameIndex = 2, .bmCapabilities = 0, .wWidth = cpu_to_le16(1280), @@ -215,7 +215,7 @@ static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = { static const struct uvc_format_mjpeg uvc_format_mjpg = { .bLength = UVC_DT_FORMAT_MJPEG_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_FORMAT_MJPEG, + .bDescriptorSubType = UVC_VS_FORMAT_MJPEG, .bFormatIndex = 2, .bNumFrameDescriptors = 2, .bmFlags = 0, @@ -232,7 +232,7 @@ DECLARE_UVC_FRAME_MJPEG(3); static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = { .bLength = UVC_DT_FRAME_MJPEG_SIZE(3), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_FRAME_MJPEG, + .bDescriptorSubType = UVC_VS_FRAME_MJPEG, .bFrameIndex = 1, .bmCapabilities = 0, .wWidth = cpu_to_le16(640), @@ -250,7 +250,7 @@ static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = { static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = { .bLength = UVC_DT_FRAME_MJPEG_SIZE(1), .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_FRAME_MJPEG, + .bDescriptorSubType = UVC_VS_FRAME_MJPEG, .bFrameIndex = 2, .bmCapabilities = 0, .wWidth = cpu_to_le16(1280), @@ -266,7 +266,7 @@ static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = { static const struct uvc_color_matching_descriptor uvc_color_matching = { .bLength = UVC_DT_COLOR_MATCHING_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubType = UVC_DT_COLOR_MATCHING, + .bDescriptorSubType = UVC_VS_COLORFORMAT, .bColorPrimaries = 1, .bTransferCharacteristics = 1, .bMatrixCoefficients = 4, diff --git a/include/linux/usb/video.h b/include/linux/usb/video.h index 2d5b7fc6a265..3b3b95e01f71 100644 --- a/include/linux/usb/video.h +++ b/include/linux/usb/video.h @@ -160,6 +160,16 @@ #define UVC_STATUS_TYPE_CONTROL 1 #define UVC_STATUS_TYPE_STREAMING 2 +/* 2.4.3.3. Payload Header Information */ +#define UVC_STREAM_EOH (1 << 7) +#define UVC_STREAM_ERR (1 << 6) +#define UVC_STREAM_STI (1 << 5) +#define UVC_STREAM_RES (1 << 4) +#define UVC_STREAM_SCR (1 << 3) +#define UVC_STREAM_PTS (1 << 2) +#define UVC_STREAM_EOF (1 << 1) +#define UVC_STREAM_FID (1 << 0) + /* 4.1.2. Control Capabilities */ #define UVC_CONTROL_CAP_GET (1 << 0) #define UVC_CONTROL_CAP_SET (1 << 1) @@ -167,5 +177,392 @@ #define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3) #define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4) +/* ------------------------------------------------------------------------ + * UVC structures + */ + +/* All UVC descriptors have these 3 fields at the beginning */ +struct uvc_descriptor_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; +} __attribute__((packed)); + +/* 3.7.2. 
Video Control Interface Header Descriptor */ +struct uvc_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u16 bcdUVC; + __u16 wTotalLength; + __u32 dwClockFrequency; + __u8 bInCollection; + __u8 baInterfaceNr[]; +} __attribute__((__packed__)); + +#define UVC_DT_HEADER_SIZE(n) (12+(n)) + +#define UVC_HEADER_DESCRIPTOR(n) \ + uvc_header_descriptor_##n + +#define DECLARE_UVC_HEADER_DESCRIPTOR(n) \ +struct UVC_HEADER_DESCRIPTOR(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u16 bcdUVC; \ + __u16 wTotalLength; \ + __u32 dwClockFrequency; \ + __u8 bInCollection; \ + __u8 baInterfaceNr[n]; \ +} __attribute__ ((packed)) + +/* 3.7.2.1. Input Terminal Descriptor */ +struct uvc_input_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 iTerminal; +} __attribute__((__packed__)); + +#define UVC_DT_INPUT_TERMINAL_SIZE 8 + +/* 3.7.2.2. Output Terminal Descriptor */ +struct uvc_output_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 bSourceID; + __u8 iTerminal; +} __attribute__((__packed__)); + +#define UVC_DT_OUTPUT_TERMINAL_SIZE 9 + +/* 3.7.2.3. Camera Terminal Descriptor */ +struct uvc_camera_terminal_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __u16 wTerminalType; + __u8 bAssocTerminal; + __u8 iTerminal; + __u16 wObjectiveFocalLengthMin; + __u16 wObjectiveFocalLengthMax; + __u16 wOcularFocalLength; + __u8 bControlSize; + __u8 bmControls[3]; +} __attribute__((__packed__)); + +#define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n)) + +/* 3.7.2.4. Selector Unit Descriptor */ +struct uvc_selector_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 bNrInPins; + __u8 baSourceID[0]; + __u8 iSelector; +} __attribute__((__packed__)); + +#define UVC_DT_SELECTOR_UNIT_SIZE(n) (6+(n)) + +#define UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ + uvc_selector_unit_descriptor_##n + +#define DECLARE_UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ +struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bUnitID; \ + __u8 bNrInPins; \ + __u8 baSourceID[n]; \ + __u8 iSelector; \ +} __attribute__ ((packed)) + +/* 3.7.2.5. Processing Unit Descriptor */ +struct uvc_processing_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 bSourceID; + __u16 wMaxMultiplier; + __u8 bControlSize; + __u8 bmControls[2]; + __u8 iProcessing; +} __attribute__((__packed__)); + +#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n)) + +/* 3.7.2.6. 
Extension Unit Descriptor */ +struct uvc_extension_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 guidExtensionCode[16]; + __u8 bNumControls; + __u8 bNrInPins; + __u8 baSourceID[0]; + __u8 bControlSize; + __u8 bmControls[0]; + __u8 iExtension; +} __attribute__((__packed__)); + +#define UVC_DT_EXTENSION_UNIT_SIZE(p, n) (24+(p)+(n)) + +#define UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ + uvc_extension_unit_descriptor_##p_##n + +#define DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ +struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bUnitID; \ + __u8 guidExtensionCode[16]; \ + __u8 bNumControls; \ + __u8 bNrInPins; \ + __u8 baSourceID[p]; \ + __u8 bControlSize; \ + __u8 bmControls[n]; \ + __u8 iExtension; \ +} __attribute__ ((packed)) + +/* 3.8.2.2. Video Control Interrupt Endpoint Descriptor */ +struct uvc_control_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u16 wMaxTransferSize; +} __attribute__((__packed__)); + +#define UVC_DT_CONTROL_ENDPOINT_SIZE 5 + +/* 3.9.2.1. Input Header Descriptor */ +struct uvc_input_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bNumFormats; + __u16 wTotalLength; + __u8 bEndpointAddress; + __u8 bmInfo; + __u8 bTerminalLink; + __u8 bStillCaptureMethod; + __u8 bTriggerSupport; + __u8 bTriggerUsage; + __u8 bControlSize; + __u8 bmaControls[]; +} __attribute__((__packed__)); + +#define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p)) + +#define UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ + uvc_input_header_descriptor_##n_##p + +#define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ +struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bNumFormats; \ + __u16 wTotalLength; \ + __u8 bEndpointAddress; \ + __u8 bmInfo; \ + __u8 bTerminalLink; \ + __u8 bStillCaptureMethod; \ + __u8 bTriggerSupport; \ + __u8 bTriggerUsage; \ + __u8 bControlSize; \ + __u8 bmaControls[p][n]; \ +} __attribute__ ((packed)) + +/* 3.9.2.2. Output Header Descriptor */ +struct uvc_output_header_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bNumFormats; + __u16 wTotalLength; + __u8 bEndpointAddress; + __u8 bTerminalLink; + __u8 bControlSize; + __u8 bmaControls[]; +} __attribute__((__packed__)); + +#define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p)) + +#define UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ + uvc_output_header_descriptor_##n_##p + +#define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ +struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bNumFormats; \ + __u16 wTotalLength; \ + __u8 bEndpointAddress; \ + __u8 bTerminalLink; \ + __u8 bControlSize; \ + __u8 bmaControls[p][n]; \ +} __attribute__ ((packed)) + +/* 3.9.2.6. Color matching descriptor */ +struct uvc_color_matching_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bColorPrimaries; + __u8 bTransferCharacteristics; + __u8 bMatrixCoefficients; +} __attribute__((__packed__)); + +#define UVC_DT_COLOR_MATCHING_SIZE 6 + +/* 4.3.1.1. 
Video Probe and Commit Controls */ +struct uvc_streaming_control { + __u16 bmHint; + __u8 bFormatIndex; + __u8 bFrameIndex; + __u32 dwFrameInterval; + __u16 wKeyFrameRate; + __u16 wPFrameRate; + __u16 wCompQuality; + __u16 wCompWindowSize; + __u16 wDelay; + __u32 dwMaxVideoFrameSize; + __u32 dwMaxPayloadTransferSize; + __u32 dwClockFrequency; + __u8 bmFramingInfo; + __u8 bPreferedVersion; + __u8 bMinVersion; + __u8 bMaxVersion; +} __attribute__((__packed__)); + +/* Uncompressed Payload - 3.1.1. Uncompressed Video Format Descriptor */ +struct uvc_format_uncompressed { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFormatIndex; + __u8 bNumFrameDescriptors; + __u8 guidFormat[16]; + __u8 bBitsPerPixel; + __u8 bDefaultFrameIndex; + __u8 bAspectRatioX; + __u8 bAspectRatioY; + __u8 bmInterfaceFlags; + __u8 bCopyProtect; +} __attribute__((__packed__)); + +#define UVC_DT_FORMAT_UNCOMPRESSED_SIZE 27 + +/* Uncompressed Payload - 3.1.2. Uncompressed Video Frame Descriptor */ +struct uvc_frame_uncompressed { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFrameIndex; + __u8 bmCapabilities; + __u16 wWidth; + __u16 wHeight; + __u32 dwMinBitRate; + __u32 dwMaxBitRate; + __u32 dwMaxVideoFrameBufferSize; + __u32 dwDefaultFrameInterval; + __u8 bFrameIntervalType; + __u32 dwFrameInterval[]; +} __attribute__((__packed__)); + +#define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n)) + +#define UVC_FRAME_UNCOMPRESSED(n) \ + uvc_frame_uncompressed_##n + +#define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \ +struct UVC_FRAME_UNCOMPRESSED(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bFrameIndex; \ + __u8 bmCapabilities; \ + __u16 wWidth; \ + __u16 wHeight; \ + __u32 dwMinBitRate; \ + __u32 dwMaxBitRate; \ + __u32 dwMaxVideoFrameBufferSize; \ + __u32 dwDefaultFrameInterval; \ + __u8 bFrameIntervalType; \ + __u32 dwFrameInterval[n]; \ +} __attribute__ ((packed)) + +/* MJPEG Payload - 3.1.1. MJPEG Video Format Descriptor */ +struct uvc_format_mjpeg { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFormatIndex; + __u8 bNumFrameDescriptors; + __u8 bmFlags; + __u8 bDefaultFrameIndex; + __u8 bAspectRatioX; + __u8 bAspectRatioY; + __u8 bmInterfaceFlags; + __u8 bCopyProtect; +} __attribute__((__packed__)); + +#define UVC_DT_FORMAT_MJPEG_SIZE 11 + +/* MJPEG Payload - 3.1.2. 
MJPEG Video Frame Descriptor */ +struct uvc_frame_mjpeg { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFrameIndex; + __u8 bmCapabilities; + __u16 wWidth; + __u16 wHeight; + __u32 dwMinBitRate; + __u32 dwMaxBitRate; + __u32 dwMaxVideoFrameBufferSize; + __u32 dwDefaultFrameInterval; + __u8 bFrameIntervalType; + __u32 dwFrameInterval[]; +} __attribute__((__packed__)); + +#define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n)) + +#define UVC_FRAME_MJPEG(n) \ + uvc_frame_mjpeg_##n + +#define DECLARE_UVC_FRAME_MJPEG(n) \ +struct UVC_FRAME_MJPEG(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bFrameIndex; \ + __u8 bmCapabilities; \ + __u16 wWidth; \ + __u16 wHeight; \ + __u32 dwMinBitRate; \ + __u32 dwMaxBitRate; \ + __u32 dwMaxVideoFrameBufferSize; \ + __u32 dwDefaultFrameInterval; \ + __u8 bFrameIntervalType; \ + __u32 dwFrameInterval[n]; \ +} __attribute__ ((packed)) + #endif /* __LINUX_USB_VIDEO_H */ -- cgit v1.2.3 From ace6e9799f585994c92ac3c0696bc336e50077e6 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 22 Jul 2010 16:52:51 -0300 Subject: V4L/DVB: mediabus: fix ambiguous pixel code names Endianness notation is meaningless for 8 bit YUYV codes. Switch pixel code names to explicitly state the order of colour components in the data stream. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- arch/sh/boards/mach-ap325rxa/setup.c | 2 +- drivers/media/video/ak881x.c | 6 +++--- drivers/media/video/mt9m111.c | 16 ++++++++-------- drivers/media/video/mt9t112.c | 12 ++++++------ drivers/media/video/ov772x.c | 8 ++++---- drivers/media/video/ov9640.c | 14 +++++++------- drivers/media/video/pxa_camera.c | 8 ++++---- drivers/media/video/rj54n1cb0c.c | 8 ++++---- drivers/media/video/sh_mobile_ceu_camera.c | 16 ++++++++-------- drivers/media/video/sh_vou.c | 8 ++++---- drivers/media/video/soc_mediabus.c | 8 ++++---- drivers/media/video/tw9910.c | 8 ++++---- include/media/v4l2-mediabus.h | 8 ++++---- 13 files changed, 61 insertions(+), 61 deletions(-) (limited to 'include') diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index 3a170bd3f3d0..de375b64e410 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -316,7 +316,7 @@ static struct soc_camera_platform_info camera_info = { .format_name = "UYVY", .format_depth = 16, .format = { - .code = V4L2_MBUS_FMT_YUYV8_2X8_BE, + .code = V4L2_MBUS_FMT_UYVY8_2X8, .colorspace = V4L2_COLORSPACE_SMPTE170M, .field = V4L2_FIELD_NONE, .width = 640, diff --git a/drivers/media/video/ak881x.c b/drivers/media/video/ak881x.c index 1573392f74bd..b388654d48cd 100644 --- a/drivers/media/video/ak881x.c +++ b/drivers/media/video/ak881x.c @@ -126,7 +126,7 @@ static int ak881x_try_g_mbus_fmt(struct v4l2_subdev *sd, v4l_bound_align_image(&mf->width, 0, 720, 2, &mf->height, 0, ak881x->lines, 1, 0); mf->field = V4L2_FIELD_INTERLACED; - mf->code = V4L2_MBUS_FMT_YUYV8_2X8_LE; + mf->code = V4L2_MBUS_FMT_YUYV8_2X8; mf->colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; @@ -136,7 +136,7 @@ static int ak881x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { if (mf->field != V4L2_FIELD_INTERLACED || - mf->code != V4L2_MBUS_FMT_YUYV8_2X8_LE) + mf->code != V4L2_MBUS_FMT_YUYV8_2X8) return -EINVAL; return ak881x_try_g_mbus_fmt(sd, mf); @@ -148,7 +148,7 @@ static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index, if (index) return -EINVAL; - *code = V4L2_MBUS_FMT_YUYV8_2X8_LE; + *code 
= V4L2_MBUS_FMT_YUYV8_2X8; return 0; } diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index fbd0fc794720..31cc3d04bcc4 100644 --- a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -143,10 +143,10 @@ static const struct mt9m111_datafmt *mt9m111_find_datafmt( } static const struct mt9m111_datafmt mt9m111_colour_fmts[] = { - {V4L2_MBUS_FMT_YUYV8_2X8_LE, V4L2_COLORSPACE_JPEG}, - {V4L2_MBUS_FMT_YVYU8_2X8_LE, V4L2_COLORSPACE_JPEG}, - {V4L2_MBUS_FMT_YUYV8_2X8_BE, V4L2_COLORSPACE_JPEG}, - {V4L2_MBUS_FMT_YVYU8_2X8_BE, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG}, {V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB}, @@ -505,22 +505,22 @@ static int mt9m111_set_pixfmt(struct i2c_client *client, case V4L2_MBUS_FMT_RGB565_2X8_LE: ret = mt9m111_setfmt_rgb565(client); break; - case V4L2_MBUS_FMT_YUYV8_2X8_BE: + case V4L2_MBUS_FMT_UYVY8_2X8: mt9m111->swap_yuv_y_chromas = 0; mt9m111->swap_yuv_cb_cr = 0; ret = mt9m111_setfmt_yuv(client); break; - case V4L2_MBUS_FMT_YVYU8_2X8_BE: + case V4L2_MBUS_FMT_VYUY8_2X8: mt9m111->swap_yuv_y_chromas = 0; mt9m111->swap_yuv_cb_cr = 1; ret = mt9m111_setfmt_yuv(client); break; - case V4L2_MBUS_FMT_YUYV8_2X8_LE: + case V4L2_MBUS_FMT_YUYV8_2X8: mt9m111->swap_yuv_y_chromas = 1; mt9m111->swap_yuv_cb_cr = 0; ret = mt9m111_setfmt_yuv(client); break; - case V4L2_MBUS_FMT_YVYU8_2X8_LE: + case V4L2_MBUS_FMT_YVYU8_2X8: mt9m111->swap_yuv_y_chromas = 1; mt9m111->swap_yuv_cb_cr = 1; ret = mt9m111_setfmt_yuv(client); diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c index e4bf1db9a87b..8ec47e42d4d0 100644 --- a/drivers/media/video/mt9t112.c +++ b/drivers/media/video/mt9t112.c @@ -121,22 +121,22 @@ struct mt9t112_priv { static const struct mt9t112_format mt9t112_cfmts[] = { { - .code = V4L2_MBUS_FMT_YUYV8_2X8_BE, + .code = V4L2_MBUS_FMT_UYVY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .fmt = 1, .order = 0, }, { - .code = V4L2_MBUS_FMT_YVYU8_2X8_BE, + .code = V4L2_MBUS_FMT_VYUY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .fmt = 1, .order = 1, }, { - .code = V4L2_MBUS_FMT_YUYV8_2X8_LE, + .code = V4L2_MBUS_FMT_YUYV8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .fmt = 1, .order = 2, }, { - .code = V4L2_MBUS_FMT_YVYU8_2X8_LE, + .code = V4L2_MBUS_FMT_YVYU8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .fmt = 1, .order = 3, @@ -972,7 +972,7 @@ static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) struct v4l2_rect *rect = &a->c; return mt9t112_set_params(client, rect->width, rect->height, - V4L2_MBUS_FMT_YUYV8_2X8_BE); + V4L2_MBUS_FMT_UYVY8_2X8); } static int mt9t112_g_fmt(struct v4l2_subdev *sd, @@ -983,7 +983,7 @@ static int mt9t112_g_fmt(struct v4l2_subdev *sd, if (!priv->format) { int ret = mt9t112_set_params(client, VGA_WIDTH, VGA_HEIGHT, - V4L2_MBUS_FMT_YUYV8_2X8_BE); + V4L2_MBUS_FMT_UYVY8_2X8); if (ret < 0) return ret; } diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c index 34034a710214..25eb5d637eea 100644 --- a/drivers/media/video/ov772x.c +++ b/drivers/media/video/ov772x.c @@ -440,21 +440,21 @@ static const struct regval_list ov772x_vga_regs[] = { */ static const struct ov772x_color_format ov772x_cfmts[] = { { - .code = V4L2_MBUS_FMT_YUYV8_2X8_LE, + .code = V4L2_MBUS_FMT_YUYV8_2X8, .colorspace = 
V4L2_COLORSPACE_JPEG, .dsp3 = 0x0, .com3 = SWAP_YUV, .com7 = OFMT_YUV, }, { - .code = V4L2_MBUS_FMT_YVYU8_2X8_LE, + .code = V4L2_MBUS_FMT_YVYU8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .dsp3 = UV_ON, .com3 = SWAP_YUV, .com7 = OFMT_YUV, }, { - .code = V4L2_MBUS_FMT_YUYV8_2X8_BE, + .code = V4L2_MBUS_FMT_UYVY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .dsp3 = 0x0, .com3 = 0x0, @@ -960,7 +960,7 @@ static int ov772x_g_fmt(struct v4l2_subdev *sd, if (!priv->win || !priv->cfmt) { u32 width = VGA_WIDTH, height = VGA_HEIGHT; int ret = ov772x_set_params(client, &width, &height, - V4L2_MBUS_FMT_YUYV8_2X8_LE); + V4L2_MBUS_FMT_YUYV8_2X8); if (ret < 0) return ret; } diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c index 7ce9e05b4781..40cdfab74ccc 100644 --- a/drivers/media/video/ov9640.c +++ b/drivers/media/video/ov9640.c @@ -155,7 +155,7 @@ static const struct ov9640_reg ov9640_regs_rgb[] = { }; static enum v4l2_mbus_pixelcode ov9640_codes[] = { - V4L2_MBUS_FMT_YUYV8_2X8_BE, + V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_MBUS_FMT_RGB565_2X8_LE, }; @@ -430,7 +430,7 @@ static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code, { switch (code) { default: - case V4L2_MBUS_FMT_YUYV8_2X8_BE: + case V4L2_MBUS_FMT_UYVY8_2X8: alt->com12 = OV9640_COM12_YUV_AVG; alt->com13 = OV9640_COM13_Y_DELAY_EN | OV9640_COM13_YUV_DLY(0x01); @@ -493,7 +493,7 @@ static int ov9640_write_regs(struct i2c_client *client, u32 width, } /* select color matrix configuration for given color encoding */ - if (code == V4L2_MBUS_FMT_YUYV8_2X8_BE) { + if (code == V4L2_MBUS_FMT_UYVY8_2X8) { matrix_regs = ov9640_regs_yuv; matrix_regs_len = ARRAY_SIZE(ov9640_regs_yuv); } else { @@ -579,8 +579,8 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd, cspace = V4L2_COLORSPACE_SRGB; break; default: - code = V4L2_MBUS_FMT_YUYV8_2X8_BE; - case V4L2_MBUS_FMT_YUYV8_2X8_BE: + code = V4L2_MBUS_FMT_UYVY8_2X8; + case V4L2_MBUS_FMT_UYVY8_2X8: cspace = V4L2_COLORSPACE_JPEG; } @@ -606,8 +606,8 @@ static int ov9640_try_fmt(struct v4l2_subdev *sd, mf->colorspace = V4L2_COLORSPACE_SRGB; break; default: - mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE; - case V4L2_MBUS_FMT_YUYV8_2X8_BE: + mf->code = V4L2_MBUS_FMT_UYVY8_2X8; + case V4L2_MBUS_FMT_UYVY8_2X8: mf->colorspace = V4L2_COLORSPACE_JPEG; } diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c index 5835acf7fa7a..9de7d59916bd 100644 --- a/drivers/media/video/pxa_camera.c +++ b/drivers/media/video/pxa_camera.c @@ -1284,7 +1284,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id } switch (code) { - case V4L2_MBUS_FMT_YUYV8_2X8_BE: + case V4L2_MBUS_FMT_UYVY8_2X8: formats++; if (xlate) { xlate->host_fmt = &pxa_camera_formats[0]; @@ -1293,9 +1293,9 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id dev_dbg(dev, "Providing format %s using code %d\n", pxa_camera_formats[0].name, code); } - case V4L2_MBUS_FMT_YVYU8_2X8_BE: - case V4L2_MBUS_FMT_YUYV8_2X8_LE: - case V4L2_MBUS_FMT_YVYU8_2X8_LE: + case V4L2_MBUS_FMT_VYUY8_2X8: + case V4L2_MBUS_FMT_YUYV8_2X8: + case V4L2_MBUS_FMT_YVYU8_2X8: case V4L2_MBUS_FMT_RGB565_2X8_LE: case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: if (xlate) diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c index 47fd207ba3b1..d319aef7527e 100644 --- a/drivers/media/video/rj54n1cb0c.c +++ b/drivers/media/video/rj54n1cb0c.c @@ -127,8 +127,8 @@ static const struct rj54n1_datafmt *rj54n1_find_datafmt( } static const struct rj54n1_datafmt 
rj54n1_colour_fmts[] = { - {V4L2_MBUS_FMT_YUYV8_2X8_LE, V4L2_COLORSPACE_JPEG}, - {V4L2_MBUS_FMT_YVYU8_2X8_LE, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG}, + {V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG}, {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB}, {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, @@ -1046,12 +1046,12 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd, /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */ switch (mf->code) { - case V4L2_MBUS_FMT_YUYV8_2X8_LE: + case V4L2_MBUS_FMT_YUYV8_2X8: ret = reg_write(client, RJ54N1_OUT_SEL, 0); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); break; - case V4L2_MBUS_FMT_YVYU8_2X8_LE: + case V4L2_MBUS_FMT_YVYU8_2X8: ret = reg_write(client, RJ54N1_OUT_SEL, 0); if (!ret) ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index d40b1e08bcec..86869dbcbab0 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -743,16 +743,16 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: switch (cam->code) { - case V4L2_MBUS_FMT_YUYV8_2X8_BE: + case V4L2_MBUS_FMT_UYVY8_2X8: value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */ break; - case V4L2_MBUS_FMT_YVYU8_2X8_BE: + case V4L2_MBUS_FMT_VYUY8_2X8: value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */ break; - case V4L2_MBUS_FMT_YUYV8_2X8_LE: + case V4L2_MBUS_FMT_YUYV8_2X8: value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */ break; - case V4L2_MBUS_FMT_YVYU8_2X8_LE: + case V4L2_MBUS_FMT_YVYU8_2X8: value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */ break; default: @@ -965,10 +965,10 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int cam->extra_fmt = NULL; switch (code) { - case V4L2_MBUS_FMT_YUYV8_2X8_BE: - case V4L2_MBUS_FMT_YVYU8_2X8_BE: - case V4L2_MBUS_FMT_YUYV8_2X8_LE: - case V4L2_MBUS_FMT_YVYU8_2X8_LE: + case V4L2_MBUS_FMT_UYVY8_2X8: + case V4L2_MBUS_FMT_VYUY8_2X8: + case V4L2_MBUS_FMT_YUYV8_2X8: + case V4L2_MBUS_FMT_YVYU8_2X8: if (cam->extra_fmt) break; diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c index 5f73a017961c..3869d515febd 100644 --- a/drivers/media/video/sh_vou.c +++ b/drivers/media/video/sh_vou.c @@ -678,7 +678,7 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv, struct sh_vou_geometry geo; struct v4l2_mbus_framefmt mbfmt = { /* Revisit: is this the correct code? */ - .code = V4L2_MBUS_FMT_YUYV8_2X8_LE, + .code = V4L2_MBUS_FMT_YUYV8_2X8, .field = V4L2_FIELD_INTERLACED, .colorspace = V4L2_COLORSPACE_SMPTE170M, }; @@ -726,7 +726,7 @@ static int sh_vou_s_fmt_vid_out(struct file *file, void *priv, /* Sanity checks */ if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH || (unsigned)mbfmt.height > VOU_MAX_IMAGE_HEIGHT || - mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8_LE) + mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8) return -EIO; if (mbfmt.width != geo.output.width || @@ -937,7 +937,7 @@ static int sh_vou_s_crop(struct file *file, void *fh, struct v4l2_crop *a) struct sh_vou_geometry geo; struct v4l2_mbus_framefmt mbfmt = { /* Revisit: is this the correct code? 
*/ - .code = V4L2_MBUS_FMT_YUYV8_2X8_LE, + .code = V4L2_MBUS_FMT_YUYV8_2X8, .field = V4L2_FIELD_INTERLACED, .colorspace = V4L2_COLORSPACE_SMPTE170M, }; @@ -982,7 +982,7 @@ static int sh_vou_s_crop(struct file *file, void *fh, struct v4l2_crop *a) /* Sanity checks */ if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH || (unsigned)mbfmt.height > VOU_MAX_IMAGE_HEIGHT || - mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8_LE) + mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8) return -EIO; geo.output.width = mbfmt.width; diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c index 8b63b6545e76..91391214c682 100644 --- a/drivers/media/video/soc_mediabus.c +++ b/drivers/media/video/soc_mediabus.c @@ -18,28 +18,28 @@ #define MBUS_IDX(f) (V4L2_MBUS_FMT_ ## f - V4L2_MBUS_FMT_FIXED - 1) static const struct soc_mbus_pixelfmt mbus_fmt[] = { - [MBUS_IDX(YUYV8_2X8_LE)] = { + [MBUS_IDX(YUYV8_2X8)] = { .fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, - [MBUS_IDX(YVYU8_2X8_LE)] = { + [MBUS_IDX(YVYU8_2X8)] = { .fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, - [MBUS_IDX(YUYV8_2X8_BE)] = { + [MBUS_IDX(UYVY8_2X8)] = { .fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, - [MBUS_IDX(YVYU8_2X8_BE)] = { + [MBUS_IDX(VYUY8_2X8)] = { .fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY", .bits_per_sample = 8, diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c index 445dc93413e3..a727962781a3 100644 --- a/drivers/media/video/tw9910.c +++ b/drivers/media/video/tw9910.c @@ -768,7 +768,7 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd, mf->width = priv->scale->width; mf->height = priv->scale->height; - mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE; + mf->code = V4L2_MBUS_FMT_UYVY8_2X8; mf->colorspace = V4L2_COLORSPACE_JPEG; mf->field = V4L2_FIELD_INTERLACED_BT; @@ -797,7 +797,7 @@ static int tw9910_s_fmt(struct v4l2_subdev *sd, /* * check color format */ - if (mf->code != V4L2_MBUS_FMT_YUYV8_2X8_BE) + if (mf->code != V4L2_MBUS_FMT_UYVY8_2X8) return -EINVAL; mf->colorspace = V4L2_COLORSPACE_JPEG; @@ -824,7 +824,7 @@ static int tw9910_try_fmt(struct v4l2_subdev *sd, return -EINVAL; } - mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE; + mf->code = V4L2_MBUS_FMT_UYVY8_2X8; mf->colorspace = V4L2_COLORSPACE_JPEG; /* @@ -909,7 +909,7 @@ static int tw9910_enum_fmt(struct v4l2_subdev *sd, unsigned int index, if (index) return -EINVAL; - *code = V4L2_MBUS_FMT_YUYV8_2X8_BE; + *code = V4L2_MBUS_FMT_UYVY8_2X8; return 0; } diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index 865cda7cd611..a87096598416 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -24,10 +24,10 @@ */ enum v4l2_mbus_pixelcode { V4L2_MBUS_FMT_FIXED = 1, - V4L2_MBUS_FMT_YUYV8_2X8_LE, - V4L2_MBUS_FMT_YVYU8_2X8_LE, - V4L2_MBUS_FMT_YUYV8_2X8_BE, - V4L2_MBUS_FMT_YVYU8_2X8_BE, + V4L2_MBUS_FMT_YUYV8_2X8, + V4L2_MBUS_FMT_YVYU8_2X8, + V4L2_MBUS_FMT_UYVY8_2X8, + V4L2_MBUS_FMT_VYUY8_2X8, V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, V4L2_MBUS_FMT_RGB565_2X8_LE, -- cgit v1.2.3 From c6b65ab78bebf5ceaa8de53d8a9c4f5e34e45e57 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Mon, 26 Jul 2010 10:41:55 -0300 Subject: V4L/DVB: V4L2: mediabus: add 12-bit Bayer and YUV420 pixel formats These formats belong to the standard format set, 
defined by the MIPI CSI-2 specification. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- include/media/v4l2-mediabus.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index a87096598416..f0cf2e7def06 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -41,6 +41,11 @@ enum v4l2_mbus_pixelcode { V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_MBUS_FMT_SGRBG8_1X8, + V4L2_MBUS_FMT_SBGGR12_1X12, + V4L2_MBUS_FMT_YUYV8_1_5X8, + V4L2_MBUS_FMT_YVYU8_1_5X8, + V4L2_MBUS_FMT_UYVY8_1_5X8, + V4L2_MBUS_FMT_VYUY8_1_5X8, }; /** -- cgit v1.2.3 From 52d268a36246ee4156cc719036522616bb4d73fa Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Mon, 26 Jul 2010 11:37:13 -0300 Subject: V4L/DVB: V4L2: soc-camera: export soc-camera bus type for notifications Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/soc_camera.c | 3 ++- include/media/soc_camera.h | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 475757bfd7ba..f2032939fd4b 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -1107,13 +1107,14 @@ static int soc_camera_resume(struct device *dev) return ret; } -static struct bus_type soc_camera_bus_type = { +struct bus_type soc_camera_bus_type = { .name = "soc-camera", .probe = soc_camera_probe, .remove = soc_camera_remove, .suspend = soc_camera_suspend, .resume = soc_camera_resume, }; +EXPORT_SYMBOL_GPL(soc_camera_bus_type); static struct device_driver ic_drv = { .name = "camera", diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index b8289c2f609b..2ce957301f77 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -12,12 +12,15 @@ #ifndef SOC_CAMERA_H #define SOC_CAMERA_H +#include #include #include #include #include #include +extern struct bus_type soc_camera_bus_type; + struct soc_camera_device { struct list_head list; struct device dev; -- cgit v1.2.3 From 077e2c10c9cb618d571bf16475db696610bdb24a Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Mon, 26 Jul 2010 11:12:43 -0300 Subject: V4L/DVB: V4L2: soc-camera: add a MIPI CSI-2 driver for SH-Mobile platforms Some SH-Mobile SoCs implement a MIPI CSI-2 controller, that can interface to several video clients and send data to the CEU or to the Image Signal Processor. This patch implements a v4l2-subdevice driver for CSI-2 to be used within the soc-camera framework, implementing the second subdevice in addition to the actual video clients. 
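For orientation, here is a minimal sketch of how a board file could describe its CSI-2 setup to this driver, using the sh_csi2_pdata and sh_csi2_client_config structures that include/media/sh_mobile_csi2.h introduces later in this patch. It is an illustration only: the sensor platform device, the id and the ECC/CRC flag choices are assumptions, not taken from any board in this series.

#include <linux/platform_device.h>
#include <media/sh_mobile_csi2.h>

/* Hypothetical MIPI sensor client; a real board registers its own sensor. */
static struct platform_device example_sensor_device;

static struct sh_csi2_client_config csi2_clients[] = {
	{
		.phy	 = SH_CSI2_PHY_MAIN,
		.lanes	 = 3,			/* bitmask[3:0]: both data lanes */
		.channel = 0,			/* virtual channel 0..3 */
		.pdev	 = &example_sensor_device,
	},
};

static struct sh_csi2_pdata csi2_pdata = {
	.type		= SH_CSI2C,		/* only CSI2C is handled so far */
	.flags		= SH_CSI2_ECC | SH_CSI2_CRC,
	.clients	= csi2_clients,
	.num_clients	= ARRAY_SIZE(csi2_clients),
};

static struct platform_device csi2_device = {
	.name	= "sh-mobile-csi2",
	.id	= 0,
	.dev	= {
		.platform_data = &csi2_pdata,
	},
};

At bind time the driver matches the probing soc-camera client against the pdev recorded in this table (see sh_csi2_notify() below), so the client entry must point at the same platform device that the sensor itself is registered with.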
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/Kconfig | 6 + drivers/media/video/Makefile | 1 + drivers/media/video/sh_mobile_csi2.c | 354 +++++++++++++++++++++++++++++++++++ include/media/sh_mobile_csi2.h | 46 +++++ 4 files changed, 407 insertions(+) create mode 100644 drivers/media/video/sh_mobile_csi2.c create mode 100644 include/media/sh_mobile_csi2.h (limited to 'include') diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index c627f776c1e9..0f1ac401dedd 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -876,6 +876,12 @@ config VIDEO_PXA27x ---help--- This is a v4l2 driver for the PXA27x Quick Capture Interface +config VIDEO_SH_MOBILE_CSI2 + tristate "SuperH Mobile MIPI CSI-2 Interface driver" + depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK + ---help--- + This is a v4l2 driver for the SuperH MIPI CSI-2 Interface + config VIDEO_SH_MOBILE_CEU tristate "SuperH Mobile CEU Interface driver" depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index eba259fbf7ef..88478630e854 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile @@ -160,6 +160,7 @@ obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o obj-$(CONFIG_VIDEO_MX1) += mx1_camera.o obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o +obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o obj-$(CONFIG_ARCH_DAVINCI) += davinci/ diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c new file mode 100644 index 000000000000..84a646819318 --- /dev/null +++ b/drivers/media/video/sh_mobile_csi2.c @@ -0,0 +1,354 @@ +/* + * Driver for the SH-Mobile MIPI CSI-2 unit + * + * Copyright (C) 2010, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define SH_CSI2_TREF 0x00 +#define SH_CSI2_SRST 0x04 +#define SH_CSI2_PHYCNT 0x08 +#define SH_CSI2_CHKSUM 0x0C +#define SH_CSI2_VCDT 0x10 + +struct sh_csi2 { + struct v4l2_subdev subdev; + struct list_head list; + struct notifier_block notifier; + unsigned int irq; + void __iomem *base; + struct platform_device *pdev; + struct sh_csi2_client_config *client; +}; + +static int sh_csi2_try_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) +{ + struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); + struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; + + if (mf->width > 8188) + mf->width = 8188; + else if (mf->width & 1) + mf->width &= ~1; + + switch (pdata->type) { + case SH_CSI2C: + switch (mf->code) { + case V4L2_MBUS_FMT_UYVY8_2X8: /* YUV422 */ + case V4L2_MBUS_FMT_YUYV8_1_5X8: /* YUV420 */ + case V4L2_MBUS_FMT_GREY8_1X8: /* RAW8 */ + case V4L2_MBUS_FMT_SBGGR8_1X8: + case V4L2_MBUS_FMT_SGRBG8_1X8: + break; + default: + /* All MIPI CSI-2 devices must support one of primary formats */ + mf->code = V4L2_MBUS_FMT_YUYV8_2X8; + } + break; + case SH_CSI2I: + switch (mf->code) { + case V4L2_MBUS_FMT_GREY8_1X8: /* RAW8 */ + case V4L2_MBUS_FMT_SBGGR8_1X8: + case V4L2_MBUS_FMT_SGRBG8_1X8: + case V4L2_MBUS_FMT_SBGGR10_1X10: /* RAW10 */ + case V4L2_MBUS_FMT_SBGGR12_1X12: /* RAW12 */ + break; + default: + /* All MIPI CSI-2 devices must support one of primary formats */ + mf->code = V4L2_MBUS_FMT_SBGGR8_1X8; + } + break; + } + + return 0; +} + +/* + * We have done our best in try_fmt to try and tell the sensor, which formats + * we support. If now the configuration is unsuitable for us we can only + * error out. 
+ */ +static int sh_csi2_s_fmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *mf) +{ + struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); + u32 tmp = (priv->client->channel & 3) << 8; + + dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code); + if (mf->width > 8188 || mf->width & 1) + return -EINVAL; + + switch (mf->code) { + case V4L2_MBUS_FMT_UYVY8_2X8: + tmp |= 0x1e; /* YUV422 8 bit */ + break; + case V4L2_MBUS_FMT_YUYV8_1_5X8: + tmp |= 0x18; /* YUV420 8 bit */ + break; + case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: + tmp |= 0x21; /* RGB555 */ + break; + case V4L2_MBUS_FMT_RGB565_2X8_BE: + tmp |= 0x22; /* RGB565 */ + break; + case V4L2_MBUS_FMT_GREY8_1X8: + case V4L2_MBUS_FMT_SBGGR8_1X8: + case V4L2_MBUS_FMT_SGRBG8_1X8: + tmp |= 0x2a; /* RAW8 */ + break; + default: + return -EINVAL; + } + + iowrite32(tmp, priv->base + SH_CSI2_VCDT); + + return 0; +} + +static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = { + .s_mbus_fmt = sh_csi2_s_fmt, + .try_mbus_fmt = sh_csi2_try_fmt, +}; + +static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops; + +static struct v4l2_subdev_ops sh_csi2_subdev_ops = { + .core = &sh_csi2_subdev_core_ops, + .video = &sh_csi2_subdev_video_ops, +}; + +static void sh_csi2_hwinit(struct sh_csi2 *priv) +{ + struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; + __u32 tmp = 0x10; /* Enable MIPI CSI clock lane */ + + /* Reflect registers immediately */ + iowrite32(0x00000001, priv->base + SH_CSI2_TREF); + /* reset CSI2 harware */ + iowrite32(0x00000001, priv->base + SH_CSI2_SRST); + udelay(5); + iowrite32(0x00000000, priv->base + SH_CSI2_SRST); + + if (priv->client->lanes & 3) + tmp |= priv->client->lanes & 3; + else + /* Default - both lanes */ + tmp |= 3; + + if (priv->client->phy == SH_CSI2_PHY_MAIN) + tmp |= 0x8000; + + iowrite32(tmp, priv->base + SH_CSI2_PHYCNT); + + tmp = 0; + if (pdata->flags & SH_CSI2_ECC) + tmp |= 2; + if (pdata->flags & SH_CSI2_CRC) + tmp |= 1; + iowrite32(tmp, priv->base + SH_CSI2_CHKSUM); +} + +static int sh_csi2_set_bus_param(struct soc_camera_device *icd, + unsigned long flags) +{ + return 0; +} + +static unsigned long sh_csi2_query_bus_param(struct soc_camera_device *icd) +{ + struct soc_camera_link *icl = to_soc_camera_link(icd); + const unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | + SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH | + SOCAM_MASTER | SOCAM_DATAWIDTH_8 | SOCAM_DATA_ACTIVE_HIGH; + + return soc_camera_apply_sensor_flags(icl, flags); +} + +static int sh_csi2_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct soc_camera_device *icd = to_soc_camera_dev(dev); + struct v4l2_device *v4l2_dev = dev_get_drvdata(dev->parent); + struct sh_csi2 *priv = + container_of(nb, struct sh_csi2, notifier); + struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; + int ret, i; + + for (i = 0; i < pdata->num_clients; i++) + if (&pdata->clients[i].pdev->dev == icd->pdev) + break; + + dev_dbg(dev, "%s(%p): action = %lu, found #%d\n", __func__, dev, action, i); + + if (i == pdata->num_clients) + return NOTIFY_DONE; + + switch (action) { + case BUS_NOTIFY_BOUND_DRIVER: + snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s%s", + dev_name(v4l2_dev->dev), ".mipi-csi"); + ret = v4l2_device_register_subdev(v4l2_dev, &priv->subdev); + dev_dbg(dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret); + if (ret < 0) + return NOTIFY_DONE; + + priv->client = pdata->clients + i; + + icd->ops->set_bus_param = sh_csi2_set_bus_param; + 
icd->ops->query_bus_param = sh_csi2_query_bus_param; + + pm_runtime_get_sync(v4l2_get_subdevdata(&priv->subdev)); + + sh_csi2_hwinit(priv); + break; + case BUS_NOTIFY_UNBIND_DRIVER: + priv->client = NULL; + + /* Driver is about to be unbound */ + icd->ops->set_bus_param = NULL; + icd->ops->query_bus_param = NULL; + + v4l2_device_unregister_subdev(&priv->subdev); + + pm_runtime_put(v4l2_get_subdevdata(&priv->subdev)); + break; + } + + return NOTIFY_OK; +} + +static __devinit int sh_csi2_probe(struct platform_device *pdev) +{ + struct resource *res; + unsigned int irq; + int ret; + struct sh_csi2 *priv; + /* Platform data specify the PHY, lanes, ECC, CRC */ + struct sh_csi2_pdata *pdata = pdev->dev.platform_data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + /* Interrupt unused so far */ + irq = platform_get_irq(pdev, 0); + + if (!res || (int)irq <= 0 || !pdata) { + dev_err(&pdev->dev, "Not enough CSI2 platform resources.\n"); + return -ENODEV; + } + + /* TODO: Add support for CSI2I. Careful: different register layout! */ + if (pdata->type != SH_CSI2C) { + dev_err(&pdev->dev, "Only CSI2C supported ATM.\n"); + return -EINVAL; + } + + priv = kzalloc(sizeof(struct sh_csi2), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->irq = irq; + priv->notifier.notifier_call = sh_csi2_notify; + + /* We MUST attach after the MIPI sensor */ + ret = bus_register_notifier(&soc_camera_bus_type, &priv->notifier); + if (ret < 0) { + dev_err(&pdev->dev, "CSI2 cannot register notifier\n"); + goto ernotify; + } + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "CSI2 register region already claimed\n"); + ret = -EBUSY; + goto ereqreg; + } + + priv->base = ioremap(res->start, resource_size(res)); + if (!priv->base) { + ret = -ENXIO; + dev_err(&pdev->dev, "Unable to ioremap CSI2 registers.\n"); + goto eremap; + } + + priv->pdev = pdev; + + v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops); + v4l2_set_subdevdata(&priv->subdev, &pdev->dev); + + platform_set_drvdata(pdev, priv); + + pm_runtime_enable(&pdev->dev); + + dev_dbg(&pdev->dev, "CSI2 probed.\n"); + + return 0; + +eremap: + release_mem_region(res->start, resource_size(res)); +ereqreg: + bus_unregister_notifier(&soc_camera_bus_type, &priv->notifier); +ernotify: + kfree(priv); + + return ret; +} + +static __devexit int sh_csi2_remove(struct platform_device *pdev) +{ + struct sh_csi2 *priv = platform_get_drvdata(pdev); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + bus_unregister_notifier(&soc_camera_bus_type, &priv->notifier); + pm_runtime_disable(&pdev->dev); + iounmap(priv->base); + release_mem_region(res->start, resource_size(res)); + platform_set_drvdata(pdev, NULL); + kfree(priv); + + return 0; +} + +static struct platform_driver __refdata sh_csi2_pdrv = { + .remove = __devexit_p(sh_csi2_remove), + .driver = { + .name = "sh-mobile-csi2", + .owner = THIS_MODULE, + }, +}; + +static int __init sh_csi2_init(void) +{ + return platform_driver_probe(&sh_csi2_pdrv, sh_csi2_probe); +} + +static void __exit sh_csi2_exit(void) +{ + platform_driver_unregister(&sh_csi2_pdrv); +} + +module_init(sh_csi2_init); +module_exit(sh_csi2_exit); + +MODULE_DESCRIPTION("SH-Mobile MIPI CSI-2 driver"); +MODULE_AUTHOR("Guennadi Liakhovetski "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sh-mobile-csi2"); diff --git a/include/media/sh_mobile_csi2.h b/include/media/sh_mobile_csi2.h new file mode 100644 index 000000000000..4d2615174461 --- /dev/null +++ b/include/media/sh_mobile_csi2.h 
@@ -0,0 +1,46 @@ +/* + * Driver header for the SH-Mobile MIPI CSI-2 unit + * + * Copyright (C) 2010, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef SH_MIPI_CSI +#define SH_MIPI_CSI + +enum sh_csi2_phy { + SH_CSI2_PHY_MAIN, + SH_CSI2_PHY_SUB, +}; + +enum sh_csi2_type { + SH_CSI2C, + SH_CSI2I, +}; + +#define SH_CSI2_CRC (1 << 0) +#define SH_CSI2_ECC (1 << 1) + +struct platform_device; + +struct sh_csi2_client_config { + enum sh_csi2_phy phy; + unsigned char lanes; /* bitmask[3:0] */ + unsigned char channel; /* 0..3 */ + struct platform_device *pdev; /* client platform device */ +}; + +struct sh_csi2_pdata { + enum sh_csi2_type type; + unsigned int flags; + struct sh_csi2_client_config *clients; + int num_clients; +}; + +struct device; +struct v4l2_device; + +#endif -- cgit v1.2.3 From b3b5020d8c12037f030242aab8e272148bf1f472 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Mon, 26 Jul 2010 12:13:34 -0300 Subject: V4L/DVB: V4L2: sh_mobile_camera_ceu: add support for CSI2 Using CEU with CSI2 on SH-Mobile requires some special configuration of the former. We also have to switch from calling only one subdev .s_mbus_fmt and .try_mbus_fmt to calling all subdevices. Take care to increment CSI2 driver use count to prevent it from unloading, while in use. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Mauro Carvalho Chehab --- drivers/media/video/sh_mobile_ceu_camera.c | 131 +++++++++++++++++++++++++---- include/media/sh_mobile_ceu.h | 3 + 2 files changed, 119 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c index 86869dbcbab0..2b24bd0de3ad 100644 --- a/drivers/media/video/sh_mobile_ceu_camera.c +++ b/drivers/media/video/sh_mobile_ceu_camera.c @@ -633,6 +633,12 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd) cdwdr_width *= 2; } + /* CSI2 special configuration */ + if (pcdev->pdata->csi2_dev) { + in_width = ((in_width - 2) * 2); + left_offset *= 2; + } + /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */ camor = left_offset | (top_offset << 16); @@ -767,6 +773,11 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd, value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 1 << 1 : 0; value |= common_flags & SOCAM_HSYNC_ACTIVE_LOW ? 1 << 0 : 0; value |= pcdev->is_16bit ? 
1 << 12 : 0; + + /* CSI2 mode */ + if (pcdev->pdata->csi2_dev) + value |= 3 << 12; + ceu_write(pcdev, CAMCR, value); ceu_write(pcdev, CAPCR, 0x00300000); @@ -883,6 +894,8 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; + struct soc_camera_host *ici = to_soc_camera_host(dev); + struct sh_mobile_ceu_dev *pcdev = ici->priv; int ret, k, n; int formats = 0; struct sh_mobile_ceu_cam *cam; @@ -896,19 +909,19 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { - dev_err(icd->dev.parent, - "Invalid format code #%u: %d\n", idx, code); + dev_err(dev, "Invalid format code #%u: %d\n", idx, code); return -EINVAL; } - ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); - if (ret < 0) - return 0; + if (!pcdev->pdata->csi2_dev) { + ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); + if (ret < 0) + return 0; + } if (!icd->host_priv) { struct v4l2_mbus_framefmt mf; struct v4l2_rect rect; - struct device *dev = icd->dev.parent; int shift = 0; /* FIXME: subwindow is lost between close / open */ @@ -927,7 +940,8 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int /* Try 2560x1920, 1280x960, 640x480, 320x240 */ mf.width = 2560 >> shift; mf.height = 1920 >> shift; - ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, + s_mbus_fmt, &mf); if (ret < 0) return ret; shift++; @@ -1228,7 +1242,8 @@ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_cropcap cap; int ret; - ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf); + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, + s_mbus_fmt, mf); if (ret < 0) return ret; @@ -1257,7 +1272,8 @@ static int client_s_fmt(struct soc_camera_device *icd, tmp_h = min(2 * tmp_h, max_height); mf->width = tmp_w; mf->height = tmp_h; - ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf); + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, + s_mbus_fmt, mf); dev_geo(dev, "Camera scaled to %ux%u\n", mf->width, mf->height); if (ret < 0) { @@ -1514,7 +1530,8 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, struct device *dev = icd->dev.parent; __u32 pixfmt = pix->pixelformat; const struct soc_camera_format_xlate *xlate; - unsigned int ceu_sub_width, ceu_sub_height; + /* Keep Compiler Happy */ + unsigned int ceu_sub_width = 0, ceu_sub_height = 0; u16 scale_v, scale_h; int ret; bool image_mode; @@ -1569,8 +1586,8 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, /* Done with the camera. 
Now see if we can improve the result */ - dev_geo(dev, "Camera %d fmt %ux%u, requested %ux%u\n", - ret, mf.width, mf.height, pix->width, pix->height); + dev_geo(dev, "fmt %ux%u, requested %ux%u\n", + mf.width, mf.height, pix->width, pix->height); if (ret < 0) return ret; @@ -1634,6 +1651,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, int width, height; int ret; + dev_geo(icd->dev.parent, "TRY_FMT(pix=0x%x, %ux%u)\n", + pixfmt, pix->width, pix->height); + xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); @@ -1660,7 +1680,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, mf.code = xlate->code; mf.colorspace = pix->colorspace; - ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf); + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, try_mbus_fmt, &mf); if (ret < 0) return ret; @@ -1684,7 +1704,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, */ mf.width = 2560; mf.height = 1920; - ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf); + ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, + try_mbus_fmt, &mf); if (ret < 0) { /* Shouldn't actually happen... */ dev_err(icd->dev.parent, @@ -1699,6 +1720,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, pix->height = height; } + dev_geo(icd->dev.parent, "%s(): return %d, fmt 0x%x, %ux%u\n", + __func__, ret, pix->pixelformat, pix->width, pix->height); + return ret; } @@ -1853,6 +1877,30 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = { .num_controls = ARRAY_SIZE(sh_mobile_ceu_controls), }; +struct bus_wait { + struct notifier_block notifier; + struct completion completion; + struct device *dev; +}; + +static int bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct bus_wait *wait = container_of(nb, struct bus_wait, notifier); + + if (wait->dev != dev) + return NOTIFY_DONE; + + switch (action) { + case BUS_NOTIFY_UNBOUND_DRIVER: + /* Protect from module unloading */ + wait_for_completion(&wait->completion); + return NOTIFY_OK; + } + return NOTIFY_DONE; +} + static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev) { struct sh_mobile_ceu_dev *pcdev; @@ -1860,6 +1908,11 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev) void __iomem *base; unsigned int irq; int err = 0; + struct bus_wait wait = { + .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion), + .notifier.notifier_call = bus_notify, + }; + struct device *csi2; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); @@ -1931,12 +1984,54 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev) pcdev->ici.drv_name = dev_name(&pdev->dev); pcdev->ici.ops = &sh_mobile_ceu_host_ops; + /* CSI2 interfacing */ + csi2 = pcdev->pdata->csi2_dev; + if (csi2) { + wait.dev = csi2; + + err = bus_register_notifier(&platform_bus_type, &wait.notifier); + if (err < 0) + goto exit_free_clk; + + /* + * From this point the driver module will not unload, until + * we complete the completion. + */ + + if (!csi2->driver || !csi2->driver->owner) { + complete(&wait.completion); + /* Either too late, or probing failed */ + bus_unregister_notifier(&platform_bus_type, &wait.notifier); + err = -ENXIO; + goto exit_free_clk; + } + + /* + * The module is still loaded, in the worst case it is hanging + * in device release on our completion. So, _now_ dereferencing + * the "owner" is safe! 
+ */ + + err = try_module_get(csi2->driver->owner); + + /* Let notifier complete, if it has been locked */ + complete(&wait.completion); + bus_unregister_notifier(&platform_bus_type, &wait.notifier); + if (!err) { + err = -ENODEV; + goto exit_free_clk; + } + } + err = soc_camera_host_register(&pcdev->ici); if (err) - goto exit_free_clk; + goto exit_module_put; return 0; +exit_module_put: + if (csi2 && csi2->driver) + module_put(csi2->driver->owner); exit_free_clk: pm_runtime_disable(&pdev->dev); free_irq(pcdev->irq, pcdev); @@ -1956,6 +2051,7 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev) struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct sh_mobile_ceu_dev *pcdev = container_of(soc_host, struct sh_mobile_ceu_dev, ici); + struct device *csi2 = pcdev->pdata->csi2_dev; soc_camera_host_unregister(soc_host); pm_runtime_disable(&pdev->dev); @@ -1963,7 +2059,10 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev) if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); iounmap(pcdev->base); + if (csi2 && csi2->driver) + module_put(csi2->driver->owner); kfree(pcdev); + return 0; } @@ -1995,6 +2094,8 @@ static struct platform_driver sh_mobile_ceu_driver = { static int __init sh_mobile_ceu_init(void) { + /* Whatever return code */ + request_module("sh_mobile_csi2"); return platform_driver_register(&sh_mobile_ceu_driver); } diff --git a/include/media/sh_mobile_ceu.h b/include/media/sh_mobile_ceu.h index b67747836878..80346a6d28a9 100644 --- a/include/media/sh_mobile_ceu.h +++ b/include/media/sh_mobile_ceu.h @@ -6,8 +6,11 @@ #define SH_CEU_FLAG_HSYNC_LOW (1 << 2) /* default High if possible */ #define SH_CEU_FLAG_VSYNC_LOW (1 << 3) /* default High if possible */ +struct device; + struct sh_mobile_ceu_info { unsigned long flags; + struct device *csi2_dev; }; #endif /* __ASM_SH_MOBILE_CEU_H__ */ -- cgit v1.2.3 From d700226902a62a3b6f3563782d569c0e2af74397 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Sat, 31 Jul 2010 19:24:49 -0300 Subject: V4L/DVB: Add a keymap file with dib0700 table Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/keymaps/Makefile | 1 + drivers/media/IR/keymaps/rc-dib0700-big.c | 314 ++++++++++++++++++++++++++++++ include/media/rc-map.h | 4 + 3 files changed, 319 insertions(+) create mode 100644 drivers/media/IR/keymaps/rc-dib0700-big.c (limited to 'include') diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile index 86d3d1f2eaa9..85330d171c4e 100644 --- a/drivers/media/IR/keymaps/Makefile +++ b/drivers/media/IR/keymaps/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-budget-ci-old.o \ rc-cinergy-1400.o \ rc-cinergy.o \ + rc-dib0700-big.o \ rc-dm1105-nec.o \ rc-dntv-live-dvb-t.o \ rc-dntv-live-dvbt-pro.o \ diff --git a/drivers/media/IR/keymaps/rc-dib0700-big.c b/drivers/media/IR/keymaps/rc-dib0700-big.c new file mode 100644 index 000000000000..2e83820d3e59 --- /dev/null +++ b/drivers/media/IR/keymaps/rc-dib0700-big.c @@ -0,0 +1,314 @@ +/* rc-dvb0700-big.c - Keytable for devices in dvb0700 + * + * Copyright (c) 2010 by Mauro Carvalho Chehab + * + * TODO: This table is a real mess, as it merges RC codes from several + * devices into a big table. It also has both RC-5 and NEC codes inside. + * It should be broken into small tables, and the protocols should properly + * be indentificated. + * + * The table were imported from dib0700_devices.c. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +static struct ir_scancode dib0700_table[] = { + /* Key codes for the tiny Pinnacle remote*/ + { 0x0700, KEY_MUTE }, + { 0x0701, KEY_MENU }, /* Pinnacle logo */ + { 0x0739, KEY_POWER }, + { 0x0703, KEY_VOLUMEUP }, + { 0x0709, KEY_VOLUMEDOWN }, + { 0x0706, KEY_CHANNELUP }, + { 0x070c, KEY_CHANNELDOWN }, + { 0x070f, KEY_1 }, + { 0x0715, KEY_2 }, + { 0x0710, KEY_3 }, + { 0x0718, KEY_4 }, + { 0x071b, KEY_5 }, + { 0x071e, KEY_6 }, + { 0x0711, KEY_7 }, + { 0x0721, KEY_8 }, + { 0x0712, KEY_9 }, + { 0x0727, KEY_0 }, + { 0x0724, KEY_SCREEN }, /* 'Square' key */ + { 0x072a, KEY_TEXT }, /* 'T' key */ + { 0x072d, KEY_REWIND }, + { 0x0730, KEY_PLAY }, + { 0x0733, KEY_FASTFORWARD }, + { 0x0736, KEY_RECORD }, + { 0x073c, KEY_STOP }, + { 0x073f, KEY_CANCEL }, /* '?' key */ + /* Key codes for the Terratec Cinergy DT XS Diversity, similar to cinergyT2.c */ + { 0xeb01, KEY_POWER }, + { 0xeb02, KEY_1 }, + { 0xeb03, KEY_2 }, + { 0xeb04, KEY_3 }, + { 0xeb05, KEY_4 }, + { 0xeb06, KEY_5 }, + { 0xeb07, KEY_6 }, + { 0xeb08, KEY_7 }, + { 0xeb09, KEY_8 }, + { 0xeb0a, KEY_9 }, + { 0xeb0b, KEY_VIDEO }, + { 0xeb0c, KEY_0 }, + { 0xeb0d, KEY_REFRESH }, + { 0xeb0f, KEY_EPG }, + { 0xeb10, KEY_UP }, + { 0xeb11, KEY_LEFT }, + { 0xeb12, KEY_OK }, + { 0xeb13, KEY_RIGHT }, + { 0xeb14, KEY_DOWN }, + { 0xeb16, KEY_INFO }, + { 0xeb17, KEY_RED }, + { 0xeb18, KEY_GREEN }, + { 0xeb19, KEY_YELLOW }, + { 0xeb1a, KEY_BLUE }, + { 0xeb1b, KEY_CHANNELUP }, + { 0xeb1c, KEY_VOLUMEUP }, + { 0xeb1d, KEY_MUTE }, + { 0xeb1e, KEY_VOLUMEDOWN }, + { 0xeb1f, KEY_CHANNELDOWN }, + { 0xeb40, KEY_PAUSE }, + { 0xeb41, KEY_HOME }, + { 0xeb42, KEY_MENU }, /* DVD Menu */ + { 0xeb43, KEY_SUBTITLE }, + { 0xeb44, KEY_TEXT }, /* Teletext */ + { 0xeb45, KEY_DELETE }, + { 0xeb46, KEY_TV }, + { 0xeb47, KEY_DVD }, + { 0xeb48, KEY_STOP }, + { 0xeb49, KEY_VIDEO }, + { 0xeb4a, KEY_AUDIO }, /* Music */ + { 0xeb4b, KEY_SCREEN }, /* Pic */ + { 0xeb4c, KEY_PLAY }, + { 0xeb4d, KEY_BACK }, + { 0xeb4e, KEY_REWIND }, + { 0xeb4f, KEY_FASTFORWARD }, + { 0xeb54, KEY_PREVIOUS }, + { 0xeb58, KEY_RECORD }, + { 0xeb5c, KEY_NEXT }, + + /* Key codes for the Haupauge WinTV Nova-TD, copied from nova-t-usb2.c (Nova-T USB2) */ + { 0x1e00, KEY_0 }, + { 0x1e01, KEY_1 }, + { 0x1e02, KEY_2 }, + { 0x1e03, KEY_3 }, + { 0x1e04, KEY_4 }, + { 0x1e05, KEY_5 }, + { 0x1e06, KEY_6 }, + { 0x1e07, KEY_7 }, + { 0x1e08, KEY_8 }, + { 0x1e09, KEY_9 }, + { 0x1e0a, KEY_KPASTERISK }, + { 0x1e0b, KEY_RED }, + { 0x1e0c, KEY_RADIO }, + { 0x1e0d, KEY_MENU }, + { 0x1e0e, KEY_GRAVE }, /* # */ + { 0x1e0f, KEY_MUTE }, + { 0x1e10, KEY_VOLUMEUP }, + { 0x1e11, KEY_VOLUMEDOWN }, + { 0x1e12, KEY_CHANNEL }, + { 0x1e14, KEY_UP }, + { 0x1e15, KEY_DOWN }, + { 0x1e16, KEY_LEFT }, + { 0x1e17, KEY_RIGHT }, + { 0x1e18, KEY_VIDEO }, + { 0x1e19, KEY_AUDIO }, + { 0x1e1a, KEY_MEDIA }, + { 0x1e1b, KEY_EPG }, + { 0x1e1c, KEY_TV }, + { 0x1e1e, KEY_NEXT }, + { 0x1e1f, KEY_BACK }, + { 0x1e20, KEY_CHANNELUP }, + { 0x1e21, KEY_CHANNELDOWN }, + { 0x1e24, KEY_LAST }, /* Skip backwards */ + { 0x1e25, KEY_OK }, + { 0x1e29, KEY_BLUE}, + { 0x1e2e, KEY_GREEN }, + { 0x1e30, KEY_PAUSE }, + { 0x1e32, KEY_REWIND }, + { 0x1e34, KEY_FASTFORWARD }, + { 0x1e35, KEY_PLAY }, + { 0x1e36, KEY_STOP }, + { 0x1e37, KEY_RECORD }, + { 0x1e38, KEY_YELLOW }, + { 0x1e3b, KEY_GOTO }, + { 0x1e3d, 
KEY_POWER }, + + /* Key codes for the Leadtek Winfast DTV Dongle */ + { 0x0042, KEY_POWER }, + { 0x077c, KEY_TUNER }, + { 0x0f4e, KEY_PRINT }, /* PREVIEW */ + { 0x0840, KEY_SCREEN }, /* full screen toggle*/ + { 0x0f71, KEY_DOT }, /* frequency */ + { 0x0743, KEY_0 }, + { 0x0c41, KEY_1 }, + { 0x0443, KEY_2 }, + { 0x0b7f, KEY_3 }, + { 0x0e41, KEY_4 }, + { 0x0643, KEY_5 }, + { 0x097f, KEY_6 }, + { 0x0d7e, KEY_7 }, + { 0x057c, KEY_8 }, + { 0x0a40, KEY_9 }, + { 0x0e4e, KEY_CLEAR }, + { 0x047c, KEY_CHANNEL }, /* show channel number */ + { 0x0f41, KEY_LAST }, /* recall */ + { 0x0342, KEY_MUTE }, + { 0x064c, KEY_RESERVED }, /* PIP button*/ + { 0x0172, KEY_SHUFFLE }, /* SNAPSHOT */ + { 0x0c4e, KEY_PLAYPAUSE }, /* TIMESHIFT */ + { 0x0b70, KEY_RECORD }, + { 0x037d, KEY_VOLUMEUP }, + { 0x017d, KEY_VOLUMEDOWN }, + { 0x0242, KEY_CHANNELUP }, + { 0x007d, KEY_CHANNELDOWN }, + + /* Key codes for Nova-TD "credit card" remote control. */ + { 0x1d00, KEY_0 }, + { 0x1d01, KEY_1 }, + { 0x1d02, KEY_2 }, + { 0x1d03, KEY_3 }, + { 0x1d04, KEY_4 }, + { 0x1d05, KEY_5 }, + { 0x1d06, KEY_6 }, + { 0x1d07, KEY_7 }, + { 0x1d08, KEY_8 }, + { 0x1d09, KEY_9 }, + { 0x1d0a, KEY_TEXT }, + { 0x1d0d, KEY_MENU }, + { 0x1d0f, KEY_MUTE }, + { 0x1d10, KEY_VOLUMEUP }, + { 0x1d11, KEY_VOLUMEDOWN }, + { 0x1d12, KEY_CHANNEL }, + { 0x1d14, KEY_UP }, + { 0x1d15, KEY_DOWN }, + { 0x1d16, KEY_LEFT }, + { 0x1d17, KEY_RIGHT }, + { 0x1d1c, KEY_TV }, + { 0x1d1e, KEY_NEXT }, + { 0x1d1f, KEY_BACK }, + { 0x1d20, KEY_CHANNELUP }, + { 0x1d21, KEY_CHANNELDOWN }, + { 0x1d24, KEY_LAST }, + { 0x1d25, KEY_OK }, + { 0x1d30, KEY_PAUSE }, + { 0x1d32, KEY_REWIND }, + { 0x1d34, KEY_FASTFORWARD }, + { 0x1d35, KEY_PLAY }, + { 0x1d36, KEY_STOP }, + { 0x1d37, KEY_RECORD }, + { 0x1d3b, KEY_GOTO }, + { 0x1d3d, KEY_POWER }, + + /* Key codes for the Pixelview SBTVD remote (proto NEC) */ + { 0x8613, KEY_MUTE }, + { 0x8612, KEY_POWER }, + { 0x8601, KEY_1 }, + { 0x8602, KEY_2 }, + { 0x8603, KEY_3 }, + { 0x8604, KEY_4 }, + { 0x8605, KEY_5 }, + { 0x8606, KEY_6 }, + { 0x8607, KEY_7 }, + { 0x8608, KEY_8 }, + { 0x8609, KEY_9 }, + { 0x8600, KEY_0 }, + { 0x860d, KEY_CHANNELUP }, + { 0x8619, KEY_CHANNELDOWN }, + { 0x8610, KEY_VOLUMEUP }, + { 0x860c, KEY_VOLUMEDOWN }, + + { 0x860a, KEY_CAMERA }, + { 0x860b, KEY_ZOOM }, + { 0x861b, KEY_BACKSPACE }, + { 0x8615, KEY_ENTER }, + + { 0x861d, KEY_UP }, + { 0x861e, KEY_DOWN }, + { 0x860e, KEY_LEFT }, + { 0x860f, KEY_RIGHT }, + + { 0x8618, KEY_RECORD }, + { 0x861a, KEY_STOP }, + + /* Key codes for the EvolutePC TVWay+ remote (proto NEC) */ + { 0x7a00, KEY_MENU }, + { 0x7a01, KEY_RECORD }, + { 0x7a02, KEY_PLAY }, + { 0x7a03, KEY_STOP }, + { 0x7a10, KEY_CHANNELUP }, + { 0x7a11, KEY_CHANNELDOWN }, + { 0x7a12, KEY_VOLUMEUP }, + { 0x7a13, KEY_VOLUMEDOWN }, + { 0x7a40, KEY_POWER }, + { 0x7a41, KEY_MUTE }, + + /* Key codes for the Elgato EyeTV Diversity silver remote, + set dvb_usb_dib0700_ir_proto=0 */ + { 0x4501, KEY_POWER }, + { 0x4502, KEY_MUTE }, + { 0x4503, KEY_1 }, + { 0x4504, KEY_2 }, + { 0x4505, KEY_3 }, + { 0x4506, KEY_4 }, + { 0x4507, KEY_5 }, + { 0x4508, KEY_6 }, + { 0x4509, KEY_7 }, + { 0x450a, KEY_8 }, + { 0x450b, KEY_9 }, + { 0x450c, KEY_LAST }, + { 0x450d, KEY_0 }, + { 0x450e, KEY_ENTER }, + { 0x450f, KEY_RED }, + { 0x4510, KEY_CHANNELUP }, + { 0x4511, KEY_GREEN }, + { 0x4512, KEY_VOLUMEDOWN }, + { 0x4513, KEY_OK }, + { 0x4514, KEY_VOLUMEUP }, + { 0x4515, KEY_YELLOW }, + { 0x4516, KEY_CHANNELDOWN }, + { 0x4517, KEY_BLUE }, + { 0x4518, KEY_LEFT }, /* Skip backwards */ + { 0x4519, KEY_PLAYPAUSE }, + { 0x451a, KEY_RIGHT }, /* Skip 
forward */ + { 0x451b, KEY_REWIND }, + { 0x451c, KEY_L }, /* Live */ + { 0x451d, KEY_FASTFORWARD }, + { 0x451e, KEY_STOP }, /* 'Reveal' for Teletext */ + { 0x451f, KEY_MENU }, /* KEY_TEXT for Teletext */ + { 0x4540, KEY_RECORD }, /* Font 'Size' for Teletext */ + { 0x4541, KEY_SCREEN }, /* Full screen toggle, 'Hold' for Teletext */ + { 0x4542, KEY_SELECT }, /* Select video input, 'Select' for Teletext */ +}; + +static struct rc_keymap dib0700_map = { + .map = { + .scan = dib0700_table, + .size = ARRAY_SIZE(dib0700_table), + .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */ + .name = RC_MAP_DIB0700_BIG_TABLE, + } +}; + +static int __init init_rc_map(void) +{ + return ir_register_map(&dib0700_map); +} + +static void __exit exit_rc_map(void) +{ + ir_unregister_map(&dib0700_map); +} + +module_init(init_rc_map) +module_exit(exit_rc_map) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mauro Carvalho Chehab "); diff --git a/include/media/rc-map.h b/include/media/rc-map.h index a329858c4b42..adbcccb54c8b 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -69,6 +69,9 @@ void rc_map_init(void); #define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old" #define RC_MAP_CINERGY_1400 "rc-cinergy-1400" #define RC_MAP_CINERGY "rc-cinergy" +/* Temporary table - should be broken into smaller tables */ +#define RC_MAP_DIB0700_BIG_TABLE "rc-dib0700-big" + #define RC_MAP_DM1105_NEC "rc-dm1105-nec" #define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro" #define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t" @@ -123,6 +126,7 @@ void rc_map_init(void); #define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr" #define RC_MAP_WINFAST "rc-winfast" #define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe" + /* * Please, do not just append newer Remote Controller names at the end. * The names should be ordered in alphabetical order -- cgit v1.2.3 From 5af935cc96a291f90799bf6a2587d87329a91699 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Sun, 1 Aug 2010 08:02:35 -0300 Subject: V4L/DVB: dib0700: break keytable into NEC and RC-5 variants Instead of having one big keytable with 2 protocols inside, break it into two separate tables, one for NEC and another for RC-5 variants, and properly identify which variant should be used in the board entries.
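To make the intent concrete, here is a sketch of the registration boilerplate one of the two new keymap modules is expected to carry, mirroring rc-dib0700-big.c above. The table contents shown, the IR_TYPE_NEC tag and the RC_MAP_DIB0700_NEC_TABLE name are assumptions drawn from the commit description and the diffstat; the new files themselves are not reproduced in this excerpt.

#include <media/rc-map.h>

/* Sketch of rc-dib0700-nec.c: only NEC-protocol scancodes, e.g. the
 * Pixelview SBTVD entries from the big table, now tagged with their
 * real protocol instead of IR_TYPE_UNKNOWN. */
static struct ir_scancode dib0700_nec_table[] = {
	{ 0x8613, KEY_MUTE },
	{ 0x8612, KEY_POWER },
	{ 0x8601, KEY_1 },
	/* ... remaining NEC entries ... */
};

static struct rc_keymap dib0700_nec_map = {
	.map = {
		.scan    = dib0700_nec_table,
		.size    = ARRAY_SIZE(dib0700_nec_table),
		.ir_type = IR_TYPE_NEC,			/* protocol stated explicitly */
		.name    = RC_MAP_DIB0700_NEC_TABLE,	/* assumed map name */
	}
};

static int __init init_rc_map(void)
{
	return ir_register_map(&dib0700_nec_map);
}

static void __exit exit_rc_map(void)
{
	ir_unregister_map(&dib0700_nec_map);
}

module_init(init_rc_map)
module_exit(exit_rc_map)

A matching rc-dib0700-rc5.c would do the same for the RC-5 style entries, and each board entry in dib0700_devices.c then selects the map whose protocol matches its remote.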
Signed-off-by: Mauro Carvalho Chehab --- drivers/media/IR/keymaps/Makefile | 3 +- drivers/media/IR/keymaps/rc-dib0700-big.c | 314 ---------------------------- drivers/media/IR/keymaps/rc-dib0700-nec.c | 124 +++++++++++ drivers/media/IR/keymaps/rc-dib0700-rc5.c | 235 +++++++++++++++++++++ drivers/media/dvb/dvb-usb/dib0700_devices.c | 67 ++++-- include/media/rc-map.h | 5 +- 6 files changed, 416 insertions(+), 332 deletions(-) delete mode 100644 drivers/media/IR/keymaps/rc-dib0700-big.c create mode 100644 drivers/media/IR/keymaps/rc-dib0700-nec.c create mode 100644 drivers/media/IR/keymaps/rc-dib0700-rc5.c (limited to 'include') diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile index 85330d171c4e..cbee06243b51 100644 --- a/drivers/media/IR/keymaps/Makefile +++ b/drivers/media/IR/keymaps/Makefile @@ -14,7 +14,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \ rc-budget-ci-old.o \ rc-cinergy-1400.o \ rc-cinergy.o \ - rc-dib0700-big.o \ + rc-dib0700-nec.o \ + rc-dib0700-rc5.o \ rc-dm1105-nec.o \ rc-dntv-live-dvb-t.o \ rc-dntv-live-dvbt-pro.o \ diff --git a/drivers/media/IR/keymaps/rc-dib0700-big.c b/drivers/media/IR/keymaps/rc-dib0700-big.c deleted file mode 100644 index 2e83820d3e59..000000000000 --- a/drivers/media/IR/keymaps/rc-dib0700-big.c +++ /dev/null @@ -1,314 +0,0 @@ -/* rc-dvb0700-big.c - Keytable for devices in dvb0700 - * - * Copyright (c) 2010 by Mauro Carvalho Chehab - * - * TODO: This table is a real mess, as it merges RC codes from several - * devices into a big table. It also has both RC-5 and NEC codes inside. - * It should be broken into small tables, and the protocols should properly - * be indentificated. - * - * The table were imported from dib0700_devices.c. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include - -static struct ir_scancode dib0700_table[] = { - /* Key codes for the tiny Pinnacle remote*/ - { 0x0700, KEY_MUTE }, - { 0x0701, KEY_MENU }, /* Pinnacle logo */ - { 0x0739, KEY_POWER }, - { 0x0703, KEY_VOLUMEUP }, - { 0x0709, KEY_VOLUMEDOWN }, - { 0x0706, KEY_CHANNELUP }, - { 0x070c, KEY_CHANNELDOWN }, - { 0x070f, KEY_1 }, - { 0x0715, KEY_2 }, - { 0x0710, KEY_3 }, - { 0x0718, KEY_4 }, - { 0x071b, KEY_5 }, - { 0x071e, KEY_6 }, - { 0x0711, KEY_7 }, - { 0x0721, KEY_8 }, - { 0x0712, KEY_9 }, - { 0x0727, KEY_0 }, - { 0x0724, KEY_SCREEN }, /* 'Square' key */ - { 0x072a, KEY_TEXT }, /* 'T' key */ - { 0x072d, KEY_REWIND }, - { 0x0730, KEY_PLAY }, - { 0x0733, KEY_FASTFORWARD }, - { 0x0736, KEY_RECORD }, - { 0x073c, KEY_STOP }, - { 0x073f, KEY_CANCEL }, /* '?' 
key */ - /* Key codes for the Terratec Cinergy DT XS Diversity, similar to cinergyT2.c */ - { 0xeb01, KEY_POWER }, - { 0xeb02, KEY_1 }, - { 0xeb03, KEY_2 }, - { 0xeb04, KEY_3 }, - { 0xeb05, KEY_4 }, - { 0xeb06, KEY_5 }, - { 0xeb07, KEY_6 }, - { 0xeb08, KEY_7 }, - { 0xeb09, KEY_8 }, - { 0xeb0a, KEY_9 }, - { 0xeb0b, KEY_VIDEO }, - { 0xeb0c, KEY_0 }, - { 0xeb0d, KEY_REFRESH }, - { 0xeb0f, KEY_EPG }, - { 0xeb10, KEY_UP }, - { 0xeb11, KEY_LEFT }, - { 0xeb12, KEY_OK }, - { 0xeb13, KEY_RIGHT }, - { 0xeb14, KEY_DOWN }, - { 0xeb16, KEY_INFO }, - { 0xeb17, KEY_RED }, - { 0xeb18, KEY_GREEN }, - { 0xeb19, KEY_YELLOW }, - { 0xeb1a, KEY_BLUE }, - { 0xeb1b, KEY_CHANNELUP }, - { 0xeb1c, KEY_VOLUMEUP }, - { 0xeb1d, KEY_MUTE }, - { 0xeb1e, KEY_VOLUMEDOWN }, - { 0xeb1f, KEY_CHANNELDOWN }, - { 0xeb40, KEY_PAUSE }, - { 0xeb41, KEY_HOME }, - { 0xeb42, KEY_MENU }, /* DVD Menu */ - { 0xeb43, KEY_SUBTITLE }, - { 0xeb44, KEY_TEXT }, /* Teletext */ - { 0xeb45, KEY_DELETE }, - { 0xeb46, KEY_TV }, - { 0xeb47, KEY_DVD }, - { 0xeb48, KEY_STOP }, - { 0xeb49, KEY_VIDEO }, - { 0xeb4a, KEY_AUDIO }, /* Music */ - { 0xeb4b, KEY_SCREEN }, /* Pic */ - { 0xeb4c, KEY_PLAY }, - { 0xeb4d, KEY_BACK }, - { 0xeb4e, KEY_REWIND }, - { 0xeb4f, KEY_FASTFORWARD }, - { 0xeb54, KEY_PREVIOUS }, - { 0xeb58, KEY_RECORD }, - { 0xeb5c, KEY_NEXT }, - - /* Key codes for the Haupauge WinTV Nova-TD, copied from nova-t-usb2.c (Nova-T USB2) */ - { 0x1e00, KEY_0 }, - { 0x1e01, KEY_1 }, - { 0x1e02, KEY_2 }, - { 0x1e03, KEY_3 }, - { 0x1e04, KEY_4 }, - { 0x1e05, KEY_5 }, - { 0x1e06, KEY_6 }, - { 0x1e07, KEY_7 }, - { 0x1e08, KEY_8 }, - { 0x1e09, KEY_9 }, - { 0x1e0a, KEY_KPASTERISK }, - { 0x1e0b, KEY_RED }, - { 0x1e0c, KEY_RADIO }, - { 0x1e0d, KEY_MENU }, - { 0x1e0e, KEY_GRAVE }, /* # */ - { 0x1e0f, KEY_MUTE }, - { 0x1e10, KEY_VOLUMEUP }, - { 0x1e11, KEY_VOLUMEDOWN }, - { 0x1e12, KEY_CHANNEL }, - { 0x1e14, KEY_UP }, - { 0x1e15, KEY_DOWN }, - { 0x1e16, KEY_LEFT }, - { 0x1e17, KEY_RIGHT }, - { 0x1e18, KEY_VIDEO }, - { 0x1e19, KEY_AUDIO }, - { 0x1e1a, KEY_MEDIA }, - { 0x1e1b, KEY_EPG }, - { 0x1e1c, KEY_TV }, - { 0x1e1e, KEY_NEXT }, - { 0x1e1f, KEY_BACK }, - { 0x1e20, KEY_CHANNELUP }, - { 0x1e21, KEY_CHANNELDOWN }, - { 0x1e24, KEY_LAST }, /* Skip backwards */ - { 0x1e25, KEY_OK }, - { 0x1e29, KEY_BLUE}, - { 0x1e2e, KEY_GREEN }, - { 0x1e30, KEY_PAUSE }, - { 0x1e32, KEY_REWIND }, - { 0x1e34, KEY_FASTFORWARD }, - { 0x1e35, KEY_PLAY }, - { 0x1e36, KEY_STOP }, - { 0x1e37, KEY_RECORD }, - { 0x1e38, KEY_YELLOW }, - { 0x1e3b, KEY_GOTO }, - { 0x1e3d, KEY_POWER }, - - /* Key codes for the Leadtek Winfast DTV Dongle */ - { 0x0042, KEY_POWER }, - { 0x077c, KEY_TUNER }, - { 0x0f4e, KEY_PRINT }, /* PREVIEW */ - { 0x0840, KEY_SCREEN }, /* full screen toggle*/ - { 0x0f71, KEY_DOT }, /* frequency */ - { 0x0743, KEY_0 }, - { 0x0c41, KEY_1 }, - { 0x0443, KEY_2 }, - { 0x0b7f, KEY_3 }, - { 0x0e41, KEY_4 }, - { 0x0643, KEY_5 }, - { 0x097f, KEY_6 }, - { 0x0d7e, KEY_7 }, - { 0x057c, KEY_8 }, - { 0x0a40, KEY_9 }, - { 0x0e4e, KEY_CLEAR }, - { 0x047c, KEY_CHANNEL }, /* show channel number */ - { 0x0f41, KEY_LAST }, /* recall */ - { 0x0342, KEY_MUTE }, - { 0x064c, KEY_RESERVED }, /* PIP button*/ - { 0x0172, KEY_SHUFFLE }, /* SNAPSHOT */ - { 0x0c4e, KEY_PLAYPAUSE }, /* TIMESHIFT */ - { 0x0b70, KEY_RECORD }, - { 0x037d, KEY_VOLUMEUP }, - { 0x017d, KEY_VOLUMEDOWN }, - { 0x0242, KEY_CHANNELUP }, - { 0x007d, KEY_CHANNELDOWN }, - - /* Key codes for Nova-TD "credit card" remote control. 
*/ - { 0x1d00, KEY_0 }, - { 0x1d01, KEY_1 }, - { 0x1d02, KEY_2 }, - { 0x1d03, KEY_3 }, - { 0x1d04, KEY_4 }, - { 0x1d05, KEY_5 }, - { 0x1d06, KEY_6 }, - { 0x1d07, KEY_7 }, - { 0x1d08, KEY_8 }, - { 0x1d09, KEY_9 }, - { 0x1d0a, KEY_TEXT }, - { 0x1d0d, KEY_MENU }, - { 0x1d0f, KEY_MUTE }, - { 0x1d10, KEY_VOLUMEUP }, - { 0x1d11, KEY_VOLUMEDOWN }, - { 0x1d12, KEY_CHANNEL }, - { 0x1d14, KEY_UP }, - { 0x1d15, KEY_DOWN }, - { 0x1d16, KEY_LEFT }, - { 0x1d17, KEY_RIGHT }, - { 0x1d1c, KEY_TV }, - { 0x1d1e, KEY_NEXT }, - { 0x1d1f, KEY_BACK }, - { 0x1d20, KEY_CHANNELUP }, - { 0x1d21, KEY_CHANNELDOWN }, - { 0x1d24, KEY_LAST }, - { 0x1d25, KEY_OK }, - { 0x1d30, KEY_PAUSE }, - { 0x1d32, KEY_REWIND }, - { 0x1d34, KEY_FASTFORWARD }, - { 0x1d35, KEY_PLAY }, - { 0x1d36, KEY_STOP }, - { 0x1d37, KEY_RECORD }, - { 0x1d3b, KEY_GOTO }, - { 0x1d3d, KEY_POWER }, - - /* Key codes for the Pixelview SBTVD remote (proto NEC) */ - { 0x8613, KEY_MUTE }, - { 0x8612, KEY_POWER }, - { 0x8601, KEY_1 }, - { 0x8602, KEY_2 }, - { 0x8603, KEY_3 }, - { 0x8604, KEY_4 }, - { 0x8605, KEY_5 }, - { 0x8606, KEY_6 }, - { 0x8607, KEY_7 }, - { 0x8608, KEY_8 }, - { 0x8609, KEY_9 }, - { 0x8600, KEY_0 }, - { 0x860d, KEY_CHANNELUP }, - { 0x8619, KEY_CHANNELDOWN }, - { 0x8610, KEY_VOLUMEUP }, - { 0x860c, KEY_VOLUMEDOWN }, - - { 0x860a, KEY_CAMERA }, - { 0x860b, KEY_ZOOM }, - { 0x861b, KEY_BACKSPACE }, - { 0x8615, KEY_ENTER }, - - { 0x861d, KEY_UP }, - { 0x861e, KEY_DOWN }, - { 0x860e, KEY_LEFT }, - { 0x860f, KEY_RIGHT }, - - { 0x8618, KEY_RECORD }, - { 0x861a, KEY_STOP }, - - /* Key codes for the EvolutePC TVWay+ remote (proto NEC) */ - { 0x7a00, KEY_MENU }, - { 0x7a01, KEY_RECORD }, - { 0x7a02, KEY_PLAY }, - { 0x7a03, KEY_STOP }, - { 0x7a10, KEY_CHANNELUP }, - { 0x7a11, KEY_CHANNELDOWN }, - { 0x7a12, KEY_VOLUMEUP }, - { 0x7a13, KEY_VOLUMEDOWN }, - { 0x7a40, KEY_POWER }, - { 0x7a41, KEY_MUTE }, - - /* Key codes for the Elgato EyeTV Diversity silver remote, - set dvb_usb_dib0700_ir_proto=0 */ - { 0x4501, KEY_POWER }, - { 0x4502, KEY_MUTE }, - { 0x4503, KEY_1 }, - { 0x4504, KEY_2 }, - { 0x4505, KEY_3 }, - { 0x4506, KEY_4 }, - { 0x4507, KEY_5 }, - { 0x4508, KEY_6 }, - { 0x4509, KEY_7 }, - { 0x450a, KEY_8 }, - { 0x450b, KEY_9 }, - { 0x450c, KEY_LAST }, - { 0x450d, KEY_0 }, - { 0x450e, KEY_ENTER }, - { 0x450f, KEY_RED }, - { 0x4510, KEY_CHANNELUP }, - { 0x4511, KEY_GREEN }, - { 0x4512, KEY_VOLUMEDOWN }, - { 0x4513, KEY_OK }, - { 0x4514, KEY_VOLUMEUP }, - { 0x4515, KEY_YELLOW }, - { 0x4516, KEY_CHANNELDOWN }, - { 0x4517, KEY_BLUE }, - { 0x4518, KEY_LEFT }, /* Skip backwards */ - { 0x4519, KEY_PLAYPAUSE }, - { 0x451a, KEY_RIGHT }, /* Skip forward */ - { 0x451b, KEY_REWIND }, - { 0x451c, KEY_L }, /* Live */ - { 0x451d, KEY_FASTFORWARD }, - { 0x451e, KEY_STOP }, /* 'Reveal' for Teletext */ - { 0x451f, KEY_MENU }, /* KEY_TEXT for Teletext */ - { 0x4540, KEY_RECORD }, /* Font 'Size' for Teletext */ - { 0x4541, KEY_SCREEN }, /* Full screen toggle, 'Hold' for Teletext */ - { 0x4542, KEY_SELECT }, /* Select video input, 'Select' for Teletext */ -}; - -static struct rc_keymap dib0700_map = { - .map = { - .scan = dib0700_table, - .size = ARRAY_SIZE(dib0700_table), - .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */ - .name = RC_MAP_DIB0700_BIG_TABLE, - } -}; - -static int __init init_rc_map(void) -{ - return ir_register_map(&dib0700_map); -} - -static void __exit exit_rc_map(void) -{ - ir_unregister_map(&dib0700_map); -} - -module_init(init_rc_map) -module_exit(exit_rc_map) - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mauro Carvalho Chehab "); diff --git 
a/drivers/media/IR/keymaps/rc-dib0700-nec.c b/drivers/media/IR/keymaps/rc-dib0700-nec.c new file mode 100644 index 000000000000..f5809f4757f9 --- /dev/null +++ b/drivers/media/IR/keymaps/rc-dib0700-nec.c @@ -0,0 +1,124 @@ +/* rc-dvb0700-big.c - Keytable for devices in dvb0700 + * + * Copyright (c) 2010 by Mauro Carvalho Chehab + * + * TODO: This table is a real mess, as it merges RC codes from several + * devices into a big table. It also has both RC-5 and NEC codes inside. + * It should be broken into small tables, and the protocols should properly + * be indentificated. + * + * The table were imported from dib0700_devices.c. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +static struct ir_scancode dib0700_table[] = { + /* Key codes for the Pixelview SBTVD remote */ + { 0x8613, KEY_MUTE }, + { 0x8612, KEY_POWER }, + { 0x8601, KEY_1 }, + { 0x8602, KEY_2 }, + { 0x8603, KEY_3 }, + { 0x8604, KEY_4 }, + { 0x8605, KEY_5 }, + { 0x8606, KEY_6 }, + { 0x8607, KEY_7 }, + { 0x8608, KEY_8 }, + { 0x8609, KEY_9 }, + { 0x8600, KEY_0 }, + { 0x860d, KEY_CHANNELUP }, + { 0x8619, KEY_CHANNELDOWN }, + { 0x8610, KEY_VOLUMEUP }, + { 0x860c, KEY_VOLUMEDOWN }, + + { 0x860a, KEY_CAMERA }, + { 0x860b, KEY_ZOOM }, + { 0x861b, KEY_BACKSPACE }, + { 0x8615, KEY_ENTER }, + + { 0x861d, KEY_UP }, + { 0x861e, KEY_DOWN }, + { 0x860e, KEY_LEFT }, + { 0x860f, KEY_RIGHT }, + + { 0x8618, KEY_RECORD }, + { 0x861a, KEY_STOP }, + + /* Key codes for the EvolutePC TVWay+ remote */ + { 0x7a00, KEY_MENU }, + { 0x7a01, KEY_RECORD }, + { 0x7a02, KEY_PLAY }, + { 0x7a03, KEY_STOP }, + { 0x7a10, KEY_CHANNELUP }, + { 0x7a11, KEY_CHANNELDOWN }, + { 0x7a12, KEY_VOLUMEUP }, + { 0x7a13, KEY_VOLUMEDOWN }, + { 0x7a40, KEY_POWER }, + { 0x7a41, KEY_MUTE }, + + /* Key codes for the Elgato EyeTV Diversity silver remote */ + { 0x4501, KEY_POWER }, + { 0x4502, KEY_MUTE }, + { 0x4503, KEY_1 }, + { 0x4504, KEY_2 }, + { 0x4505, KEY_3 }, + { 0x4506, KEY_4 }, + { 0x4507, KEY_5 }, + { 0x4508, KEY_6 }, + { 0x4509, KEY_7 }, + { 0x450a, KEY_8 }, + { 0x450b, KEY_9 }, + { 0x450c, KEY_LAST }, + { 0x450d, KEY_0 }, + { 0x450e, KEY_ENTER }, + { 0x450f, KEY_RED }, + { 0x4510, KEY_CHANNELUP }, + { 0x4511, KEY_GREEN }, + { 0x4512, KEY_VOLUMEDOWN }, + { 0x4513, KEY_OK }, + { 0x4514, KEY_VOLUMEUP }, + { 0x4515, KEY_YELLOW }, + { 0x4516, KEY_CHANNELDOWN }, + { 0x4517, KEY_BLUE }, + { 0x4518, KEY_LEFT }, /* Skip backwards */ + { 0x4519, KEY_PLAYPAUSE }, + { 0x451a, KEY_RIGHT }, /* Skip forward */ + { 0x451b, KEY_REWIND }, + { 0x451c, KEY_L }, /* Live */ + { 0x451d, KEY_FASTFORWARD }, + { 0x451e, KEY_STOP }, /* 'Reveal' for Teletext */ + { 0x451f, KEY_MENU }, /* KEY_TEXT for Teletext */ + { 0x4540, KEY_RECORD }, /* Font 'Size' for Teletext */ + { 0x4541, KEY_SCREEN }, /* Full screen toggle, 'Hold' for Teletext */ + { 0x4542, KEY_SELECT }, /* Select video input, 'Select' for Teletext */ +}; + +static struct rc_keymap dib0700_map = { + .map = { + .scan = dib0700_table, + .size = ARRAY_SIZE(dib0700_table), + .ir_type = IR_TYPE_NEC, + .name = RC_MAP_DIB0700_NEC_TABLE, + } +}; + +static int __init init_rc_map(void) +{ + return ir_register_map(&dib0700_map); +} + +static void __exit exit_rc_map(void) +{ + ir_unregister_map(&dib0700_map); +} + +module_init(init_rc_map) +module_exit(exit_rc_map) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mauro Carvalho 
Chehab "); diff --git a/drivers/media/IR/keymaps/rc-dib0700-rc5.c b/drivers/media/IR/keymaps/rc-dib0700-rc5.c new file mode 100644 index 000000000000..e2d0fd2bbaf9 --- /dev/null +++ b/drivers/media/IR/keymaps/rc-dib0700-rc5.c @@ -0,0 +1,235 @@ +/* rc-dvb0700-big.c - Keytable for devices in dvb0700 + * + * Copyright (c) 2010 by Mauro Carvalho Chehab + * + * TODO: This table is a real mess, as it merges RC codes from several + * devices into a big table. It also has both RC-5 and NEC codes inside. + * It should be broken into small tables, and the protocols should properly + * be indentificated. + * + * The table were imported from dib0700_devices.c. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +static struct ir_scancode dib0700_table[] = { + /* Key codes for the tiny Pinnacle remote*/ + { 0x0700, KEY_MUTE }, + { 0x0701, KEY_MENU }, /* Pinnacle logo */ + { 0x0739, KEY_POWER }, + { 0x0703, KEY_VOLUMEUP }, + { 0x0709, KEY_VOLUMEDOWN }, + { 0x0706, KEY_CHANNELUP }, + { 0x070c, KEY_CHANNELDOWN }, + { 0x070f, KEY_1 }, + { 0x0715, KEY_2 }, + { 0x0710, KEY_3 }, + { 0x0718, KEY_4 }, + { 0x071b, KEY_5 }, + { 0x071e, KEY_6 }, + { 0x0711, KEY_7 }, + { 0x0721, KEY_8 }, + { 0x0712, KEY_9 }, + { 0x0727, KEY_0 }, + { 0x0724, KEY_SCREEN }, /* 'Square' key */ + { 0x072a, KEY_TEXT }, /* 'T' key */ + { 0x072d, KEY_REWIND }, + { 0x0730, KEY_PLAY }, + { 0x0733, KEY_FASTFORWARD }, + { 0x0736, KEY_RECORD }, + { 0x073c, KEY_STOP }, + { 0x073f, KEY_CANCEL }, /* '?' key */ + + /* Key codes for the Terratec Cinergy DT XS Diversity, similar to cinergyT2.c */ + { 0xeb01, KEY_POWER }, + { 0xeb02, KEY_1 }, + { 0xeb03, KEY_2 }, + { 0xeb04, KEY_3 }, + { 0xeb05, KEY_4 }, + { 0xeb06, KEY_5 }, + { 0xeb07, KEY_6 }, + { 0xeb08, KEY_7 }, + { 0xeb09, KEY_8 }, + { 0xeb0a, KEY_9 }, + { 0xeb0b, KEY_VIDEO }, + { 0xeb0c, KEY_0 }, + { 0xeb0d, KEY_REFRESH }, + { 0xeb0f, KEY_EPG }, + { 0xeb10, KEY_UP }, + { 0xeb11, KEY_LEFT }, + { 0xeb12, KEY_OK }, + { 0xeb13, KEY_RIGHT }, + { 0xeb14, KEY_DOWN }, + { 0xeb16, KEY_INFO }, + { 0xeb17, KEY_RED }, + { 0xeb18, KEY_GREEN }, + { 0xeb19, KEY_YELLOW }, + { 0xeb1a, KEY_BLUE }, + { 0xeb1b, KEY_CHANNELUP }, + { 0xeb1c, KEY_VOLUMEUP }, + { 0xeb1d, KEY_MUTE }, + { 0xeb1e, KEY_VOLUMEDOWN }, + { 0xeb1f, KEY_CHANNELDOWN }, + { 0xeb40, KEY_PAUSE }, + { 0xeb41, KEY_HOME }, + { 0xeb42, KEY_MENU }, /* DVD Menu */ + { 0xeb43, KEY_SUBTITLE }, + { 0xeb44, KEY_TEXT }, /* Teletext */ + { 0xeb45, KEY_DELETE }, + { 0xeb46, KEY_TV }, + { 0xeb47, KEY_DVD }, + { 0xeb48, KEY_STOP }, + { 0xeb49, KEY_VIDEO }, + { 0xeb4a, KEY_AUDIO }, /* Music */ + { 0xeb4b, KEY_SCREEN }, /* Pic */ + { 0xeb4c, KEY_PLAY }, + { 0xeb4d, KEY_BACK }, + { 0xeb4e, KEY_REWIND }, + { 0xeb4f, KEY_FASTFORWARD }, + { 0xeb54, KEY_PREVIOUS }, + { 0xeb58, KEY_RECORD }, + { 0xeb5c, KEY_NEXT }, + + /* Key codes for the Haupauge WinTV Nova-TD, copied from nova-t-usb2.c (Nova-T USB2) */ + { 0x1e00, KEY_0 }, + { 0x1e01, KEY_1 }, + { 0x1e02, KEY_2 }, + { 0x1e03, KEY_3 }, + { 0x1e04, KEY_4 }, + { 0x1e05, KEY_5 }, + { 0x1e06, KEY_6 }, + { 0x1e07, KEY_7 }, + { 0x1e08, KEY_8 }, + { 0x1e09, KEY_9 }, + { 0x1e0a, KEY_KPASTERISK }, + { 0x1e0b, KEY_RED }, + { 0x1e0c, KEY_RADIO }, + { 0x1e0d, KEY_MENU }, + { 0x1e0e, KEY_GRAVE }, /* # */ + { 0x1e0f, KEY_MUTE }, + { 0x1e10, KEY_VOLUMEUP }, + { 0x1e11, KEY_VOLUMEDOWN }, + { 0x1e12, KEY_CHANNEL 
}, + { 0x1e14, KEY_UP }, + { 0x1e15, KEY_DOWN }, + { 0x1e16, KEY_LEFT }, + { 0x1e17, KEY_RIGHT }, + { 0x1e18, KEY_VIDEO }, + { 0x1e19, KEY_AUDIO }, + { 0x1e1a, KEY_MEDIA }, + { 0x1e1b, KEY_EPG }, + { 0x1e1c, KEY_TV }, + { 0x1e1e, KEY_NEXT }, + { 0x1e1f, KEY_BACK }, + { 0x1e20, KEY_CHANNELUP }, + { 0x1e21, KEY_CHANNELDOWN }, + { 0x1e24, KEY_LAST }, /* Skip backwards */ + { 0x1e25, KEY_OK }, + { 0x1e29, KEY_BLUE}, + { 0x1e2e, KEY_GREEN }, + { 0x1e30, KEY_PAUSE }, + { 0x1e32, KEY_REWIND }, + { 0x1e34, KEY_FASTFORWARD }, + { 0x1e35, KEY_PLAY }, + { 0x1e36, KEY_STOP }, + { 0x1e37, KEY_RECORD }, + { 0x1e38, KEY_YELLOW }, + { 0x1e3b, KEY_GOTO }, + { 0x1e3d, KEY_POWER }, + + /* Key codes for the Leadtek Winfast DTV Dongle */ + { 0x0042, KEY_POWER }, + { 0x077c, KEY_TUNER }, + { 0x0f4e, KEY_PRINT }, /* PREVIEW */ + { 0x0840, KEY_SCREEN }, /* full screen toggle*/ + { 0x0f71, KEY_DOT }, /* frequency */ + { 0x0743, KEY_0 }, + { 0x0c41, KEY_1 }, + { 0x0443, KEY_2 }, + { 0x0b7f, KEY_3 }, + { 0x0e41, KEY_4 }, + { 0x0643, KEY_5 }, + { 0x097f, KEY_6 }, + { 0x0d7e, KEY_7 }, + { 0x057c, KEY_8 }, + { 0x0a40, KEY_9 }, + { 0x0e4e, KEY_CLEAR }, + { 0x047c, KEY_CHANNEL }, /* show channel number */ + { 0x0f41, KEY_LAST }, /* recall */ + { 0x0342, KEY_MUTE }, + { 0x064c, KEY_RESERVED }, /* PIP button*/ + { 0x0172, KEY_SHUFFLE }, /* SNAPSHOT */ + { 0x0c4e, KEY_PLAYPAUSE }, /* TIMESHIFT */ + { 0x0b70, KEY_RECORD }, + { 0x037d, KEY_VOLUMEUP }, + { 0x017d, KEY_VOLUMEDOWN }, + { 0x0242, KEY_CHANNELUP }, + { 0x007d, KEY_CHANNELDOWN }, + + /* Key codes for Nova-TD "credit card" remote control. */ + { 0x1d00, KEY_0 }, + { 0x1d01, KEY_1 }, + { 0x1d02, KEY_2 }, + { 0x1d03, KEY_3 }, + { 0x1d04, KEY_4 }, + { 0x1d05, KEY_5 }, + { 0x1d06, KEY_6 }, + { 0x1d07, KEY_7 }, + { 0x1d08, KEY_8 }, + { 0x1d09, KEY_9 }, + { 0x1d0a, KEY_TEXT }, + { 0x1d0d, KEY_MENU }, + { 0x1d0f, KEY_MUTE }, + { 0x1d10, KEY_VOLUMEUP }, + { 0x1d11, KEY_VOLUMEDOWN }, + { 0x1d12, KEY_CHANNEL }, + { 0x1d14, KEY_UP }, + { 0x1d15, KEY_DOWN }, + { 0x1d16, KEY_LEFT }, + { 0x1d17, KEY_RIGHT }, + { 0x1d1c, KEY_TV }, + { 0x1d1e, KEY_NEXT }, + { 0x1d1f, KEY_BACK }, + { 0x1d20, KEY_CHANNELUP }, + { 0x1d21, KEY_CHANNELDOWN }, + { 0x1d24, KEY_LAST }, + { 0x1d25, KEY_OK }, + { 0x1d30, KEY_PAUSE }, + { 0x1d32, KEY_REWIND }, + { 0x1d34, KEY_FASTFORWARD }, + { 0x1d35, KEY_PLAY }, + { 0x1d36, KEY_STOP }, + { 0x1d37, KEY_RECORD }, + { 0x1d3b, KEY_GOTO }, + { 0x1d3d, KEY_POWER }, +}; + +static struct rc_keymap dib0700_map = { + .map = { + .scan = dib0700_table, + .size = ARRAY_SIZE(dib0700_table), + .ir_type = IR_TYPE_RC5, + .name = RC_MAP_DIB0700_RC5_TABLE, + } +}; + +static int __init init_rc_map(void) +{ + return ir_register_map(&dib0700_map); +} + +static void __exit exit_rc_map(void) +{ + ir_unregister_map(&dib0700_map); +} + +module_init(init_rc_map) +module_exit(exit_rc_map) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mauro Carvalho Chehab "); diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c index 6e587cd1f515..ee2a84beb553 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c @@ -1872,7 +1872,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -1902,7 +1902,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = 
DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -1957,7 +1957,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, @@ -1994,7 +1994,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, @@ -2066,7 +2066,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, @@ -2106,7 +2106,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, @@ -2139,7 +2139,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { } }, - .num_device_descs = 7, + .num_device_descs = 6, .devices = { { "DiBcom STK7070PD reference design", { &dib0700_usb_id_table[17], NULL }, @@ -2166,6 +2166,45 @@ struct dvb_usb_device_properties dib0700_devices[] = { { &dib0700_usb_id_table[44], NULL }, { NULL }, }, + }, + + .rc.core = { + .rc_interval = DEFAULT_RC_INTERVAL, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, + .module_name = "dib0700", + .rc_query = dib0700_rc_query_old_firmware + }, + }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, + + .num_adapters = 2, + .adapter = { + { + .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, + .pid_filter_count = 32, + .pid_filter = stk70x0p_pid_filter, + .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, + .frontend_attach = stk7070pd_frontend_attach0, + .tuner_attach = dib7070p_tuner_attach, + + DIB0700_DEFAULT_STREAMING_CONFIG(0x02), + + .size_of_priv = sizeof(struct dib0700_adapter_state), + }, { + .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, + .pid_filter_count = 32, + .pid_filter = stk70x0p_pid_filter, + .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, + .frontend_attach = stk7070pd_frontend_attach1, + .tuner_attach = dib7070p_tuner_attach, + + DIB0700_DEFAULT_STREAMING_CONFIG(0x03), + + .size_of_priv = sizeof(struct dib0700_adapter_state), + } + }, + + .num_device_descs = 1, + .devices = { { "Elgato EyeTV Diversity", { &dib0700_usb_id_table[68], NULL }, { NULL }, @@ -2174,7 +2213,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_NEC_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, @@ -2239,7 +2278,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, @@ -2271,7 +2310,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = 
RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, @@ -2335,7 +2374,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, @@ -2375,7 +2414,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_NEC_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, @@ -2420,7 +2459,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, @@ -2453,7 +2492,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, - .rc_codes = RC_MAP_DIB0700_BIG_TABLE, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware }, diff --git a/include/media/rc-map.h b/include/media/rc-map.h index adbcccb54c8b..9569d0863f8b 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -69,9 +69,8 @@ void rc_map_init(void); #define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old" #define RC_MAP_CINERGY_1400 "rc-cinergy-1400" #define RC_MAP_CINERGY "rc-cinergy" -/* Temporary table - should be broken into smaller tables */ -#define RC_MAP_DIB0700_BIG_TABLE "rc-dib0700-big" - +#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec" +#define RC_MAP_DIB0700_RC5_TABLE "rc-dib0700-rc5" #define RC_MAP_DM1105_NEC "rc-dm1105-nec" #define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro" #define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t" -- cgit v1.2.3 From 8cadd2831bf3abc94f4530e7fdbab7bb39b6b27d Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Mon, 10 May 2010 14:26:20 -0700 Subject: timer: add on-stack deferrable timer interfaces In some cases (for instance with kernel threads) it may be desireable to use on-stack deferrable timers to get their power saving benefits. Add interfaces to support this for the IPS driver. 
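A minimal usage sketch of the new interface, assuming a monitoring kthread like the one the IPS driver adds later in this series (monitor_thread and the 200ms period are illustrative names/values, not part of this patch):

	#include <linux/timer.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/jiffies.h>

	static void monitor_timeout(unsigned long arg)
	{
		/* timer callback: just wake the sleeping thread */
		wake_up_process((struct task_struct *)arg);
	}

	static int monitor_thread(void *data)
	{
		struct timer_list timer;

		/* on-stack and deferrable: an idle CPU need not wake just for this */
		setup_deferrable_timer_on_stack(&timer, monitor_timeout,
						(unsigned long)current);
		while (!kthread_should_stop()) {
			__set_current_state(TASK_UNINTERRUPTIBLE);
			mod_timer(&timer, jiffies + msecs_to_jiffies(200));
			schedule();
			/* ... take a sample here ... */
		}
		del_timer_sync(&timer);
		destroy_timer_on_stack(&timer);
		return 0;
	}

Because the timer is deferrable, an otherwise idle CPU is allowed to coalesce the wakeup with the next non-deferrable event rather than leaving deep sleep on time, which is where the power saving comes from.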
Signed-off-by: Jesse Barnes Signed-off-by: Matthew Garrett --- include/linux/timer.h | 15 +++++++++++++++ kernel/timer.c | 13 +++++++++++++ 2 files changed, 28 insertions(+) (limited to 'include') diff --git a/include/linux/timer.h b/include/linux/timer.h index ea965b857a50..38cf093ef62c 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -100,6 +100,13 @@ void init_timer_deferrable_key(struct timer_list *timer, setup_timer_on_stack_key((timer), #timer, &__key, \ (fn), (data)); \ } while (0) +#define setup_deferrable_timer_on_stack(timer, fn, data) \ + do { \ + static struct lock_class_key __key; \ + setup_deferrable_timer_on_stack_key((timer), #timer, \ + &__key, (fn), \ + (data)); \ + } while (0) #else #define init_timer(timer)\ init_timer_key((timer), NULL, NULL) @@ -111,6 +118,8 @@ void init_timer_deferrable_key(struct timer_list *timer, setup_timer_key((timer), NULL, NULL, (fn), (data)) #define setup_timer_on_stack(timer, fn, data)\ setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) +#define setup_deferrable_timer_on_stack(timer, fn, data)\ + setup_deferrable_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) #endif #ifdef CONFIG_DEBUG_OBJECTS_TIMERS @@ -150,6 +159,12 @@ static inline void setup_timer_on_stack_key(struct timer_list *timer, init_timer_on_stack_key(timer, name, key); } +extern void setup_deferrable_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key, + void (*function)(unsigned long), + unsigned long data); + /** * timer_pending - is a timer pending? * @timer: the timer in question diff --git a/kernel/timer.c b/kernel/timer.c index ee305c8d4e18..efde11e197c4 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -577,6 +577,19 @@ static void __init_timer(struct timer_list *timer, lockdep_init_map(&timer->lockdep_map, name, key, 0); } +void setup_deferrable_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key, + void (*function)(unsigned long), + unsigned long data) +{ + timer->function = function; + timer->data = data; + init_timer_on_stack_key(timer, name, key); + timer_set_deferrable(timer); +} +EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key); + /** * init_timer_key - initialize a timer * @timer: the timer to be initialized -- cgit v1.2.3 From aa7ffc01d254c91a36bf854d57a14049c6134c72 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 14 May 2010 15:41:14 -0700 Subject: x86 platform driver: intelligent power sharing driver Intel Core i3/5 platforms with integrated graphics support both CPU and GPU turbo mode. CPU turbo mode is opportunistic: the CPU will use any available power to increase core frequencies if thermal headroom is available. The GPU side is more manual however; the graphics driver must monitor GPU power and temperature and coordinate with a core thermal driver to take advantage of available thermal and power headroom in the package. The intelligent power sharing (IPS) driver is intended to coordinate this activity by monitoring MCP (multi-chip package) temperature and power, allowing the CPU and/or GPU to increase their power consumption, and thus performance, when possible. The goal is to maximize performance within a given platform's TDP (thermal design point). 
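One detail worth calling out before the driver body: after an initial window of samples, the running temperature averages are maintained with a simple decaying (exponential) scheme kept in 0.01 degree C units. Below is a standalone userspace sketch of that arithmetic, mirroring update_average_temp() in the intel_ips.c hunk further down; the starting average and sample readings are made up for illustration.

	#include <stdio.h>

	#define DECAY_FACTOR 2	/* same weighting the driver uses */

	/* avg is kept in 0.01 C units for precision; val is a whole-degree reading */
	static unsigned int update_average_temp(unsigned int avg, unsigned int val)
	{
		return (val * 100 / DECAY_FACTOR) +
		       ((DECAY_FACTOR - 1) * avg) / DECAY_FACTOR;
	}

	int main(void)
	{
		unsigned int avg = 9000;		/* 90.00 C to start */
		unsigned int readings[] = { 92, 95, 95, 94 };
		unsigned int i;

		for (i = 0; i < sizeof(readings) / sizeof(readings[0]); i++) {
			avg = update_average_temp(avg, readings[i]);
			printf("avg = %u.%02u C\n", avg / 100, avg % 100);
		}
		return 0;
	}

With a decay factor of 2, each new reading carries half the weight of the accumulated history, so the average tracks sustained changes within a few sample periods while smoothing out single-sample spikes.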
Signed-off-by: Jesse Barnes Signed-off-by: Matthew Garrett --- drivers/platform/x86/Kconfig | 10 + drivers/platform/x86/Makefile | 1 + drivers/platform/x86/intel_ips.c | 1655 ++++++++++++++++++++++++++++++++++++++ include/drm/i915_drm.h | 9 + 4 files changed, 1675 insertions(+) create mode 100644 drivers/platform/x86/intel_ips.c (limited to 'include') diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index fd060016b7e9..724b2ed1a3cb 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -539,4 +539,14 @@ config INTEL_SCU_IPC some embedded Intel x86 platforms. This is not needed for PC-type machines. +config INTEL_IPS + tristate "Intel Intelligent Power Sharing" + depends on ACPI + ---help--- + Intel Calpella platforms support dynamic power sharing between the + CPU and GPU, maximizing performance in a given TDP. This driver, + along with the CPU frequency and i915 drivers, provides that + functionality. If in doubt, say Y here; it will only load on + supported platforms. + endif # X86_PLATFORM_DEVICES diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 8770bfe71431..7318fc2c1629 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -26,3 +26,4 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o +obj-$(CONFIG_INTEL_IPS) += intel_ips.o diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c new file mode 100644 index 000000000000..f1dce3b8372d --- /dev/null +++ b/drivers/platform/x86/intel_ips.c @@ -0,0 +1,1655 @@ +/* + * Copyright (c) 2009-2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Authors: + * Jesse Barnes + */ + +/* + * Some Intel Ibex Peak based platforms support so-called "intelligent + * power sharing", which allows the CPU and GPU to cooperate to maximize + * performance within a given TDP (thermal design point). This driver + * performs the coordination between the CPU and GPU, monitors thermal and + * power statistics in the platform, and initializes power monitoring + * hardware. It also provides a few tunables to control behavior. Its + * primary purpose is to safely allow CPU and GPU turbo modes to be enabled + * by tracking power and thermal budget; secondarily it can boost turbo + * performance by allocating more power or thermal budget to the CPU or GPU + * based on available headroom and activity. + * + * The basic algorithm is driven by a 5s moving average of tempurature. If + * thermal headroom is available, the CPU and/or GPU power clamps may be + * adjusted upwards. 
If we hit the thermal ceiling or a thermal trigger, + * we scale back the clamp. Aside from trigger events (when we're critically + * close or over our TDP) we don't adjust the clamps more than once every + * five seconds. + * + * The thermal device (device 31, function 6) has a set of registers that + * are updated by the ME firmware. The ME should also take the clamp values + * written to those registers and write them to the CPU, but we currently + * bypass that functionality and write the CPU MSR directly. + * + * UNSUPPORTED: + * - dual MCP configs + * + * TODO: + * - handle CPU hotplug + * - provide turbo enable/disable api + * - make sure we can write turbo enable/disable reg based on MISC_EN + * + * Related documents: + * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2 + * - CDI 401376 - Ibex Peak EDS + * - ref 26037, 26641 - IPS BIOS spec + * - ref 26489 - Nehalem BIOS writer's guide + * - ref 26921 - Ibex Peak BIOS Specification + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32 + +/* + * Package level MSRs for monitor/control + */ +#define PLATFORM_INFO 0xce +#define PLATFORM_TDP (1<<29) +#define PLATFORM_RATIO (1<<28) + +#define IA32_MISC_ENABLE 0x1a0 +#define IA32_MISC_TURBO_EN (1ULL<<38) + +#define TURBO_POWER_CURRENT_LIMIT 0x1ac +#define TURBO_TDC_OVR_EN (1UL<<31) +#define TURBO_TDC_MASK (0x000000007fff0000UL) +#define TURBO_TDC_SHIFT (16) +#define TURBO_TDP_OVR_EN (1UL<<15) +#define TURBO_TDP_MASK (0x0000000000003fffUL) + +/* + * Core/thread MSRs for monitoring + */ +#define IA32_PERF_CTL 0x199 +#define IA32_PERF_TURBO_DIS (1ULL<<32) + +/* + * Thermal PCI device regs + */ +#define THM_CFG_TBAR 0x10 +#define THM_CFG_TBAR_HI 0x14 + +#define THM_TSIU 0x00 +#define THM_TSE 0x01 +#define TSE_EN 0xb8 +#define THM_TSS 0x02 +#define THM_TSTR 0x03 +#define THM_TSTTP 0x04 +#define THM_TSCO 0x08 +#define THM_TSES 0x0c +#define THM_TSGPEN 0x0d +#define TSGPEN_HOT_LOHI (1<<1) +#define TSGPEN_CRIT_LOHI (1<<2) +#define THM_TSPC 0x0e +#define THM_PPEC 0x10 +#define THM_CTA 0x12 +#define THM_PTA 0x14 +#define PTA_SLOPE_MASK (0xff00) +#define PTA_SLOPE_SHIFT 8 +#define PTA_OFFSET_MASK (0x00ff) +#define THM_MGTA 0x16 +#define MGTA_SLOPE_MASK (0xff00) +#define MGTA_SLOPE_SHIFT 8 +#define MGTA_OFFSET_MASK (0x00ff) +#define THM_TRC 0x1a +#define TRC_CORE2_EN (1<<15) +#define TRC_THM_EN (1<<12) +#define TRC_C6_WAR (1<<8) +#define TRC_CORE1_EN (1<<7) +#define TRC_CORE_PWR (1<<6) +#define TRC_PCH_EN (1<<5) +#define TRC_MCH_EN (1<<4) +#define TRC_DIMM4 (1<<3) +#define TRC_DIMM3 (1<<2) +#define TRC_DIMM2 (1<<1) +#define TRC_DIMM1 (1<<0) +#define THM_TES 0x20 +#define THM_TEN 0x21 +#define TEN_UPDATE_EN 1 +#define THM_PSC 0x24 +#define PSC_NTG (1<<0) /* No GFX turbo support */ +#define PSC_NTPC (1<<1) /* No CPU turbo support */ +#define PSC_PP_DEF (0<<2) /* Perf policy up to driver */ +#define PSP_PP_PC (1<<2) /* BIOS prefers CPU perf */ +#define PSP_PP_BAL (2<<2) /* BIOS wants balanced perf */ +#define PSP_PP_GFX (3<<2) /* BIOS prefers GFX perf */ +#define PSP_PBRT (1<<4) /* BIOS run time support */ +#define THM_CTV1 0x30 +#define CTV_TEMP_ERROR (1<<15) +#define CTV_TEMP_MASK 0x3f +#define CTV_ +#define THM_CTV2 0x32 +#define THM_CEC 0x34 /* undocumented power accumulator in joules */ +#define THM_AE 0x3f +#define THM_HTS 0x50 /* 32 bits */ +#define HTS_PCPL_MASK (0x7fe00000) +#define HTS_PCPL_SHIFT 21 +#define HTS_GPL_MASK (0x001ff000) 
+#define HTS_GPL_SHIFT 12 +#define HTS_PP_MASK (0x00000c00) +#define HTS_PP_SHIFT 10 +#define HTS_PP_DEF 0 +#define HTS_PP_PROC 1 +#define HTS_PP_BAL 2 +#define HTS_PP_GFX 3 +#define HTS_PCTD_DIS (1<<9) +#define HTS_GTD_DIS (1<<8) +#define HTS_PTL_MASK (0x000000fe) +#define HTS_PTL_SHIFT 1 +#define HTS_NVV (1<<0) +#define THM_HTSHI 0x54 /* 16 bits */ +#define HTS2_PPL_MASK (0x03ff) +#define HTS2_PRST_MASK (0x3c00) +#define HTS2_PRST_SHIFT 10 +#define HTS2_PRST_UNLOADED 0 +#define HTS2_PRST_RUNNING 1 +#define HTS2_PRST_TDISOP 2 /* turbo disabled due to power */ +#define HTS2_PRST_TDISHT 3 /* turbo disabled due to high temp */ +#define HTS2_PRST_TDISUSR 4 /* user disabled turbo */ +#define HTS2_PRST_TDISPLAT 5 /* platform disabled turbo */ +#define HTS2_PRST_TDISPM 6 /* power management disabled turbo */ +#define HTS2_PRST_TDISERR 7 /* some kind of error disabled turbo */ +#define THM_PTL 0x56 +#define THM_MGTV 0x58 +#define TV_MASK 0x000000000000ff00 +#define TV_SHIFT 8 +#define THM_PTV 0x60 +#define PTV_MASK 0x00ff +#define THM_MMGPC 0x64 +#define THM_MPPC 0x66 +#define THM_MPCPC 0x68 +#define THM_TSPIEN 0x82 +#define TSPIEN_AUX_LOHI (1<<0) +#define TSPIEN_HOT_LOHI (1<<1) +#define TSPIEN_CRIT_LOHI (1<<2) +#define TSPIEN_AUX2_LOHI (1<<3) +#define THM_TSLOCK 0x83 +#define THM_ATR 0x84 +#define THM_TOF 0x87 +#define THM_STS 0x98 +#define STS_PCPL_MASK (0x7fe00000) +#define STS_PCPL_SHIFT 21 +#define STS_GPL_MASK (0x001ff000) +#define STS_GPL_SHIFT 12 +#define STS_PP_MASK (0x00000c00) +#define STS_PP_SHIFT 10 +#define STS_PP_DEF 0 +#define STS_PP_PROC 1 +#define STS_PP_BAL 2 +#define STS_PP_GFX 3 +#define STS_PCTD_DIS (1<<9) +#define STS_GTD_DIS (1<<8) +#define STS_PTL_MASK (0x000000fe) +#define STS_PTL_SHIFT 1 +#define STS_NVV (1<<0) +#define THM_SEC 0x9c +#define SEC_ACK (1<<0) +#define THM_TC3 0xa4 +#define THM_TC1 0xa8 +#define STS_PPL_MASK (0x0003ff00) +#define STS_PPL_SHIFT 16 +#define THM_TC2 0xac +#define THM_DTV 0xb0 +#define THM_ITV 0xd8 +#define ITV_ME_SEQNO_MASK 0x000f0000 /* ME should update every ~200ms */ +#define ITV_ME_SEQNO_SHIFT (16) +#define ITV_MCH_TEMP_MASK 0x0000ff00 +#define ITV_MCH_TEMP_SHIFT (8) +#define ITV_PCH_TEMP_MASK 0x000000ff + +#define thm_readb(off) readb(ips->regmap + (off)) +#define thm_readw(off) readw(ips->regmap + (off)) +#define thm_readl(off) readl(ips->regmap + (off)) +#define thm_readq(off) readq(ips->regmap + (off)) + +#define thm_writeb(off, val) writeb((val), ips->regmap + (off)) +#define thm_writew(off, val) writew((val), ips->regmap + (off)) +#define thm_writel(off, val) writel((val), ips->regmap + (off)) + +static const int IPS_ADJUST_PERIOD = 5000; /* ms */ + +/* For initial average collection */ +static const int IPS_SAMPLE_PERIOD = 200; /* ms */ +static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */ +#define IPS_SAMPLE_COUNT (IPS_SAMPLE_WINDOW / IPS_SAMPLE_PERIOD) + +/* Per-SKU limits */ +struct ips_mcp_limits { + int cpu_family; + int cpu_model; /* includes extended model... 
*/ + int mcp_power_limit; /* mW units */ + int core_power_limit; + int mch_power_limit; + int core_temp_limit; /* degrees C */ + int mch_temp_limit; +}; + +/* Max temps are -10 degrees C to avoid PROCHOT# */ + +struct ips_mcp_limits ips_sv_limits = { + .mcp_power_limit = 35000, + .core_power_limit = 29000, + .mch_power_limit = 20000, + .core_temp_limit = 95, + .mch_temp_limit = 90 +}; + +struct ips_mcp_limits ips_lv_limits = { + .mcp_power_limit = 25000, + .core_power_limit = 21000, + .mch_power_limit = 13000, + .core_temp_limit = 95, + .mch_temp_limit = 90 +}; + +struct ips_mcp_limits ips_ulv_limits = { + .mcp_power_limit = 18000, + .core_power_limit = 14000, + .mch_power_limit = 11000, + .core_temp_limit = 95, + .mch_temp_limit = 90 +}; + +struct ips_driver { + struct pci_dev *dev; + void *regmap; + struct task_struct *monitor; + struct task_struct *adjust; + struct dentry *debug_root; + + /* Average CPU core temps (all averages in .01 degrees C for precision) */ + u16 ctv1_avg_temp; + u16 ctv2_avg_temp; + /* GMCH average */ + u16 mch_avg_temp; + /* Average for the CPU (both cores?) */ + u16 mcp_avg_temp; + /* Average power consumption (in mW) */ + u32 cpu_avg_power; + u32 mch_avg_power; + + /* Offset values */ + u16 cta_val; + u16 pta_val; + u16 mgta_val; + + /* Maximums & prefs, protected by turbo status lock */ + spinlock_t turbo_status_lock; + u16 mcp_temp_limit; + u16 mcp_power_limit; + u16 core_power_limit; + u16 mch_power_limit; + bool cpu_turbo_enabled; + bool __cpu_turbo_on; + bool gpu_turbo_enabled; + bool __gpu_turbo_on; + bool gpu_preferred; + bool poll_turbo_status; + bool second_cpu; + struct ips_mcp_limits *limits; + + /* Optional MCH interfaces for if i915 is in use */ + unsigned long (*read_mch_val)(void); + bool (*gpu_raise)(void); + bool (*gpu_lower)(void); + bool (*gpu_busy)(void); + bool (*gpu_turbo_disable)(void); + + /* For restoration at unload */ + u64 orig_turbo_limit; + u64 orig_turbo_ratios; +}; + +/** + * ips_cpu_busy - is CPU busy? + * @ips: IPS driver struct + * + * Check CPU for load to see whether we should increase its thermal budget. + * + * RETURNS: + * True if the CPU could use more power, false otherwise. + */ +static bool ips_cpu_busy(struct ips_driver *ips) +{ + if ((avenrun[0] >> FSHIFT) > 1) + return true; + + return false; +} + +/** + * ips_cpu_raise - raise CPU power clamp + * @ips: IPS driver struct + * + * Raise the CPU power clamp by %IPS_CPU_STEP, in accordance with TDP for + * this platform. + * + * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR upwards (as + * long as we haven't hit the TDP limit for the SKU). + */ +static void ips_cpu_raise(struct ips_driver *ips) +{ + u64 turbo_override; + u16 cur_tdp_limit, new_tdp_limit; + + if (!ips->cpu_turbo_enabled) + return; + + rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + + cur_tdp_limit = turbo_override & TURBO_TDP_MASK; + new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */ + + /* Clamp to SKU TDP limit */ + if (((new_tdp_limit * 10) / 8) > ips->core_power_limit) + new_tdp_limit = cur_tdp_limit; + + thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8); + + turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN; + wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + + turbo_override &= ~TURBO_TDP_MASK; + turbo_override |= new_tdp_limit; + + wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); +} + +/** + * ips_cpu_lower - lower CPU power clamp + * @ips: IPS driver struct + * + * Lower CPU power clamp b %IPS_CPU_STEP if possible. 
+ * + * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR down, going + * as low as the platform limits will allow (though we could go lower there + * wouldn't be much point). + */ +static void ips_cpu_lower(struct ips_driver *ips) +{ + u64 turbo_override; + u16 cur_limit, new_limit; + + rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + + cur_limit = turbo_override & TURBO_TDP_MASK; + new_limit = cur_limit - 8; /* 1W decrease */ + + /* Clamp to SKU TDP limit */ + if (((new_limit * 10) / 8) < (ips->orig_turbo_limit & TURBO_TDP_MASK)) + new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK; + + thm_writew(THM_MPCPC, (new_limit * 10) / 8); + + turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN; + wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + + turbo_override &= ~TURBO_TDP_MASK; + turbo_override |= new_limit; + + wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); +} + +/** + * do_enable_cpu_turbo - internal turbo enable function + * @data: unused + * + * Internal function for actually updating MSRs. When we enable/disable + * turbo, we need to do it on each CPU; this function is the one called + * by on_each_cpu() when needed. + */ +static void do_enable_cpu_turbo(void *data) +{ + u64 perf_ctl; + + rdmsrl(IA32_PERF_CTL, perf_ctl); + if (perf_ctl & IA32_PERF_TURBO_DIS) { + perf_ctl &= ~IA32_PERF_TURBO_DIS; + wrmsrl(IA32_PERF_CTL, perf_ctl); + } +} + +/** + * ips_enable_cpu_turbo - enable turbo mode on all CPUs + * @ips: IPS driver struct + * + * Enable turbo mode by clearing the disable bit in IA32_PERF_CTL on + * all logical threads. + */ +static void ips_enable_cpu_turbo(struct ips_driver *ips) +{ + /* Already on, no need to mess with MSRs */ + if (ips->__cpu_turbo_on) + return; + + on_each_cpu(do_enable_cpu_turbo, ips, 1); + + ips->__cpu_turbo_on = true; +} + +/** + * do_disable_cpu_turbo - internal turbo disable function + * @data: unused + * + * Internal function for actually updating MSRs. When we enable/disable + * turbo, we need to do it on each CPU; this function is the one called + * by on_each_cpu() when needed. + */ +static void do_disable_cpu_turbo(void *data) +{ + u64 perf_ctl; + + rdmsrl(IA32_PERF_CTL, perf_ctl); + if (!(perf_ctl & IA32_PERF_TURBO_DIS)) { + perf_ctl |= IA32_PERF_TURBO_DIS; + wrmsrl(IA32_PERF_CTL, perf_ctl); + } +} + +/** + * ips_disable_cpu_turbo - disable turbo mode on all CPUs + * @ips: IPS driver struct + * + * Disable turbo mode by setting the disable bit in IA32_PERF_CTL on + * all logical threads. + */ +static void ips_disable_cpu_turbo(struct ips_driver *ips) +{ + /* Already off, leave it */ + if (!ips->__cpu_turbo_on) + return; + + on_each_cpu(do_disable_cpu_turbo, ips, 1); + + ips->__cpu_turbo_on = false; +} + +/** + * ips_gpu_busy - is GPU busy? + * @ips: IPS driver struct + * + * Check GPU for load to see whether we should increase its thermal budget. + * We need to call into the i915 driver in this case. + * + * RETURNS: + * True if the GPU could use more power, false otherwise. + */ +static bool ips_gpu_busy(struct ips_driver *ips) +{ + return false; +} + +/** + * ips_gpu_raise - raise GPU power clamp + * @ips: IPS driver struct + * + * Raise the GPU frequency/power if possible. We need to call into the + * i915 driver in this case. + */ +static void ips_gpu_raise(struct ips_driver *ips) +{ + if (!ips->gpu_turbo_enabled) + return; + + if (!ips->gpu_raise()) + ips->gpu_turbo_enabled = false; + + return; +} + +/** + * ips_gpu_lower - lower GPU power clamp + * @ips: IPS driver struct + * + * Lower GPU frequency/power if possible. 
Need to call i915. + */ +static void ips_gpu_lower(struct ips_driver *ips) +{ + if (!ips->gpu_turbo_enabled) + return; + + if (!ips->gpu_lower()) + ips->gpu_turbo_enabled = false; + + return; +} + +/** + * ips_enable_gpu_turbo - notify the gfx driver turbo is available + * @ips: IPS driver struct + * + * Call into the graphics driver indicating that it can safely use + * turbo mode. + */ +static void ips_enable_gpu_turbo(struct ips_driver *ips) +{ + if (ips->__gpu_turbo_on) + return; + ips->__gpu_turbo_on = true; +} + +/** + * ips_disable_gpu_turbo - notify the gfx driver to disable turbo mode + * @ips: IPS driver struct + * + * Request that the graphics driver disable turbo mode. + */ +static void ips_disable_gpu_turbo(struct ips_driver *ips) +{ + /* Avoid calling i915 if turbo is already disabled */ + if (!ips->__gpu_turbo_on) + return; + + if (!ips->gpu_turbo_disable()) + dev_err(&ips->dev->dev, "failed to disable graphis turbo\n"); + else + ips->__gpu_turbo_on = false; +} + +/** + * mcp_exceeded - check whether we're outside our thermal & power limits + * @ips: IPS driver struct + * + * Check whether the MCP is over its thermal or power budget. + */ +static bool mcp_exceeded(struct ips_driver *ips) +{ + unsigned long flags; + bool ret = false; + + spin_lock_irqsave(&ips->turbo_status_lock, flags); + if (ips->mcp_avg_temp > (ips->mcp_temp_limit * 100)) + ret = true; + if (ips->cpu_avg_power + ips->mch_avg_power > ips->mcp_power_limit) + ret = true; + spin_unlock_irqrestore(&ips->turbo_status_lock, flags); + + if (ret) + dev_warn(&ips->dev->dev, + "MCP power or thermal limit exceeded\n"); + + return ret; +} + +/** + * cpu_exceeded - check whether a CPU core is outside its limits + * @ips: IPS driver struct + * @cpu: CPU number to check + * + * Check a given CPU's average temp or power is over its limit. + */ +static bool cpu_exceeded(struct ips_driver *ips, int cpu) +{ + unsigned long flags; + int avg; + bool ret = false; + + spin_lock_irqsave(&ips->turbo_status_lock, flags); + avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp; + if (avg > (ips->limits->core_temp_limit * 100)) + ret = true; + if (ips->cpu_avg_power > ips->core_power_limit) + ret = true; + spin_unlock_irqrestore(&ips->turbo_status_lock, flags); + + if (ret) + dev_warn(&ips->dev->dev, + "CPU power or thermal limit exceeded\n"); + + return ret; +} + +/** + * mch_exceeded - check whether the GPU is over budget + * @ips: IPS driver struct + * + * Check the MCH temp & power against their maximums. + */ +static bool mch_exceeded(struct ips_driver *ips) +{ + unsigned long flags; + bool ret = false; + + spin_lock_irqsave(&ips->turbo_status_lock, flags); + if (ips->mch_avg_temp > (ips->limits->mch_temp_limit * 100)) + ret = true; + spin_unlock_irqrestore(&ips->turbo_status_lock, flags); + + return ret; +} + +/** + * update_turbo_limits - get various limits & settings from regs + * @ips: IPS driver struct + * + * Update the IPS power & temp limits, along with turbo enable flags, + * based on latest register contents. + * + * Used at init time and for runtime BIOS support, which requires polling + * the regs for updates (as a result of AC->DC transition for example). 
+ * + * LOCKING: + * Caller must hold turbo_status_lock (outside of init) + */ +static void update_turbo_limits(struct ips_driver *ips) +{ + u32 hts = thm_readl(THM_HTS); + + ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS); + ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS); + ips->core_power_limit = thm_readw(THM_MPCPC); + ips->mch_power_limit = thm_readw(THM_MMGPC); + ips->mcp_temp_limit = thm_readw(THM_PTL); + ips->mcp_power_limit = thm_readw(THM_MPPC); + + /* Ignore BIOS CPU vs GPU pref */ +} + +/** + * ips_adjust - adjust power clamp based on thermal state + * @data: ips driver structure + * + * Wake up every 5s or so and check whether we should adjust the power clamp. + * Check CPU and GPU load to determine which needs adjustment. There are + * several things to consider here: + * - do we need to adjust up or down? + * - is CPU busy? + * - is GPU busy? + * - is CPU in turbo? + * - is GPU in turbo? + * - is CPU or GPU preferred? (CPU is default) + * + * So, given the above, we do the following: + * - up (TDP available) + * - CPU not busy, GPU not busy - nothing + * - CPU busy, GPU not busy - adjust CPU up + * - CPU not busy, GPU busy - adjust GPU up + * - CPU busy, GPU busy - adjust preferred unit up, taking headroom from + * non-preferred unit if necessary + * - down (at TDP limit) + * - adjust both CPU and GPU down if possible + * + cpu+ gpu+ cpu+gpu- cpu-gpu+ cpu-gpu- +cpu < gpu < cpu+gpu+ cpu+ gpu+ nothing +cpu < gpu >= cpu+gpu-(mcp<) cpu+gpu-(mcp<) gpu- gpu- +cpu >= gpu < cpu-gpu+(mcp<) cpu- cpu-gpu+(mcp<) cpu- +cpu >= gpu >= cpu-gpu- cpu-gpu- cpu-gpu- cpu-gpu- + * + */ +static int ips_adjust(void *data) +{ + struct ips_driver *ips = data; + unsigned long flags; + + dev_dbg(&ips->dev->dev, "starting ips-adjust thread\n"); + + /* + * Adjust CPU and GPU clamps every 5s if needed. Doing it more + * often isn't recommended due to ME interaction. + */ + do { + bool cpu_busy = ips_cpu_busy(ips); + bool gpu_busy = ips_gpu_busy(ips); + + spin_lock_irqsave(&ips->turbo_status_lock, flags); + if (ips->poll_turbo_status) + update_turbo_limits(ips); + spin_unlock_irqrestore(&ips->turbo_status_lock, flags); + + /* Update turbo status if necessary */ + if (ips->cpu_turbo_enabled) + ips_enable_cpu_turbo(ips); + else + ips_disable_cpu_turbo(ips); + + if (ips->gpu_turbo_enabled) + ips_enable_gpu_turbo(ips); + else + ips_disable_gpu_turbo(ips); + + /* We're outside our comfort zone, crank them down */ + if (!mcp_exceeded(ips)) { + ips_cpu_lower(ips); + ips_gpu_lower(ips); + goto sleep; + } + + if (!cpu_exceeded(ips, 0) && cpu_busy) + ips_cpu_raise(ips); + else + ips_cpu_lower(ips); + + if (!mch_exceeded(ips) && gpu_busy) + ips_gpu_raise(ips); + else + ips_gpu_lower(ips); + +sleep: + schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD)); + } while (!kthread_should_stop()); + + dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n"); + + return 0; +} + +/* + * Helpers for reading out temp/power values and calculating their + * averages for the decision making and monitoring functions. 
+ */ + +static u16 calc_avg_temp(struct ips_driver *ips, u16 *array) +{ + u64 total = 0; + int i; + u16 avg; + + for (i = 0; i < IPS_SAMPLE_COUNT; i++) + total += (u64)(array[i] * 100); + + do_div(total, IPS_SAMPLE_COUNT); + + avg = (u16)total; + + return avg; +} + +static u16 read_mgtv(struct ips_driver *ips) +{ + u16 ret; + u64 slope, offset; + u64 val; + + val = thm_readq(THM_MGTV); + val = (val & TV_MASK) >> TV_SHIFT; + + slope = offset = thm_readw(THM_MGTA); + slope = (slope & MGTA_SLOPE_MASK) >> MGTA_SLOPE_SHIFT; + offset = offset & MGTA_OFFSET_MASK; + + ret = ((val * slope + 0x40) >> 7) + offset; + + + return ret; +} + +static u16 read_ptv(struct ips_driver *ips) +{ + u16 val, slope, offset; + + slope = (ips->pta_val & PTA_SLOPE_MASK) >> PTA_SLOPE_SHIFT; + offset = ips->pta_val & PTA_OFFSET_MASK; + + val = thm_readw(THM_PTV) & PTV_MASK; + + return val; +} + +static u16 read_ctv(struct ips_driver *ips, int cpu) +{ + int reg = cpu ? THM_CTV2 : THM_CTV1; + u16 val; + + val = thm_readw(reg); + if (!(val & CTV_TEMP_ERROR)) + val = (val) >> 6; /* discard fractional component */ + else + val = 0; + + return val; +} + +static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period) +{ + u32 val; + u32 ret; + + /* + * CEC is in joules/65535. Take difference over time to + * get watts. + */ + val = thm_readl(THM_CEC); + + /* period is in ms and we want mW */ + ret = (((val - *last) * 1000) / period); + ret = (ret * 1000) / 65535; + *last = val; + + return ret; +} + +static const u16 temp_decay_factor = 2; +static u16 update_average_temp(u16 avg, u16 val) +{ + u16 ret; + + /* Multiply by 100 for extra precision */ + ret = (val * 100 / temp_decay_factor) + + (((temp_decay_factor - 1) * avg) / temp_decay_factor); + return ret; +} + +static const u16 power_decay_factor = 2; +static u16 update_average_power(u32 avg, u32 val) +{ + u32 ret; + + ret = (val / power_decay_factor) + + (((power_decay_factor - 1) * avg) / power_decay_factor); + + return ret; +} + +static u32 calc_avg_power(struct ips_driver *ips, u32 *array) +{ + u64 total = 0; + u32 avg; + int i; + + for (i = 0; i < IPS_SAMPLE_COUNT; i++) + total += array[i]; + + do_div(total, IPS_SAMPLE_COUNT); + avg = (u32)total; + + return avg; +} + +static void monitor_timeout(unsigned long arg) +{ + wake_up_process((struct task_struct *)arg); +} + +/** + * ips_monitor - temp/power monitoring thread + * @data: ips driver structure + * + * This is the main function for the IPS driver. It monitors power and + * tempurature in the MCP and adjusts CPU and GPU power clams accordingly. + * + * We keep a 5s moving average of power consumption and tempurature. Using + * that data, along with CPU vs GPU preference, we adjust the power clamps + * up or down. 
+ */ +static int ips_monitor(void *data) +{ + struct ips_driver *ips = data; + struct timer_list timer; + unsigned long seqno_timestamp, expire, last_msecs, last_sample_period; + int i; + u32 *cpu_samples = NULL, *mchp_samples = NULL, old_cpu_power; + u16 *mcp_samples = NULL, *ctv1_samples = NULL, *ctv2_samples = NULL, + *mch_samples = NULL; + u8 cur_seqno, last_seqno; + + mcp_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL); + ctv1_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL); + ctv2_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL); + mch_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL); + cpu_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL); + mchp_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL); + if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples) { + dev_err(&ips->dev->dev, + "failed to allocate sample array, ips disabled\n"); + kfree(mcp_samples); + kfree(ctv1_samples); + kfree(ctv2_samples); + kfree(mch_samples); + kfree(cpu_samples); + kthread_stop(ips->adjust); + return -ENOMEM; + } + + last_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >> + ITV_ME_SEQNO_SHIFT; + seqno_timestamp = get_jiffies_64(); + + old_cpu_power = thm_readl(THM_CEC) / 65535; + schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); + + /* Collect an initial average */ + for (i = 0; i < IPS_SAMPLE_COUNT; i++) { + u32 mchp, cpu_power; + u16 val; + + mcp_samples[i] = read_ptv(ips); + + val = read_ctv(ips, 0); + ctv1_samples[i] = val; + + val = read_ctv(ips, 1); + ctv2_samples[i] = val; + + val = read_mgtv(ips); + mch_samples[i] = val; + + cpu_power = get_cpu_power(ips, &old_cpu_power, + IPS_SAMPLE_PERIOD); + cpu_samples[i] = cpu_power; + + if (ips->read_mch_val) { + mchp = ips->read_mch_val(); + mchp_samples[i] = mchp; + } + + schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); + if (kthread_should_stop()) + break; + } + + ips->mcp_avg_temp = calc_avg_temp(ips, mcp_samples); + ips->ctv1_avg_temp = calc_avg_temp(ips, ctv1_samples); + ips->ctv2_avg_temp = calc_avg_temp(ips, ctv2_samples); + ips->mch_avg_temp = calc_avg_temp(ips, mch_samples); + ips->cpu_avg_power = calc_avg_power(ips, cpu_samples); + ips->mch_avg_power = calc_avg_power(ips, mchp_samples); + kfree(mcp_samples); + kfree(ctv1_samples); + kfree(ctv2_samples); + kfree(mch_samples); + kfree(cpu_samples); + kfree(mchp_samples); + + /* Start the adjustment thread now that we have data */ + wake_up_process(ips->adjust); + + /* + * Ok, now we have an initial avg. From here on out, we track the + * running avg using a decaying average calculation. This allows + * us to reduce the sample frequency if the CPU and GPU are idle. 
+ */ + old_cpu_power = thm_readl(THM_CEC); + schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); + last_sample_period = IPS_SAMPLE_PERIOD; + + setup_deferrable_timer_on_stack(&timer, monitor_timeout, + (unsigned long)current); + do { + u32 cpu_val, mch_val; + u16 val; + + /* MCP itself */ + val = read_ptv(ips); + ips->mcp_avg_temp = update_average_temp(ips->mcp_avg_temp, val); + + /* Processor 0 */ + val = read_ctv(ips, 0); + ips->ctv1_avg_temp = + update_average_temp(ips->ctv1_avg_temp, val); + /* Power */ + cpu_val = get_cpu_power(ips, &old_cpu_power, + last_sample_period); + ips->cpu_avg_power = + update_average_power(ips->cpu_avg_power, cpu_val); + + if (ips->second_cpu) { + /* Processor 1 */ + val = read_ctv(ips, 1); + ips->ctv2_avg_temp = + update_average_temp(ips->ctv2_avg_temp, val); + } + + /* MCH */ + val = read_mgtv(ips); + ips->mch_avg_temp = update_average_temp(ips->mch_avg_temp, val); + /* Power */ + if (ips->read_mch_val) { + mch_val = ips->read_mch_val(); + ips->mch_avg_power = + update_average_power(ips->mch_avg_power, + mch_val); + } + + /* + * Make sure ME is updating thermal regs. + * Note: + * If it's been more than a second since the last update, + * the ME is probably hung. + */ + cur_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >> + ITV_ME_SEQNO_SHIFT; + if (cur_seqno == last_seqno && + time_after(jiffies, seqno_timestamp + HZ)) { + dev_warn(&ips->dev->dev, "ME failed to update for more than 1s, likely hung\n"); + } else { + seqno_timestamp = get_jiffies_64(); + last_seqno = cur_seqno; + } + + last_msecs = jiffies_to_msecs(jiffies); + expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); + + __set_current_state(TASK_UNINTERRUPTIBLE); + mod_timer(&timer, expire); + schedule(); + + /* Calculate actual sample period for power averaging */ + last_sample_period = jiffies_to_msecs(jiffies) - last_msecs; + if (!last_sample_period) + last_sample_period = 1; + } while (!kthread_should_stop()); + + del_timer_sync(&timer); + destroy_timer_on_stack(&timer); + + dev_dbg(&ips->dev->dev, "ips-monitor thread stopped\n"); + + return 0; +} + +#if 0 +#define THM_DUMPW(reg) \ + { \ + u16 val = thm_readw(reg); \ + dev_dbg(&ips->dev->dev, #reg ": 0x%04x\n", val); \ + } +#define THM_DUMPL(reg) \ + { \ + u32 val = thm_readl(reg); \ + dev_dbg(&ips->dev->dev, #reg ": 0x%08x\n", val); \ + } +#define THM_DUMPQ(reg) \ + { \ + u64 val = thm_readq(reg); \ + dev_dbg(&ips->dev->dev, #reg ": 0x%016x\n", val); \ + } + +static void dump_thermal_info(struct ips_driver *ips) +{ + u16 ptl; + + ptl = thm_readw(THM_PTL); + dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl); + + THM_DUMPW(THM_CTA); + THM_DUMPW(THM_TRC); + THM_DUMPW(THM_CTV1); + THM_DUMPL(THM_STS); + THM_DUMPW(THM_PTV); + THM_DUMPQ(THM_MGTV); +} +#endif + +/** + * ips_irq_handler - handle temperature triggers and other IPS events + * @irq: irq number + * @arg: unused + * + * Handle temperature limit trigger events, generally by lowering the clamps. + * If we're at a critical limit, we clamp back to the lowest possible value + * to prevent emergency shutdown. + */ +static irqreturn_t ips_irq_handler(int irq, void *arg) +{ + struct ips_driver *ips = arg; + u8 tses = thm_readb(THM_TSES); + u8 tes = thm_readb(THM_TES); + + if (!tses && !tes) + return IRQ_NONE; + + dev_info(&ips->dev->dev, "TSES: 0x%02x\n", tses); + dev_info(&ips->dev->dev, "TES: 0x%02x\n", tes); + + /* STS update from EC? 
*/ + if (tes & 1) { + u32 sts, tc1; + + sts = thm_readl(THM_STS); + tc1 = thm_readl(THM_TC1); + + if (sts & STS_NVV) { + spin_lock(&ips->turbo_status_lock); + ips->core_power_limit = (sts & STS_PCPL_MASK) >> + STS_PCPL_SHIFT; + ips->mch_power_limit = (sts & STS_GPL_MASK) >> + STS_GPL_SHIFT; + /* ignore EC CPU vs GPU pref */ + ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS); + ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS); + ips->mcp_temp_limit = (sts & STS_PTL_MASK) >> + STS_PTL_SHIFT; + ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >> + STS_PPL_SHIFT; + spin_unlock(&ips->turbo_status_lock); + + thm_writeb(THM_SEC, SEC_ACK); + } + thm_writeb(THM_TES, tes); + } + + /* Thermal trip */ + if (tses) { + dev_warn(&ips->dev->dev, + "thermal trip occurred, tses: 0x%04x\n", tses); + thm_writeb(THM_TSES, tses); + } + + return IRQ_HANDLED; +} + +#ifndef CONFIG_DEBUG_FS +static void ips_debugfs_init(struct ips_driver *ips) { return; } +static void ips_debugfs_cleanup(struct ips_driver *ips) { return; } +#else + +/* Expose current state and limits in debugfs if possible */ + +struct ips_debugfs_node { + struct ips_driver *ips; + char *name; + int (*show)(struct seq_file *m, void *data); +}; + +static int show_cpu_temp(struct seq_file *m, void *data) +{ + struct ips_driver *ips = m->private; + + seq_printf(m, "%d.%02d\n", ips->ctv1_avg_temp / 100, + ips->ctv1_avg_temp % 100); + + return 0; +} + +static int show_cpu_power(struct seq_file *m, void *data) +{ + struct ips_driver *ips = m->private; + + seq_printf(m, "%dmW\n", ips->cpu_avg_power); + + return 0; +} + +static int show_cpu_clamp(struct seq_file *m, void *data) +{ + u64 turbo_override; + int tdp, tdc; + + rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + + tdp = (int)(turbo_override & TURBO_TDP_MASK); + tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT); + + /* Convert to .1W/A units */ + tdp = tdp * 10 / 8; + tdc = tdc * 10 / 8; + + /* Watts Amperes */ + seq_printf(m, "%d.%dW %d.%dA\n", tdp / 10, tdp % 10, + tdc / 10, tdc % 10); + + return 0; +} + +static int show_mch_temp(struct seq_file *m, void *data) +{ + struct ips_driver *ips = m->private; + + seq_printf(m, "%d.%02d\n", ips->mch_avg_temp / 100, + ips->mch_avg_temp % 100); + + return 0; +} + +static int show_mch_power(struct seq_file *m, void *data) +{ + struct ips_driver *ips = m->private; + + seq_printf(m, "%dmW\n", ips->mch_avg_power); + + return 0; +} + +static struct ips_debugfs_node ips_debug_files[] = { + { NULL, "cpu_temp", show_cpu_temp }, + { NULL, "cpu_power", show_cpu_power }, + { NULL, "cpu_clamp", show_cpu_clamp }, + { NULL, "mch_temp", show_mch_temp }, + { NULL, "mch_power", show_mch_power }, +}; + +static int ips_debugfs_open(struct inode *inode, struct file *file) +{ + struct ips_debugfs_node *node = inode->i_private; + + return single_open(file, node->show, node->ips); +} + +static const struct file_operations ips_debugfs_ops = { + .owner = THIS_MODULE, + .open = ips_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void ips_debugfs_cleanup(struct ips_driver *ips) +{ + if (ips->debug_root) + debugfs_remove_recursive(ips->debug_root); + return; +} + +static void ips_debugfs_init(struct ips_driver *ips) +{ + int i; + + ips->debug_root = debugfs_create_dir("ips", NULL); + if (!ips->debug_root) { + dev_err(&ips->dev->dev, + "failed to create debugfs entries: %ld\n", + PTR_ERR(ips->debug_root)); + return; + } + + for (i = 0; i < ARRAY_SIZE(ips_debug_files); i++) { + struct dentry *ent; + struct ips_debugfs_node 
*node = &ips_debug_files[i]; + + node->ips = ips; + ent = debugfs_create_file(node->name, S_IFREG | S_IRUGO, + ips->debug_root, node, + &ips_debugfs_ops); + if (!ent) { + dev_err(&ips->dev->dev, + "failed to create debug file: %ld\n", + PTR_ERR(ent)); + goto err_cleanup; + } + } + + return; + +err_cleanup: + ips_debugfs_cleanup(ips); + return; +} +#endif /* CONFIG_DEBUG_FS */ + +/** + * ips_detect_cpu - detect whether CPU supports IPS + * + * Walk our list and see if we're on a supported CPU. If we find one, + * return the limits for it. + */ +static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) +{ + u64 turbo_power, misc_en; + struct ips_mcp_limits *limits = NULL; + u16 tdp; + + if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) { + dev_info(&ips->dev->dev, "Non-IPS CPU detected.\n"); + goto out; + } + + rdmsrl(IA32_MISC_ENABLE, misc_en); + /* + * If the turbo enable bit isn't set, we shouldn't try to enable/disable + * turbo manually or we'll get an illegal MSR access, even though + * turbo will still be available. + */ + if (!(misc_en & IA32_MISC_TURBO_EN)) + ; /* add turbo MSR write allowed flag if necessary */ + + if (strstr(boot_cpu_data.x86_model_id, "CPU M")) + limits = &ips_sv_limits; + else if (strstr(boot_cpu_data.x86_model_id, "CPU L")) + limits = &ips_lv_limits; + else if (strstr(boot_cpu_data.x86_model_id, "CPU U")) + limits = &ips_ulv_limits; + else + dev_info(&ips->dev->dev, "No CPUID match found.\n"); + + rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); + tdp = turbo_power & TURBO_TDP_MASK; + + /* Sanity check TDP against CPU */ + if (limits->mcp_power_limit != (tdp / 8) * 1000) { + dev_warn(&ips->dev->dev, "Warning: CPU TDP doesn't match expected value (found %d, expected %d)\n", + tdp / 8, limits->mcp_power_limit / 1000); + } + +out: + return limits; +} + +/** + * ips_get_i915_syms - try to get GPU control methods from i915 driver + * @ips: IPS driver + * + * The i915 driver exports several interfaces to allow the IPS driver to + * monitor and control graphics turbo mode. If we can find them, we can + * enable graphics turbo, otherwise we must disable it to avoid exceeding + * thermal and power limits in the MCP. 
+ */ +static bool ips_get_i915_syms(struct ips_driver *ips) +{ + ips->read_mch_val = symbol_get(i915_read_mch_val); + if (!ips->read_mch_val) + goto out_err; + ips->gpu_raise = symbol_get(i915_gpu_raise); + if (!ips->gpu_raise) + goto out_put_mch; + ips->gpu_lower = symbol_get(i915_gpu_lower); + if (!ips->gpu_lower) + goto out_put_raise; + ips->gpu_busy = symbol_get(i915_gpu_busy); + if (!ips->gpu_busy) + goto out_put_lower; + ips->gpu_turbo_disable = symbol_get(i915_gpu_turbo_disable); + if (!ips->gpu_turbo_disable) + goto out_put_busy; + + return true; + +out_put_busy: + symbol_put(i915_gpu_turbo_disable); +out_put_lower: + symbol_put(i915_gpu_lower); +out_put_raise: + symbol_put(i915_gpu_raise); +out_put_mch: + symbol_put(i915_read_mch_val); +out_err: + return false; +} + +static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, ips_id_table); + +static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + u64 platform_info; + struct ips_driver *ips; + u32 hts; + int ret = 0; + u16 htshi, trc, trc_required_mask; + u8 tse; + + ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL); + if (!ips) + return -ENOMEM; + + pci_set_drvdata(dev, ips); + ips->dev = dev; + + ips->limits = ips_detect_cpu(ips); + if (!ips->limits) { + dev_info(&dev->dev, "IPS not supported on this CPU\n"); + ret = -ENXIO; + goto error_free; + } + + spin_lock_init(&ips->turbo_status_lock); + + if (!pci_resource_start(dev, 0)) { + dev_err(&dev->dev, "TBAR not assigned, aborting\n"); + ret = -ENXIO; + goto error_free; + } + + ret = pci_request_regions(dev, "ips thermal sensor"); + if (ret) { + dev_err(&dev->dev, "thermal resource busy, aborting\n"); + goto error_free; + } + + ret = pci_enable_device(dev); + if (ret) { + dev_err(&dev->dev, "can't enable PCI device, aborting\n"); + goto error_free; + } + + ips->regmap = ioremap(pci_resource_start(dev, 0), + pci_resource_len(dev, 0)); + if (!ips->regmap) { + dev_err(&dev->dev, "failed to map thermal regs, aborting\n"); + ret = -EBUSY; + goto error_release; + } + + tse = thm_readb(THM_TSE); + if (tse != TSE_EN) { + dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse); + ret = -ENXIO; + goto error_unmap; + } + + trc = thm_readw(THM_TRC); + trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN; + if ((trc & trc_required_mask) != trc_required_mask) { + dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n"); + ret = -ENXIO; + goto error_unmap; + } + + if (trc & TRC_CORE2_EN) + ips->second_cpu = true; + + if (!ips_get_i915_syms(ips)) { + dev_err(&dev->dev, "failed to get i915 symbols, graphics turbo disabled\n"); + ips->gpu_turbo_enabled = false; + } else { + dev_dbg(&dev->dev, "graphics turbo enabled\n"); + ips->gpu_turbo_enabled = true; + } + + update_turbo_limits(ips); + dev_dbg(&dev->dev, "max cpu power clamp: %dW\n", + ips->mcp_power_limit / 10); + dev_dbg(&dev->dev, "max core power clamp: %dW\n", + ips->core_power_limit / 10); + /* BIOS may update limits at runtime */ + if (thm_readl(THM_PSC) & PSP_PBRT) + ips->poll_turbo_status = true; + + /* + * Check PLATFORM_INFO MSR to make sure this chip is + * turbo capable. 
+ */ + rdmsrl(PLATFORM_INFO, platform_info); + if (!(platform_info & PLATFORM_TDP)) { + dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n"); + ret = -ENODEV; + goto error_unmap; + } + + /* + * IRQ handler for ME interaction + * Note: don't use MSI here as the PCH has bugs. + */ + pci_disable_msi(dev); + ret = request_irq(dev->irq, ips_irq_handler, IRQF_SHARED, "ips", + ips); + if (ret) { + dev_err(&dev->dev, "request irq failed, aborting\n"); + goto error_unmap; + } + + /* Enable aux, hot & critical interrupts */ + thm_writeb(THM_TSPIEN, TSPIEN_AUX2_LOHI | TSPIEN_CRIT_LOHI | + TSPIEN_HOT_LOHI | TSPIEN_AUX_LOHI); + thm_writeb(THM_TEN, TEN_UPDATE_EN); + + /* Collect adjustment values */ + ips->cta_val = thm_readw(THM_CTA); + ips->pta_val = thm_readw(THM_PTA); + ips->mgta_val = thm_readw(THM_MGTA); + + /* Save turbo limits & ratios */ + rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); + + ips_enable_cpu_turbo(ips); + ips->cpu_turbo_enabled = true; + + /* Set up the work queue and monitor/adjust threads */ + ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor"); + if (IS_ERR(ips->monitor)) { + dev_err(&dev->dev, + "failed to create thermal monitor thread, aborting\n"); + ret = -ENOMEM; + goto error_free_irq; + } + + ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust"); + if (IS_ERR(ips->adjust)) { + dev_err(&dev->dev, + "failed to create thermal adjust thread, aborting\n"); + ret = -ENOMEM; + goto error_thread_cleanup; + } + + hts = (ips->core_power_limit << HTS_PCPL_SHIFT) | + (ips->mcp_temp_limit << HTS_PTL_SHIFT) | HTS_NVV; + htshi = HTS2_PRST_RUNNING << HTS2_PRST_SHIFT; + + thm_writew(THM_HTSHI, htshi); + thm_writel(THM_HTS, hts); + + ips_debugfs_init(ips); + + dev_info(&dev->dev, "IPS driver initialized, MCP temp limit %d\n", + ips->mcp_temp_limit); + return ret; + +error_thread_cleanup: + kthread_stop(ips->monitor); +error_free_irq: + free_irq(ips->dev->irq, ips); +error_unmap: + iounmap(ips->regmap); +error_release: + pci_release_regions(dev); +error_free: + kfree(ips); + return ret; +} + +static void ips_remove(struct pci_dev *dev) +{ + struct ips_driver *ips = pci_get_drvdata(dev); + u64 turbo_override; + + if (!ips) + return; + + ips_debugfs_cleanup(ips); + + /* Release i915 driver */ + if (ips->read_mch_val) + symbol_put(i915_read_mch_val); + if (ips->gpu_raise) + symbol_put(i915_gpu_raise); + if (ips->gpu_lower) + symbol_put(i915_gpu_lower); + if (ips->gpu_busy) + symbol_put(i915_gpu_busy); + if (ips->gpu_turbo_disable) + symbol_put(i915_gpu_turbo_disable); + + rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN); + wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); + wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); + + free_irq(ips->dev->irq, ips); + if (ips->adjust) + kthread_stop(ips->adjust); + if (ips->monitor) + kthread_stop(ips->monitor); + iounmap(ips->regmap); + pci_release_regions(dev); + kfree(ips); + dev_dbg(&dev->dev, "IPS driver removed\n"); +} + +#ifdef CONFIG_PM +static int ips_suspend(struct pci_dev *dev, pm_message_t state) +{ + return 0; +} + +static int ips_resume(struct pci_dev *dev) +{ + return 0; +} +#else +#define ips_suspend NULL +#define ips_resume NULL +#endif /* CONFIG_PM */ + +static void ips_shutdown(struct pci_dev *dev) +{ +} + +static struct pci_driver ips_pci_driver = { + .name = "intel ips", + .id_table = ips_id_table, + .probe = ips_probe, + .remove = ips_remove, + .suspend = ips_suspend, + .resume = ips_resume, + .shutdown = 
ips_shutdown, +}; + +static int __init ips_init(void) +{ + return pci_register_driver(&ips_pci_driver); +} +module_init(ips_init); + +static void ips_exit(void) +{ + pci_unregister_driver(&ips_pci_driver); + return; +} +module_exit(ips_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jesse Barnes "); +MODULE_DESCRIPTION("Intelligent Power Sharing Driver"); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 7f0028e1010b..8f8b072c4c7b 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -33,6 +33,15 @@ * subject to backwards-compatibility constraints. */ +#ifdef __KERNEL__ +/* For use by IPS driver */ +extern unsigned long i915_read_mch_val(void); +extern bool i915_gpu_raise(void); +extern bool i915_gpu_lower(void); +extern bool i915_gpu_busy(void); +extern bool i915_gpu_turbo_disable(void); +#endif + /* Each region is a minimum of 16k, and there are at most 255 of them. */ #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use -- cgit v1.2.3 From c715a38bb7fc22fb8018b916c8a9f7ff017a8ad7 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 18 Jun 2010 14:05:52 +0100 Subject: rar: Move the RAR driver into the right place as its now clean We exit staging rar! rar! rar!... Signed-off-by: Alan Cox Signed-off-by: Matthew Garrett Acked-by: Greg Kroah-Hartman --- drivers/platform/x86/Kconfig | 22 + drivers/platform/x86/Makefile | 1 + drivers/platform/x86/intel_rar_register.c | 671 +++++++++++++++++++++++++++ drivers/staging/Kconfig | 2 - drivers/staging/Makefile | 1 - drivers/staging/memrar/memrar_handler.c | 3 +- drivers/staging/rar_register/Kconfig | 30 -- drivers/staging/rar_register/Makefile | 2 - drivers/staging/rar_register/rar_register.c | 675 ---------------------------- drivers/staging/rar_register/rar_register.h | 44 -- include/linux/rar_register.h | 44 ++ 11 files changed, 739 insertions(+), 756 deletions(-) create mode 100644 drivers/platform/x86/intel_rar_register.c delete mode 100644 drivers/staging/rar_register/Kconfig delete mode 100644 drivers/staging/rar_register/Makefile delete mode 100644 drivers/staging/rar_register/rar_register.c delete mode 100644 drivers/staging/rar_register/rar_register.h create mode 100644 include/linux/rar_register.h (limited to 'include') diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 724b2ed1a3cb..2f173bc0ff05 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -539,6 +539,28 @@ config INTEL_SCU_IPC some embedded Intel x86 platforms. This is not needed for PC-type machines. +config RAR_REGISTER + bool "Restricted Access Region Register Driver" + depends on PCI && X86_MRST + default n + ---help--- + This driver allows other kernel drivers access to the + contents of the restricted access region control registers. + + The restricted access region control registers + (rar_registers) are used to pass address and + locking information on restricted access regions + to other drivers that use restricted access regions. + + The restricted access regions are regions of memory + on the Intel MID Platform that are not accessible to + the x86 processor, but are accessible to dedicated + processors on board peripheral devices. + + The purpose of the restricted access regions is to + protect sensitive data from compromise by unauthorized + programs running on the x86 processor. 
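The help text above describes what the restricted access regions are for; how another kernel driver actually consumes this facility is easiest to see from a small, hypothetical client. The sketch below is not taken from any in-tree user: it only strings together the interface exported later in this patch (register_rar(), rar_get_address() and rar_lock() from <linux/rar_register.h>) in the callback pattern described by the register_rar() kernel-doc, and every identifier prefixed with example_ is made up for illustration.

	#include <linux/rar_register.h>

	/* Hypothetical client: finish device setup once RAR 1 (audio) is usable. */
	static int example_rar_ready(unsigned long data)
	{
		dma_addr_t start, end;
		int ret;

		ret = rar_get_address(RAR_TYPE_AUDIO, &start, &end);
		if (ret)
			return ret;	/* no RAR facility on this system */

		/* ... hand [start, end] to the device that owns this region ... */

		/* Lock the region; it then stays locked until the next reboot. */
		return rar_lock(RAR_TYPE_AUDIO);
	}

	static int __init example_client_init(void)
	{
		/*
		 * register_rar() invokes example_rar_ready() immediately if the
		 * RAR hardware has already been probed, or later from rar_probe()
		 * once the PCI device is discovered.
		 */
		return register_rar(RAR_TYPE_AUDIO, example_rar_ready, 0);
	}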
+ config INTEL_IPS tristate "Intel Intelligent Power Sharing" depends on ACPI diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 7318fc2c1629..ed50eca1b556 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -26,4 +26,5 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o +obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o obj-$(CONFIG_INTEL_IPS) += intel_ips.o diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c new file mode 100644 index 000000000000..73f8e6d72669 --- /dev/null +++ b/drivers/platform/x86/intel_rar_register.c @@ -0,0 +1,671 @@ +/* + * rar_register.c - An Intel Restricted Access Region register driver + * + * Copyright(c) 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + * + * ------------------------------------------------------------------- + * 20091204 Mark Allyn + * Ossama Othman + * Cleanup per feedback from Alan Cox and Arjan Van De Ven + * + * 20090806 Ossama Othman + * Return zero high address if upper 22 bits is zero. + * Cleaned up checkpatch errors. + * Clarified that driver is dealing with bus addresses. + * + * 20090702 Ossama Othman + * Removed unnecessary include directives + * Cleaned up spinlocks. + * Cleaned up logging. + * Improved invalid parameter checks. + * Fixed and simplified RAR address retrieval and RAR locking + * code. + * + * 20090626 Mark Allyn + * Initial publish + */ + +#include +#include +#include +#include +#include +#include + +/* === Lincroft Message Bus Interface === */ +#define LNC_MCR_OFFSET 0xD0 /* Message Control Register */ +#define LNC_MDR_OFFSET 0xD4 /* Message Data Register */ + +/* Message Opcodes */ +#define LNC_MESSAGE_READ_OPCODE 0xD0 +#define LNC_MESSAGE_WRITE_OPCODE 0xE0 + +/* Message Write Byte Enables */ +#define LNC_MESSAGE_BYTE_WRITE_ENABLES 0xF + +/* B-unit Port */ +#define LNC_BUNIT_PORT 0x3 + +/* === Lincroft B-Unit Registers - Programmed by IA32 firmware === */ +#define LNC_BRAR0L 0x10 +#define LNC_BRAR0H 0x11 +#define LNC_BRAR1L 0x12 +#define LNC_BRAR1H 0x13 +/* Reserved for SeP */ +#define LNC_BRAR2L 0x14 +#define LNC_BRAR2H 0x15 + +/* Moorestown supports three restricted access regions. 
*/ +#define MRST_NUM_RAR 3 + +/* RAR Bus Address Range */ +struct rar_addr { + dma_addr_t low; + dma_addr_t high; +}; + +/* + * We create one of these for each RAR + */ +struct client { + int (*callback)(unsigned long data); + unsigned long driver_priv; + bool busy; +}; + +static DEFINE_MUTEX(rar_mutex); +static DEFINE_MUTEX(lnc_reg_mutex); + +/* + * One per RAR device (currently only one device) + */ +struct rar_device { + struct rar_addr rar_addr[MRST_NUM_RAR]; + struct pci_dev *rar_dev; + bool registered; + bool allocated; + struct client client[MRST_NUM_RAR]; +}; + +/* Current platforms have only one rar_device for 3 rar regions */ +static struct rar_device my_rar_device; + +/* + * Abstract out multiple device support. Current platforms only + * have a single RAR device. + */ + +/** + * alloc_rar_device - return a new RAR structure + * + * Return a new (but not yet ready) RAR device object + */ +static struct rar_device *alloc_rar_device(void) +{ + if (my_rar_device.allocated) + return NULL; + my_rar_device.allocated = 1; + return &my_rar_device; +} + +/** + * free_rar_device - free a RAR object + * @rar: the RAR device being freed + * + * Release a RAR object and any attached resources + */ +static void free_rar_device(struct rar_device *rar) +{ + pci_dev_put(rar->rar_dev); + rar->allocated = 0; +} + +/** + * _rar_to_device - return the device handling this RAR + * @rar: RAR number + * @off: returned offset + * + * Internal helper for looking up RAR devices. This and alloc are the + * two functions that need touching to go to multiple RAR devices. + */ +static struct rar_device *_rar_to_device(int rar, int *off) +{ + if (rar >= 0 && rar <= 3) { + *off = rar; + return &my_rar_device; + } + return NULL; +} + +/** + * rar_to_device - return the device handling this RAR + * @rar: RAR number + * @off: returned offset + * + * Return the device this RAR maps to if one is present, otherwise + * returns NULL. Reports the offset relative to the base of this + * RAR device in off. + */ +static struct rar_device *rar_to_device(int rar, int *off) +{ + struct rar_device *rar_dev = _rar_to_device(rar, off); + if (rar_dev == NULL || !rar_dev->registered) + return NULL; + return rar_dev; +} + +/** + * rar_to_client - return the client handling this RAR + * @rar: RAR number + * + * Return the client this RAR maps to if a mapping is known, otherwise + * returns NULL. + */ +static struct client *rar_to_client(int rar) +{ + int idx; + struct rar_device *r = _rar_to_device(rar, &idx); + if (r != NULL) + return &r->client[idx]; + return NULL; +} + +/** + * rar_read_addr - retrieve a RAR mapping + * @pdev: PCI device for the RAR + * @offset: offset for message + * @addr: returned address + * + * Reads the address of a given RAR register. Returns 0 on success + * or an error code on failure. + */ +static int rar_read_addr(struct pci_dev *pdev, int offset, dma_addr_t *addr) +{ + /* + * ======== The Lincroft Message Bus Interface ======== + * Lincroft registers may be obtained via PCI from + * the host bridge using the Lincroft Message Bus + * Interface. That message bus interface is generally + * comprised of two registers: a control register (MCR, 0xDO) + * and a data register (MDR, 0xD4). + * + * The MCR (message control register) format is the following: + * 1. [31:24]: Opcode + * 2. [23:16]: Port + * 3. [15:8]: Register Offset + * 4. [7:4]: Byte Enables (use 0xF to set all of these bits + * to 1) + * 5. 
[3:0]: reserved + * + * Read (0xD0) and write (0xE0) opcodes are written to the + * control register when reading and writing to Lincroft + * registers, respectively. + * + * We're interested in registers found in the Lincroft + * B-unit. The B-unit port is 0x3. + * + * The six B-unit RAR register offsets we use are listed + * earlier in this file. + * + * Lastly writing to the MCR register requires the "Byte + * enables" bits to be set to 1. This may be achieved by + * writing 0xF at bit 4. + * + * The MDR (message data register) format is the following: + * 1. [31:0]: Read/Write Data + * + * Data being read from this register is only available after + * writing the appropriate control message to the MCR + * register. + * + * Data being written to this register must be written before + * writing the appropriate control message to the MCR + * register. + */ + + int result; + u32 addr32; + + /* Construct control message */ + u32 const message = + (LNC_MESSAGE_READ_OPCODE << 24) + | (LNC_BUNIT_PORT << 16) + | (offset << 8) + | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4); + + dev_dbg(&pdev->dev, "Offset for 'get' LNC MSG is %x\n", offset); + + /* + * We synchronize access to the Lincroft MCR and MDR registers + * until BOTH the command is issued through the MCR register + * and the corresponding data is read from the MDR register. + * Otherwise a race condition would exist between accesses to + * both registers. + */ + + mutex_lock(&lnc_reg_mutex); + + /* Send the control message */ + result = pci_write_config_dword(pdev, LNC_MCR_OFFSET, message); + if (!result) { + /* Read back the address as a 32bit value */ + result = pci_read_config_dword(pdev, LNC_MDR_OFFSET, &addr32); + *addr = (dma_addr_t)addr32; + } + mutex_unlock(&lnc_reg_mutex); + return result; +} + +/** + * rar_set_addr - Set a RAR mapping + * @pdev: PCI device for the RAR + * @offset: offset for message + * @addr: address to set + * + * Sets the address of a given RAR register. Returns 0 on success + * or an error code on failure. + */ +static int rar_set_addr(struct pci_dev *pdev, + int offset, + dma_addr_t addr) +{ + /* + * Data being written to this register must be written before + * writing the appropriate control message to the MCR + * register. + * See rar_get_addrs() for a description of the + * message bus interface being used here. + */ + + int result; + + /* Construct control message */ + u32 const message = (LNC_MESSAGE_WRITE_OPCODE << 24) + | (LNC_BUNIT_PORT << 16) + | (offset << 8) + | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4); + + /* + * We synchronize access to the Lincroft MCR and MDR registers + * until BOTH the command is issued through the MCR register + * and the corresponding data is read from the MDR register. + * Otherwise a race condition would exist between accesses to + * both registers. + */ + + mutex_lock(&lnc_reg_mutex); + + /* Send the control message */ + result = pci_write_config_dword(pdev, LNC_MDR_OFFSET, addr); + if (!result) + /* And address */ + result = pci_write_config_dword(pdev, LNC_MCR_OFFSET, message); + + mutex_unlock(&lnc_reg_mutex); + return result; +} + +/* + * rar_init_params - Initialize RAR parameters + * @rar: RAR device to initialise + * + * Initialize RAR parameters, such as bus addresses, etc. Returns 0 + * on success, or an error code on failure. + */ +static int init_rar_params(struct rar_device *rar) +{ + struct pci_dev *pdev = rar->rar_dev; + unsigned int i; + int result = 0; + int offset = 0x10; /* RAR 0 to 2 in order low/high/low/high/... 
*/ + + /* Retrieve RAR start and end bus addresses. + * Access the RAR registers through the Lincroft Message Bus + * Interface on PCI device: 00:00.0 Host bridge. + */ + + for (i = 0; i < MRST_NUM_RAR; ++i) { + struct rar_addr *addr = &rar->rar_addr[i]; + + result = rar_read_addr(pdev, offset++, &addr->low); + if (result != 0) + return result; + + result = rar_read_addr(pdev, offset++, &addr->high); + if (result != 0) + return result; + + + /* + * Only the upper 22 bits of the RAR addresses are + * stored in their corresponding RAR registers so we + * must set the lower 10 bits accordingly. + + * The low address has its lower 10 bits cleared, and + * the high address has all its lower 10 bits set, + * e.g.: + * low = 0x2ffffc00 + */ + + addr->low &= (dma_addr_t)0xfffffc00u; + + /* + * Set bits 9:0 on uppser address if bits 31:10 are non + * zero; otherwize clear all bits + */ + + if ((addr->high & 0xfffffc00u) == 0) + addr->high = 0; + else + addr->high |= 0x3ffu; + } + /* Done accessing the device. */ + + if (result == 0) { + for (i = 0; i != MRST_NUM_RAR; ++i) { + /* + * "BRAR" refers to the RAR registers in the + * Lincroft B-unit. + */ + dev_info(&pdev->dev, "BRAR[%u] bus address range = " + "[%lx, %lx]\n", i, + (unsigned long)rar->rar_addr[i].low, + (unsigned long)rar->rar_addr[i].high); + } + } + return result; +} + +/** + * rar_get_address - get the bus address in a RAR + * @start: return value of start address of block + * @end: return value of end address of block + * + * The rar_get_address function is used by other device drivers + * to obtain RAR address information on a RAR. It takes three + * parameters: + * + * The function returns a 0 upon success or an error if there is no RAR + * facility on this system. + */ +int rar_get_address(int rar_index, dma_addr_t *start, dma_addr_t *end) +{ + int idx; + struct rar_device *rar = rar_to_device(rar_index, &idx); + + if (rar == NULL) { + WARN_ON(1); + return -ENODEV; + } + + *start = rar->rar_addr[idx].low; + *end = rar->rar_addr[idx].high; + return 0; +} +EXPORT_SYMBOL(rar_get_address); + +/** + * rar_lock - lock a RAR register + * @rar_index: RAR to lock (0-2) + * + * The rar_lock function is ued by other device drivers to lock an RAR. + * once a RAR is locked, it stays locked until the next system reboot. + * + * The function returns a 0 upon success or an error if there is no RAR + * facility on this system, or the locking fails + */ +int rar_lock(int rar_index) +{ + struct rar_device *rar; + int result; + int idx; + dma_addr_t low, high; + + rar = rar_to_device(rar_index, &idx); + + if (rar == NULL) { + WARN_ON(1); + return -EINVAL; + } + + low = rar->rar_addr[idx].low & 0xfffffc00u; + high = rar->rar_addr[idx].high & 0xfffffc00u; + + /* + * Only allow I/O from the graphics and Langwell; + * not from the x86 processor + */ + + if (rar_index == RAR_TYPE_VIDEO) { + low |= 0x00000009; + high |= 0x00000015; + } else if (rar_index == RAR_TYPE_AUDIO) { + /* Only allow I/O from Langwell; nothing from x86 */ + low |= 0x00000008; + high |= 0x00000018; + } else + /* Read-only from all agents */ + high |= 0x00000018; + + /* + * Now program the register using the Lincroft message + * bus interface. 
+ */ + result = rar_set_addr(rar->rar_dev, + 2 * idx, low); + + if (result == 0) + result = rar_set_addr(rar->rar_dev, + 2 * idx + 1, high); + + return result; +} +EXPORT_SYMBOL(rar_lock); + +/** + * register_rar - register a RAR handler + * @num: RAR we wish to register for + * @callback: function to call when RAR support is available + * @data: data to pass to this function + * + * The register_rar function is to used by other device drivers + * to ensure that this driver is ready. As we cannot be sure of + * the compile/execute order of drivers in ther kernel, it is + * best to give this driver a callback function to call when + * it is ready to give out addresses. The callback function + * would have those steps that continue the initialization of + * a driver that do require a valid RAR address. One of those + * steps would be to call rar_get_address() + * + * This function return 0 on success or an error code on failure. + */ +int register_rar(int num, int (*callback)(unsigned long data), + unsigned long data) +{ + /* For now we hardcode a single RAR device */ + struct rar_device *rar; + struct client *c; + int idx; + int retval = 0; + + mutex_lock(&rar_mutex); + + /* Do we have a client mapping for this RAR number ? */ + c = rar_to_client(num); + if (c == NULL) { + retval = -ERANGE; + goto done; + } + /* Is it claimed ? */ + if (c->busy) { + retval = -EBUSY; + goto done; + } + c->busy = 1; + + /* See if we have a handler for this RAR yet, if we do then fire it */ + rar = rar_to_device(num, &idx); + + if (rar) { + /* + * if the driver already registered, then we can simply + * call the callback right now + */ + (*callback)(data); + goto done; + } + + /* Arrange to be called back when the hardware is found */ + c->callback = callback; + c->driver_priv = data; +done: + mutex_unlock(&rar_mutex); + return retval; +} +EXPORT_SYMBOL(register_rar); + +/** + * unregister_rar - release a RAR allocation + * @num: RAR number + * + * Releases a RAR allocation, or pending allocation. If a callback is + * pending then this function will either complete before the unregister + * returns or not at all. + */ + +void unregister_rar(int num) +{ + struct client *c; + + mutex_lock(&rar_mutex); + c = rar_to_client(num); + if (c == NULL || !c->busy) + WARN_ON(1); + else + c->busy = 0; + mutex_unlock(&rar_mutex); +} +EXPORT_SYMBOL(unregister_rar); + +/** + * rar_callback - Process callbacks + * @rar: new RAR device + * + * Process the callbacks for a newly found RAR device. + */ + +static void rar_callback(struct rar_device *rar) +{ + struct client *c = &rar->client[0]; + int i; + + mutex_lock(&rar_mutex); + + rar->registered = 1; /* Ensure no more callbacks queue */ + + for (i = 0; i < MRST_NUM_RAR; i++) { + if (c->callback && c->busy) { + c->callback(c->driver_priv); + c->callback = NULL; + } + c++; + } + mutex_unlock(&rar_mutex); +} + +/** + * rar_probe - PCI probe callback + * @dev: PCI device + * @id: matching entry in the match table + * + * A RAR device has been discovered. Initialise it and if successful + * process any pending callbacks that can now be completed. 
+ */ +static int rar_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int error; + struct rar_device *rar; + + dev_dbg(&dev->dev, "PCI probe starting\n"); + + rar = alloc_rar_device(); + if (rar == NULL) + return -EBUSY; + + /* Enable the device */ + error = pci_enable_device(dev); + if (error) { + dev_err(&dev->dev, + "Error enabling RAR register PCI device\n"); + goto end_function; + } + + /* Fill in the rar_device structure */ + rar->rar_dev = pci_dev_get(dev); + pci_set_drvdata(dev, rar); + + /* + * Initialize the RAR parameters, which have to be retrieved + * via the message bus interface. + */ + error = init_rar_params(rar); + if (error) { + pci_disable_device(dev); + dev_err(&dev->dev, "Error retrieving RAR addresses\n"); + goto end_function; + } + /* now call anyone who has registered (using callbacks) */ + rar_callback(rar); + return 0; +end_function: + free_rar_device(rar); + return error; +} + +const struct pci_device_id rar_pci_id_tbl[] = { + { PCI_VDEVICE(INTEL, 0x4110) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl); + +const struct pci_device_id *my_id_table = rar_pci_id_tbl; + +/* field for registering driver to PCI device */ +static struct pci_driver rar_pci_driver = { + .name = "rar_register_driver", + .id_table = rar_pci_id_tbl, + .probe = rar_probe, + /* Cannot be unplugged - no remove */ +}; + +static int __init rar_init_handler(void) +{ + return pci_register_driver(&rar_pci_driver); +} + +static void __exit rar_exit_handler(void) +{ + pci_unregister_driver(&rar_pci_driver); +} + +module_init(rar_init_handler); +module_exit(rar_exit_handler); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel Restricted Access Region Register Driver"); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 984a75440710..9dfef8a59974 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -109,8 +109,6 @@ source "drivers/staging/hv/Kconfig" source "drivers/staging/vme/Kconfig" -source "drivers/staging/rar_register/Kconfig" - source "drivers/staging/memrar/Kconfig" source "drivers/staging/sep/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 9fa25133874a..3dbf681ca64e 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -35,7 +35,6 @@ obj-$(CONFIG_VT6656) += vt6656/ obj-$(CONFIG_FB_UDL) += udlfb/ obj-$(CONFIG_HYPERV) += hv/ obj-$(CONFIG_VME_BUS) += vme/ -obj-$(CONFIG_RAR_REGISTER) += rar_register/ obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/ obj-$(CONFIG_DX_SEP) += sep/ obj-$(CONFIG_IIO) += iio/ diff --git a/drivers/staging/memrar/memrar_handler.c b/drivers/staging/memrar/memrar_handler.c index efa7fd62d390..41876f2b0e54 100644 --- a/drivers/staging/memrar/memrar_handler.c +++ b/drivers/staging/memrar/memrar_handler.c @@ -47,8 +47,7 @@ #include #include #include - -#include "../rar_register/rar_register.h" +#include #include "memrar.h" #include "memrar_allocator.h" diff --git a/drivers/staging/rar_register/Kconfig b/drivers/staging/rar_register/Kconfig deleted file mode 100644 index e9c27738199b..000000000000 --- a/drivers/staging/rar_register/Kconfig +++ /dev/null @@ -1,30 +0,0 @@ -# -# RAR device configuration -# - -menu "RAR Register Driver" -# -# Restricted Access Register Manager -# -config RAR_REGISTER - tristate "Restricted Access Region Register Driver" - depends on PCI - default n - ---help--- - This driver allows other kernel drivers access to the - contents of the restricted access region control registers. 
- - The restricted access region control registers - (rar_registers) are used to pass address and - locking information on restricted access regions - to other drivers that use restricted access regions. - - The restricted access regions are regions of memory - on the Intel MID Platform that are not accessible to - the x86 processor, but are accessible to dedicated - processors on board peripheral devices. - - The purpose of the restricted access regions is to - protect sensitive data from compromise by unauthorized - programs running on the x86 processor. -endmenu diff --git a/drivers/staging/rar_register/Makefile b/drivers/staging/rar_register/Makefile deleted file mode 100644 index d5954ccc16c9..000000000000 --- a/drivers/staging/rar_register/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -EXTRA_CFLAGS += -DLITTLE__ENDIAN -obj-$(CONFIG_RAR_REGISTER) += rar_register.o diff --git a/drivers/staging/rar_register/rar_register.c b/drivers/staging/rar_register/rar_register.c deleted file mode 100644 index 618503f422ef..000000000000 --- a/drivers/staging/rar_register/rar_register.c +++ /dev/null @@ -1,675 +0,0 @@ -/* - * rar_register.c - An Intel Restricted Access Region register driver - * - * Copyright(c) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA - * 02111-1307, USA. - * - * ------------------------------------------------------------------- - * 20091204 Mark Allyn - * Ossama Othman - * Cleanup per feedback from Alan Cox and Arjan Van De Ven - * - * 20090806 Ossama Othman - * Return zero high address if upper 22 bits is zero. - * Cleaned up checkpatch errors. - * Clarified that driver is dealing with bus addresses. - * - * 20090702 Ossama Othman - * Removed unnecessary include directives - * Cleaned up spinlocks. - * Cleaned up logging. - * Improved invalid parameter checks. - * Fixed and simplified RAR address retrieval and RAR locking - * code. - * - * 20090626 Mark Allyn - * Initial publish - */ - -#define DEBUG 1 - -#include "rar_register.h" - -#include -#include -#include -#include -#include - -/* === Lincroft Message Bus Interface === */ -#define LNC_MCR_OFFSET 0xD0 /* Message Control Register */ -#define LNC_MDR_OFFSET 0xD4 /* Message Data Register */ - -/* Message Opcodes */ -#define LNC_MESSAGE_READ_OPCODE 0xD0 -#define LNC_MESSAGE_WRITE_OPCODE 0xE0 - -/* Message Write Byte Enables */ -#define LNC_MESSAGE_BYTE_WRITE_ENABLES 0xF - -/* B-unit Port */ -#define LNC_BUNIT_PORT 0x3 - -/* === Lincroft B-Unit Registers - Programmed by IA32 firmware === */ -#define LNC_BRAR0L 0x10 -#define LNC_BRAR0H 0x11 -#define LNC_BRAR1L 0x12 -#define LNC_BRAR1H 0x13 -/* Reserved for SeP */ -#define LNC_BRAR2L 0x14 -#define LNC_BRAR2H 0x15 - -/* Moorestown supports three restricted access regions. 
*/ -#define MRST_NUM_RAR 3 - -/* RAR Bus Address Range */ -struct rar_addr { - dma_addr_t low; - dma_addr_t high; -}; - -/* - * We create one of these for each RAR - */ -struct client { - int (*callback)(unsigned long data); - unsigned long driver_priv; - bool busy; -}; - -static DEFINE_MUTEX(rar_mutex); -static DEFINE_MUTEX(lnc_reg_mutex); - -/* - * One per RAR device (currently only one device) - */ -struct rar_device { - struct rar_addr rar_addr[MRST_NUM_RAR]; - struct pci_dev *rar_dev; - bool registered; - bool allocated; - struct client client[MRST_NUM_RAR]; -}; - -/* Current platforms have only one rar_device for 3 rar regions */ -static struct rar_device my_rar_device; - -/* - * Abstract out multiple device support. Current platforms only - * have a single RAR device. - */ - -/** - * alloc_rar_device - return a new RAR structure - * - * Return a new (but not yet ready) RAR device object - */ -static struct rar_device *alloc_rar_device(void) -{ - if (my_rar_device.allocated) - return NULL; - my_rar_device.allocated = 1; - return &my_rar_device; -} - -/** - * free_rar_device - free a RAR object - * @rar: the RAR device being freed - * - * Release a RAR object and any attached resources - */ -static void free_rar_device(struct rar_device *rar) -{ - pci_dev_put(rar->rar_dev); - rar->allocated = 0; -} - -/** - * _rar_to_device - return the device handling this RAR - * @rar: RAR number - * @off: returned offset - * - * Internal helper for looking up RAR devices. This and alloc are the - * two functions that need touching to go to multiple RAR devices. - */ -static struct rar_device *_rar_to_device(int rar, int *off) -{ - if (rar >= 0 && rar <= 3) { - *off = rar; - return &my_rar_device; - } - return NULL; -} - - -/** - * rar_to_device - return the device handling this RAR - * @rar: RAR number - * @off: returned offset - * - * Return the device this RAR maps to if one is present, otherwise - * returns NULL. Reports the offset relative to the base of this - * RAR device in off. - */ -static struct rar_device *rar_to_device(int rar, int *off) -{ - struct rar_device *rar_dev = _rar_to_device(rar, off); - if (rar_dev == NULL || !rar_dev->registered) - return NULL; - return rar_dev; -} - -/** - * rar_to_client - return the client handling this RAR - * @rar: RAR number - * - * Return the client this RAR maps to if a mapping is known, otherwise - * returns NULL. - */ -static struct client *rar_to_client(int rar) -{ - int idx; - struct rar_device *r = _rar_to_device(rar, &idx); - if (r != NULL) - return &r->client[idx]; - return NULL; -} - -/** - * rar_read_addr - retrieve a RAR mapping - * @pdev: PCI device for the RAR - * @offset: offset for message - * @addr: returned address - * - * Reads the address of a given RAR register. Returns 0 on success - * or an error code on failure. - */ -static int rar_read_addr(struct pci_dev *pdev, int offset, dma_addr_t *addr) -{ - /* - * ======== The Lincroft Message Bus Interface ======== - * Lincroft registers may be obtained via PCI from - * the host bridge using the Lincroft Message Bus - * Interface. That message bus interface is generally - * comprised of two registers: a control register (MCR, 0xDO) - * and a data register (MDR, 0xD4). - * - * The MCR (message control register) format is the following: - * 1. [31:24]: Opcode - * 2. [23:16]: Port - * 3. [15:8]: Register Offset - * 4. [7:4]: Byte Enables (use 0xF to set all of these bits - * to 1) - * 5. 
[3:0]: reserved - * - * Read (0xD0) and write (0xE0) opcodes are written to the - * control register when reading and writing to Lincroft - * registers, respectively. - * - * We're interested in registers found in the Lincroft - * B-unit. The B-unit port is 0x3. - * - * The six B-unit RAR register offsets we use are listed - * earlier in this file. - * - * Lastly writing to the MCR register requires the "Byte - * enables" bits to be set to 1. This may be achieved by - * writing 0xF at bit 4. - * - * The MDR (message data register) format is the following: - * 1. [31:0]: Read/Write Data - * - * Data being read from this register is only available after - * writing the appropriate control message to the MCR - * register. - * - * Data being written to this register must be written before - * writing the appropriate control message to the MCR - * register. - */ - - int result; - u32 addr32; - - /* Construct control message */ - u32 const message = - (LNC_MESSAGE_READ_OPCODE << 24) - | (LNC_BUNIT_PORT << 16) - | (offset << 8) - | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4); - - dev_dbg(&pdev->dev, "Offset for 'get' LNC MSG is %x\n", offset); - - /* - * We synchronize access to the Lincroft MCR and MDR registers - * until BOTH the command is issued through the MCR register - * and the corresponding data is read from the MDR register. - * Otherwise a race condition would exist between accesses to - * both registers. - */ - - mutex_lock(&lnc_reg_mutex); - - /* Send the control message */ - result = pci_write_config_dword(pdev, LNC_MCR_OFFSET, message); - if (!result) { - /* Read back the address as a 32bit value */ - result = pci_read_config_dword(pdev, LNC_MDR_OFFSET, &addr32); - *addr = (dma_addr_t)addr32; - } - mutex_unlock(&lnc_reg_mutex); - return result; -} - -/** - * rar_set_addr - Set a RAR mapping - * @pdev: PCI device for the RAR - * @offset: offset for message - * @addr: address to set - * - * Sets the address of a given RAR register. Returns 0 on success - * or an error code on failure. - */ -static int rar_set_addr(struct pci_dev *pdev, - int offset, - dma_addr_t addr) -{ - /* - * Data being written to this register must be written before - * writing the appropriate control message to the MCR - * register. - * See rar_get_addrs() for a description of the - * message bus interface being used here. - */ - - int result; - - /* Construct control message */ - u32 const message = (LNC_MESSAGE_WRITE_OPCODE << 24) - | (LNC_BUNIT_PORT << 16) - | (offset << 8) - | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4); - - /* - * We synchronize access to the Lincroft MCR and MDR registers - * until BOTH the command is issued through the MCR register - * and the corresponding data is read from the MDR register. - * Otherwise a race condition would exist between accesses to - * both registers. - */ - - mutex_lock(&lnc_reg_mutex); - - /* Send the control message */ - result = pci_write_config_dword(pdev, LNC_MDR_OFFSET, addr); - if (!result) - /* And address */ - result = pci_write_config_dword(pdev, LNC_MCR_OFFSET, message); - - mutex_unlock(&lnc_reg_mutex); - return result; -} - -/* - * rar_init_params - Initialize RAR parameters - * @rar: RAR device to initialise - * - * Initialize RAR parameters, such as bus addresses, etc. Returns 0 - * on success, or an error code on failure. - */ -static int init_rar_params(struct rar_device *rar) -{ - struct pci_dev *pdev = rar->rar_dev; - unsigned int i; - int result = 0; - int offset = 0x10; /* RAR 0 to 2 in order low/high/low/high/... 
*/ - - /* Retrieve RAR start and end bus addresses. - * Access the RAR registers through the Lincroft Message Bus - * Interface on PCI device: 00:00.0 Host bridge. - */ - - for (i = 0; i < MRST_NUM_RAR; ++i) { - struct rar_addr *addr = &rar->rar_addr[i]; - - result = rar_read_addr(pdev, offset++, &addr->low); - if (result != 0) - return result; - - result = rar_read_addr(pdev, offset++, &addr->high); - if (result != 0) - return result; - - - /* - * Only the upper 22 bits of the RAR addresses are - * stored in their corresponding RAR registers so we - * must set the lower 10 bits accordingly. - - * The low address has its lower 10 bits cleared, and - * the high address has all its lower 10 bits set, - * e.g.: - * low = 0x2ffffc00 - */ - - addr->low &= (dma_addr_t)0xfffffc00u; - - /* - * Set bits 9:0 on uppser address if bits 31:10 are non - * zero; otherwize clear all bits - */ - - if ((addr->high & 0xfffffc00u) == 0) - addr->high = 0; - else - addr->high |= 0x3ffu; - } - /* Done accessing the device. */ - - if (result == 0) { - for (i = 0; i != MRST_NUM_RAR; ++i) { - /* - * "BRAR" refers to the RAR registers in the - * Lincroft B-unit. - */ - dev_info(&pdev->dev, "BRAR[%u] bus address range = " - "[%lx, %lx]\n", i, - (unsigned long)rar->rar_addr[i].low, - (unsigned long)rar->rar_addr[i].high); - } - } - return result; -} - -/** - * rar_get_address - get the bus address in a RAR - * @start: return value of start address of block - * @end: return value of end address of block - * - * The rar_get_address function is used by other device drivers - * to obtain RAR address information on a RAR. It takes three - * parameters: - * - * The function returns a 0 upon success or an error if there is no RAR - * facility on this system. - */ -int rar_get_address(int rar_index, dma_addr_t *start, dma_addr_t *end) -{ - int idx; - struct rar_device *rar = rar_to_device(rar_index, &idx); - - if (rar == NULL) { - WARN_ON(1); - return -ENODEV; - } - - *start = rar->rar_addr[idx].low; - *end = rar->rar_addr[idx].high; - return 0; -} -EXPORT_SYMBOL(rar_get_address); - -/** - * rar_lock - lock a RAR register - * @rar_index: RAR to lock (0-2) - * - * The rar_lock function is ued by other device drivers to lock an RAR. - * once a RAR is locked, it stays locked until the next system reboot. - * - * The function returns a 0 upon success or an error if there is no RAR - * facility on this system, or the locking fails - */ -int rar_lock(int rar_index) -{ - struct rar_device *rar; - int result; - int idx; - dma_addr_t low, high; - - rar = rar_to_device(rar_index, &idx); - - if (rar == NULL) { - WARN_ON(1); - return -EINVAL; - } - - low = rar->rar_addr[idx].low & 0xfffffc00u; - high = rar->rar_addr[idx].high & 0xfffffc00u; - - /* - * Only allow I/O from the graphics and Langwell; - * not from the x86 processor - */ - - if (rar_index == RAR_TYPE_VIDEO) { - low |= 0x00000009; - high |= 0x00000015; - } else if (rar_index == RAR_TYPE_AUDIO) { - /* Only allow I/O from Langwell; nothing from x86 */ - low |= 0x00000008; - high |= 0x00000018; - } else - /* Read-only from all agents */ - high |= 0x00000018; - - /* - * Now program the register using the Lincroft message - * bus interface. 
- */ - result = rar_set_addr(rar->rar_dev, - 2 * idx, low); - - if (result == 0) - result = rar_set_addr(rar->rar_dev, - 2 * idx + 1, high); - - return result; -} -EXPORT_SYMBOL(rar_lock); - -/** - * register_rar - register a RAR handler - * @num: RAR we wish to register for - * @callback: function to call when RAR support is available - * @data: data to pass to this function - * - * The register_rar function is to used by other device drivers - * to ensure that this driver is ready. As we cannot be sure of - * the compile/execute order of drivers in ther kernel, it is - * best to give this driver a callback function to call when - * it is ready to give out addresses. The callback function - * would have those steps that continue the initialization of - * a driver that do require a valid RAR address. One of those - * steps would be to call rar_get_address() - * - * This function return 0 on success an error code on failure. - */ -int register_rar(int num, int (*callback)(unsigned long data), - unsigned long data) -{ - /* For now we hardcode a single RAR device */ - struct rar_device *rar; - struct client *c; - int idx; - int retval = 0; - - mutex_lock(&rar_mutex); - - /* Do we have a client mapping for this RAR number ? */ - c = rar_to_client(num); - if (c == NULL) { - retval = -ERANGE; - goto done; - } - /* Is it claimed ? */ - if (c->busy) { - retval = -EBUSY; - goto done; - } - c->busy = 1; - - /* See if we have a handler for this RAR yet, if we do then fire it */ - rar = rar_to_device(num, &idx); - - if (rar) { - /* - * if the driver already registered, then we can simply - * call the callback right now - */ - (*callback)(data); - goto done; - } - - /* Arrange to be called back when the hardware is found */ - c->callback = callback; - c->driver_priv = data; -done: - mutex_unlock(&rar_mutex); - return retval; -} -EXPORT_SYMBOL(register_rar); - -/** - * unregister_rar - release a RAR allocation - * @num: RAR number - * - * Releases a RAR allocation, or pending allocation. If a callback is - * pending then this function will either complete before the unregister - * returns or not at all. - */ - -void unregister_rar(int num) -{ - struct client *c; - - mutex_lock(&rar_mutex); - c = rar_to_client(num); - if (c == NULL || !c->busy) - WARN_ON(1); - else - c->busy = 0; - mutex_unlock(&rar_mutex); -} -EXPORT_SYMBOL(unregister_rar); - -/** - * rar_callback - Process callbacks - * @rar: new RAR device - * - * Process the callbacks for a newly found RAR device. - */ - -static void rar_callback(struct rar_device *rar) -{ - struct client *c = &rar->client[0]; - int i; - - mutex_lock(&rar_mutex); - - rar->registered = 1; /* Ensure no more callbacks queue */ - - for (i = 0; i < MRST_NUM_RAR; i++) { - if (c->callback && c->busy) { - c->callback(c->driver_priv); - c->callback = NULL; - } - c++; - } - mutex_unlock(&rar_mutex); -} - -/** - * rar_probe - PCI probe callback - * @dev: PCI device - * @id: matching entry in the match table - * - * A RAR device has been discovered. Initialise it and if successful - * process any pending callbacks that can now be completed. 
- */ -static int rar_probe(struct pci_dev *dev, const struct pci_device_id *id) -{ - int error; - struct rar_device *rar; - - dev_dbg(&dev->dev, "PCI probe starting\n"); - - rar = alloc_rar_device(); - if (rar == NULL) - return -EBUSY; - - /* Enable the device */ - error = pci_enable_device(dev); - if (error) { - dev_err(&dev->dev, - "Error enabling RAR register PCI device\n"); - goto end_function; - } - - /* Fill in the rar_device structure */ - rar->rar_dev = pci_dev_get(dev); - pci_set_drvdata(dev, rar); - - /* - * Initialize the RAR parameters, which have to be retrieved - * via the message bus interface. - */ - error = init_rar_params(rar); - if (error) { - pci_disable_device(dev); - dev_err(&dev->dev, "Error retrieving RAR addresses\n"); - goto end_function; - } - /* now call anyone who has registered (using callbacks) */ - rar_callback(rar); - return 0; -end_function: - free_rar_device(rar); - return error; -} - -const struct pci_device_id rar_pci_id_tbl[] = { - { PCI_VDEVICE(INTEL, 0x4110) }, - { 0 } -}; - -MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl); - -const struct pci_device_id *my_id_table = rar_pci_id_tbl; - -/* field for registering driver to PCI device */ -static struct pci_driver rar_pci_driver = { - .name = "rar_register_driver", - .id_table = rar_pci_id_tbl, - .probe = rar_probe, - /* Cannot be unplugged - no remove */ -}; - -static int __init rar_init_handler(void) -{ - return pci_register_driver(&rar_pci_driver); -} - -static void __exit rar_exit_handler(void) -{ - pci_unregister_driver(&rar_pci_driver); -} - -module_init(rar_init_handler); -module_exit(rar_exit_handler); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Intel Restricted Access Region Register Driver"); diff --git a/drivers/staging/rar_register/rar_register.h b/drivers/staging/rar_register/rar_register.h deleted file mode 100644 index ffa805780f85..000000000000 --- a/drivers/staging/rar_register/rar_register.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (C) 2010 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General - * Public License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be - * useful, but WITHOUT ANY WARRANTY; without even the implied - * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - * PURPOSE. See the GNU General Public License for more details. - * You should have received a copy of the GNU General Public - * License along with this program; if not, write to the Free - * Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - * The full GNU General Public License is included in this - * distribution in the file called COPYING. 
- */ - - -#ifndef _RAR_REGISTER_H -#define _RAR_REGISTER_H - -#include - -/* following are used both in drivers as well as user space apps */ - -#define RAR_TYPE_VIDEO 0 -#define RAR_TYPE_AUDIO 1 -#define RAR_TYPE_IMAGE 2 -#define RAR_TYPE_DATA 3 - -#ifdef __KERNEL__ - -struct rar_device; - -int register_rar(int num, - int (*callback)(unsigned long data), unsigned long data); -void unregister_rar(int num); -int rar_get_address(int rar_index, dma_addr_t *start, dma_addr_t *end); -int rar_lock(int rar_index); - -#endif /* __KERNEL__ */ -#endif /* _RAR_REGISTER_H */ diff --git a/include/linux/rar_register.h b/include/linux/rar_register.h new file mode 100644 index 000000000000..ffa805780f85 --- /dev/null +++ b/include/linux/rar_register.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General + * Public License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + * PURPOSE. See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the Free + * Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * The full GNU General Public License is included in this + * distribution in the file called COPYING. + */ + + +#ifndef _RAR_REGISTER_H +#define _RAR_REGISTER_H + +#include + +/* following are used both in drivers as well as user space apps */ + +#define RAR_TYPE_VIDEO 0 +#define RAR_TYPE_AUDIO 1 +#define RAR_TYPE_IMAGE 2 +#define RAR_TYPE_DATA 3 + +#ifdef __KERNEL__ + +struct rar_device; + +int register_rar(int num, + int (*callback)(unsigned long data), unsigned long data); +void unregister_rar(int num); +int rar_get_address(int rar_index, dma_addr_t *start, dma_addr_t *end); +int rar_lock(int rar_index); + +#endif /* __KERNEL__ */ +#endif /* _RAR_REGISTER_H */ -- cgit v1.2.3 From 8950778704cf8483cc5cc0140f557adf0d3f45a5 Mon Sep 17 00:00:00 2001 From: Alek Du Date: Tue, 13 Jul 2010 10:56:25 +0100 Subject: gpio: Add PMIC GPIO block support Moorestown has PMIC chip which contains GPIO blocks. The PMIC chip is connected to Langwell by SPI interface. So this GPIO driver will be regarded as SPI GPIO expander though the actual GPIO access is through IPC and SRAM. The SPI master contoller will probe this device driver by parsing SPIB table. Cleaned up for new IPC, GPE removed and some printk and other tidying by Alan Cox. Fixes for points noted by Matthew Garrett Signed-off-by: Alek Du Signed-off-by: Alan Cox Signed-off-by: Matthew Garrett --- drivers/platform/x86/Kconfig | 7 + drivers/platform/x86/Makefile | 2 + drivers/platform/x86/intel_pmic_gpio.c | 340 +++++++++++++++++++++++++++++++++ include/linux/intel_pmic_gpio.h | 15 ++ 4 files changed, 364 insertions(+) create mode 100644 drivers/platform/x86/intel_pmic_gpio.c create mode 100644 include/linux/intel_pmic_gpio.h (limited to 'include') diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 2189565d9e0e..ca30e561859e 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -540,6 +540,13 @@ config INTEL_SCU_IPC some embedded Intel x86 platforms. This is not needed for PC-type machines. 
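The commit message's key point, that these "GPIOs" are bits in PMIC registers reached over the SCU IPC link rather than memory-mapped pins, can be illustrated with a minimal, hypothetical helper. The sketch only reuses the register map (GPIO0, GPIO_DRV, GPIO_DIR, GPIO_DOU) and the intel_scu_ipc_update_register(reg, bits, mask) call that appear in intel_pmic_gpio.c below; the GPIO_DIR polarity and the shape of the driver's real gpio_chip callbacks are assumptions, not taken from this patch.

	/* Hypothetical helper, not the driver's actual gpio_chip callback. */
	static int example_pmic_gpio_direction_output(unsigned offset, int value)
	{
		if (offset >= 8)	/* only the GPIO0..GPIO7 bank handled here */
			return -EINVAL;

		/*
		 * One IPC register update per pin: enable the driver, select
		 * output (GPIO_DIR polarity assumed here), and set the level.
		 */
		return intel_scu_ipc_update_register(GPIO0 + offset,
				GPIO_DRV | GPIO_DIR | (value ? GPIO_DOU : 0),
				GPIO_DRV | GPIO_DIR | GPIO_DOU);
	}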
+config GPIO_INTEL_PMIC + bool "Intel PMIC GPIO support" + depends on INTEL_SCU_IPC && GPIOLIB + ---help--- + Say Y here to support GPIO via the SCU IPC interface + on Intel MID platforms. + config RAR_REGISTER bool "Restricted Access Region Register Driver" depends on PCI && X86_MRST diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index ed50eca1b556..4744c7744ffa 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -28,3 +28,5 @@ obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o obj-$(CONFIG_RAR_REGISTER) += intel_rar_register.o obj-$(CONFIG_INTEL_IPS) += intel_ips.o +obj-$(CONFIG_GPIO_INTEL_PMIC) += intel_pmic_gpio.o + diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c new file mode 100644 index 000000000000..5cdcff653918 --- /dev/null +++ b/drivers/platform/x86/intel_pmic_gpio.c @@ -0,0 +1,340 @@ +/* Moorestown PMIC GPIO (access through IPC) driver + * Copyright (c) 2008 - 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* Supports: + * Moorestown platform PMIC chip + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "pmic_gpio" + +/* register offset that IPC driver should use + * 8 GPIO + 8 GPOSW (6 controllable) + 8GPO + */ +enum pmic_gpio_register { + GPIO0 = 0xE0, + GPIO7 = 0xE7, + GPIOINT = 0xE8, + GPOSWCTL0 = 0xEC, + GPOSWCTL5 = 0xF1, + GPO = 0xF4, +}; + +/* bits definition for GPIO & GPOSW */ +#define GPIO_DRV 0x01 +#define GPIO_DIR 0x02 +#define GPIO_DIN 0x04 +#define GPIO_DOU 0x08 +#define GPIO_INTCTL 0x30 +#define GPIO_DBC 0xc0 + +#define GPOSW_DRV 0x01 +#define GPOSW_DOU 0x08 +#define GPOSW_RDRV 0x30 + + +#define NUM_GPIO 24 + +struct pmic_gpio_irq { + spinlock_t lock; + u32 trigger[NUM_GPIO]; + u32 dirty; + struct work_struct work; +}; + + +struct pmic_gpio { + struct gpio_chip chip; + struct pmic_gpio_irq irqtypes; + void *gpiointr; + int irq; + unsigned irq_base; +}; + +static void pmic_program_irqtype(int gpio, int type) +{ + if (type & IRQ_TYPE_EDGE_RISING) + intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20); + else + intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20); + + if (type & IRQ_TYPE_EDGE_FALLING) + intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10); + else + intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10); +}; + +static void pmic_irqtype_work(struct work_struct *work) +{ + struct pmic_gpio_irq *t = + container_of(work, struct pmic_gpio_irq, work); + unsigned long flags; + int i; + u16 type; + + spin_lock_irqsave(&t->lock, flags); + /* As we drop the lock, we may need multiple scans if we race the + pmic_irq_type function */ + while (t->dirty) { + /* + * For each pin that has the dirty bit set send an IPC + * message to configure the hardware via 
the PMIC + */ + for (i = 0; i < NUM_GPIO; i++) { + if (!(t->dirty & (1 << i))) + continue; + t->dirty &= ~(1 << i); + /* We can't trust the array entry or dirty + once the lock is dropped */ + type = t->trigger[i]; + spin_unlock_irqrestore(&t->lock, flags); + pmic_program_irqtype(i, type); + spin_lock_irqsave(&t->lock, flags); + } + } + spin_unlock_irqrestore(&t->lock, flags); +} + +static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) +{ + if (offset > 8) { + printk(KERN_ERR + "%s: only pin 0-7 support input\n", __func__); + return -1;/* we only have 8 GPIO can use as input */ + } + return intel_scu_ipc_update_register(GPIO0 + offset, + GPIO_DIR, GPIO_DIR); +} + +static int pmic_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + int rc = 0; + + if (offset < 8)/* it is GPIO */ + rc = intel_scu_ipc_update_register(GPIO0 + offset, + GPIO_DRV | GPIO_DOU | GPIO_DIR, + GPIO_DRV | (value ? GPIO_DOU : 0)); + else if (offset < 16)/* it is GPOSW */ + rc = intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8, + GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV, + GPOSW_DRV | (value ? GPOSW_DOU : 0)); + else if (offset > 15 && offset < 24)/* it is GPO */ + rc = intel_scu_ipc_update_register(GPO, + 1 << (offset - 16), + value ? 1 << (offset - 16) : 0); + else { + printk(KERN_ERR + "%s: invalid PMIC GPIO pin %d!\n", __func__, offset); + WARN_ON(1); + } + + return rc; +} + +static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + u8 r; + int ret; + + /* we only have 8 GPIO pins we can use as input */ + if (offset > 8) + return -EOPNOTSUPP; + ret = intel_scu_ipc_ioread8(GPIO0 + offset, &r); + if (ret < 0) + return ret; + return r & GPIO_DIN; +} + +static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +{ + if (offset < 8)/* it is GPIO */ + intel_scu_ipc_update_register(GPIO0 + offset, + GPIO_DRV | GPIO_DOU, + GPIO_DRV | (value ? GPIO_DOU : 0)); + else if (offset < 16)/* it is GPOSW */ + intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8, + GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV, + GPOSW_DRV | (value ? GPOSW_DOU : 0)); + else if (offset > 15 && offset < 24) /* it is GPO */ + intel_scu_ipc_update_register(GPO, + 1 << (offset - 16), + value ? 1 << (offset - 16) : 0); +} + +static int pmic_irq_type(unsigned irq, unsigned type) +{ + struct pmic_gpio *pg = get_irq_chip_data(irq); + u32 gpio = irq - pg->irq_base; + unsigned long flags; + + if (gpio > pg->chip.ngpio) + return -EINVAL; + + spin_lock_irqsave(&pg->irqtypes.lock, flags); + pg->irqtypes.trigger[gpio] = type; + pg->irqtypes.dirty |= (1 << gpio); + spin_unlock_irqrestore(&pg->irqtypes.lock, flags); + schedule_work(&pg->irqtypes.work); + return 0; +} + + + +static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +{ + struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); + + return pg->irq_base + offset; +} + +/* the gpiointr register is read-clear, so just do nothing. 
*/ +static void pmic_irq_unmask(unsigned irq) +{ +}; + +static void pmic_irq_mask(unsigned irq) +{ +}; + +static struct irq_chip pmic_irqchip = { + .name = "PMIC-GPIO", + .mask = pmic_irq_mask, + .unmask = pmic_irq_unmask, + .set_type = pmic_irq_type, +}; + +static void pmic_irq_handler(unsigned irq, struct irq_desc *desc) +{ + struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq); + u8 intsts = *((u8 *)pg->gpiointr + 4); + int gpio; + + for (gpio = 0; gpio < 8; gpio++) { + if (intsts & (1 << gpio)) { + pr_debug("pmic pin %d triggered\n", gpio); + generic_handle_irq(pg->irq_base + gpio); + } + } + desc->chip->eoi(irq); +} + +static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int irq = platform_get_irq(pdev, 0); + struct intel_pmic_gpio_platform_data *pdata = dev->platform_data; + + struct pmic_gpio *pg; + int retval; + int i; + + if (irq < 0) { + dev_dbg(dev, "no IRQ line\n"); + return -EINVAL; + } + + if (!pdata || !pdata->gpio_base || !pdata->irq_base) { + dev_dbg(dev, "incorrect or missing platform data\n"); + return -EINVAL; + } + + pg = kzalloc(sizeof(*pg), GFP_KERNEL); + if (!pg) + return -ENOMEM; + + dev_set_drvdata(dev, pg); + + pg->irq = irq; + /* setting up SRAM mapping for GPIOINT register */ + pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8); + if (!pg->gpiointr) { + printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__); + retval = -EINVAL; + goto err2; + } + pg->irq_base = pdata->irq_base; + pg->chip.label = "intel_pmic"; + pg->chip.direction_input = pmic_gpio_direction_input; + pg->chip.direction_output = pmic_gpio_direction_output; + pg->chip.get = pmic_gpio_get; + pg->chip.set = pmic_gpio_set; + pg->chip.to_irq = pmic_gpio_to_irq; + pg->chip.base = pdata->gpio_base; + pg->chip.ngpio = NUM_GPIO; + pg->chip.can_sleep = 1; + pg->chip.dev = dev; + + INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work); + spin_lock_init(&pg->irqtypes.lock); + + pg->chip.dev = dev; + retval = gpiochip_add(&pg->chip); + if (retval) { + printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); + goto err; + } + set_irq_data(pg->irq, pg); + set_irq_chained_handler(pg->irq, pmic_irq_handler); + for (i = 0; i < 8; i++) { + set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, + handle_simple_irq, "demux"); + set_irq_chip_data(i + pg->irq_base, pg); + } + return 0; +err: + iounmap(pg->gpiointr); +err2: + kfree(pg); + return retval; +} + +/* at the same time, register a platform driver + * this supports the sfi 0.81 fw */ +static struct platform_driver platform_pmic_gpio_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, + .probe = platform_pmic_gpio_probe, +}; + +static int __init platform_pmic_gpio_init(void) +{ + return platform_driver_register(&platform_pmic_gpio_driver); +} + +subsys_initcall(platform_pmic_gpio_init); + +MODULE_AUTHOR("Alek Du "); +MODULE_DESCRIPTION("Intel Moorestown PMIC GPIO driver"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h new file mode 100644 index 000000000000..920109a29191 --- /dev/null +++ b/include/linux/intel_pmic_gpio.h @@ -0,0 +1,15 @@ +#ifndef LINUX_INTEL_PMIC_H +#define LINUX_INTEL_PMIC_H + +struct intel_pmic_gpio_platform_data { + /* the first IRQ of the chip */ + unsigned irq_base; + /* number assigned to the first GPIO */ + unsigned gpio_base; + /* sram address for gpiointr register, the langwell chip will map + * the PMIC spi GPIO expander's GPIOINTR register in sram. 
+ */ + unsigned gpiointr; +}; + +#endif -- cgit v1.2.3 From 226528c6100e4191842e61997110c8ace40605f7 Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Thu, 4 Mar 2010 03:23:36 -0500 Subject: [CPUFREQ] unexport (un)lock_policy_rwsem* functions lock_policy_rwsem_* and unlock_policy_rwsem_* functions are scheduled to be unexported when 2.6.33. Now there are no other callers of them out of cpufreq.c, unexport them and make them static. Signed-off-by: WANG Cong Cc: Venkatesh Pallipadi Signed-off-by: Dave Jones --- Documentation/feature-removal-schedule.txt | 10 ---------- drivers/cpufreq/cpufreq.c | 10 +++------- include/linux/cpufreq.h | 5 ----- 3 files changed, 3 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 1571c0c83dba..182bbe49429b 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -377,16 +377,6 @@ Who: Eric Paris ---------------------------- -What: lock_policy_rwsem_* and unlock_policy_rwsem_* will not be - exported interface anymore. -When: 2.6.33 -Why: cpu_policy_rwsem has a new cleaner definition making it local to - cpufreq core and contained inside cpufreq.c. Other dependent - drivers should not use it in order to safely avoid lockdep issues. -Who: Venkatesh Pallipadi - ----------------------------- - What: sound-slot/service-* module aliases and related clutters in sound/sound_core.c When: August 2010 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 938b74ea9ffb..40877d219081 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -68,7 +68,7 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu); static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); #define lock_policy_rwsem(mode, cpu) \ -int lock_policy_rwsem_##mode \ +static int lock_policy_rwsem_##mode \ (int cpu) \ { \ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ @@ -83,26 +83,22 @@ int lock_policy_rwsem_##mode \ } lock_policy_rwsem(read, cpu); -EXPORT_SYMBOL_GPL(lock_policy_rwsem_read); lock_policy_rwsem(write, cpu); -EXPORT_SYMBOL_GPL(lock_policy_rwsem_write); -void unlock_policy_rwsem_read(int cpu) +static void unlock_policy_rwsem_read(int cpu) { int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); BUG_ON(policy_cpu == -1); up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); } -EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read); -void unlock_policy_rwsem_write(int cpu) +static void unlock_policy_rwsem_write(int cpu) { int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); BUG_ON(policy_cpu == -1); up_write(&per_cpu(cpu_policy_rwsem, policy_cpu)); } -EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write); /* internal prototypes */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 9f15150ce8d6..c3e9de8321c6 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -196,11 +196,6 @@ extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); -int lock_policy_rwsem_read(int cpu); -int lock_policy_rwsem_write(int cpu); -void unlock_policy_rwsem_read(int cpu); -void unlock_policy_rwsem_write(int cpu); - /********************************************************************* * CPUFREQ DRIVER INTERFACE * -- cgit v1.2.3 From 6f4f2723d08534fd4e407e1ef8500b0f4d12c30c Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Tue, 20 Apr 2010 13:17:36 +0200 Subject: [CPUFREQ] 
x86 cpufreq: Make trace_power_frequency cpufreq driver independent and fix the broken case if a core's frequency depends on others. trace_power_frequency was only implemented in a rather ungeneric way in acpi-cpufreq driver's target() function only. -> Move the call to trace_power_frequency to cpufreq.c:cpufreq_notify_transition() where CPUFREQ_POSTCHANGE notifier is triggered. This will support power frequency tracing by all cpufreq drivers trace_power_frequency did not trace frequency changes correctly when the userspace governor was used or when CPU cores' frequency depend on each other. -> Moving this into the CPUFREQ_POSTCHANGE notifier and pass the cpu which gets switched automatically fixes this. Robert Schoene provided some important fixes on top of my initial quick shot version which are integrated in this patch: - Forgot some changes in power_end trace (TP_printk/variable names) - Variable dummy in power_end must now be cpu_id - Use static 64 bit variable instead of unsigned int for cpu_id Signed-off-by: Thomas Renninger CC: davej@redhat.com CC: arjan@infradead.org CC: linux-kernel@vger.kernel.org CC: robert.schoene@tu-dresden.de Tested-by: robert.schoene@tu-dresden.de Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 3 --- arch/x86/kernel/process.c | 8 ++++---- drivers/cpufreq/cpufreq.c | 5 +++++ drivers/cpuidle/cpuidle.c | 2 +- include/trace/events/power.h | 27 +++++++++++++++------------ tools/perf/builtin-timechart.c | 11 ++++++----- 6 files changed, 31 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index cee7aa949c35..246cd3afbb5f 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include @@ -324,8 +323,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, } } - trace_power_frequency(POWER_PSTATE, data->freq_table[next_state].frequency); - switch (data->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e7e35219b32f..787572d43d9c 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -371,7 +371,7 @@ static inline int hlt_use_halt(void) void default_idle(void) { if (hlt_use_halt()) { - trace_power_start(POWER_CSTATE, 1); + trace_power_start(POWER_CSTATE, 1, smp_processor_id()); current_thread_info()->status &= ~TS_POLLING; /* * TS_POLLING-cleared state must be visible before we @@ -441,7 +441,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); */ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) { - trace_power_start(POWER_CSTATE, (ax>>4)+1); + trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); if (!need_resched()) { if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)¤t_thread_info()->flags); @@ -457,7 +457,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) static void mwait_idle(void) { if (!need_resched()) { - trace_power_start(POWER_CSTATE, 1); + trace_power_start(POWER_CSTATE, 1, smp_processor_id()); if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)¤t_thread_info()->flags); @@ -478,7 +478,7 @@ static void mwait_idle(void) */ static void poll_idle(void) { - trace_power_start(POWER_CSTATE, 0); + trace_power_start(POWER_CSTATE, 0, smp_processor_id()); local_irq_enable(); while (!need_resched()) cpu_relax(); diff --git 
a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 40877d219081..6ce1bb735635 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -29,6 +29,8 @@ #include #include +#include + #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ "cpufreq-core", msg) @@ -350,6 +352,9 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); + dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, + (unsigned long)freqs->cpu); + trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); if (likely(policy) && likely(policy->cpu == freqs->cpu)) diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 199488576a05..dbefe15bd582 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -95,7 +95,7 @@ static void cpuidle_idle_call(void) /* give the governor an opportunity to reflect on the outcome */ if (cpuidle_curr_governor->reflect) cpuidle_curr_governor->reflect(dev); - trace_power_end(0); + trace_power_end(smp_processor_id()); } /** diff --git a/include/trace/events/power.h b/include/trace/events/power.h index c4efe9b8280d..35a2a6e7bf1e 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -18,52 +18,55 @@ enum { DECLARE_EVENT_CLASS(power, - TP_PROTO(unsigned int type, unsigned int state), + TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), - TP_ARGS(type, state), + TP_ARGS(type, state, cpu_id), TP_STRUCT__entry( __field( u64, type ) __field( u64, state ) + __field( u64, cpu_id ) ), TP_fast_assign( __entry->type = type; __entry->state = state; + __entry->cpu_id = cpu_id; ), - TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state) + TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type, + (unsigned long)__entry->state, (unsigned long)__entry->cpu_id) ); DEFINE_EVENT(power, power_start, - TP_PROTO(unsigned int type, unsigned int state), + TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), - TP_ARGS(type, state) + TP_ARGS(type, state, cpu_id) ); DEFINE_EVENT(power, power_frequency, - TP_PROTO(unsigned int type, unsigned int state), + TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), - TP_ARGS(type, state) + TP_ARGS(type, state, cpu_id) ); TRACE_EVENT(power_end, - TP_PROTO(int dummy), + TP_PROTO(unsigned int cpu_id), - TP_ARGS(dummy), + TP_ARGS(cpu_id), TP_STRUCT__entry( - __field( u64, dummy ) + __field( u64, cpu_id ) ), TP_fast_assign( - __entry->dummy = 0xffff; + __entry->cpu_id = cpu_id; ), - TP_printk("dummy=%lu", (unsigned long)__entry->dummy) + TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id) ); diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 5a52ed9fc10b..5161619d4714 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -300,8 +300,9 @@ struct trace_entry { struct power_entry { struct trace_entry te; - s64 type; - s64 value; + u64 type; + u64 value; + u64 cpu_id; }; #define TASK_COMM_LEN 16 @@ -498,13 +499,13 @@ static int process_sample_event(event_t *event, struct perf_session *session) return 0; if (strcmp(event_str, "power:power_start") == 0) - c_state_start(data.cpu, data.time, pe->value); + c_state_start(pe->cpu_id, data.time, pe->value); if (strcmp(event_str, "power:power_end") == 0) - c_state_end(data.cpu, data.time); + 
c_state_end(pe->cpu_id, data.time); if (strcmp(event_str, "power:power_frequency") == 0) - p_state_change(data.cpu, data.time, pe->value); + p_state_change(pe->cpu_id, data.time, pe->value); if (strcmp(event_str, "sched:sched_wakeup") == 0) sched_wakeup(data.cpu, data.time, data.pid, te); -- cgit v1.2.3 From f9599ce11192f24dbb0f4fdb70121a05edc58342 Mon Sep 17 00:00:00 2001 From: Changli Gao Date: Wed, 4 Aug 2010 04:43:44 +0000 Subject: sk_buff: introduce pskb_network_may_pull() Signed-off-by: Changli Gao Signed-off-by: David S. Miller --- include/linux/skbuff.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d20d9e7a9bbd..77eb60d2b496 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1379,6 +1379,11 @@ static inline int skb_network_offset(const struct sk_buff *skb) return skb_network_header(skb) - skb->data; } +static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) +{ + return pskb_may_pull(skb, skb_network_offset(skb) + len); +} + /* * CPUs often take a performance hit when accessing unaligned memory * locations. The actual performance hit varies, it can be small if the -- cgit v1.2.3 From d7100da026317fcf07411f765fe1cdb044053917 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 4 Aug 2010 07:34:36 +0000 Subject: ppp: make channel_ops const The PPP channel ops structure should be const. Cleanup the declarations to use standard C99 format. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/char/pcmcia/ipwireless/network.c | 2 +- drivers/net/ppp_async.c | 6 +++--- drivers/net/ppp_synctty.c | 6 +++--- drivers/net/pppoe.c | 4 ++-- include/linux/ppp_channel.h | 2 +- net/atm/pppoatm.c | 2 +- net/irda/irnet/irnet_ppp.c | 2 +- net/l2tp/l2tp_ppp.c | 5 ++++- 8 files changed, 16 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c index 65920163f53d..9fe538347932 100644 --- a/drivers/char/pcmcia/ipwireless/network.c +++ b/drivers/char/pcmcia/ipwireless/network.c @@ -239,7 +239,7 @@ static int ipwireless_ppp_ioctl(struct ppp_channel *ppp_channel, return err; } -static struct ppp_channel_ops ipwireless_ppp_channel_ops = { +static const struct ppp_channel_ops ipwireless_ppp_channel_ops = { .start_xmit = ipwireless_ppp_start_xmit, .ioctl = ipwireless_ppp_ioctl }; diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 6c2e8fa0ca31..af50a530daee 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -108,9 +108,9 @@ static void ppp_async_process(unsigned long arg); static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, int len, int inbound); -static struct ppp_channel_ops async_ops = { - ppp_async_send, - ppp_async_ioctl +static const struct ppp_channel_ops async_ops = { + .start_xmit = ppp_async_send, + .ioctl = ppp_async_ioctl, }; /* diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 52938da1e542..4c95ec3fb8d4 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -97,9 +97,9 @@ static void ppp_sync_flush_output(struct syncppp *ap); static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf, char *flags, int count); -static struct ppp_channel_ops sync_ops = { - ppp_sync_send, - ppp_sync_ioctl +static const struct ppp_channel_ops sync_ops = { + .start_xmit = ppp_sync_send, + .ioctl = ppp_sync_ioctl, }; /* diff --git a/drivers/net/pppoe.c 
b/drivers/net/pppoe.c index 344ef330e123..c07de359dc07 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -92,7 +92,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); static const struct proto_ops pppoe_ops; -static struct ppp_channel_ops pppoe_chan_ops; +static const struct ppp_channel_ops pppoe_chan_ops; /* per-net private data for this module */ static int pppoe_net_id __read_mostly; @@ -963,7 +963,7 @@ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) return __pppoe_xmit(sk, skb); } -static struct ppp_channel_ops pppoe_chan_ops = { +static const struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, }; diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h index bff98ec1bfed..5d87f810a3b7 100644 --- a/include/linux/ppp_channel.h +++ b/include/linux/ppp_channel.h @@ -36,7 +36,7 @@ struct ppp_channel_ops { struct ppp_channel { void *private; /* channel private data */ - struct ppp_channel_ops *ops; /* operations for this channel */ + const struct ppp_channel_ops *ops; /* operations for this channel */ int mtu; /* max transmit packet size */ int hdrlen; /* amount of headroom channel needs */ void *ppp; /* opaque to channel */ diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index e49bb6d948a1..e9aced0ec56b 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -260,7 +260,7 @@ static int pppoatm_devppp_ioctl(struct ppp_channel *chan, unsigned int cmd, return -ENOTTY; } -static /*const*/ struct ppp_channel_ops pppoatm_ops = { +static const struct ppp_channel_ops pppoatm_ops = { .start_xmit = pppoatm_send, .ioctl = pppoatm_devppp_ioctl, }; diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 800bc53b7f63..dfe7b38dd4af 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -20,7 +20,7 @@ /* Please put other headers in irnet.h - Thanks */ /* Generic PPP callbacks (to call us) */ -static struct ppp_channel_ops irnet_ppp_ops = { +static const struct ppp_channel_ops irnet_ppp_ops = { .start_xmit = ppp_irnet_send, .ioctl = ppp_irnet_ioctl }; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 90d82b3f2889..ff954b3e94b6 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -135,7 +135,10 @@ struct pppol2tp_session { static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); -static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; +static const struct ppp_channel_ops pppol2tp_chan_ops = { + .start_xmit = pppol2tp_xmit, +}; + static const struct proto_ops pppol2tp_ops; /* Helpers to obtain tunnel/session contexts from sockets. -- cgit v1.2.3 From fa235562fbde8703aabeeedfa0772f08608d1542 Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Thu, 5 Aug 2010 15:54:20 -0700 Subject: Bluetooth: Change default L2CAP ERTM retransmit timeout The L2CAP specification requires that the ERTM retransmit timeout be at least 2 seconds for BR/EDR connections. 
Signed-off-by: Mat Martineau Signed-off-by: Marcel Holtmann --- include/net/bluetooth/l2cap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 636724b203ee..16e412f3722b 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -33,7 +33,7 @@ #define L2CAP_DEFAULT_FLUSH_TO 0xffff #define L2CAP_DEFAULT_TX_WINDOW 63 #define L2CAP_DEFAULT_MAX_TX 3 -#define L2CAP_DEFAULT_RETRANS_TO 1000 /* 1 second */ +#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */ #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ #define L2CAP_DEFAULT_MAX_PDU_SIZE 672 #define L2CAP_DEFAULT_ACK_TO 200 -- cgit v1.2.3 From db12d647ccc971ed120199dc7ea5be2b5887d328 Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Thu, 5 Aug 2010 15:54:27 -0700 Subject: Bluetooth: Use 3-DH5 payload size for default ERTM max PDU size The previous value of 672 for L2CAP_DEFAULT_MAX_PDU_SIZE is based on the default L2CAP MTU. That default MTU is calculated from the size of two DH5 packets, minus ACL and L2CAP b-frame header overhead. ERTM is used with newer basebands that typically support larger 3-DH5 packets, and i-frames and s-frames have more header overhead. With clean RF conditions, basebands will typically attempt to use 1021-byte 3-DH5 packets for maximum throughput. Adjusting for 2 bytes of ACL headers plus 10 bytes of worst-case L2CAP headers yields 1009 bytes of payload. This PDU size imposes less overhead for header bytes and gives the baseband the option to choose 3-DH5 packets, but is small enough for ERTM traffic to interleave well with other L2CAP or SCO data. 672-byte payloads do not allow the most efficient over-the-air packet choice, and cannot achieve maximum throughput over BR/EDR. Signed-off-by: Mat Martineau Signed-off-by: Marcel Holtmann --- include/net/bluetooth/l2cap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 16e412f3722b..6c241444f902 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -35,7 +35,7 @@ #define L2CAP_DEFAULT_MAX_TX 3 #define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */ #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ -#define L2CAP_DEFAULT_MAX_PDU_SIZE 672 +#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */ #define L2CAP_DEFAULT_ACK_TO 200 #define L2CAP_LOCAL_BUSY_TRIES 12 -- cgit v1.2.3
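As an aside on the arithmetic behind the new 1009-byte default: the figures quoted in the commit message (a 1021-byte 3-DH5 baseband payload, 2 bytes of ACL header overhead, 10 bytes of worst-case L2CAP ERTM header) combine as sketched below. This is a minimal sketch under those assumptions only; the macro names are illustrative and are not kernel symbols or code from the patch.

/* Illustrative payload budget for the 1009-byte ERTM PDU default.
 * Hypothetical names; the numbers come from the commit message above.
 */
#define BT_3DH5_MAX_PAYLOAD    1021 /* largest 3-DH5 baseband payload     */
#define BT_ACL_HDR_OVERHEAD       2 /* ACL header bytes per packet        */
#define BT_L2CAP_ERTM_OVERHEAD   10 /* worst-case L2CAP ERTM header bytes */

#define ERTM_DEFAULT_PDU_SKETCH \
	(BT_3DH5_MAX_PAYLOAD - BT_ACL_HDR_OVERHEAD - BT_L2CAP_ERTM_OVERHEAD)
/* Evaluates to 1009, matching the new L2CAP_DEFAULT_MAX_PDU_SIZE, so a
 * maximum-sized i-frame still fits in a single 3-DH5 packet once the ACL
 * and L2CAP headers are added back.
 */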