From c6e8c6ccf96e9249805d0e9828b994f4c926ad51 Mon Sep 17 00:00:00 2001 From: KaiGai Kohei Date: Thu, 29 Jun 2006 15:33:02 +0100 Subject: [JFFS2][XATTR] Fix xd->refcnt race condition When xd->refcnt is checked to decide whether this xdatum should be released, atomic_dec_and_lock() is used to ensure that c->erase_completion_lock is held. This fix changes the specification of delete_xattr_datum(). Previously, it was only called when xd->refcnt was zero (calling it with a positive xd->refcnt caused a BUG()). With this patch applied, the function checks under the spinlock, when necessary, whether xd->refcnt is zero. When xd->refcnt becomes zero, it sets the JFFS2_XFLAGS_DEAD flag on xd and either links it onto xattr_dead_list or releases it immediately. Signed-off-by: KaiGai Kohei Signed-off-by: David Woodhouse --- fs/jffs2/xattr.c | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c index 18e66dbf23b4..25bc1ae08648 100644 --- a/fs/jffs2/xattr.c +++ b/fs/jffs2/xattr.c @@ -50,9 +50,10 @@ * is used to write xdatum to medium. xd->version will be incremented. * create_xattr_datum(c, xprefix, xname, xvalue, xsize) * is used to create new xdatum and write to medium. - * delete_xattr_datum(c, xd) - * is used to delete a xdatum. It marks xd JFFS2_XFLAGS_DEAD, and allows - * GC to reclaim those physical nodes. + * unrefer_xattr_datum(c, xd) + * is used to delete a xdatum. When nobody refers to this xdatum, JFFS2_XFLAGS_DEAD + * is set on xd->flags and it is chained onto xattr_dead_list or released immediately. + * In the first case, the garbage collector releases it later. * -------------------------------------------------- */ static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize) { @@ -394,22 +395,24 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c, return xd; } -static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) +static void unrefer_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) { /* must be called under down_write(xattr_sem) */ - BUG_ON(atomic_read(&xd->refcnt)); + if (atomic_dec_and_lock(&xd->refcnt, &c->erase_completion_lock)) { + uint32_t xid = xd->xid, version = xd->version; - unload_xattr_datum(c, xd); - xd->flags |= JFFS2_XFLAGS_DEAD; - spin_lock(&c->erase_completion_lock); - if (xd->node == (void *)xd) { - BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID)); - jffs2_free_xattr_datum(xd); - } else { - list_add(&xd->xindex, &c->xattr_dead_list); + unload_xattr_datum(c, xd); + xd->flags |= JFFS2_XFLAGS_DEAD; + if (xd->node == (void *)xd) { + BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID)); + jffs2_free_xattr_datum(xd); + } else { + list_add(&xd->xindex, &c->xattr_dead_list); + } + spin_unlock(&c->erase_completion_lock); + + dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", xid, version); } - spin_unlock(&c->erase_completion_lock); - dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", xd->xid, xd->version); } /* -------- xref related functions ------------------ @@ -580,8 +583,7 @@ static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *re dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n", ref->ino, ref->xid, ref->xseqno); - if (atomic_dec_and_test(&xd->refcnt)) - delete_xattr_datum(c, xd); + unrefer_xattr_datum(c, xd); } void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) @@ -1119,8 +1121,7 @@ int do_jffs2_setxattr(struct inode *inode,
int xprefix, const char *xname, ref->next = c->xref_dead_list; c->xref_dead_list = ref; spin_unlock(&c->erase_completion_lock); - if (atomic_dec_and_test(&xd->refcnt)) - delete_xattr_datum(c, xd); + unrefer_xattr_datum(c, xd); } else { ref->ic = ic; ref->xd = xd; @@ -1156,8 +1157,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, down_write(&c->xattr_sem); if (rc) { JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request); - if (atomic_dec_and_test(&xd->refcnt)) - delete_xattr_datum(c, xd); + unrefer_xattr_datum(c, xd); up_write(&c->xattr_sem); return rc; } @@ -1170,8 +1170,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, ic->xref = ref; } rc = PTR_ERR(newref); - if (atomic_dec_and_test(&xd->refcnt)) - delete_xattr_datum(c, xd); + unrefer_xattr_datum(c, xd); } else if (ref) { delete_xattr_ref(c, ref); } -- cgit v1.2.3 From c05d52c748da10a3f27f6e638875514153776b15 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Thu, 22 Jun 2006 12:03:35 +0200 Subject: fs/jffs2/: make 2 functions static This patch makes two needlessly global functions static. Signed-off-by: Adrian Bunk Signed-off-by: David Woodhouse --- fs/jffs2/malloc.c | 2 +- fs/jffs2/nodelist.h | 2 -- fs/jffs2/scan.c | 4 ++-- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c index 8310c95478e9..33f291005012 100644 --- a/fs/jffs2/malloc.c +++ b/fs/jffs2/malloc.c @@ -190,7 +190,7 @@ void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x) kmem_cache_free(tmp_dnode_info_slab, x); } -struct jffs2_raw_node_ref *jffs2_alloc_refblock(void) +static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void) { struct jffs2_raw_node_ref *ret; diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h index b16c60bbcf6e..be6d4a4dd8e0 100644 --- a/fs/jffs2/nodelist.h +++ b/fs/jffs2/nodelist.h @@ -427,8 +427,6 @@ char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f); /* scan.c */ int jffs2_scan_medium(struct jffs2_sb_info *c); void jffs2_rotate_lists(struct jffs2_sb_info *c); -int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, - uint32_t ofs, uint32_t len); struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino); int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size); diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 2bfdc33752d3..e2413466ddd5 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c @@ -274,8 +274,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) return ret; } -int jffs2_fill_scan_buf (struct jffs2_sb_info *c, void *buf, - uint32_t ofs, uint32_t len) +static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, + uint32_t ofs, uint32_t len) { int ret; size_t retlen; -- cgit v1.2.3 From c7afb0f97700e73109564f83c35bfeeb14cb653b Mon Sep 17 00:00:00 2001 From: KaiGai Kohei Date: Sun, 2 Jul 2006 15:13:46 +0100 Subject: [JFFS2][XATTR] Fix memory leak in POSIX-ACL support jffs2_clear_acl(), which releases ACL caches allocated by kmalloc(), was defined but never called; thus we risked leaking memory. This patch plugs jffs2_clear_acl() into jffs2_do_clear_inode(), ensuring the ACL cache is released when an inode is cleared.
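The fix that follows is an instance of the standard release-on-clear idiom for a lazily cached, refcounted object. A minimal standalone sketch of that idiom, with illustrative names (demo_inode, demo_clear_acl and the sentinel are not the JFFS2 identifiers):

#include <linux/posix_acl.h>

/* Sentinel distinguishing "no ACL cached yet" from "cached as none". */
#define DEMO_ACL_NOT_CACHED ((void *)-1)

struct demo_inode {
	struct posix_acl *acl_cache;	/* NULL, sentinel, or a held reference */
};

/* Called when the in-core inode is torn down: drop the reference taken
 * when the ACL was cached, then reset the field to the "unknown" state. */
static void demo_clear_acl(struct demo_inode *di)
{
	if (di->acl_cache && di->acl_cache != DEMO_ACL_NOT_CACHED) {
		posix_acl_release(di->acl_cache);
		di->acl_cache = DEMO_ACL_NOT_CACHED;
	}
}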
Signed-off-by: KaiGai Kohei Signed-off-by: David Woodhouse --- fs/jffs2/acl.c | 4 +--- fs/jffs2/acl.h | 4 ++-- fs/jffs2/readinode.c | 1 + 3 files changed, 4 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 9c2077e7e081..0ae3cd10702c 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c @@ -345,10 +345,8 @@ int jffs2_init_acl(struct inode *inode, struct inode *dir) return rc; } -void jffs2_clear_acl(struct inode *inode) +void jffs2_clear_acl(struct jffs2_inode_info *f) { - struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); - if (f->i_acl_access && f->i_acl_access != JFFS2_ACL_NOT_CACHED) { posix_acl_release(f->i_acl_access); f->i_acl_access = JFFS2_ACL_NOT_CACHED; diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h index 8893bd1a6ba7..fa327dbd3171 100644 --- a/fs/jffs2/acl.h +++ b/fs/jffs2/acl.h @@ -30,7 +30,7 @@ struct jffs2_acl_header { extern int jffs2_permission(struct inode *, int, struct nameidata *); extern int jffs2_acl_chmod(struct inode *); extern int jffs2_init_acl(struct inode *, struct inode *); -extern void jffs2_clear_acl(struct inode *); +extern void jffs2_clear_acl(struct jffs2_inode_info *); extern struct xattr_handler jffs2_acl_access_xattr_handler; extern struct xattr_handler jffs2_acl_default_xattr_handler; @@ -40,6 +40,6 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler; #define jffs2_permission NULL #define jffs2_acl_chmod(inode) (0) #define jffs2_init_acl(inode,dir) (0) -#define jffs2_clear_acl(inode) +#define jffs2_clear_acl(f) #endif /* CONFIG_JFFS2_FS_POSIX_ACL */ diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index cc1899268c43..266423b2709d 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -968,6 +968,7 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) struct jffs2_full_dirent *fd, *fds; int deleted; + jffs2_clear_acl(f); jffs2_xattr_delete_inode(c, f->inocache); down(&f->sem); deleted = f->inocache && !f->inocache->nlink; -- cgit v1.2.3 From ce51059be56f63762089412b3ece348067afda85 Mon Sep 17 00:00:00 2001 From: Chuck Ebbert <76306.1226@compuserve.com> Date: Mon, 3 Jul 2006 00:24:14 -0700 Subject: [PATCH] binfmt_elf: fix checks for bad address Fix check for bad address; use macro instead of open-coding two checks. Taken from RHEL4 kernel update. From: Ernie Petrides For background, the BAD_ADDR() macro should return TRUE if the address is TASK_SIZE, because that's the lowest address that is *not* valid for user-space mappings. The macro was correct in binfmt_aout.c but was wrong for the "equal to" case in binfmt_elf.c. There were two in-line validations of user-space addresses in binfmt_elf.c, which have been appropriately converted to use the corrected BAD_ADDR() macro in the patch you posted yesterday. Note that the size checks against TASK_SIZE are okay as coded. The additional changes that I propose are below. These are in the error paths for bad ELF entry addresses once load_elf_binary() has already committed to exec'ing the new image (following the tearing down of the task's original address space). The 1st hunk deals with the interp-side of the outer "if". There were two problems here. The printk() should be removed because this path can be triggered at will by a bogus interpreter image created and used by a malicious user. Further, the error code should not be ENOEXEC, because that causes the loop in search_binary_handler() to continue trying other exec handlers (twice, in fact). 
But it's too late for this to work correctly, because the user address space has already been torn down, and an exec() failure cannot be returned to the user code because the code no longer exists. The only recovery is to force a SIGSEGV, but it's best to terminate the search loop immediately. I somewhat arbitrarily chose EINVAL as a fallback error code, but any error returned by load_elf_interp() will override that (but this value will never be seen by user-space). The 2nd hunk deals with the non-interp-side of the outer "if". There were two problems here as well. The SIGSEGV needs to be forced, because a prior sigaction() syscall might have set the associated disposition to SIG_IGN. And the ENOEXEC should be changed to EINVAL as described above. Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com> Signed-off-by: Ernie Petrides Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_elf.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index d0434406eaeb..f42e64210ee5 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -84,7 +84,7 @@ static struct linux_binfmt elf_format = { .min_coredump = ELF_EXEC_PAGESIZE }; -#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE) +#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) static int set_brk(unsigned long start, unsigned long end) { @@ -394,7 +394,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, * <= p_memsize so it's only necessary to check p_memsz. */ k = load_addr + eppnt->p_vaddr; - if (k > TASK_SIZE || + if (BAD_ADDR(k) || eppnt->p_filesz > eppnt->p_memsz || eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) { @@ -887,7 +887,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) * allowed task size. Note that p_filesz must always be * <= p_memsz so it is only necessary to check p_memsz. */ - if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz || + if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || elf_ppnt->p_memsz > TASK_SIZE || TASK_SIZE - elf_ppnt->p_memsz < k) { /* set_brk can never work. Avoid overflows. */ @@ -941,10 +941,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) interpreter, &interp_load_addr); if (BAD_ADDR(elf_entry)) { - printk(KERN_ERR "Unable to load interpreter %.128s\n", - elf_interpreter); force_sig(SIGSEGV, current); - retval = -ENOEXEC; /* Nobody gets to see this, but.. */ + retval = IS_ERR((void *)elf_entry) ? + (int)elf_entry : -EINVAL; goto out_free_dentry; } reloc_func_desc = interp_load_addr; @@ -955,8 +954,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) } else { elf_entry = loc->elf_ex.e_entry; if (BAD_ADDR(elf_entry)) { - send_sig(SIGSEGV, current, 0); - retval = -ENOEXEC; /* Nobody gets to see this, but.. 
*/ + force_sig(SIGSEGV, current); + retval = -EINVAL; goto out_free_dentry; } } -- cgit v1.2.3 From e4d919188554a77c798a267e098059bc9aa39726 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:24:34 -0700 Subject: [PATCH] lockdep: locking init debugging improvement Locking init improvement: - introduce and use __SPIN_LOCK_UNLOCKED for array initializations, to pass in the name string of locks, used by debugging Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/random.c | 6 +++--- fs/dcache.c | 2 +- include/linux/idr.h | 2 +- include/linux/init_task.h | 10 +++++----- include/linux/notifier.h | 2 +- include/linux/seqlock.h | 12 ++++++++++-- include/linux/spinlock_types.h | 15 +++++++++------ include/linux/wait.h | 2 +- kernel/rcupdate.c | 4 ++-- kernel/timer.c | 2 +- mm/swap_state.c | 2 +- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/tcp_minisocks.c | 2 +- 13 files changed, 37 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/drivers/char/random.c b/drivers/char/random.c index 164bddae047f..4c3a5ca9d8f7 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -416,7 +416,7 @@ static struct entropy_store input_pool = { .poolinfo = &poolinfo_table[0], .name = "input", .limit = 1, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock), .pool = input_pool_data }; @@ -425,7 +425,7 @@ static struct entropy_store blocking_pool = { .name = "blocking", .limit = 1, .pull = &input_pool, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock), .pool = blocking_pool_data }; @@ -433,7 +433,7 @@ static struct entropy_store nonblocking_pool = { .poolinfo = &poolinfo_table[1], .name = "nonblocking", .pull = &input_pool, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock), .pool = nonblocking_pool_data }; diff --git a/fs/dcache.c b/fs/dcache.c index c6e3535be192..bec4de176c81 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -38,7 +38,7 @@ int sysctl_vfs_cache_pressure __read_mostly = 100; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); -static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; +static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); EXPORT_SYMBOL(dcache_lock); diff --git a/include/linux/idr.h b/include/linux/idr.h index f559a719dbe8..826803449db7 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -66,7 +66,7 @@ struct idr { .id_free = NULL, \ .layers = 0, \ .id_free_cnt = 0, \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ } #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 678c1a90380d..1b7bb37624bb 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -21,7 +21,7 @@ .count = ATOMIC_INIT(1), \ .fdt = &init_files.fdtab, \ .fdtab = INIT_FDTABLE, \ - .file_lock = SPIN_LOCK_UNLOCKED, \ + .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), \ .next_fd = 0, \ .close_on_exec_init = { { 0, } }, \ .open_fds_init = { { 0, } }, \ @@ -36,7 +36,7 @@ .user_id = 0, \ .next = NULL, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \ - .ctx_lock = SPIN_LOCK_UNLOCKED, \ + .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \ .reqs_active = 0U, \ .max_reqs = ~0U, \ } @@ -48,7 +48,7 @@ .mm_users = ATOMIC_INIT(2), \ .mm_count = ATOMIC_INIT(1), \ .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \ - 
.page_table_lock = SPIN_LOCK_UNLOCKED, \ + .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \ .mmlist = LIST_HEAD_INIT(name.mmlist), \ .cpu_vm_mask = CPU_MASK_ALL, \ } @@ -69,7 +69,7 @@ #define INIT_SIGHAND(sighand) { \ .count = ATOMIC_INIT(1), \ .action = { { { .sa_handler = NULL, } }, }, \ - .siglock = SPIN_LOCK_UNLOCKED, \ + .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ } extern struct group_info init_groups; @@ -119,7 +119,7 @@ extern struct group_info init_groups; .list = LIST_HEAD_INIT(tsk.pending.list), \ .signal = {{0}}}, \ .blocked = {{0}}, \ - .alloc_lock = SPIN_LOCK_UNLOCKED, \ + .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .fs_excl = ATOMIC_INIT(0), \ diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 51dbab9710c7..7ff386a6ae87 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -65,7 +65,7 @@ struct raw_notifier_head { } while (0) #define ATOMIC_NOTIFIER_INIT(name) { \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .head = NULL } #define BLOCKING_NOTIFIER_INIT(name) { \ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 7bc5c7c12b54..46000936f8f1 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -38,9 +38,17 @@ typedef struct { * These macros triggered gcc-3.x compile-time problems. We think these are * OK now. Be cautious. */ -#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED } -#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0) +#define __SEQLOCK_UNLOCKED(lockname) \ + { 0, __SPIN_LOCK_UNLOCKED(lockname) } +#define SEQLOCK_UNLOCKED \ + __SEQLOCK_UNLOCKED(old_style_seqlock_init) + +#define seqlock_init(x) \ + do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0) + +#define DEFINE_SEQLOCK(x) \ + seqlock_t x = __SEQLOCK_UNLOCKED(x) /* Lock out other writers and update the count. * Acts like a normal spin_lock/unlock. 
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 9cb51e070390..f5d4ed7bc785 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -44,24 +44,27 @@ typedef struct { #define SPINLOCK_OWNER_INIT ((void *)-1L) #ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_LOCK_UNLOCKED \ +# define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ .magic = SPINLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1 } -#define RW_LOCK_UNLOCKED \ +#define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1 } #else -# define SPIN_LOCK_UNLOCKED \ +# define __SPIN_LOCK_UNLOCKED(lockname) \ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } -#define RW_LOCK_UNLOCKED \ +#define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } #endif -#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED -#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED +#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) +#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 544e855c7c02..bc4f389c49bb 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -68,7 +68,7 @@ struct task_struct; wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ - .lock = SPIN_LOCK_UNLOCKED, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .task_list = { &(name).task_list, &(name).task_list } } #define DECLARE_WAIT_QUEUE_HEAD(name) \ diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index f464f5ae3f11..759805c9859a 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -53,13 +53,13 @@ static struct rcu_ctrlblk rcu_ctrlblk = { .cur = -300, .completed = -300, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), .cpumask = CPU_MASK_NONE, }; static struct rcu_ctrlblk rcu_bh_ctrlblk = { .cur = -300, .completed = -300, - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), .cpumask = CPU_MASK_NONE, }; diff --git a/kernel/timer.c b/kernel/timer.c index 5a8960253063..4dd9a10d67d0 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1208,7 +1208,7 @@ unsigned long wall_jiffies = INITIAL_JIFFIES; * playing with xtime and avenrun. 
*/ #ifndef ARCH_HAVE_XTIME_LOCK -seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; +__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); EXPORT_SYMBOL(xtime_lock); #endif diff --git a/mm/swap_state.c b/mm/swap_state.c index fccbd9bba77b..5f7cf2a4cb55 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -38,7 +38,7 @@ static struct backing_dev_info swap_backing_dev_info = { struct address_space swapper_space = { .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), - .tree_lock = RW_LOCK_UNLOCKED, + .tree_lock = __RW_LOCK_UNLOCKED(swapper_space.tree_lock), .a_ops = &swap_aops, .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear), .backing_dev_info = &swap_backing_dev_info, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8355b729fa95..823717285c6d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -90,7 +90,7 @@ static struct socket *tcp_socket; void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { - .lhash_lock = RW_LOCK_UNLOCKED, + .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), .lhash_users = ATOMIC_INIT(0), .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), }; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index e0851697ad5e..0ccb7cb22b15 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -40,7 +40,7 @@ int sysctl_tcp_abort_on_overflow; struct inet_timewait_death_row tcp_death_row = { .sysctl_max_tw_buckets = NR_FILE * 2, .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS, - .death_lock = SPIN_LOCK_UNLOCKED, + .death_lock = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock), .hashinfo = &tcp_hashinfo, .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, (unsigned long)&tcp_death_row), -- cgit v1.2.3 From d8aa905b429700e8b6b6b301a8ac2d4a24f2c19b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:02 -0700 Subject: [PATCH] lockdep: annotate direct io Teach special (rwsem-in-irq) locking code to the lock validator. Has no effect on non-lockdep kernels. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/direct-io.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/direct-io.c b/fs/direct-io.c index 538fb0418fba..5981e17f46f0 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -220,7 +220,8 @@ static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes) if (dio->end_io && dio->result) dio->end_io(dio->iocb, offset, bytes, dio->map_bh.b_private); if (dio->lock_type == DIO_LOCKING) - up_read(&dio->inode->i_alloc_sem); + /* lockdep: non-owner release */ + up_read_non_owner(&dio->inode->i_alloc_sem); } /* @@ -1261,7 +1262,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, } if (dio_lock_type == DIO_LOCKING) - down_read(&inode->i_alloc_sem); + /* lockdep: not the owner will release it */ + down_read_non_owner(&inode->i_alloc_sem); } /* -- cgit v1.2.3 From a90b9c05df3c1e58eaedc28795d0f5abd896c098 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:04 -0700 Subject: [PATCH] lockdep: annotate dcache Teach special (recursive) locking code to the lock validator. Has no effect on non-lockdep kernels. 
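The d_move() hunk in this patch is an instance of a general idiom: when two locks of the same lock class must be held at once, acquire them in address order to prevent ABBA deadlocks, and annotate the second acquisition with a subclass so the validator knows the nesting is intentional. A minimal sketch of the idiom under those assumptions (names are illustrative, not the dcache code):

#include <linux/spinlock.h>

enum { DEMO_LOCK_NORMAL, DEMO_LOCK_NESTED };

/* Take two same-class spinlocks safely: lower address first, and tell
 * lockdep via a subclass that the second acquisition is deliberate. */
static void demo_lock_pair(spinlock_t *a, spinlock_t *b)
{
	if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, DEMO_LOCK_NESTED);
	} else {
		spin_lock(b);
		spin_lock_nested(a, DEMO_LOCK_NESTED);
	}
}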
Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dcache.c | 4 ++-- include/linux/dcache.h | 12 ++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index bec4de176c81..1b4a3a34ec57 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1339,10 +1339,10 @@ void d_move(struct dentry * dentry, struct dentry * target) */ if (target < dentry) { spin_lock(&target->d_lock); - spin_lock(&dentry->d_lock); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); } else { spin_lock(&dentry->d_lock); - spin_lock(&target->d_lock); + spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED); } /* Move the dentry to the target hash queue, if on different bucket */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 0dd1610a94a9..471781ffeab1 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -114,6 +114,18 @@ struct dentry { unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ }; +/* + * dentry->d_lock spinlock nesting subclasses: + * + * 0: normal + * 1: nested + */ +enum dentry_d_lock_class +{ + DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */ + DENTRY_D_LOCK_NESTED +}; + struct dentry_operations { int (*d_revalidate)(struct dentry *, struct nameidata *); int (*d_hash) (struct dentry *, struct qstr *); -- cgit v1.2.3 From f2eace23e924bd3f05aedea4fc505eb5508d2d93 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:05 -0700 Subject: [PATCH] lockdep: annotate i_mutex Teach special (recursive) locking code to the lock validator. Has no effect on non-lockdep kernels. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/usb/core/inode.c | 2 +- fs/namei.c | 20 ++++++++++---------- include/linux/fs.h | 19 +++++++++++++++++++ 3 files changed, 30 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index e47e3a8ed6e4..cf11196e012d 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -200,7 +200,7 @@ static void update_sb(struct super_block *sb) if (!root) return; - mutex_lock(&root->d_inode->i_mutex); + mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_PARENT); list_for_each_entry(bus, &root->d_subdirs, d_u.d_child) { if (bus->d_inode) { diff --git a/fs/namei.c b/fs/namei.c index c784e8bb57a3..c9750d755aff 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1423,7 +1423,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) struct dentry *p; if (p1 == p2) { - mutex_lock(&p1->d_inode->i_mutex); + mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); return NULL; } @@ -1431,22 +1431,22 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) for (p = p1; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p2) { - mutex_lock(&p2->d_inode->i_mutex); - mutex_lock(&p1->d_inode->i_mutex); + mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD); return p; } } for (p = p2; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p1) { - mutex_lock(&p1->d_inode->i_mutex); - mutex_lock(&p2->d_inode->i_mutex); + mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); + mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); return p; } } - mutex_lock(&p1->d_inode->i_mutex); - mutex_lock(&p2->d_inode->i_mutex); + mutex_lock_nested(&p1->d_inode->i_mutex, 
I_MUTEX_PARENT); + mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); return NULL; } @@ -1751,7 +1751,7 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir) { struct dentry *dentry = ERR_PTR(-EEXIST); - mutex_lock(&nd->dentry->d_inode->i_mutex); + mutex_lock_nested(&nd->dentry->d_inode->i_mutex, I_MUTEX_PARENT); /* * Yucky last component or no last component at all? * (foo/., foo/.., /////) @@ -2008,7 +2008,7 @@ static long do_rmdir(int dfd, const char __user *pathname) error = -EBUSY; goto exit1; } - mutex_lock(&nd.dentry->d_inode->i_mutex); + mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { @@ -2082,7 +2082,7 @@ static long do_unlinkat(int dfd, const char __user *pathname) error = -EISDIR; if (nd.last_type != LAST_NORM) goto exit1; - mutex_lock(&nd.dentry->d_inode->i_mutex); + mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { diff --git a/include/linux/fs.h b/include/linux/fs.h index e04a5cfe874f..05ded9e76b23 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -542,6 +542,25 @@ struct inode { #endif }; +/* + * inode->i_mutex nesting subclasses for the lock validator: + * + * 0: the object of the current VFS operation + * 1: parent + * 2: child/target + * 3: quota file + * + * The locking order between these classes is + * parent -> child -> normal -> quota + */ +enum inode_i_mutex_lock_class +{ + I_MUTEX_NORMAL, + I_MUTEX_PARENT, + I_MUTEX_CHILD, + I_MUTEX_QUOTA +}; + /* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic -- cgit v1.2.3 From 59345374742ee6673c2d04b0fa8c888e881b7209 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:18 -0700 Subject: [PATCH] lockdep: annotate NTFS locking rules NTFS uses lots of type-opaque objects which acquire their true identity at runtime - so the lock validator needs help in a couple of places to figure out object types. Many thanks to Anton Altaparmakov for giving lots of explanations about NTFS locking rules. Has no effect on non-lockdep kernels.
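The annotations in this patch all rely on the same mechanism: a static struct lock_class_key plus lockdep_set_class(), which moves one particular lock instance out of the class it would otherwise share with every other lock initialized at the same source location. A minimal sketch of that mechanism, with illustrative names:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* One key per distinct set of locking rules; must have static storage. */
static struct lock_class_key demo_special_key;

static void demo_init_lock(spinlock_t *lock)
{
	spin_lock_init(lock);		/* joins the default class for this init site */
	lockdep_set_class(lock, &demo_special_key);	/* split into its own class */
}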
Signed-off-by: Ingo Molnar Signed-off-by: Anton Altaparmakov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ntfs/inode.c | 33 +++++++++++++++++++++++++++++++++ fs/ntfs/super.c | 31 +++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) (limited to 'fs') diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index 4c86b7e1d1eb..d313f356e66a 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -367,6 +367,12 @@ static void ntfs_destroy_extent_inode(ntfs_inode *ni) kmem_cache_free(ntfs_inode_cache, ni); } +/* + * The attribute runlist lock has separate locking rules from the + * normal runlist lock, so split the two lock-classes: + */ +static struct lock_class_key attr_list_rl_lock_class; + /** * __ntfs_init_inode - initialize ntfs specific part of an inode * @sb: super block of mounted volume @@ -394,6 +400,8 @@ void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni) ni->attr_list_size = 0; ni->attr_list = NULL; ntfs_init_runlist(&ni->attr_list_rl); + lockdep_set_class(&ni->attr_list_rl.lock, + &attr_list_rl_lock_class); ni->itype.index.bmp_ino = NULL; ni->itype.index.block_size = 0; ni->itype.index.vcn_size = 0; @@ -405,6 +413,13 @@ void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni) ni->ext.base_ntfs_ino = NULL; } +/* + * Extent inodes get MFT-mapped in a nested way, while the base inode + * is still mapped. Teach this nesting to the lock validator by creating + * a separate class for nested inode's mrec_lock's: + */ +static struct lock_class_key extent_inode_mrec_lock_key; + inline ntfs_inode *ntfs_new_extent_inode(struct super_block *sb, unsigned long mft_no) { @@ -413,6 +428,7 @@ inline ntfs_inode *ntfs_new_extent_inode(struct super_block *sb, ntfs_debug("Entering."); if (likely(ni != NULL)) { __ntfs_init_inode(sb, ni); + lockdep_set_class(&ni->mrec_lock, &extent_inode_mrec_lock_key); ni->mft_no = mft_no; ni->type = AT_UNUSED; ni->name = NULL; @@ -1722,6 +1738,15 @@ err_out: return err; } +/* + * The MFT inode has special locking, so teach the lock validator + * about this by splitting off the locking rules of the MFT from + * the locking rules of other inodes. The MFT inode can never be + * accessed from the VFS side (or even internally), only by the + * map_mft functions. 
+ */ +static struct lock_class_key mft_ni_runlist_lock_key, mft_ni_mrec_lock_key; + /** * ntfs_read_inode_mount - special read_inode for mount time use only * @vi: inode to read @@ -2148,6 +2173,14 @@ int ntfs_read_inode_mount(struct inode *vi) ntfs_attr_put_search_ctx(ctx); ntfs_debug("Done."); ntfs_free(m); + + /* + * Split the locking rules of the MFT inode from the + * locking rules of other inodes: + */ + lockdep_set_class(&ni->runlist.lock, &mft_ni_runlist_lock_key); + lockdep_set_class(&ni->mrec_lock, &mft_ni_mrec_lock_key); + return 0; em_put_err_out: diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 0e14acea3f8b..74e0ee8fce72 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -1724,6 +1724,14 @@ upcase_failed: return FALSE; } +/* + * The lcn and mft bitmap inodes are NTFS-internal inodes with + * their own special locking rules: + */ +static struct lock_class_key + lcnbmp_runlist_lock_key, lcnbmp_mrec_lock_key, + mftbmp_runlist_lock_key, mftbmp_mrec_lock_key; + /** * load_system_files - open the system files using normal functions * @vol: ntfs super block describing device whose system files to load @@ -1780,6 +1788,10 @@ static BOOL load_system_files(ntfs_volume *vol) ntfs_error(sb, "Failed to load $MFT/$BITMAP attribute."); goto iput_mirr_err_out; } + lockdep_set_class(&NTFS_I(vol->mftbmp_ino)->runlist.lock, + &mftbmp_runlist_lock_key); + lockdep_set_class(&NTFS_I(vol->mftbmp_ino)->mrec_lock, + &mftbmp_mrec_lock_key); /* Read upcase table and setup @vol->upcase and @vol->upcase_len. */ if (!load_and_init_upcase(vol)) goto iput_mftbmp_err_out; @@ -1802,6 +1814,11 @@ static BOOL load_system_files(ntfs_volume *vol) iput(vol->lcnbmp_ino); goto bitmap_failed; } + lockdep_set_class(&NTFS_I(vol->lcnbmp_ino)->runlist.lock, + &lcnbmp_runlist_lock_key); + lockdep_set_class(&NTFS_I(vol->lcnbmp_ino)->mrec_lock, + &lcnbmp_mrec_lock_key); + NInoSetSparseDisabled(NTFS_I(vol->lcnbmp_ino)); if ((vol->nr_clusters + 7) >> 3 > i_size_read(vol->lcnbmp_ino)) { iput(vol->lcnbmp_ino); @@ -2743,6 +2760,17 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) struct inode *tmp_ino; int blocksize, result; + /* + * We do a pretty difficult piece of bootstrap by reading the + * MFT (and other metadata) from disk into memory. We'll only + * release this metadata during umount, so the locking patterns + * observed during bootstrap do not count. So turn off the + * observation of locking patterns (strictly for this context + * only) while mounting NTFS. [The validator is still active + * otherwise, even for this context: it will for example record + * lock class registrations.] + */ + lockdep_off(); ntfs_debug("Entering."); #ifndef NTFS_RW sb->s_flags |= MS_RDONLY; @@ -2754,6 +2782,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) if (!silent) ntfs_error(sb, "Allocation of NTFS volume structure " "failed. Aborting mount..."); + lockdep_on(); return -ENOMEM; } /* Initialize ntfs_volume structure. 
*/ @@ -2940,6 +2969,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) mutex_unlock(&ntfs_lock); sb->s_export_op = &ntfs_export_ops; lock_kernel(); + lockdep_on(); return 0; } ntfs_error(sb, "Failed to allocate root directory."); @@ -3059,6 +3089,7 @@ err_out_now: sb->s_fs_info = NULL; kfree(vol); ntfs_debug("Failed, returning -EINVAL."); + lockdep_on(); return -EINVAL; } -- cgit v1.2.3 From 5c81a4197de38411fe3e27f8593fff73a5d6b868 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 3 Jul 2006 00:25:20 -0700 Subject: [PATCH] lockdep: annotate the quota code The quota code plays interesting games with the lock ordering; to quote Jan: | i_mutex of inode containing quota file is acquired after all other | quota locks. i_mutex of all other inodes is acquired before quota | locks. Quota code makes sure (by resetting inode operations and | setting special flag on inode) that noone tries to enter quota code | while holding i_mutex on a quota file... The good news is that all of this special case i_mutex grabbing happens in the (per filesystem) low level quota write function. For this special case we need a new I_MUTEX_* nesting level, since this just entirely outside any of the regular VFS locking rules for i_mutex. I trust Jan on his blue eyes that this is not ever going to deadlock; and based on that the patch below is what it takes to inform lockdep of these very interesting new locking rules. The new locking rule for the I_MUTEX_QUOTA nesting level is that this is the deepest possible level of nesting for i_mutex, and that this only should be used in quota write (and possibly read) function of filesystems. This makes the lock ordering of the I_MUTEX_* levels: I_MUTEX_PARENT -> I_MUTEX_CHILD -> I_MUTEX_NORMAL -> I_MUTEX_QUOTA Has no effect on non-lockdep kernels. Signed-off-by: Arjan van de Ven Acked-by: Ingo Molnar Cc: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext2/super.c | 2 +- fs/ext3/super.c | 2 +- fs/reiserfs/super.c | 2 +- fs/ufs/super.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 9f43879d6d68..f2702cda9779 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -1157,7 +1157,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type, struct buffer_head tmp_bh; struct buffer_head *bh; - mutex_lock(&inode->i_mutex); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); while (towrite > 0) { tocopy = sb->s_blocksize - offset < towrite ? sb->s_blocksize - offset : towrite; diff --git a/fs/ext3/super.c b/fs/ext3/super.c index f2dd71336612..813d589cc6c0 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -2614,7 +2614,7 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type, struct buffer_head *bh; handle_t *handle = journal_current_handle(); - mutex_lock(&inode->i_mutex); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); while (towrite > 0) { tocopy = sb->s_blocksize - offset < towrite ? sb->s_blocksize - offset : towrite; diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 28eb3c886034..5567328f1041 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -2203,7 +2203,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, size_t towrite = len; struct buffer_head tmp_bh, *bh; - mutex_lock(&inode->i_mutex); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); while (towrite > 0) { tocopy = sb->s_blocksize - offset < towrite ? 
sb->s_blocksize - offset : towrite; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 19a99726e58d..992ee0b87cc3 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -1326,7 +1326,7 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type, size_t towrite = len; struct buffer_head *bh; - mutex_lock(&inode->i_mutex); + mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); while (towrite > 0) { tocopy = sb->s_blocksize - offset < towrite ? sb->s_blocksize - offset : towrite; -- cgit v1.2.3 From cf51624999e56c88154b5f7d451a265db6aabff7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:27 -0700 Subject: [PATCH] lockdep: annotate ->s_lock Teach special (per-filesystem) locking code to the lock validator. Minimal effect on non-lockdep kernels: one extra parameter to alloc_super(). Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/super.c | 10 ++++++++-- include/linux/fs.h | 1 + 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index 9b780c42d845..5a4fe8be462a 100644 --- a/fs/super.c +++ b/fs/super.c @@ -53,7 +53,7 @@ DEFINE_SPINLOCK(sb_lock); * Allocates and initializes a new &struct super_block. alloc_super() * returns a pointer new superblock or %NULL if allocation had failed. */ -static struct super_block *alloc_super(void) +static struct super_block *alloc_super(struct file_system_type *type) { struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER); static struct super_operations default_op; @@ -72,6 +72,12 @@ static struct super_block *alloc_super(void) INIT_LIST_HEAD(&s->s_inodes); init_rwsem(&s->s_umount); mutex_init(&s->s_lock); + /* + * The locking rules for s_lock are up to the + * filesystem. For example ext3fs has different + * lock ordering than usbfs: + */ + lockdep_set_class(&s->s_lock, &type->s_lock_key); down_write(&s->s_umount); s->s_count = S_BIAS; atomic_set(&s->s_active, 1); @@ -295,7 +301,7 @@ retry: } if (!s) { spin_unlock(&sb_lock); - s = alloc_super(); + s = alloc_super(type); if (!s) return ERR_PTR(-ENOMEM); goto retry; diff --git a/include/linux/fs.h b/include/linux/fs.h index 05ded9e76b23..0a3ea52d711e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1295,6 +1295,7 @@ struct file_system_type { struct module *owner; struct file_system_type * next; struct list_head fs_supers; + struct lock_class_key s_lock_key; }; extern int get_sb_bdev(struct file_system_type *fs_type, -- cgit v1.2.3 From 897c6ff9568bcb102ffc6b465ebe1def0cba829d Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 3 Jul 2006 00:25:28 -0700 Subject: [PATCH] lockdep: annotate sb ->s_umount The s_umount rwsem needs to be classified as per-superblock since it's perfectly legit to keep multiple of those recursively in the VFS locking rules. Has no effect on non-lockdep kernels. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/super.c | 1 + include/linux/fs.h | 1 + 2 files changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index 5a4fe8be462a..6d4e8174b6db 100644 --- a/fs/super.c +++ b/fs/super.c @@ -72,6 +72,7 @@ static struct super_block *alloc_super(struct file_system_type *type) INIT_LIST_HEAD(&s->s_inodes); init_rwsem(&s->s_umount); mutex_init(&s->s_lock); + lockdep_set_class(&s->s_umount, &type->s_umount_key); /* * The locking rules for s_lock are up to the * filesystem. 
For example ext3fs has different diff --git a/include/linux/fs.h b/include/linux/fs.h index 0a3ea52d711e..e26de68059af 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1296,6 +1296,7 @@ struct file_system_type { struct file_system_type * next; struct list_head fs_supers; struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; }; extern int get_sb_bdev(struct file_system_type *fs_type, -- cgit v1.2.3 From 663d440eaa496db903cc58be04b9b602ba45e43b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:33 -0700 Subject: [PATCH] lockdep: annotate blkdev nesting Teach special (recursive) locking code to the lock validator. Effects on non-lockdep kernels: - the introduction of the following function variants: extern struct block_device *open_partition_by_devnum(dev_t, unsigned); extern int blkdev_put_partition(struct block_device *); static int blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); which on non-lockdep are the same as open_by_devnum(), blkdev_put() and blkdev_get(). - a subclass parameter to do_open(). [unused on non-lockdep] - a subclass parameter to __blkdev_put(), which is a new internal function for the main blkdev_put*() functions. [parameter unused on non-lockdep kernels, except for two sanity check WARN_ON()s] these functions carry no semantical difference - they only express object dependencies towards the lockdep subsystem. Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Cc: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 6 ++-- fs/block_dev.c | 102 +++++++++++++++++++++++++++++++++++++++++++++-------- include/linux/fs.h | 17 +++++++++ 3 files changed, 107 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/drivers/md/md.c b/drivers/md/md.c index 2fe32c261922..e4e161372a3e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1404,7 +1404,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) struct block_device *bdev; char b[BDEVNAME_SIZE]; - bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); + bdev = open_partition_by_devnum(dev, FMODE_READ|FMODE_WRITE); if (IS_ERR(bdev)) { printk(KERN_ERR "md: could not open %s.\n", __bdevname(dev, b)); @@ -1414,7 +1414,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) if (err) { printk(KERN_ERR "md: could not bd_claim %s.\n", bdevname(bdev, b)); - blkdev_put(bdev); + blkdev_put_partition(bdev); return err; } rdev->bdev = bdev; @@ -1428,7 +1428,7 @@ static void unlock_rdev(mdk_rdev_t *rdev) if (!bdev) MD_BUG(); bd_release(bdev); - blkdev_put(bdev); + blkdev_put_partition(bdev); } void md_autodetect_dev(dev_t dev); diff --git a/fs/block_dev.c b/fs/block_dev.c index 9633a490dab0..37534573960b 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -739,7 +739,7 @@ static int bd_claim_by_kobject(struct block_device *bdev, void *holder, if (!bo) return -ENOMEM; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION); res = bd_claim(bdev, holder); if (res || !add_bd_holder(bdev, bo)) free_bd_holder(bo); @@ -764,7 +764,7 @@ static void bd_release_from_kobject(struct block_device *bdev, if (!kobj) return; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_PARTITION); bd_release(bdev); if ((bo = del_bd_holder(bdev, kobj))) free_bd_holder(bo); @@ -822,6 +822,22 @@ struct block_device *open_by_devnum(dev_t dev, unsigned mode) EXPORT_SYMBOL(open_by_devnum); +static int +blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags); + +struct 
block_device *open_partition_by_devnum(dev_t dev, unsigned mode) +{ + struct block_device *bdev = bdget(dev); + int err = -ENOMEM; + int flags = mode & FMODE_WRITE ? O_RDWR : O_RDONLY; + if (bdev) + err = blkdev_get_partition(bdev, mode, flags); + return err ? ERR_PTR(err) : bdev; +} + +EXPORT_SYMBOL(open_partition_by_devnum); + + /* * This routine checks whether a removable media has been changed, * and invalidates all buffer-cache-entries in that case. This @@ -868,7 +884,11 @@ void bd_set_size(struct block_device *bdev, loff_t size) } EXPORT_SYMBOL(bd_set_size); -static int do_open(struct block_device *bdev, struct file *file) +static int +blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); + +static int +do_open(struct block_device *bdev, struct file *file, unsigned int subclass) { struct module *owner = NULL; struct gendisk *disk; @@ -885,7 +905,8 @@ static int do_open(struct block_device *bdev, struct file *file) } owner = disk->fops->owner; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, subclass); + if (!bdev->bd_openers) { bdev->bd_disk = disk; bdev->bd_contains = bdev; @@ -912,11 +933,11 @@ static int do_open(struct block_device *bdev, struct file *file) ret = -ENOMEM; if (!whole) goto out_first; - ret = blkdev_get(whole, file->f_mode, file->f_flags); + ret = blkdev_get_whole(whole, file->f_mode, file->f_flags); if (ret) goto out_first; bdev->bd_contains = whole; - mutex_lock(&whole->bd_mutex); + mutex_lock_nested(&whole->bd_mutex, BD_MUTEX_WHOLE); whole->bd_part_count++; p = disk->part[part - 1]; bdev->bd_inode->i_data.backing_dev_info = @@ -944,7 +965,8 @@ static int do_open(struct block_device *bdev, struct file *file) if (bdev->bd_invalidated) rescan_partitions(bdev->bd_disk, bdev); } else { - mutex_lock(&bdev->bd_contains->bd_mutex); + mutex_lock_nested(&bdev->bd_contains->bd_mutex, + BD_MUTEX_PARTITION); bdev->bd_contains->bd_part_count++; mutex_unlock(&bdev->bd_contains->bd_mutex); } @@ -985,11 +1007,49 @@ int blkdev_get(struct block_device *bdev, mode_t mode, unsigned flags) fake_file.f_dentry = &fake_dentry; fake_dentry.d_inode = bdev->bd_inode; - return do_open(bdev, &fake_file); + return do_open(bdev, &fake_file, BD_MUTEX_NORMAL); } EXPORT_SYMBOL(blkdev_get); +static int +blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags) +{ + /* + * This crockload is due to bad choice of ->open() type. + * It will go away. + * For now, block device ->open() routine must _not_ + * examine anything in 'inode' argument except ->i_rdev. + */ + struct file fake_file = {}; + struct dentry fake_dentry = {}; + fake_file.f_mode = mode; + fake_file.f_flags = flags; + fake_file.f_dentry = &fake_dentry; + fake_dentry.d_inode = bdev->bd_inode; + + return do_open(bdev, &fake_file, BD_MUTEX_WHOLE); +} + +static int +blkdev_get_partition(struct block_device *bdev, mode_t mode, unsigned flags) +{ + /* + * This crockload is due to bad choice of ->open() type. + * It will go away. + * For now, block device ->open() routine must _not_ + * examine anything in 'inode' argument except ->i_rdev. 
+ */ + struct file fake_file = {}; + struct dentry fake_dentry = {}; + fake_file.f_mode = mode; + fake_file.f_flags = flags; + fake_file.f_dentry = &fake_dentry; + fake_dentry.d_inode = bdev->bd_inode; + + return do_open(bdev, &fake_file, BD_MUTEX_PARTITION); +} + static int blkdev_open(struct inode * inode, struct file * filp) { struct block_device *bdev; @@ -1005,7 +1065,7 @@ static int blkdev_open(struct inode * inode, struct file * filp) bdev = bd_acquire(inode); - res = do_open(bdev, filp); + res = do_open(bdev, filp, BD_MUTEX_NORMAL); if (res) return res; @@ -1019,13 +1079,13 @@ static int blkdev_open(struct inode * inode, struct file * filp) return res; } -int blkdev_put(struct block_device *bdev) +static int __blkdev_put(struct block_device *bdev, unsigned int subclass) { int ret = 0; struct inode *bd_inode = bdev->bd_inode; struct gendisk *disk = bdev->bd_disk; - mutex_lock(&bdev->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, subclass); lock_kernel(); if (!--bdev->bd_openers) { sync_blockdev(bdev); @@ -1035,7 +1095,8 @@ int blkdev_put(struct block_device *bdev) if (disk->fops->release) ret = disk->fops->release(bd_inode, NULL); } else { - mutex_lock(&bdev->bd_contains->bd_mutex); + mutex_lock_nested(&bdev->bd_contains->bd_mutex, + subclass + 1); bdev->bd_contains->bd_part_count--; mutex_unlock(&bdev->bd_contains->bd_mutex); } @@ -1051,9 +1112,8 @@ int blkdev_put(struct block_device *bdev) } bdev->bd_disk = NULL; bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; - if (bdev != bdev->bd_contains) { - blkdev_put(bdev->bd_contains); - } + if (bdev != bdev->bd_contains) + __blkdev_put(bdev->bd_contains, subclass + 1); bdev->bd_contains = NULL; } unlock_kernel(); @@ -1062,8 +1122,20 @@ int blkdev_put(struct block_device *bdev) return ret; } +int blkdev_put(struct block_device *bdev) +{ + return __blkdev_put(bdev, BD_MUTEX_NORMAL); +} + EXPORT_SYMBOL(blkdev_put); +int blkdev_put_partition(struct block_device *bdev) +{ + return __blkdev_put(bdev, BD_MUTEX_PARTITION); +} + +EXPORT_SYMBOL(blkdev_put_partition); + static int blkdev_close(struct inode * inode, struct file * filp) { struct block_device *bdev = I_BDEV(filp->f_mapping->host); diff --git a/include/linux/fs.h b/include/linux/fs.h index e26de68059af..134b32068246 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -435,6 +435,21 @@ struct block_device { unsigned long bd_private; }; +/* + * bdev->bd_mutex nesting subclasses for the lock validator: + * + * 0: normal + * 1: 'whole' + * 2: 'partition' + */ +enum bdev_bd_mutex_lock_class +{ + BD_MUTEX_NORMAL, + BD_MUTEX_WHOLE, + BD_MUTEX_PARTITION +}; + + /* * Radix-tree tags, for tagging dirty and writeback pages within the pagecache * radix trees @@ -1425,6 +1440,7 @@ extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern struct block_device *open_by_devnum(dev_t, unsigned); +extern struct block_device *open_partition_by_devnum(dev_t, unsigned); extern const struct file_operations def_blk_fops; extern const struct address_space_operations def_blk_aops; extern const struct file_operations def_chr_fops; @@ -1435,6 +1451,7 @@ extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long); extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); extern int blkdev_get(struct block_device *, mode_t, unsigned); extern int blkdev_put(struct block_device *); +extern int blkdev_put_partition(struct block_device *); extern int 
bd_claim(struct block_device *, void *); extern void bd_release(struct block_device *); #ifdef CONFIG_SYSFS -- cgit v1.2.3 From 36c8b586896f60cb91a4fd526233190b34316baf Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 3 Jul 2006 00:25:41 -0700 Subject: [PATCH] sched: cleanup, remove task_t, convert to struct task_struct cleanup: remove task_t and convert all the uses to struct task_struct. I introduced it for the scheduler anno and it was a mistake. Conversion was mostly scripted, the result was reviewed and all secondary whitespace and style impact (if any) was fixed up by hand. Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/process.c | 2 +- arch/ia64/kernel/mca.c | 10 +- arch/ia64/kernel/smpboot.c | 2 +- arch/mips/kernel/entry.S | 2 +- arch/mips/kernel/mips-mt.c | 6 +- arch/um/kernel/tt/process_kern.c | 2 +- drivers/char/tty_io.c | 2 +- fs/eventpoll.c | 4 +- include/asm-ia64/thread_info.h | 2 +- include/asm-m32r/system.h | 2 +- include/asm-sh/system.h | 2 +- include/linux/sched.h | 55 +++++------ kernel/capability.c | 8 +- kernel/exit.c | 35 +++---- kernel/fork.c | 18 ++-- kernel/hrtimer.c | 2 +- kernel/pid.c | 6 +- kernel/ptrace.c | 6 +- kernel/rtmutex-debug.c | 5 +- kernel/rtmutex-tester.c | 4 +- kernel/rtmutex.c | 11 ++- kernel/sched.c | 192 ++++++++++++++++++++------------------- kernel/timer.c | 2 +- kernel/workqueue.c | 2 +- mm/oom_kill.c | 8 +- 25 files changed, 203 insertions(+), 187 deletions(-) (limited to 'fs') diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 01c8c8b23337..41ebf51a107a 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -474,7 +474,7 @@ out: */ unsigned long -thread_saved_pc(task_t *t) +thread_saved_pc(struct task_struct *t) { unsigned long base = (unsigned long)task_stack_page(t); unsigned long fp, sp = task_thread_info(t)->pcb.ksp; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index eb8e8dc5ac8e..2fbe4536fe18 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -678,7 +678,7 @@ copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat) */ static void -ia64_mca_modify_comm(const task_t *previous_current) +ia64_mca_modify_comm(const struct task_struct *previous_current) { char *p, comm[sizeof(current->comm)]; if (previous_current->pid) @@ -709,7 +709,7 @@ ia64_mca_modify_comm(const task_t *previous_current) * that we can do backtrace on the MCA/INIT handler code itself. 
*/ -static task_t * +static struct task_struct * ia64_mca_modify_original_stack(struct pt_regs *regs, const struct switch_stack *sw, struct ia64_sal_os_state *sos, @@ -719,7 +719,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, ia64_va va; extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ const pal_min_state_area_t *ms = sos->pal_min_state; - task_t *previous_current; + struct task_struct *previous_current; struct pt_regs *old_regs; struct switch_stack *old_sw; unsigned size = sizeof(struct pt_regs) + @@ -1023,7 +1023,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, pal_processor_state_info_t *psp = (pal_processor_state_info_t *) &sos->proc_state_param; int recover, cpu = smp_processor_id(); - task_t *previous_current; + struct task_struct *previous_current; struct ia64_mca_notify_die nd = { .sos = sos, .monarch_cpu = &monarch_cpu }; @@ -1352,7 +1352,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, { static atomic_t slaves; static atomic_t monarchs; - task_t *previous_current; + struct task_struct *previous_current; int cpu = smp_processor_id(); struct ia64_mca_notify_die nd = { .sos = sos, .monarch_cpu = &monarch_cpu }; diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index e1960979be29..6203ed4ec8cf 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -124,7 +124,7 @@ extern void __devinit calibrate_delay (void); extern void start_ap (void); extern unsigned long ia64_iobase; -task_t *task_for_booting_cpu; +struct task_struct *task_for_booting_cpu; /* * State for each CPU diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index ecfd637d702a..01e7fa86aa43 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -65,7 +65,7 @@ need_resched: #endif FEXPORT(ret_from_fork) - jal schedule_tail # a0 = task_t *prev + jal schedule_tail # a0 = struct task_struct *prev FEXPORT(syscall_exit) local_irq_disable # make sure need_resched and diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 02237a685ec7..4dcc39f42951 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -47,7 +47,7 @@ unsigned long mt_fpemul_threshold = 0; * used in sys_sched_set/getaffinity() in kernel/sched.c, so * cloned here. */ -static inline task_t *find_process_by_pid(pid_t pid) +static inline struct task_struct *find_process_by_pid(pid_t pid) { return pid ? 
find_task_by_pid(pid) : current; } @@ -62,7 +62,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, cpumask_t new_mask; cpumask_t effective_mask; int retval; - task_t *p; + struct task_struct *p; if (len < sizeof(new_mask)) return -EINVAL; @@ -127,7 +127,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned int real_len; cpumask_t mask; int retval; - task_t *p; + struct task_struct *p; real_len = sizeof(mask); if (len < real_len) diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c index a9c1443fc548..8368c2dbe635 100644 --- a/arch/um/kernel/tt/process_kern.c +++ b/arch/um/kernel/tt/process_kern.c @@ -119,7 +119,7 @@ void suspend_new_thread(int fd) panic("read failed in suspend_new_thread, err = %d", -err); } -void schedule_tail(task_t *prev); +void schedule_tail(struct task_struct *prev); static void new_thread_handler(int sig) { diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 6fb77952562d..bfdb90242a90 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -2336,7 +2336,7 @@ static int fionbio(struct file *file, int __user *p) static int tiocsctty(struct tty_struct *tty, int arg) { - task_t *p; + struct task_struct *p; if (current->signal->leader && (current->signal->session == tty->session)) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 9c677bbd0b08..19ffb043abbc 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -120,7 +120,7 @@ struct epoll_filefd { */ struct wake_task_node { struct list_head llink; - task_t *task; + struct task_struct *task; wait_queue_head_t *wq; }; @@ -413,7 +413,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq) { int wake_nests = 0; unsigned long flags; - task_t *this_task = current; + struct task_struct *this_task = current; struct list_head *lsthead = &psw->wake_task_list, *lnk; struct wake_task_node *tncur; struct wake_task_node tnode; diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 8bc9869e5765..8adcde0934ca 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -68,7 +68,7 @@ struct thread_info { #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR -#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) +#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) #endif /* !__ASSEMBLY */ diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 66c4742f09e7..311cebf44eff 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -18,7 +18,7 @@ * switch_to(prev, next) should switch from task `prev' to `next' * `prev' will never be the same as `next'. 
* - * `next' and `prev' should be task_t, but it isn't always defined + * `next' and `prev' should be struct task_struct, but it isn't always defined */ #define switch_to(prev, next, last) do { \ diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index b752e5cbb830..ce2e60664a86 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h @@ -12,7 +12,7 @@ */ #define switch_to(prev, next, last) do { \ - task_t *__last; \ + struct task_struct *__last; \ register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \ register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \ register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 8ebddba4448d..c2797f04d931 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu); extern rwlock_t tasklist_lock; extern spinlock_t mmlist_lock; -typedef struct task_struct task_t; +struct task_struct; extern void sched_init(void); extern void sched_init_smp(void); -extern void init_idle(task_t *idle, int cpu); +extern void init_idle(struct task_struct *idle, int cpu); extern cpumask_t nohz_cpu_mask; @@ -383,7 +383,7 @@ struct signal_struct { wait_queue_head_t wait_chldexit; /* for wait4() */ /* current thread group signal load-balancing target: */ - task_t *curr_target; + struct task_struct *curr_target; /* shared signal handling: */ struct sigpending shared_pending; @@ -699,7 +699,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp); ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK -extern void prefetch_stack(struct task_struct*); +extern void prefetch_stack(struct task_struct *t); #else static inline void prefetch_stack(struct task_struct *t) { } #endif @@ -1031,9 +1031,9 @@ static inline void put_task_struct(struct task_struct *t) #define used_math() tsk_used_math(current) #ifdef CONFIG_SMP -extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); +extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask); #else -static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) +static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { if (!cpu_isset(0, new_mask)) return -EINVAL; @@ -1042,7 +1042,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) #endif extern unsigned long long sched_clock(void); -extern unsigned long long current_sched_time(const task_t *current_task); +extern unsigned long long +current_sched_time(const struct task_struct *current_task); /* sched_exec is called by processes performing an exec */ #ifdef CONFIG_SMP @@ -1060,27 +1061,27 @@ static inline void idle_task_exit(void) {} extern void sched_idle_next(void); #ifdef CONFIG_RT_MUTEXES -extern int rt_mutex_getprio(task_t *p); -extern void rt_mutex_setprio(task_t *p, int prio); -extern void rt_mutex_adjust_pi(task_t *p); +extern int rt_mutex_getprio(struct task_struct *p); +extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern void rt_mutex_adjust_pi(struct task_struct *p); #else -static inline int rt_mutex_getprio(task_t *p) +static inline int rt_mutex_getprio(struct task_struct *p) { return p->normal_prio; } # define rt_mutex_adjust_pi(p) do { } while (0) #endif -extern void set_user_nice(task_t *p, long nice); -extern int task_prio(const task_t *p); -extern int task_nice(const task_t *p); -extern int can_nice(const task_t *p, const int nice); -extern int 
task_curr(const task_t *p); +extern void set_user_nice(struct task_struct *p, long nice); +extern int task_prio(const struct task_struct *p); +extern int task_nice(const struct task_struct *p); +extern int can_nice(const struct task_struct *p, const int nice); +extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); -extern task_t *idle_task(int cpu); -extern task_t *curr_task(int cpu); -extern void set_curr_task(int cpu, task_t *p); +extern struct task_struct *idle_task(int cpu); +extern struct task_struct *curr_task(int cpu); +extern void set_curr_task(int cpu, struct task_struct *p); void yield(void); @@ -1137,8 +1138,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk, #else static inline void kick_process(struct task_struct *tsk) { } #endif -extern void FASTCALL(sched_fork(task_t * p, int clone_flags)); -extern void FASTCALL(sched_exit(task_t * p)); +extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags)); +extern void FASTCALL(sched_exit(struct task_struct * p)); extern int in_group_p(gid_t); extern int in_egroup_p(gid_t); @@ -1243,17 +1244,17 @@ extern NORET_TYPE void do_group_exit(int); extern void daemonize(const char *, ...); extern int allow_signal(int); extern int disallow_signal(int); -extern task_t *child_reaper; +extern struct task_struct *child_reaper; extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); -task_t *fork_idle(int); +struct task_struct *fork_idle(int); extern void set_task_comm(struct task_struct *tsk, char *from); extern void get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -extern void wait_task_inactive(task_t * p); +extern void wait_task_inactive(struct task_struct * p); #else #define wait_task_inactive(p) do { } while (0) #endif @@ -1279,13 +1280,13 @@ extern void wait_task_inactive(task_t * p); /* de_thread depends on thread_group_leader not being a pid based check */ #define thread_group_leader(p) (p == p->group_leader) -static inline task_t *next_thread(const task_t *p) +static inline struct task_struct *next_thread(const struct task_struct *p) { return list_entry(rcu_dereference(p->thread_group.next), - task_t, thread_group); + struct task_struct, thread_group); } -static inline int thread_group_empty(task_t *p) +static inline int thread_group_empty(struct task_struct *p) { return list_empty(&p->thread_group); } diff --git a/kernel/capability.c b/kernel/capability.c index 1a4d8a40d3f9..c7685ad00a97 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -46,7 +46,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) int ret = 0; pid_t pid; __u32 version; - task_t *target; + struct task_struct *target; struct __user_cap_data_struct data; if (get_user(version, &header->version)) @@ -96,7 +96,7 @@ static inline int cap_set_pg(int pgrp, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - task_t *g, *target; + struct task_struct *g, *target; int ret = -EPERM; int found = 0; @@ -128,7 +128,7 @@ static inline int cap_set_all(kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - task_t *g, *target; + struct task_struct *g, *target; int ret = -EPERM; int found = 0; @@ -172,7 +172,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) 
{ kernel_cap_t inheritable, permitted, effective; __u32 version; - task_t *target; + struct task_struct *target; int ret; pid_t pid; diff --git a/kernel/exit.c b/kernel/exit.c index c595db14cf25..6664c084783d 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -134,8 +134,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp) void release_task(struct task_struct * p) { + struct task_struct *leader; int zap_leader; - task_t *leader; repeat: atomic_dec(&p->user->processes); write_lock_irq(&tasklist_lock); @@ -209,7 +209,7 @@ out: * * "I ask you, have you ever known what it is to be an orphan?" */ -static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task) +static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task) { struct task_struct *p; int ret = 1; @@ -582,7 +582,8 @@ static void exit_mm(struct task_struct * tsk) mmput(mm); } -static inline void choose_new_parent(task_t *p, task_t *reaper) +static inline void +choose_new_parent(struct task_struct *p, struct task_struct *reaper) { /* * Make sure we're not reparenting to ourselves and that @@ -592,7 +593,8 @@ static inline void choose_new_parent(task_t *p, task_t *reaper) p->real_parent = reaper; } -static void reparent_thread(task_t *p, task_t *father, int traced) +static void +reparent_thread(struct task_struct *p, struct task_struct *father, int traced) { /* We don't want people slaying init. */ if (p->exit_signal != -1) @@ -656,8 +658,8 @@ static void reparent_thread(task_t *p, task_t *father, int traced) * group, and if no such member exists, give it to * the global child reaper process (ie "init") */ -static void forget_original_parent(struct task_struct * father, - struct list_head *to_release) +static void +forget_original_parent(struct task_struct *father, struct list_head *to_release) { struct task_struct *p, *reaper = father; struct list_head *_p, *_n; @@ -680,7 +682,7 @@ static void forget_original_parent(struct task_struct * father, */ list_for_each_safe(_p, _n, &father->children) { int ptrace; - p = list_entry(_p,struct task_struct,sibling); + p = list_entry(_p, struct task_struct, sibling); ptrace = p->ptrace; @@ -709,7 +711,7 @@ static void forget_original_parent(struct task_struct * father, list_add(&p->ptrace_list, to_release); } list_for_each_safe(_p, _n, &father->ptrace_children) { - p = list_entry(_p,struct task_struct,ptrace_list); + p = list_entry(_p, struct task_struct, ptrace_list); choose_new_parent(p, reaper); reparent_thread(p, father, 1); } @@ -829,7 +831,7 @@ static void exit_notify(struct task_struct *tsk) list_for_each_safe(_p, _n, &ptrace_dead) { list_del_init(_p); - t = list_entry(_p,struct task_struct,ptrace_list); + t = list_entry(_p, struct task_struct, ptrace_list); release_task(t); } @@ -1010,7 +1012,7 @@ asmlinkage void sys_exit_group(int error_code) do_group_exit((error_code & 0xff) << 8); } -static int eligible_child(pid_t pid, int options, task_t *p) +static int eligible_child(pid_t pid, int options, struct task_struct *p) { if (pid > 0) { if (p->pid != pid) @@ -1051,12 +1053,13 @@ static int eligible_child(pid_t pid, int options, task_t *p) return 1; } -static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid, +static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, int why, int status, struct siginfo __user *infop, struct rusage __user *rusagep) { int retval = rusagep ? 
getrusage(p, RUSAGE_BOTH, rusagep) : 0; + put_task_struct(p); if (!retval) retval = put_user(SIGCHLD, &infop->si_signo); @@ -1081,7 +1084,7 @@ static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid, * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_zombie(task_t *p, int noreap, +static int wait_task_zombie(struct task_struct *p, int noreap, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { @@ -1243,8 +1246,8 @@ static int wait_task_zombie(task_t *p, int noreap, * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap, - struct siginfo __user *infop, +static int wait_task_stopped(struct task_struct *p, int delayed_group_leader, + int noreap, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { int retval, exit_code; @@ -1358,7 +1361,7 @@ bail_ref: * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_continued(task_t *p, int noreap, +static int wait_task_continued(struct task_struct *p, int noreap, struct siginfo __user *infop, int __user *stat_addr, struct rusage __user *ru) { @@ -1444,7 +1447,7 @@ repeat: int ret; list_for_each(_p,&tsk->children) { - p = list_entry(_p,struct task_struct,sibling); + p = list_entry(_p, struct task_struct, sibling); ret = eligible_child(pid, options, p); if (!ret) diff --git a/kernel/fork.c b/kernel/fork.c index 54953d8a6f17..56e4e07e45f7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -933,13 +933,13 @@ static inline void rt_mutex_init_task(struct task_struct *p) * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. 
*/ -static task_t *copy_process(unsigned long clone_flags, - unsigned long stack_start, - struct pt_regs *regs, - unsigned long stack_size, - int __user *parent_tidptr, - int __user *child_tidptr, - int pid) +static struct task_struct *copy_process(unsigned long clone_flags, + unsigned long stack_start, + struct pt_regs *regs, + unsigned long stack_size, + int __user *parent_tidptr, + int __user *child_tidptr, + int pid) { int retval; struct task_struct *p = NULL; @@ -1294,9 +1294,9 @@ struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs) return regs; } -task_t * __devinit fork_idle(int cpu) +struct task_struct * __devinit fork_idle(int cpu) { - task_t *task; + struct task_struct *task; struct pt_regs regs; task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0); diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 617304ce67db..d17766d40dab 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -669,7 +669,7 @@ static int hrtimer_wakeup(struct hrtimer *timer) return HRTIMER_NORESTART; } -void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task) +void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) { sl->timer.function = hrtimer_wakeup; sl->task = task; diff --git a/kernel/pid.c b/kernel/pid.c index eeb836b65ca4..93e212f20671 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -218,7 +218,7 @@ struct pid * fastcall find_pid(int nr) return NULL; } -int fastcall attach_pid(task_t *task, enum pid_type type, int nr) +int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr) { struct pid_link *link; struct pid *pid; @@ -233,7 +233,7 @@ int fastcall attach_pid(task_t *task, enum pid_type type, int nr) return 0; } -void fastcall detach_pid(task_t *task, enum pid_type type) +void fastcall detach_pid(struct task_struct *task, enum pid_type type) { struct pid_link *link; struct pid *pid; @@ -267,7 +267,7 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type) /* * Must be called under rcu_read_lock() or with tasklist_lock read-held. */ -task_t *find_task_by_pid_type(int type, int nr) +struct task_struct *find_task_by_pid_type(int type, int nr) { return pid_task(find_pid(nr), type); } diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 335c5b932e14..9a111f70145c 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -28,7 +28,7 @@ * * Must be called with the tasklist lock write-held. */ -void __ptrace_link(task_t *child, task_t *new_parent) +void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) { BUG_ON(!list_empty(&child->ptrace_list)); if (child->parent == new_parent) @@ -46,7 +46,7 @@ void __ptrace_link(task_t *child, task_t *new_parent) * TASK_TRACED, resume it now. * Requires that irqs be disabled. */ -void ptrace_untrace(task_t *child) +void ptrace_untrace(struct task_struct *child) { spin_lock(&child->sighand->siglock); if (child->state == TASK_TRACED) { @@ -65,7 +65,7 @@ void ptrace_untrace(task_t *child) * * Must be called with the tasklist lock write-held.
*/ -void __ptrace_unlink(task_t *child) +void __ptrace_unlink(struct task_struct *child) { BUG_ON(!child->ptrace); diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c index 353a853bc390..0c1faa950af7 100644 --- a/kernel/rtmutex-debug.c +++ b/kernel/rtmutex-debug.c @@ -96,7 +96,7 @@ void deadlock_trace_off(void) rt_trace_on = 0; } -static void printk_task(task_t *p) +static void printk_task(struct task_struct *p) { if (p) printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); @@ -231,7 +231,8 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) lock->name = name; } -void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task) +void +rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) { } diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c index e82c2f848249..494dac872a13 100644 --- a/kernel/rtmutex-tester.c +++ b/kernel/rtmutex-tester.c @@ -33,7 +33,7 @@ struct test_thread_data { }; static struct test_thread_data thread_data[MAX_RT_TEST_THREADS]; -static task_t *threads[MAX_RT_TEST_THREADS]; +static struct task_struct *threads[MAX_RT_TEST_THREADS]; static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES]; enum test_opcodes { @@ -361,8 +361,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) { struct test_thread_data *td; + struct task_struct *tsk; char *curr = buf; - task_t *tsk; int i; td = container_of(dev, struct test_thread_data, sysdev); diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 91b699aa658b..d2ef13b485e7 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -157,7 +157,7 @@ int max_lock_depth = 1024; * Decreases task's usage by one - may thus free the task. * Returns 0 or -EDEADLK. 
*/ -static int rt_mutex_adjust_prio_chain(task_t *task, +static int rt_mutex_adjust_prio_chain(struct task_struct *task, int deadlock_detect, struct rt_mutex *orig_lock, struct rt_mutex_waiter *orig_waiter, @@ -282,6 +282,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task, spin_unlock_irqrestore(&task->pi_lock, flags); out_put_task: put_task_struct(task); + return ret; } @@ -403,10 +404,10 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int detect_deadlock) { + struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; - task_t *owner = rt_mutex_owner(lock); - int boost = 0, res; unsigned long flags; + int boost = 0, res; spin_lock_irqsave(&current->pi_lock, flags); __rt_mutex_adjust_prio(current); @@ -527,9 +528,9 @@ static void remove_waiter(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { int first = (waiter == rt_mutex_top_waiter(lock)); - int boost = 0; - task_t *owner = rt_mutex_owner(lock); + struct task_struct *owner = rt_mutex_owner(lock); unsigned long flags; + int boost = 0; spin_lock_irqsave(&current->pi_lock, flags); plist_del(&waiter->list_entry, &lock->wait_list); diff --git a/kernel/sched.c b/kernel/sched.c index b0326141f841..021b31219516 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -179,7 +179,7 @@ static unsigned int static_prio_timeslice(int static_prio) return SCALE_PRIO(DEF_TIMESLICE, static_prio); } -static inline unsigned int task_timeslice(task_t *p) +static inline unsigned int task_timeslice(struct task_struct *p) { return static_prio_timeslice(p->static_prio); } @@ -227,7 +227,7 @@ struct runqueue { unsigned long expired_timestamp; unsigned long long timestamp_last_tick; - task_t *curr, *idle; + struct task_struct *curr, *idle; struct mm_struct *prev_mm; prio_array_t *active, *expired, arrays[2]; int best_expired_prio; @@ -240,7 +240,7 @@ struct runqueue { int active_balance; int push_cpu; - task_t *migration_thread; + struct task_struct *migration_thread; struct list_head migration_queue; #endif @@ -291,16 +291,16 @@ static DEFINE_PER_CPU(struct runqueue, runqueues); #endif #ifndef __ARCH_WANT_UNLOCKED_CTXSW -static inline int task_running(runqueue_t *rq, task_t *p) +static inline int task_running(runqueue_t *rq, struct task_struct *p) { return rq->curr == p; } -static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) +static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) { } -static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) +static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) { #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ @@ -317,7 +317,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) } #else /* __ARCH_WANT_UNLOCKED_CTXSW */ -static inline int task_running(runqueue_t *rq, task_t *p) +static inline int task_running(runqueue_t *rq, struct task_struct *p) { #ifdef CONFIG_SMP return p->oncpu; @@ -326,7 +326,7 @@ static inline int task_running(runqueue_t *rq, task_t *p) #endif } -static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) +static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next) { #ifdef CONFIG_SMP /* @@ -343,7 +343,7 @@ static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) #endif } -static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) +static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev) { #ifdef CONFIG_SMP /* @@
-364,7 +364,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) * __task_rq_lock - lock the runqueue a given task resides on. * Must be called interrupts disabled. */ -static inline runqueue_t *__task_rq_lock(task_t *p) +static inline runqueue_t *__task_rq_lock(struct task_struct *p) __acquires(rq->lock) { struct runqueue *rq; @@ -384,7 +384,7 @@ repeat_lock_task: * interrupts. Note the ordering: we can safely lookup the task_rq without * explicitly disabling preemption. */ -static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) +static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags) __acquires(rq->lock) { struct runqueue *rq; @@ -541,7 +541,7 @@ static inline runqueue_t *this_rq_lock(void) * long it was from the *first* time it was queued to the time that it * finally hit a cpu. */ -static inline void sched_info_dequeued(task_t *t) +static inline void sched_info_dequeued(struct task_struct *t) { t->sched_info.last_queued = 0; } @@ -551,7 +551,7 @@ static inline void sched_info_dequeued(task_t *t) * long it was waiting to run. We also note when it began so that we * can keep stats on how long its timeslice is. */ -static void sched_info_arrive(task_t *t) +static void sched_info_arrive(struct task_struct *t) { unsigned long now = jiffies, diff = 0; struct runqueue *rq = task_rq(t); @@ -585,7 +585,7 @@ static void sched_info_arrive(task_t *t) * the timestamp if it is already not set. It's assumed that * sched_info_dequeued() will clear that stamp when appropriate. */ -static inline void sched_info_queued(task_t *t) +static inline void sched_info_queued(struct task_struct *t) { if (!t->sched_info.last_queued) t->sched_info.last_queued = jiffies; @@ -595,7 +595,7 @@ static inline void sched_info_queued(task_t *t) * Called when a process ceases being the active-running process, either * voluntarily or involuntarily. Now we can calculate how long we ran. */ -static inline void sched_info_depart(task_t *t) +static inline void sched_info_depart(struct task_struct *t) { struct runqueue *rq = task_rq(t); unsigned long diff = jiffies - t->sched_info.last_arrival; @@ -611,7 +611,8 @@ static inline void sched_info_depart(task_t *t) * their time slice. (This may also be called when switching to or from * the idle task.) We are only called when prev != next. */ -static inline void sched_info_switch(task_t *prev, task_t *next) +static inline void +sched_info_switch(struct task_struct *prev, struct task_struct *next) { struct runqueue *rq = task_rq(prev); @@ -683,7 +684,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array) * Both properties are important to certain workloads. 
*/ -static inline int __normal_prio(task_t *p) +static inline int __normal_prio(struct task_struct *p) { int bonus, prio; @@ -719,7 +720,7 @@ static inline int __normal_prio(task_t *p) #define RTPRIO_TO_LOAD_WEIGHT(rp) \ (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) -static void set_load_weight(task_t *p) +static void set_load_weight(struct task_struct *p) { if (has_rt_policy(p)) { #ifdef CONFIG_SMP @@ -737,23 +738,25 @@ static void set_load_weight(task_t *p) p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio); } -static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p) +static inline void +inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) { rq->raw_weighted_load += p->load_weight; } -static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p) +static inline void +dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p) { rq->raw_weighted_load -= p->load_weight; } -static inline void inc_nr_running(task_t *p, runqueue_t *rq) +static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq) { rq->nr_running++; inc_raw_weighted_load(rq, p); } -static inline void dec_nr_running(task_t *p, runqueue_t *rq) +static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq) { rq->nr_running--; dec_raw_weighted_load(rq, p); @@ -766,7 +769,7 @@ static inline void dec_nr_running(task_t *p, runqueue_t *rq) * setprio syscalls, and whenever the interactivity * estimator recalculates. */ -static inline int normal_prio(task_t *p) +static inline int normal_prio(struct task_struct *p) { int prio; @@ -784,7 +787,7 @@ static inline int normal_prio(task_t *p) * interactivity modifiers. Will be RT if the task got * RT-boosted. If not then it returns p->normal_prio. */ -static int effective_prio(task_t *p) +static int effective_prio(struct task_struct *p) { p->normal_prio = normal_prio(p); /* @@ -800,7 +803,7 @@ static int effective_prio(task_t *p) /* * __activate_task - move a task to the runqueue. */ -static void __activate_task(task_t *p, runqueue_t *rq) +static void __activate_task(struct task_struct *p, runqueue_t *rq) { prio_array_t *target = rq->active; @@ -813,7 +816,7 @@ static void __activate_task(task_t *p, runqueue_t *rq) /* * __activate_idle_task - move idle task to the _front_ of runqueue. */ -static inline void __activate_idle_task(task_t *p, runqueue_t *rq) +static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq) { enqueue_task_head(p, rq->active); inc_nr_running(p, rq); @@ -823,7 +826,7 @@ static inline void __activate_idle_task(task_t *p, runqueue_t *rq) * Recalculate p->normal_prio and p->prio after having slept, * updating the sleep-average too: */ -static int recalc_task_prio(task_t *p, unsigned long long now) +static int recalc_task_prio(struct task_struct *p, unsigned long long now) { /* Caller must always ensure 'now >= p->timestamp' */ unsigned long sleep_time = now - p->timestamp; @@ -895,7 +898,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now) * Update all the scheduling statistics stuff. (sleep average * calculation, priority modifiers, etc.) 
*/ -static void activate_task(task_t *p, runqueue_t *rq, int local) +static void activate_task(struct task_struct *p, runqueue_t *rq, int local) { unsigned long long now; @@ -962,7 +965,7 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq) #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) #endif -static void resched_task(task_t *p) +static void resched_task(struct task_struct *p) { int cpu; @@ -983,7 +986,7 @@ static void resched_task(task_t *p) smp_send_reschedule(cpu); } #else -static inline void resched_task(task_t *p) +static inline void resched_task(struct task_struct *p) { assert_spin_locked(&task_rq(p)->lock); set_tsk_need_resched(p); @@ -994,7 +997,7 @@ static inline void resched_task(task_t *p) * task_curr - is this task currently executing on a CPU? * @p: the task in question. */ -inline int task_curr(const task_t *p) +inline int task_curr(const struct task_struct *p) { return cpu_curr(task_cpu(p)) == p; } @@ -1009,7 +1012,7 @@ unsigned long weighted_cpuload(const int cpu) typedef struct { struct list_head list; - task_t *task; + struct task_struct *task; int dest_cpu; struct completion done; @@ -1019,7 +1022,8 @@ typedef struct { * The task's runqueue lock must be held. * Returns true if you have to wait for migration thread. */ -static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) +static int +migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req) { runqueue_t *rq = task_rq(p); @@ -1049,7 +1053,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) * smp_call_function() if an IPI is sent by the same process we are * waiting to become inactive. */ -void wait_task_inactive(task_t *p) +void wait_task_inactive(struct task_struct *p) { unsigned long flags; runqueue_t *rq; @@ -1083,7 +1087,7 @@ repeat: * to another CPU then no harm is done and the purpose has been * achieved as well. */ -void kick_process(task_t *p) +void kick_process(struct task_struct *p) { int cpu; @@ -1286,7 +1290,7 @@ nextlevel: * Returns the CPU we should wake onto. */ #if defined(ARCH_HAS_SCHED_WAKE_IDLE) -static int wake_idle(int cpu, task_t *p) +static int wake_idle(int cpu, struct task_struct *p) { cpumask_t tmp; struct sched_domain *sd; @@ -1309,7 +1313,7 @@ static int wake_idle(int cpu, task_t *p) return cpu; } #else -static inline int wake_idle(int cpu, task_t *p) +static inline int wake_idle(int cpu, struct task_struct *p) { return cpu; } @@ -1329,7 +1333,7 @@ static inline int wake_idle(int cpu, task_t *p) * * returns failure only if the task is already active. */ -static int try_to_wake_up(task_t *p, unsigned int state, int sync) +static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) { int cpu, this_cpu, success = 0; unsigned long flags; @@ -1487,14 +1491,14 @@ out: return success; } -int fastcall wake_up_process(task_t *p) +int fastcall wake_up_process(struct task_struct *p) { return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); } EXPORT_SYMBOL(wake_up_process); -int fastcall wake_up_state(task_t *p, unsigned int state) +int fastcall wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); } @@ -1503,7 +1507,7 @@ int fastcall wake_up_state(task_t *p, unsigned int state) * Perform scheduler related setup for a newly forked process p. * p is forked by current. 
*/ -void fastcall sched_fork(task_t *p, int clone_flags) +void fastcall sched_fork(struct task_struct *p, int clone_flags) { int cpu = get_cpu(); @@ -1571,7 +1575,7 @@ void fastcall sched_fork(task_t *p, int clone_flags) * that must be done for every newly created context, then puts the task * on the runqueue and wakes it. */ -void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) +void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) { unsigned long flags; int this_cpu, cpu; @@ -1655,7 +1659,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) * artificially, because any timeslice recovered here * was given away by the parent in the first place.) */ -void fastcall sched_exit(task_t *p) +void fastcall sched_exit(struct task_struct *p) { unsigned long flags; runqueue_t *rq; @@ -1689,7 +1693,7 @@ void fastcall sched_exit(task_t *p) * prepare_task_switch sets up locking and calls architecture specific * hooks. */ -static inline void prepare_task_switch(runqueue_t *rq, task_t *next) +static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next) { prepare_lock_switch(rq, next); prepare_arch_switch(next); @@ -1710,7 +1714,7 @@ static inline void prepare_task_switch(runqueue_t *rq, task_t *next) * with the lock held can cause deadlocks; see schedule() for * details.) */ -static inline void finish_task_switch(runqueue_t *rq, task_t *prev) +static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev) __releases(rq->lock) { struct mm_struct *mm = rq->prev_mm; @@ -1748,7 +1752,7 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev) * schedule_tail - first thing a freshly forked thread must call. * @prev: the thread we just switched away from. */ -asmlinkage void schedule_tail(task_t *prev) +asmlinkage void schedule_tail(struct task_struct *prev) __releases(rq->lock) { runqueue_t *rq = this_rq(); @@ -1765,8 +1769,9 @@ asmlinkage void schedule_tail(task_t *prev) * context_switch - switch to the new MM and the new * thread's register state. */ -static inline -task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next) +static inline struct task_struct * +context_switch(runqueue_t *rq, struct task_struct *prev, + struct task_struct *next) { struct mm_struct *mm = next->mm; struct mm_struct *oldmm = prev->active_mm; @@ -1937,7 +1942,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) * allow dest_cpu, which will force the cpu onto dest_cpu. Then * the cpu_allowed mask is restored. */ -static void sched_migrate_task(task_t *p, int dest_cpu) +static void sched_migrate_task(struct task_struct *p, int dest_cpu) { migration_req_t req; runqueue_t *rq; @@ -1952,11 +1957,13 @@ static void sched_migrate_task(task_t *p, int dest_cpu) if (migrate_task(p, dest_cpu, &req)) { /* Need to wait for migration thread (might exit: take ref). */ struct task_struct *mt = rq->migration_thread; + get_task_struct(mt); task_rq_unlock(rq, &flags); wake_up_process(mt); put_task_struct(mt); wait_for_completion(&req.done); + return; } out: @@ -1980,9 +1987,9 @@ void sched_exec(void) * pull_task - move a task from a remote runqueue to the local runqueue. * Both runqueues must be locked. 
*/ -static -void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, - runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) +static void pull_task(runqueue_t *src_rq, prio_array_t *src_array, + struct task_struct *p, runqueue_t *this_rq, + prio_array_t *this_array, int this_cpu) { dequeue_task(p, src_array); dec_nr_running(p, src_rq); @@ -2003,7 +2010,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? */ static -int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, +int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu, struct sched_domain *sd, enum idle_type idle, int *all_pinned) { @@ -2052,8 +2059,8 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, best_prio_seen, skip_for_load; prio_array_t *array, *dst_array; struct list_head *head, *curr; + struct task_struct *tmp; long rem_load_move; - task_t *tmp; if (max_nr_move == 0 || max_load_move == 0) goto out; @@ -2105,7 +2112,7 @@ skip_bitmap: head = array->queue + idx; curr = head->prev; skip_queue: - tmp = list_entry(curr, task_t, run_list); + tmp = list_entry(curr, struct task_struct, run_list); curr = curr->prev; @@ -2819,7 +2826,7 @@ EXPORT_PER_CPU_SYMBOL(kstat); * Bank in p->sched_time the ns elapsed since the last tick or switch. */ static inline void -update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now) +update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now) { p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); } @@ -2828,7 +2835,7 @@ update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now) * Return current->sched_time plus any more ns on the sched_clock * that have not yet been banked. */ -unsigned long long current_sched_time(const task_t *p) +unsigned long long current_sched_time(const struct task_struct *p) { unsigned long long ns; unsigned long flags; @@ -2945,9 +2952,9 @@ void account_steal_time(struct task_struct *p, cputime_t steal) void scheduler_tick(void) { unsigned long long now = sched_clock(); + struct task_struct *p = current; int cpu = smp_processor_id(); runqueue_t *rq = this_rq(); - task_t *p = current; update_cpu_clock(p, rq, now); @@ -3079,7 +3086,8 @@ static void wake_sleeping_dependent(int this_cpu) * utilize, if another task runs on a sibling. This models the * slowdown effect of other tasks running on siblings: */ -static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) +static inline unsigned long +smt_slice(struct task_struct *p, struct sched_domain *sd) { return p->time_slice * (100 - sd->per_cpu_gain) / 100; } @@ -3090,7 +3098,8 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) * acquire their lock. As we only trylock the normal locking order does not * need to be obeyed. 
*/ -static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) +static int +dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) { struct sched_domain *tmp, *sd = NULL; int ret = 0, i; @@ -3110,8 +3119,8 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) return 0; for_each_cpu_mask(i, sd->span) { + struct task_struct *smt_curr; runqueue_t *smt_rq; - task_t *smt_curr; if (i == this_cpu) continue; @@ -3157,7 +3166,7 @@ static inline void wake_sleeping_dependent(int this_cpu) { } static inline int -dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) +dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) { return 0; } @@ -3211,11 +3220,11 @@ static inline int interactive_sleep(enum sleep_type sleep_type) */ asmlinkage void __sched schedule(void) { + struct task_struct *prev, *next; struct list_head *queue; unsigned long long now; unsigned long run_time; int cpu, idx, new_prio; - task_t *prev, *next; prio_array_t *array; long *switch_count; runqueue_t *rq; @@ -3308,7 +3317,7 @@ need_resched_nonpreemptible: idx = sched_find_first_bit(array->bitmap); queue = array->queue + idx; - next = list_entry(queue->next, task_t, run_list); + next = list_entry(queue->next, struct task_struct, run_list); if (!rt_task(next) && interactive_sleep(next->sleep_type)) { unsigned long long delta = now - next->timestamp; @@ -3776,7 +3785,7 @@ EXPORT_SYMBOL(sleep_on_timeout); * * Used by the rt_mutex code to implement priority inheritance logic. */ -void rt_mutex_setprio(task_t *p, int prio) +void rt_mutex_setprio(struct task_struct *p, int prio) { unsigned long flags; prio_array_t *array; @@ -3817,7 +3826,7 @@ void rt_mutex_setprio(task_t *p, int prio) #endif -void set_user_nice(task_t *p, long nice) +void set_user_nice(struct task_struct *p, long nice) { int old_prio, delta; unsigned long flags; @@ -3873,7 +3882,7 @@ EXPORT_SYMBOL(set_user_nice); * @p: task * @nice: nice value */ -int can_nice(const task_t *p, const int nice) +int can_nice(const struct task_struct *p, const int nice) { /* convert nice value [19,-20] to rlimit style value [1,40] */ int nice_rlim = 20 - nice; @@ -3932,7 +3941,7 @@ asmlinkage long sys_nice(int increment) * RT tasks are offset by -200. Normal tasks are centered * around 0, value goes from -16 to +15. */ -int task_prio(const task_t *p) +int task_prio(const struct task_struct *p) { return p->prio - MAX_RT_PRIO; } @@ -3941,7 +3950,7 @@ int task_prio(const task_t *p) * task_nice - return the nice value of a given task. * @p: the task in question. */ -int task_nice(const task_t *p) +int task_nice(const struct task_struct *p) { return TASK_NICE(p); } @@ -3960,7 +3969,7 @@ int idle_cpu(int cpu) * idle_task - return the idle task for a given cpu. * @cpu: the processor in question. */ -task_t *idle_task(int cpu) +struct task_struct *idle_task(int cpu) { return cpu_rq(cpu)->idle; } @@ -3969,7 +3978,7 @@ task_t *idle_task(int cpu) * find_process_by_pid - find a process with a matching PID value. * @pid: the pid in question. */ -static inline task_t *find_process_by_pid(pid_t pid) +static inline struct task_struct *find_process_by_pid(pid_t pid) { return pid ? 
find_task_by_pid(pid) : current; } @@ -4103,9 +4112,9 @@ EXPORT_SYMBOL_GPL(sched_setscheduler); static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { - int retval; struct sched_param lparam; struct task_struct *p; + int retval; if (!param || pid < 0) return -EINVAL; @@ -4121,6 +4130,7 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) read_unlock_irq(&tasklist_lock); retval = sched_setscheduler(p, policy, &lparam); put_task_struct(p); + return retval; } @@ -4156,8 +4166,8 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) */ asmlinkage long sys_sched_getscheduler(pid_t pid) { + struct task_struct *p; int retval = -EINVAL; - task_t *p; if (pid < 0) goto out_nounlock; @@ -4184,8 +4194,8 @@ out_nounlock: asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) { struct sched_param lp; + struct task_struct *p; int retval = -EINVAL; - task_t *p; if (!param || pid < 0) goto out_nounlock; @@ -4218,9 +4228,9 @@ out_unlock: long sched_setaffinity(pid_t pid, cpumask_t new_mask) { - task_t *p; - int retval; cpumask_t cpus_allowed; + struct task_struct *p; + int retval; lock_cpu_hotplug(); read_lock(&tasklist_lock); @@ -4306,8 +4316,8 @@ cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; long sched_getaffinity(pid_t pid, cpumask_t *mask) { + struct task_struct *p; int retval; - task_t *p; lock_cpu_hotplug(); read_lock(&tasklist_lock); @@ -4592,9 +4602,9 @@ asmlinkage long sys_sched_get_priority_min(int policy) asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) { + struct task_struct *p; int retval = -EINVAL; struct timespec t; - task_t *p; if (pid < 0) goto out_nounlock; @@ -4641,12 +4651,13 @@ static inline struct task_struct *younger_sibling(struct task_struct *p) return list_entry(p->sibling.next,struct task_struct,sibling); } -static void show_task(task_t *p) +static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; + +static void show_task(struct task_struct *p) { - task_t *relative; - unsigned state; + struct task_struct *relative; unsigned long free = 0; - static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; + unsigned state; printk("%-13.13s ", p->comm); state = p->state ? __ffs(p->state) + 1 : 0; @@ -4697,7 +4708,7 @@ static void show_task(task_t *p) void show_state(void) { - task_t *g, *p; + struct task_struct *g, *p; #if (BITS_PER_LONG == 32) printk("\n" @@ -4730,7 +4741,7 @@ void show_state(void) * NOTE: this function does not set the idle thread's NEED_RESCHED * flag, to make booting more robust. */ -void __devinit init_idle(task_t *idle, int cpu) +void __devinit init_idle(struct task_struct *idle, int cpu) { runqueue_t *rq = cpu_rq(cpu); unsigned long flags; @@ -4793,7 +4804,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE; * task must not exit() & deallocate itself prematurely. The * call is not atomic; no spinlocks may be held. 
*/ -int set_cpus_allowed(task_t *p, cpumask_t new_mask) +int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { unsigned long flags; migration_req_t req; @@ -5061,7 +5072,7 @@ void idle_task_exit(void) mmdrop(mm); } -static void migrate_dead(unsigned int dead_cpu, task_t *p) +static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) { struct runqueue *rq = cpu_rq(dead_cpu); @@ -5096,9 +5107,8 @@ static void migrate_dead_tasks(unsigned int dead_cpu) struct list_head *list = &rq->arrays[arr].queue[i]; while (!list_empty(list)) - migrate_dead(dead_cpu, - list_entry(list->next, task_t, - run_list)); + migrate_dead(dead_cpu, list_entry(list->next, + struct task_struct, run_list)); } } } @@ -6801,7 +6811,7 @@ void normalize_rt_tasks(void) * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! */ -task_t *curr_task(int cpu) +struct task_struct *curr_task(int cpu) { return cpu_curr(cpu); } @@ -6821,7 +6831,7 @@ task_t *curr_task(int cpu) * * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! */ -void set_curr_task(int cpu, task_t *p) +void set_curr_task(int cpu, struct task_struct *p) { cpu_curr(cpu) = p; } diff --git a/kernel/timer.c b/kernel/timer.c index b761898d04c8..396a3c024c2c 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1368,7 +1368,7 @@ asmlinkage long sys_getegid(void) static void process_timeout(unsigned long __data) { - wake_up_process((task_t *)__data); + wake_up_process((struct task_struct *)__data); } /** diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 59f0b42bd89e..90d2c6001659 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -51,7 +51,7 @@ struct cpu_workqueue_struct { wait_queue_head_t work_done; struct workqueue_struct *wq; - task_t *thread; + struct task_struct *thread; int run_depth; /* Detect run_workqueue() recursion depth */ } ____cacheline_aligned; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index d46ed0f1dc06..b9af136e5cfa 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -225,7 +225,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints) * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that * we select a process with CAP_SYS_RAW_IO set). */ -static void __oom_kill_task(task_t *p, const char *message) +static void __oom_kill_task(struct task_struct *p, const char *message) { if (p->pid == 1) { WARN_ON(1); @@ -255,10 +255,10 @@ static void __oom_kill_task(task_t *p, const char *message) force_sig(SIGKILL, p); } -static int oom_kill_task(task_t *p, const char *message) +static int oom_kill_task(struct task_struct *p, const char *message) { struct mm_struct *mm; - task_t * g, * q; + struct task_struct *g, *q; mm = p->mm; @@ -316,7 +316,7 @@ static int oom_kill_process(struct task_struct *p, unsigned long points, */ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) { - task_t *p; + struct task_struct *p; unsigned long points = 0; if (printk_ratelimit()) { -- cgit v1.2.3 From 31304c909e6945b005af62cd55a582e9c010a0b4 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Tue, 4 Jul 2006 15:04:39 +1000 Subject: [PATCH] uclinux: fix proc_task()/get_proc_task() naming Fix up a caller for the rename of proc_task() to get_proc_task().
Signed-off-by: Greg Ungerer Signed-off-by: Linus Torvalds --- fs/proc/task_nommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index af69f28277b6..4616ed50ffcd 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -107,7 +107,7 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount * { struct vm_list_struct *vml; struct vm_area_struct *vma; - struct task_struct *task = proc_task(inode); + struct task_struct *task = get_proc_task(inode); struct mm_struct *mm = get_task_mm(task); int result = -ENOENT; -- cgit v1.2.3
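As a footnote to the sched cleanup above: a minimal, self-contained C sketch of the typedef-removal pattern. The struct body, the prio field, the helper names, and the value 120 are illustrative stand-ins, not the kernel's definitions; only the typedef-versus-struct usage mirrors the patch.

    #include <stdio.h>

    /* A bare forward declaration is enough for any prototype that
     * only passes pointers around. */
    struct task_struct;

    /* Old style: a typedef hides the struct type behind an alias. */
    typedef struct task_struct task_t;
    static int task_prio_old(const task_t *p);

    /* New style: the struct type is spelled out at every use site. */
    static int task_prio_new(const struct task_struct *p);

    /* Illustrative definition so the sketch compiles and runs. */
    struct task_struct { int prio; };

    static int task_prio_old(const task_t *p) { return p->prio; }
    static int task_prio_new(const struct task_struct *p) { return p->prio; }

    int main(void)
    {
        struct task_struct t = { .prio = 120 };
        /* task_t and struct task_struct name the same type, so the
         * conversion is purely textual: drop the alias, keep the
         * forward declaration. */
        printf("%d %d\n", task_prio_old(&t), task_prio_new(&t));
        return 0;
    }

Spelling out the struct directly is in line with the kernel's CodingStyle, which discourages typedefs for structure types: the pointer's nature is then visible at every declaration.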