author     Oleg Nesterov <oleg@redhat.com>    2012-08-06 14:13:23 +0200
committer  Oleg Nesterov <oleg@redhat.com>    2012-08-28 18:21:17 +0200
commit     f1a45d023193f7d8e55e384090b645d609325393 (patch)
tree       7f901f3dbd23a7602bf3d4bf774862a3ee5b7640 /kernel/events/uprobes.c
parent     647c42dfd40fec032a4c8525a755160f0765921f (diff)
uprobes: Kill dup_mmap()->uprobe_mmap(), simplify uprobe_mmap/munmap
1. Kill dup_mmap()->uprobe_mmap(), it was only needed to calculate
   new_mm->uprobes_state.count removed by the previous patch.

   If the forking process has a pending uprobe (int3) in vma, it will be
   copied by copy_page_range(), note that it checks vma->anon_vma so
   "Don't copy ptes" is not possible after install_breakpoint() which
   does anon_vma_prepare().

2. Remove is_swbp_at_addr() and "int count" in uprobe_mmap(). Again,
   this was needed for uprobes_state.count.

   As a side effect this fixes the bug pointed out by Srikar, this code
   lacked the necessary put_uprobe().

3. uprobe_munmap() becomes a nop after the previous patch. Remove the
   meaningless code but do not remove the helper, we will need it.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
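The "Don't copy ptes" shortcut mentioned in point 1 is the check at the top of copy_page_range() in mm/memory.c. A rough sketch of that check as it looks in kernels of this era follows; the exact vm_flags list is an assumption and may differ:

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
			       VM_PFNMAP | VM_MIXEDMAP))) {
		if (!vma->anon_vma)
			return 0;	/* skip copying; faults will refill the ptes */
	}

Because install_breakpoint() calls anon_vma_prepare(), any vma carrying a breakpoint has a non-NULL anon_vma, so this early return is never taken for it and copy_page_range() duplicates the page holding the int3 into the child.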
Diffstat (limited to 'kernel/events/uprobes.c')
-rw-r--r--  kernel/events/uprobes.c  30
1 file changed, 3 insertions(+), 27 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 6f1664d217dc..ce59c100d65f 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1010,7 +1010,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	struct list_head tmp_list;
 	struct uprobe *uprobe, *u;
 	struct inode *inode;
-	int ret, count;
+	int ret;
 
 	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
 		return 0;
@@ -1023,8 +1023,6 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
 
 	ret = 0;
-	count = 0;
-
 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
 		if (!ret) {
 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
@@ -1034,19 +1032,11 @@ int uprobe_mmap(struct vm_area_struct *vma)
 			 * We can race against uprobe_register(), see the
 			 * comment near uprobe_hash().
 			 */
-			if (ret == -EEXIST) {
+			if (ret == -EEXIST)
 				ret = 0;
-
-				if (!is_swbp_at_addr(vma->vm_mm, vaddr))
-					continue;
-			}
-
-			if (!ret)
-				count++;
 		}
 		put_uprobe(uprobe);
 	}
-
 	mutex_unlock(uprobes_mmap_hash(inode));
 
 	return ret;
@@ -1057,27 +1047,13 @@ int uprobe_mmap(struct vm_area_struct *vma)
  */
 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	struct list_head tmp_list;
-	struct uprobe *uprobe, *u;
-	struct inode *inode;
-
 	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
 		return;
 
 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
 		return;
 
-	inode = vma->vm_file->f_mapping->host;
-	if (!inode)
-		return;
-
-	mutex_lock(uprobes_mmap_hash(inode));
-	build_probe_list(inode, vma, start, end, &tmp_list);
-
-	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-		put_uprobe(uprobe);
-	}
-	mutex_unlock(uprobes_mmap_hash(inode));
+	/* TODO: unmapping uprobe(s) will need more work */
 }
 
 /* Slot allocation for XOL */
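
For reference, uprobe_mmap() after this patch reads roughly as follows; this is a sketch assembled from the hunks above, with the unchanged lines the diff does not show (the inode lookup, locking, and the install_breakpoint() call) filled in as assumptions from the surrounding code of this era:

int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;
	int ret;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);

	ret = 0;
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!ret) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);

			ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
			/* a racing uprobe_register() may have installed it already */
			if (ret == -EEXIST)
				ret = 0;
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return ret;
}

With the count bookkeeping gone, every uprobe taken off tmp_list now gets its put_uprobe(), which is the missing-put fix mentioned in point 2 of the message.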