summaryrefslogtreecommitdiff
path: root/arch/tile/include/asm/futex.h
blob: 9eaeb3c0878602055d4c34aea52f4d1826aa6884 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */

#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

extern struct __get_user futex_set(int *v, int i);
extern struct __get_user futex_add(int *v, int n);
extern struct __get_user futex_or(int *v, int n);
extern struct __get_user futex_andn(int *v, int n);
extern struct __get_user futex_cmpxchg(int *v, int o, int n);

#ifndef __tilegx__
extern struct __get_user futex_xor(int *v, int n);
#else
static inline struct __get_user futex_xor(int __user *uaddr, int n)
{
	/*
	 * tilegx has no primitive xor operation, so emulate it with a
	 * read followed by a cmpxchg retry loop: keep trying until the
	 * word is unchanged between the read and the compare-exchange.
	 */
	struct __get_user ret = __get_user_4(uaddr);

	while (ret.err == 0) {
		int old = ret.val;
		int desired = old ^ n;

		ret = futex_cmpxchg(uaddr, old, desired);
		/* Stop on fault, or once the cmpxchg actually took. */
		if (ret.err != 0 || ret.val == old)
			break;
	}
	return ret;
}
#endif

/*
 * Decode and execute a FUTEX_WAKE_OP style encoded operation on the
 * user word at @uaddr, then evaluate the encoded comparison against
 * the word's previous value.
 *
 * Returns the (0/1) comparison result on success, -EFAULT on a bad
 * user address, -EINVAL for an out-of-range shift count, or -ENOSYS
 * for an unknown op/cmp code.
 */
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	/*
	 * oparg and cmparg are signed 12-bit fields.  Extract them with
	 * unsigned masking and explicit sign extension rather than
	 * "(x << 8) >> 20": left-shifting a signed int into the sign bit
	 * is undefined behavior in C.
	 */
	int oparg = (encoded_op >> 12) & 0xfff;
	int cmparg = encoded_op & 0xfff;
	int ret;
	struct __get_user asm_ret;

	if (oparg & 0x800)
		oparg -= 0x1000;
	if (cmparg & 0x800)
		cmparg -= 0x1000;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
		/* Reject shift counts that would be undefined behavior. */
		if (oparg < 0 || oparg > 31)
			return -EINVAL;
		oparg = 1 << oparg;
	}

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	/* The futex_* helpers must not sleep while faults are disabled. */
	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		asm_ret = futex_set(uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		asm_ret = futex_add(uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		asm_ret = futex_or(uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		asm_ret = futex_andn(uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		asm_ret = futex_xor(uaddr, oparg);
		break;
	default:
		asm_ret.err = -ENOSYS;
	}
	pagefault_enable();

	ret = asm_ret.err;

	if (!ret) {
		/* asm_ret.val holds the word's value before the op. */
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (asm_ret.val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (asm_ret.val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (asm_ret.val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (asm_ret.val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (asm_ret.val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (asm_ret.val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}

/*
 * Atomically compare-and-exchange the user word at @uaddr from
 * @oldval to @newval.  Returns the word's previous value on success,
 * or -EFAULT if the address is not writable user memory / faults.
 */
static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
						int newval)
{
	struct __get_user result;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	result = futex_cmpxchg(uaddr, oldval, newval);
	if (result.err)
		return result.err;
	return result.val;
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */