author	Ingo Molnar <mingo@elte.hu>	2009-07-03 08:30:12 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-07-24 12:46:53 +0200
commit	3c96a21d186d7cadff3e356ecbea8ee9ad2692aa (patch)
tree	387a607c5cff5b23c16d599ac78fadbbba6ba163 /ipc
parent	769610edf15e38b8fa5f6beac8e6bfaeaff690d1 (diff)
ipc: Make the ipc code -rt aware
RT serializes the code with the (rt)spinlock but keeps preemption
enabled. Some parts of the code need to be atomic nevertheless.

Protect them with preempt_disable_rt()/preempt_enable_rt() pairs.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
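For context: the preempt_*_rt() helpers used below come from the -rt
patch set. They turn into real preemption control only when PREEMPT_RT
is configured and compile away on mainline; the _nort() variants do the
opposite. A minimal sketch of plausible definitions, following the -rt
tree's convention (illustrative, not a quote of the -rt headers):

#ifdef CONFIG_PREEMPT_RT
/* On RT these are real: the marked section must not be preempted. */
# define preempt_disable_rt()		preempt_disable()
# define preempt_enable_rt()		preempt_enable()
/* ...while the nort variants compile away. */
# define preempt_disable_nort()		do { } while (0)
# define preempt_enable_nort()		do { } while (0)
#else
/* On mainline the rt variants vanish, so behaviour is unchanged. */
# define preempt_disable_rt()		do { } while (0)
# define preempt_enable_rt()		do { } while (0)
# define preempt_disable_nort()		preempt_disable()
# define preempt_enable_nort()		preempt_enable()
#endif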
Diffstat (limited to 'ipc')
-rw-r--r--	ipc/mqueue.c	5
-rw-r--r--	ipc/msg.c	16
-rw-r--r--	ipc/sem.c	6
3 files changed, 27 insertions, 0 deletions
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c5e68adc6732..63a47f7c0c40 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -820,12 +820,17 @@ static inline void pipelined_send(struct mqueue_inode_info *info,
struct msg_msg *message,
struct ext_wait_queue *receiver)
{
+ /*
+ * Keep them in one critical section for PREEMPT_RT:
+ */
+ preempt_disable_rt();
receiver->msg = message;
list_del(&receiver->list);
receiver->state = STATE_PENDING;
wake_up_process(receiver->task);
smp_wmb();
receiver->state = STATE_READY;
+ preempt_enable_rt();
}
/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
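What the new critical section protects: on PREEMPT_RT the queue lock is
a preemptible rtmutex, so the sender can be preempted between
wake_up_process() and the STATE_READY store. A sketch of the handoff
window (illustrative timeline, not kernel source):

/*
 *   sender                            woken receiver
 *   ------                            --------------
 *   receiver->msg = message;
 *   receiver->state = STATE_PENDING;
 *   wake_up_process(receiver->task);
 *      <-- preemptible window on RT -->
 *                                     sees STATE_PENDING,
 *                                     must not trust ->msg yet
 *   smp_wmb();
 *   receiver->state = STATE_READY;
 *                                     sees STATE_READY,
 *                                     handoff complete
 */

If the (possibly higher-priority) receiver runs inside that window, it
observes STATE_PENDING and the handoff stalls until the sender is
scheduled again; preempt_disable_rt() keeps the publish sequence atomic
on RT.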
diff --git a/ipc/msg.c b/ipc/msg.c
index 2ceab7f12fcb..6de2720b369b 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -259,12 +259,20 @@ static void expunge_all(struct msg_queue *msq, int res)
while (tmp != &msq->q_receivers) {
struct msg_receiver *msr;
+ /*
+ * Make sure that the wakeup doesn't preempt
+ * this CPU prematurely. (on PREEMPT_RT)
+ */
+ preempt_disable_rt();
+
msr = list_entry(tmp, struct msg_receiver, r_list);
tmp = tmp->next;
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(res);
+
+ preempt_enable_rt();
}
}
@@ -611,6 +619,12 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
!security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
msr->r_msgtype, msr->r_mode)) {
+ /*
+ * Make sure that the wakeup doesn't preempt
+ * this CPU prematurely. (on PREEMPT_RT)
+ */
+ preempt_disable_rt();
+
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
msr->r_msg = NULL;
@@ -624,9 +638,11 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = msg;
+ preempt_enable_rt();
return 1;
}
+ preempt_enable_rt();
}
}
return 0;
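In msg.c, r_msg doubles as a wakeup-in-progress flag: it is set to NULL
before wake_up_process() and to its final value (the message, or
ERR_PTR(res)) only after smp_mb(). The receiver side in do_msgrcv()
busy-waits for the final value, roughly like this (simplified sketch of
the receiver loop; msr_d is the receiver's local struct msg_receiver):

	/* A NULL r_msg means a wakeup is still in progress (sketch). */
	msg = (struct msg_msg *)msr_d.r_msg;
	while (msg == NULL) {
		cpu_relax();
		msg = (struct msg_msg *)msr_d.r_msg;
	}

On RT, a receiver that preempts the waker inside the handoff would spin
here while the task that must complete the r_msg store cannot run on
that CPU; the preempt_disable_rt()/preempt_enable_rt() pairs above close
that window.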
diff --git a/ipc/sem.c b/ipc/sem.c
index 87c2b641fd7b..3ee355461489 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -415,6 +415,11 @@ static void update_queue (struct sem_array * sma)
struct sem_queue *n;
/*
+ * Make sure that the wakeup doesn't preempt
+ * _this_ CPU prematurely. (on PREEMPT_RT)
+ */
+ preempt_disable_rt();
+ /*
* Continue scanning. The next operation
* that must be checked depends on the type of the
* completed operation:
@@ -450,6 +455,7 @@ static void update_queue (struct sem_array * sma)
*/
smp_wmb();
q->status = error;
+ preempt_enable_rt();
q = n;
} else {
q = list_entry(q->list.next, struct sem_queue, list);
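sem.c follows the same handshake: update_queue() sets q->status to
IN_WAKEUP, wakes the sleeper, and only after smp_wmb() publishes the
real result. The sleeper in semtimedop() spins on the intermediate
value, roughly (simplified sketch):

	/* IN_WAKEUP means the waker is mid-handoff (sketch). */
	error = q.status;
	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q.status;
	}

Without preempt_disable_rt(), a higher-priority sleeper could preempt
update_queue() inside the handoff and spin on IN_WAKEUP indefinitely on
the same CPU; disabling preemption across the wakeup on RT prevents
that.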