Diffstat (limited to 'kernel/sched/syscalls.c')
-rw-r--r-- | kernel/sched/syscalls.c | 37
1 file changed, 17 insertions, 20 deletions
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index ff0e5ab4e37c..c326de1344fb 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -300,20 +300,10 @@ static void __setscheduler_params(struct task_struct *p,
 
 	p->policy = policy;
 
-	if (dl_policy(policy)) {
+	if (dl_policy(policy))
 		__setparam_dl(p, attr);
-	} else if (fair_policy(policy)) {
-		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
-		if (attr->sched_runtime) {
-			p->se.custom_slice = 1;
-			p->se.slice = clamp_t(u64, attr->sched_runtime,
-					      NSEC_PER_MSEC/10,   /* HZ=1000 * 10 */
-					      NSEC_PER_MSEC*100); /* HZ=100  / 10 */
-		} else {
-			p->se.custom_slice = 0;
-			p->se.slice = sysctl_sched_base_slice;
-		}
-	}
+	else if (fair_policy(policy))
+		__setparam_fair(p, attr);
 
 	/* rt-policy tasks do not have a timerslack */
 	if (rt_or_dl_task_policy(p)) {
@@ -378,7 +368,7 @@ static int uclamp_validate(struct task_struct *p,
 	 * blocking operation which obviously cannot be done while holding
 	 * scheduler locks.
 	 */
-	static_branch_enable(&sched_uclamp_used);
+	sched_uclamp_enable();
 
 	return 0;
 }
@@ -885,7 +875,7 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
 	struct sched_param lparam;
 
-	if (!param || pid < 0)
+	if (unlikely(!param || pid < 0))
 		return -EINVAL;
 	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
 		return -EFAULT;
@@ -994,7 +984,7 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 	struct sched_attr attr;
 	int retval;
 
-	if (!uattr || pid < 0 || flags)
+	if (unlikely(!uattr || pid < 0 || flags))
 		return -EINVAL;
 
 	retval = sched_copy_attr(uattr, &attr);
@@ -1059,7 +1049,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 	struct task_struct *p;
 	int retval;
 
-	if (!param || pid < 0)
+	if (unlikely(!param || pid < 0))
 		return -EINVAL;
 
 	scoped_guard (rcu) {
@@ -1095,8 +1085,8 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	struct task_struct *p;
 	int retval;
 
-	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
-	    usize < SCHED_ATTR_SIZE_VER0 || flags)
+	if (unlikely(!uattr || pid < 0 || usize > PAGE_SIZE ||
+		     usize < SCHED_ATTR_SIZE_VER0 || flags))
 		return -EINVAL;
 
 	scoped_guard (rcu) {
@@ -1140,6 +1130,13 @@ int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
 		return 0;
 
 	/*
+	 * The special/sugov task isn't part of regular bandwidth/admission
+	 * control so let userspace change affinities.
+	 */
+	if (dl_entity_is_special(&p->dl))
+		return 0;
+
+	/*
 	 * Since bandwidth control happens on root_domain basis,
 	 * if admission test is enabled, we only admit -deadline
 	 * tasks allowed to run on all the CPUs in the task's
@@ -1433,7 +1430,7 @@ int __sched yield_to(struct task_struct *p, bool preempt)
 	struct rq *rq, *p_rq;
 	int yielded = 0;
 
-	scoped_guard (irqsave) {
+	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
 		rq = this_rq();
 
 again:
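
Note: the fair-policy branch in __setscheduler_params() now just calls __setparam_fair(). The helper itself is not part of this hunk; what follows is only a sketch of what it plausibly contains, assuming it does nothing more than factor out the open-coded nice/slice setup deleted above (the name comes from the call site, the body from the removed lines).

static void __setparam_fair(struct task_struct *p, const struct sched_attr *attr)
{
	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
	if (attr->sched_runtime) {
		/* Clamp a user-supplied slice to the same bounds as before. */
		p->se.custom_slice = 1;
		p->se.slice = clamp_t(u64, attr->sched_runtime,
				      NSEC_PER_MSEC/10,   /* HZ=1000 * 10 */
				      NSEC_PER_MSEC*100); /* HZ=100  / 10 */
	} else {
		/* No custom runtime: fall back to the default base slice. */
		p->se.custom_slice = 0;
		p->se.slice = sysctl_sched_base_slice;
	}
}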
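
Similarly, uclamp_validate() now calls sched_uclamp_enable() instead of flipping the static key directly. That helper is defined outside this diff; as an assumption only, it is presumably a thin wrapper around the removed call, roughly along these lines (the key name is taken from the removed line, the early-return check is hypothetical).

static inline void sched_uclamp_enable(void)
{
	/* Skip the (blocking) static-branch flip if uclamp is already in use. */
	if (static_key_enabled(&sched_uclamp_used))
		return;
	static_branch_enable(&sched_uclamp_used);
}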