Diffstat (limited to 'kernel')
-rw-r--r--   kernel/futex.c                       4
-rw-r--r--   kernel/locking/lockdep.c            20
-rw-r--r--   kernel/params.c                      2
-rw-r--r--   kernel/power/process.c               2
-rw-r--r--   kernel/printk/printk_ringbuffer.c    2
-rw-r--r--   kernel/rcu/tree.c                    2
-rw-r--r--   kernel/sched/cpufreq_schedutil.c    18
-rw-r--r--   kernel/stop_machine.c                2
-rw-r--r--   kernel/time/hrtimer.c                5
-rw-r--r--   kernel/time/itimer.c                 4
-rw-r--r--   kernel/time/sched_clock.c            4
-rw-r--r--   kernel/time/timer.c                  5
-rw-r--r--   kernel/trace/trace_events_synth.c   36
-rw-r--r--   kernel/tracepoint.c                  2
14 files changed, 49 insertions(+), 59 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index be68ac0d49ad..f8614ef4ff31 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1503,8 +1503,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
 	 */
 	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
-	if (unlikely(should_fail_futex(true)))
+	if (unlikely(should_fail_futex(true))) {
 		ret = -EFAULT;
+		goto out_unlock;
+	}
 
 	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
 	if (!ret && (curval != uval)) {
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3e99dfef8408..b71ad8d9f1c9 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -84,7 +84,7 @@ static inline bool lockdep_enabled(void)
 	if (!debug_locks)
 		return false;
 
-	if (raw_cpu_read(lockdep_recursion))
+	if (this_cpu_read(lockdep_recursion))
 		return false;
 
 	if (current->lockdep_recursion)
@@ -4057,7 +4057,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
 	if (unlikely(in_nmi()))
 		return;
 
-	if (unlikely(__this_cpu_read(lockdep_recursion)))
+	if (unlikely(this_cpu_read(lockdep_recursion)))
 		return;
 
 	if (unlikely(lockdep_hardirqs_enabled())) {
@@ -4126,7 +4126,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 		goto skip_checks;
 	}
 
-	if (unlikely(__this_cpu_read(lockdep_recursion)))
+	if (unlikely(this_cpu_read(lockdep_recursion)))
 		return;
 
 	if (lockdep_hardirqs_enabled()) {
@@ -4396,6 +4396,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	if (unlikely(hlock_class(this)->usage_mask & new_mask))
 		goto unlock;
 
+	if (!hlock_class(this)->usage_mask)
+		debug_atomic_dec(nr_unused_locks);
+
 	hlock_class(this)->usage_mask |= new_mask;
 
 	if (new_bit < LOCK_TRACE_STATES) {
@@ -4403,19 +4406,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 			return 0;
 	}
 
-	switch (new_bit) {
-	case 0 ... LOCK_USED-1:
+	if (new_bit < LOCK_USED) {
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
-		break;
-
-	case LOCK_USED:
-		debug_atomic_dec(nr_unused_locks);
-		break;
-
-	default:
-		break;
 	}
 
 unlock:
diff --git a/kernel/params.c b/kernel/params.c
index 3835fb82c64b..164d79330849 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -530,7 +530,7 @@ struct module_param_attrs
 {
 	unsigned int num;
 	struct attribute_group grp;
-	struct param_attribute attrs[0];
+	struct param_attribute attrs[];
 };
 
 #ifdef CONFIG_SYSFS
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 4b6a54da7e65..45b054b7b5ec 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -146,7 +146,7 @@ int freeze_processes(void)
 	BUG_ON(in_atomic());
 
 	/*
-	 * Now that the whole userspace is frozen we need to disbale
+	 * Now that the whole userspace is frozen we need to disable
 	 * the OOM killer to disallow any further interference with
 	 * killable tasks. There is no guarantee oom victims will
 	 * ever reach a point they go away we have to wait with a timeout.
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index 24a960a89aa8..6b1525685277 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -345,7 +345,7 @@ DESC_ID((id) - DESCS_COUNT(desc_ring))
  */
 struct prb_data_block {
 	unsigned long id;
-	char data[0];
+	char data[];
 };
 
 /*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 06895ef85d69..2a52f42f64b6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -409,7 +409,7 @@ bool rcu_eqs_special_set(int cpu)
  *
 * The caller must have disabled interrupts and must not be idle.
  */
-void rcu_momentary_dyntick_idle(void)
+notrace void rcu_momentary_dyntick_idle(void)
 {
 	int special;
 
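A note on the attrs[0] and data[0] conversions above (params.c, printk_ringbuffer.c, and tracepoint.c further below): these replace zero-length arrays, a GCC extension, with C99 flexible array members. A minimal sketch of the pattern, with hypothetical struct and field names that are not from the kernel:

#include <linux/slab.h>

/* illustrative only; not a kernel struct */
struct msg {
	unsigned int len;
	char data[];	/* must be last; adds nothing to sizeof(struct msg) */
};

static struct msg *msg_alloc(unsigned int payload)
{
	/* one allocation covers the header plus the trailing bytes */
	struct msg *m = kzalloc(sizeof(*m) + payload, GFP_KERNEL);

	if (m)
		m->len = payload;
	return m;
}

With [] instead of [0], the compiler can reject misuse such as taking sizeof on the member or declaring it anywhere but the end of the struct.
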
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index e254745a82cb..d73bccde2720 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -102,8 +102,12 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
 				   unsigned int next_freq)
 {
-	if (sg_policy->next_freq == next_freq)
-		return false;
+	if (!sg_policy->need_freq_update) {
+		if (sg_policy->next_freq == next_freq)
+			return false;
+	} else {
+		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
+	}
 
 	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
@@ -164,7 +168,6 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
 		return sg_policy->next_freq;
 
-	sg_policy->need_freq_update = false;
 	sg_policy->cached_raw_freq = freq;
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
@@ -440,7 +443,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	unsigned long util, max;
 	unsigned int next_f;
-	bool busy;
 	unsigned int cached_freq = sg_policy->cached_raw_freq;
 
 	sugov_iowait_boost(sg_cpu, time, flags);
@@ -451,9 +453,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;
 
-	/* Limits may have changed, don't skip frequency update */
-	busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
-
 	util = sugov_get_util(sg_cpu);
 	max = sg_cpu->max;
 	util = sugov_iowait_apply(sg_cpu, time, util, max);
@@ -462,7 +461,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	 * Do not reduce the frequency if the CPU has not been idle
 	 * recently, as the reduction is likely to be premature then.
 	 */
-	if (busy && next_f < sg_policy->next_freq) {
+	if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
 		next_f = sg_policy->next_freq;
 
 		/* Restore cached freq as next_freq has changed */
@@ -827,9 +826,10 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->next_freq			= 0;
 	sg_policy->work_in_progress		= false;
 	sg_policy->limits_changed		= false;
-	sg_policy->need_freq_update		= false;
 	sg_policy->cached_raw_freq		= 0;
 
+	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
+
 	for_each_cpu(cpu, policy->cpus) {
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
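For context on the schedutil change above: need_freq_update now stays armed when the driver declares CPUFREQ_NEED_UPDATE_LIMITS, so requests reach the driver even when the target frequency is unchanged. A sketch of how a driver might declare the flag; the driver below is entirely hypothetical:

#include <linux/cpufreq.h>

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* must run even when 'index' repeats the previous request,
	 * e.g. because firmware may have moved the limits meanwhile */
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.flags		= CPUFREQ_NEED_UPDATE_LIMITS,
	.target_index	= example_target_index,
	/* .init, .verify and the other mandatory callbacks omitted */
};
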
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 865bb0228ab6..890b79cf0e7c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -178,7 +178,7 @@ static void ack_state(struct multi_stop_data *msdata)
 		set_state(msdata, msdata->state + 1);
 }
 
-void __weak stop_machine_yield(const struct cpumask *cpumask)
+notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
 {
 	cpu_relax();
 }
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 3624b9b5835d..387b4bef7dd1 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -425,11 +425,6 @@ static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
 	debug_object_deactivate(timer, &hrtimer_debug_descr);
 }
 
-static inline void debug_hrtimer_free(struct hrtimer *timer)
-{
-	debug_object_free(timer, &hrtimer_debug_descr);
-}
-
 static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 			   enum hrtimer_mode mode);
 
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index ca4e6d57d68b..00629e658ca1 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -172,10 +172,6 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 	u64 oval, nval, ointerval, ninterval;
 	struct cpu_itimer *it = &tsk->signal->it[clock_id];
 
-	/*
-	 * Use the to_ktime conversion because that clamps the maximum
-	 * value to KTIME_MAX and avoid multiplication overflows.
-	 */
 	nval = timespec64_to_ns(&value->it_value);
 	ninterval = timespec64_to_ns(&value->it_interval);
 
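The stale comment deleted from set_cpu_itimer() above referred to a to_ktime() conversion the code no longer performs; it became redundant because timespec64_to_ns() itself now clamps the result. Roughly, as a sketch of the helper in include/linux/time64.h (the exact guards vary by kernel version):

static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
	/* saturate rather than let the multiplication overflow */
	if (ts->tv_sec >= KTIME_SEC_MAX)
		return KTIME_MAX;

	return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
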
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0642013dace4..b1b9b12899f5 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -68,13 +68,13 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 	return (cyc * mult) >> shift;
 }
 
-struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
+notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 {
 	*seq = raw_read_seqcount_latch(&cd.seq);
 	return cd.read_data + (*seq & 1);
 }
 
-int sched_clock_read_retry(unsigned int seq)
+notrace int sched_clock_read_retry(unsigned int seq)
 {
 	return read_seqcount_latch_retry(&cd.seq, seq);
 }
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index de37e33a868d..c3ad64fb9d8b 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -732,11 +732,6 @@ static inline void debug_timer_deactivate(struct timer_list *timer)
 	debug_object_deactivate(timer, &timer_debug_descr);
 }
 
-static inline void debug_timer_free(struct timer_list *timer)
-{
-	debug_object_free(timer, &timer_debug_descr);
-}
-
 static inline void debug_timer_assert_init(struct timer_list *timer)
 {
 	debug_object_assert_init(timer, &timer_debug_descr);
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 3212e2c653b3..84b7cab55291 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -585,6 +585,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
 	struct synth_field *field;
 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
 	int len, ret = 0;
+	struct seq_buf s;
 	ssize_t size;
 
 	if (field_type[0] == ';')
@@ -630,13 +631,9 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
 		field_type++;
 	len = strlen(field_type) + 1;
 
-	if (array) {
-		int l = strlen(array);
+	if (array)
+		len += strlen(array);
 
-		if (l && array[l - 1] == ';')
-			l--;
-		len += l;
-	}
 	if (prefix)
 		len += strlen(prefix);
 
@@ -645,14 +642,18 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
 		ret = -ENOMEM;
 		goto free;
 	}
+	seq_buf_init(&s, field->type, len);
 	if (prefix)
-		strcat(field->type, prefix);
-	strcat(field->type, field_type);
+		seq_buf_puts(&s, prefix);
+	seq_buf_puts(&s, field_type);
 	if (array) {
-		strcat(field->type, array);
-		if (field->type[len - 1] == ';')
-			field->type[len - 1] = '\0';
+		seq_buf_puts(&s, array);
+		if (s.buffer[s.len - 1] == ';')
+			s.len--;
 	}
+	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
+		goto free;
+	s.buffer[s.len] = '\0';
 
 	size = synth_field_size(field->type);
 	if (size < 0) {
@@ -663,14 +664,21 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
 	if (synth_field_is_string(field->type)) {
 		char *type;
 
-		type = kzalloc(sizeof("__data_loc ") + strlen(field->type) + 1, GFP_KERNEL);
+		len = sizeof("__data_loc ") + strlen(field->type) + 1;
+		type = kzalloc(len, GFP_KERNEL);
 		if (!type) {
 			ret = -ENOMEM;
 			goto free;
 		}
-		strcat(type, "__data_loc ");
-		strcat(type, field->type);
+		seq_buf_init(&s, type, len);
+		seq_buf_puts(&s, "__data_loc ");
+		seq_buf_puts(&s, field->type);
+
+		if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
+			goto free;
+		s.buffer[s.len] = '\0';
+
 		kfree(field->type);
 		field->type = type;
 
		field->is_dynamic = true;
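The strcat() to seq_buf conversion above replaces open-coded string building, which can overrun the allocation when the length accounting is wrong, with a buffer that tracks its own bounds. A standalone sketch of the same pattern; the strings here are illustrative:

#include <linux/seq_buf.h>

static void build_type_string(void)
{
	char buf[32];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));
	seq_buf_puts(&s, "__data_loc ");
	seq_buf_puts(&s, "char[]");

	/* writes past the end are dropped instead of corrupting memory;
	 * NUL-terminate only if at least one byte of room remains */
	if (seq_buf_buffer_left(&s))
		buf[seq_buf_used(&s)] = '\0';
}
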
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 26efd22f0633..3f659f855074 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -50,7 +50,7 @@ static bool ok_to_free_tracepoints;
  */
 struct tp_probes {
 	struct rcu_head rcu;
-	struct tracepoint_func probes[0];
+	struct tracepoint_func probes[];
 };
 
 static inline void *allocate_probes(int count)
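The probes[] conversion above pairs naturally with struct_size() at the allocation site; allocate_probes() in this file sizes the struct plus its trailing array along these lines (a sketch, not the verbatim kernel code):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>

struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};

static void *allocate_probes_sketch(int count)
{
	struct tp_probes *p;

	/* struct_size() computes sizeof(*p) + count * sizeof(p->probes[0])
	 * and saturates on overflow so the kmalloc() simply fails */
	p = kmalloc(struct_size(p, probes, count), GFP_KERNEL);

	return p == NULL ? NULL : p->probes;
}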