author    Joe Perches <joe@perches.com>  2020-10-21 19:36:07 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-10-25 14:51:49 -0700
commit    33def8498fdde180023444b08e12b72a9efed41d (patch)
tree      1efe1dda24a8c8865fbc7a538a749d30a3532d92 /kernel
parent    986b9eacb25910865b50e5f298aa8e2df7642f1b (diff)
treewide: Convert macro and uses of __section(foo) to __section("foo")
Use a more generic form for __section that requires quotes to avoid
complications with clang and gcc differences.

Remove the quote operator # from compiler_attributes.h __section macro.

Convert all unquoted __section(foo) uses to quoted __section("foo").
Also convert __attribute__((section("foo"))) uses to __section("foo")
even if the __attribute__ has multiple list entry forms.

Conversion done using the script at:

    https://lore.kernel.org/lkml/75393e5ddc272dc7403de74d645e6c6e0f4e70eb.camel@perches.com/2-convert_section.pl

Signed-off-by: Joe Perches <joe@perches.com>
Reviewed-by: Nick Desaulniers <ndesaulniers@gooogle.com>
Reviewed-by: Miguel Ojeda <ojeda@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
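For context, the enabling change is the __section definition in include/linux/compiler_attributes.h, which is outside the kernel/-only diffstat below. What follows is a minimal, compilable sketch of the before/after behavior; __section_old and __section_new are illustrative names used only in this sketch (the kernel macro is simply __section):

    #include <stdbool.h>

    /* Old form: the '#' operator stringifies the bare argument,
     * so callers write an unquoted token sequence. */
    #define __section_old(S)       __attribute__((__section__(#S)))

    /* New form: callers pass a string literal directly, which
     * gcc and clang handle identically. */
    #define __section_new(section) __attribute__((__section__(section)))

    /* Both place their variable in a section named ".data.once": */
    static bool warned_a __section_old(.data.once);
    static bool warned_b __section_new(".data.once");

One practical consequence of the old '#' form: an already-quoted use such as __section_old(".rodata") would stringify to the doubly quoted "\".rodata\"", so quoted and unquoted call sites could not coexist; requiring a string literal everywhere removes that ambiguity.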
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kallsyms.c            | 4 ++--
-rw-r--r--  kernel/sched/deadline.c      | 2 +-
-rw-r--r--  kernel/sched/fair.c          | 2 +-
-rw-r--r--  kernel/sched/idle.c          | 2 +-
-rw-r--r--  kernel/sched/rt.c            | 2 +-
-rw-r--r--  kernel/sched/stop_task.c     | 2 +-
-rw-r--r--  kernel/trace/trace.h         | 2 +-
-rw-r--r--  kernel/trace/trace_export.c  | 2 +-
8 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 4fb15fa96734..fe9de067771c 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -40,10 +40,10 @@ extern const u8 kallsyms_names[] __weak;
* has one (eg: FRV).
*/
extern const unsigned int kallsyms_num_syms
-__attribute__((weak, section(".rodata")));
+__section(".rodata") __attribute__((weak));
extern const unsigned long kallsyms_relative_base
-__attribute__((weak, section(".rodata")));
+__section(".rodata") __attribute__((weak));

extern const char kallsyms_token_table[] __weak;
extern const u16 kallsyms_token_index[] __weak;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 6d93f4518734..f232305dcefe 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2504,7 +2504,7 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
}

const struct sched_class dl_sched_class
- __attribute__((section("__dl_sched_class"))) = {
+ __section("__dl_sched_class") = {
.enqueue_task = enqueue_task_dl,
.dequeue_task = dequeue_task_dl,
.yield_task = yield_task_dl,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e17012be4d14..290f9e38378c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11159,7 +11159,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
* All the scheduling class methods:
*/
const struct sched_class fair_sched_class
- __attribute__((section("__fair_sched_class"))) = {
+ __section("__fair_sched_class") = {
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index f324dc36fc43..24d0ee26377d 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -458,7 +458,7 @@ static void update_curr_idle(struct rq *rq)
* Simple, special scheduling class for the per-CPU idle tasks:
*/
const struct sched_class idle_sched_class
- __attribute__((section("__idle_sched_class"))) = {
+ __section("__idle_sched_class") = {
/* no enqueue/yield_task for idle tasks */

/* dequeue is not valid, we print a debug message there: */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f215eea6a966..49ec096a8aa1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2430,7 +2430,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
}

const struct sched_class rt_sched_class
- __attribute__((section("__rt_sched_class"))) = {
+ __section("__rt_sched_class") = {
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 394bc8126a1e..ceb5b6b12561 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -110,7 +110,7 @@ static void update_curr_stop(struct rq *rq)
* Simple, special scheduling class for the per-CPU stop tasks:
*/
const struct sched_class stop_sched_class
- __attribute__((section("__stop_sched_class"))) = {
+ __section("__stop_sched_class") = {
.enqueue_task = enqueue_task_stop,
.dequeue_task = dequeue_task_stop,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 34e0c4d5a6e7..f3f5e77123ad 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -99,7 +99,7 @@ enum trace_type {

/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...) ({ \
- static bool __section(.data.once) __warned; \
+ static bool __section(".data.once") __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 70d3d0a09053..90f81d33fa3f 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -176,7 +176,7 @@ struct trace_event_call __used event_##call = { \
.flags = TRACE_EVENT_FL_IGNORE_ENABLE, \
}; \
static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
+__section("_ftrace_events") *__event_##call = &event_##call;

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \