author     Linus Torvalds <torvalds@linux-foundation.org>  2019-09-17 12:35:15 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-09-17 12:35:15 -0700
commit     7f2444d38f6bbfa12bc15e2533d8f9daa85ca02b
tree       6506ec79036890edfd9797b001391a350b5ac10f /drivers/clocksource/hyperv_timer.c
parent     c5f12fdb8bd873aa3ffdb79512e6bdac92b257b0
parent     77b4b5420422fc037d00b8f3f0e89b2262e4ae29
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core timer updates from Thomas Gleixner:
"Timers and timekeeping updates:
- A large overhaul of the posix CPU timer code in preparation for
moving the CPU timer expiry out into task work so it can be
properly accounted to the task/process.
An update to the bogus permission checks will come later during the
merge window, as feedback was not complete before heading off for
travel.
- Switch the timerqueue code to use cached rbtrees and get rid of the
home-brewed caching of the leftmost node (see the rb_root_cached
sketch after this message)
- Consolidate hrtimer_init() + hrtimer_init_sleeper() calls into a
single function (see the sleeper sketch after this message)
- Allow hrtimers to be marked as forced to expire in hard interrupt
context even when PREEMPT_RT is enabled, and mark the affected
timers accordingly (see the HRTIMER_MODE_HARD sketch after this
message).
- Implement a mechanism for hrtimers and the timer wheel to protect
RT against priority inversion and livelock issues when a (hr)timer
which should be canceled is currently executing its callback.
Instead of spinning indefinitely, the task trying to cancel the
timer blocks on a per-CPU expiry lock which is held and released
by the (hr)timer expiry code (see the cancellation sketch after
this message).
- Enable the Hyper-V TSC page based sched_clock for Hyper-V guests
resulting in faster access to timekeeping functions.
- Updates to various clocksource/clockevent drivers and their device
tree bindings.
- The usual small improvements all over the place"
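To make the bullet points above concrete, a few sketches follow; all are
illustrative, not verbatim kernel code. First, the timerqueue switch to
cached rbtrees. This is a minimal sketch of insertion on top of the
rb_root_cached API (rb_link_node(), rb_insert_color_cached(),
rb_first_cached()); the function name timerqueue_add_sketch and the body
are mine, not a copy of lib/timerqueue.c:

#include <linux/rbtree.h>
#include <linux/timerqueue.h>

/*
 * Sketch: insert a node and let the rbtree code track the leftmost
 * (earliest-expiring) entry instead of caching it by hand. Returns
 * true if the new node became the head of the queue.
 */
static bool timerqueue_add_sketch(struct timerqueue_head *head,
				  struct timerqueue_node *node)
{
	struct rb_node **p = &head->rb_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct timerqueue_node *ptr;

		parent = *p;
		ptr = rb_entry(parent, struct timerqueue_node, node);
		if (node->expires < ptr->expires) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* not the new minimum */
		}
	}
	rb_link_node(&node->node, parent, p);
	/* The cached variant maintains rb_leftmost for us */
	rb_insert_color_cached(&node->node, &head->rb_root, leftmost);
	return leftmost;
}

Fetching the next expiring timer then reduces to
rb_first_cached(&head->rb_root), with no hand-rolled "next" pointer to
keep in sync.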
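For the hrtimer_init()/hrtimer_init_sleeper() consolidation, a hedged
before/after sketch; sleep_until_sketch() is a hypothetical caller and
the start/schedule steps are elided:

#include <linux/hrtimer.h>

static void sleep_until_sketch(ktime_t expires)
{
	struct hrtimer_sleeper t;

	/*
	 * Old pattern (two calls that every caller had to pair up):
	 *   hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	 *   hrtimer_init_sleeper(&t, current);
	 *
	 * Consolidated pattern: one call initializes the hrtimer and
	 * the sleeper's wakeup bookkeeping together.
	 */
	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_set_expires(&t.timer, expires);
	/* ... start the timer, schedule(), then tear down ... */
	destroy_hrtimer_on_stack(&t.timer);
}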
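The hard-interrupt expiry mark is a mode flag at init time. A small
sketch modeled on what the series does for the tick's sched_timer
(my_tick and my_tick_fn are hypothetical names):

#include <linux/hrtimer.h>

static enum hrtimer_restart my_tick_fn(struct hrtimer *t)
{
	/* Runs in hard interrupt context, even on PREEMPT_RT */
	return HRTIMER_NORESTART;
}

static struct hrtimer my_tick;

static void my_tick_setup(void)
{
	/*
	 * The _HARD suffix opts this timer out of softirq expiry on RT;
	 * only timers that are safe to run in hardirq context get it.
	 */
	hrtimer_init(&my_tick, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	my_tick.function = my_tick_fn;
}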
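Finally, the cancellation side of the expiry-lock mechanism, paraphrased
from kernel/time/hrtimer.c after this series (the timer wheel gains an
equivalent path):

/*
 * If the callback is running right now, do not spin on it: block on
 * the per-CPU expiry lock, which the expiry code holds across the
 * callback, then retry. On PREEMPT_RT this prevents a high-priority
 * canceler from livelocking against the context running the callback.
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	int ret;

	do {
		ret = hrtimer_try_to_cancel(timer);

		if (ret < 0)	/* callback is executing */
			hrtimer_cancel_wait_running(timer);
	} while (ret < 0);
	return ret;
}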
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (101 commits)
posix-cpu-timers: Fix permission check regression
posix-cpu-timers: Always clear head pointer on dequeue
hrtimer: Add a missing bracket and hide `migration_base' on !SMP
posix-cpu-timers: Make expiry_active check actually work correctly
posix-timers: Unbreak CONFIG_POSIX_TIMERS=n build
tick: Mark sched_timer to expire in hard interrupt context
hrtimer: Add kernel doc annotation for HRTIMER_MODE_HARD
x86/hyperv: Hide pv_ops access for CONFIG_PARAVIRT=n
posix-cpu-timers: Utilize timerqueue for storage
posix-cpu-timers: Move state tracking to struct posix_cputimers
posix-cpu-timers: Deduplicate rlimit handling
posix-cpu-timers: Remove pointless comparisons
posix-cpu-timers: Get rid of 64bit divisions
posix-cpu-timers: Consolidate timer expiry further
posix-cpu-timers: Get rid of zero checks
rlimit: Rewrite non-sensical RLIMIT_CPU comment
posix-cpu-timers: Respect INFINITY for hard RTTIME limit
posix-cpu-timers: Switch thread group sampling to array
posix-cpu-timers: Restructure expiry array
posix-cpu-timers: Remove cputime_expires
...
Diffstat (limited to 'drivers/clocksource/hyperv_timer.c')
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 45
1 file changed, 16 insertions, 29 deletions
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index ba2c79e6a0ee..2317d4e3daaf 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -22,6 +22,7 @@
 #include <asm/mshyperv.h>
 
 static struct clock_event_device __percpu *hv_clock_event;
+static u64 hv_sched_clock_offset __ro_after_init;
 
 /*
  * If false, we're using the old mechanism for stimer0 interrupts
@@ -212,19 +213,17 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
 struct clocksource *hyperv_cs;
 EXPORT_SYMBOL_GPL(hyperv_cs);
 
-#ifdef CONFIG_HYPERV_TSCPAGE
-
-static struct ms_hyperv_tsc_page *tsc_pg;
+static struct ms_hyperv_tsc_page tsc_pg __aligned(PAGE_SIZE);
 
 struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 {
-	return tsc_pg;
+	return &tsc_pg;
 }
 EXPORT_SYMBOL_GPL(hv_get_tsc_page);
 
-static u64 notrace read_hv_sched_clock_tsc(void)
+static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
 {
-	u64 current_tick = hv_read_tsc_page(tsc_pg);
+	u64 current_tick = hv_read_tsc_page(&tsc_pg);
 
 	if (current_tick == U64_MAX)
 		hv_get_time_ref_count(current_tick);
@@ -232,9 +231,9 @@ static u64 notrace read_hv_sched_clock_tsc(void)
 	return current_tick;
 }
 
-static u64 read_hv_clock_tsc(struct clocksource *arg)
+static u64 read_hv_sched_clock_tsc(void)
 {
-	return read_hv_sched_clock_tsc();
+	return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
 }
 
 static struct clocksource hyperv_cs_tsc = {
@@ -244,9 +243,8 @@ static struct clocksource hyperv_cs_tsc = {
 	.mask = CLOCKSOURCE_MASK(64),
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
-#endif
 
-static u64 notrace read_hv_sched_clock_msr(void)
+static u64 notrace read_hv_clock_msr(struct clocksource *arg)
 {
 	u64 current_tick;
 
 	/*
@@ -258,9 +256,9 @@ static u64 notrace read_hv_sched_clock_msr(void)
 	return current_tick;
 }
 
-static u64 read_hv_clock_msr(struct clocksource *arg)
+static u64 read_hv_sched_clock_msr(void)
 {
-	return read_hv_sched_clock_msr();
+	return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
 }
 
 static struct clocksource hyperv_cs_msr = {
@@ -271,7 +269,6 @@ static struct clocksource hyperv_cs_msr = {
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#ifdef CONFIG_HYPERV_TSCPAGE
 static bool __init hv_init_tsc_clocksource(void)
 {
 	u64 tsc_msr;
@@ -280,12 +277,8 @@ static bool __init hv_init_tsc_clocksource(void)
 	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
 		return false;
 
-	tsc_pg = vmalloc(PAGE_SIZE);
-	if (!tsc_pg)
-		return false;
-
 	hyperv_cs = &hyperv_cs_tsc;
-	phys_addr = page_to_phys(vmalloc_to_page(tsc_pg));
+	phys_addr = virt_to_phys(&tsc_pg);
 
 	/*
 	 * The Hyper-V TLFS specifies to preserve the value of reserved
@@ -302,17 +295,11 @@ static bool __init hv_init_tsc_clocksource(void)
 	hv_set_clocksource_vdso(hyperv_cs_tsc);
 	clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
 
-	/* sched_clock_register is needed on ARM64 but is a no-op on x86 */
-	sched_clock_register(read_hv_sched_clock_tsc, 64, HV_CLOCK_HZ);
+	hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+	hv_setup_sched_clock(read_hv_sched_clock_tsc);
+
 	return true;
 }
-#else
-static bool __init hv_init_tsc_clocksource(void)
-{
-	return false;
-}
-#endif
 
 void __init hv_init_clocksource(void)
 {
@@ -333,7 +320,7 @@ void __init hv_init_clocksource(void)
 	hyperv_cs = &hyperv_cs_msr;
 	clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
 
-	/* sched_clock_register is needed on ARM64 but is a no-op on x86 */
-	sched_clock_register(read_hv_sched_clock_msr, 64, HV_CLOCK_HZ);
+	hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+	hv_setup_sched_clock(read_hv_sched_clock_msr);
 }
 EXPORT_SYMBOL_GPL(hv_init_clocksource);
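Two things are going on in the sched_clock hunks above. First, the
reader pair is inverted: the clocksource ->read() callback
(read_hv_clock_tsc()/read_hv_clock_msr()) now does the raw counter
access, and the sched_clock function is derived from it by subtracting
hv_sched_clock_offset, the counter value captured at registration, so
sched_clock starts near zero rather than at the partition's boot epoch.
(The Hyper-V reference counter ticks at 10 MHz, hence the
NSEC_PER_SEC/100 registration frequency.) Second, sched_clock_register()
is replaced by hv_setup_sched_clock(); per the "x86/hyperv: Hide pv_ops
access for CONFIG_PARAVIRT=n" commit in the shortlog, the x86 side looks
roughly like the sketch below (not verbatim):

/* arch/x86 flavor of the hook called at the end of the diff above */
static inline void hv_setup_sched_clock(void *sched_clock)
{
#ifdef CONFIG_PARAVIRT
	/* Route sched_clock() straight to the Hyper-V reader */
	pv_ops.time.sched_clock = sched_clock;
#endif
}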