Diffstat (limited to 'include')
-rw-r--r--  include/clocksource/pxa.h              18
-rw-r--r--  include/linux/clocksource.h             2
-rw-r--r--  include/linux/hrtimer.h                16
-rw-r--r--  include/linux/iio/iio.h                 9
-rw-r--r--  include/linux/io.h                      2
-rw-r--r--  include/linux/ktime.h                 228
-rw-r--r--  include/linux/mlx5/driver.h             4
-rw-r--r--  include/linux/of_address.h             11
-rw-r--r--  include/linux/sched.h                   8
-rw-r--r--  include/linux/seqlock.h                27
-rw-r--r--  include/linux/sh_timer.h                5
-rw-r--r--  include/linux/time.h                   65
-rw-r--r--  include/linux/time64.h                190
-rw-r--r--  include/linux/timekeeper_internal.h   150
-rw-r--r--  include/linux/timekeeping.h           209
-rw-r--r--  include/linux/timerfd.h                 5

16 files changed, 601 insertions(+), 348 deletions(-)
diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h
new file mode 100644
index 0000000..1efbe5a
--- /dev/null
+++ b/include/clocksource/pxa.h
@@ -0,0 +1,18 @@
+/*
+ * PXA clocksource, clockevents, and OST interrupt handlers.
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#ifndef _CLOCKSOURCE_PXA_H
+#define _CLOCKSOURCE_PXA_H
+
+extern void pxa_timer_nodt_init(int irq, void __iomem *base,
+ unsigned long clock_tick_rate);
+
+#endif
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a16b497..653f0e2 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -162,7 +162,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
* @resume: resume function for the clocksource, if necessary
- * @cycle_last: most recent cycle counter value seen by ::read()
* @owner: module reference, must be set by clocksource in modules
*/
struct clocksource {
@@ -171,7 +170,6 @@ struct clocksource {
* clocksource itself is cacheline aligned.
*/
cycle_t (*read)(struct clocksource *cs);
- cycle_t cycle_last;
cycle_t mask;
u32 mult;
u32 shift;
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index e7a8d3f..a036d05 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -165,6 +165,7 @@ enum hrtimer_base_type {
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
+ * @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set: Indicates that clock was set from irq context.
* @expires_next: absolute time of the next event which was scheduled
@@ -179,6 +180,7 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
+ unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set;
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -324,14 +326,6 @@ static inline void timerfd_clock_was_set(void) { }
#endif
extern void hrtimers_resume(void);
-extern ktime_t ktime_get(void);
-extern ktime_t ktime_get_real(void);
-extern ktime_t ktime_get_boottime(void);
-extern ktime_t ktime_get_monotonic_offset(void);
-extern ktime_t ktime_get_clocktai(void);
-extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
- ktime_t *offs_tai);
-
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
@@ -452,12 +446,6 @@ extern void hrtimer_run_pending(void);
/* Bootup initialization: */
extern void __init hrtimers_init(void);
-#if BITS_PER_LONG < 64
-extern u64 ktime_divns(const ktime_t kt, s64 div);
-#else /* BITS_PER_LONG < 64 */
-# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
-#endif
-
/* Show pending timers: */
extern void sysrq_timer_list_show(void);
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index ccde917..15dc6bc 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -277,14 +277,7 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
**/
static inline s64 iio_get_time_ns(void)
{
- struct timespec ts;
- /*
- * calls getnstimeofday.
- * If hrtimers then up to ns accurate, if not microsecond.
- */
- ktime_get_real_ts(&ts);
-
- return timespec_to_ns(&ts);
+ return ktime_get_real_ns();
}
/* Device operating modes */
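
With the hunk above, iio_get_time_ns() reduces to ktime_get_real_ns(). A hedged sketch of where drivers typically consume this helper; the wrapper name and buffer argument are illustrative, not part of this patch:

    #include <linux/iio/iio.h>
    #include <linux/iio/buffer.h>

    /* Stamp a captured sample with wall-clock nanoseconds and queue it. */
    static void example_push_sample(struct iio_dev *indio_dev, void *sample_buf)
    {
            iio_push_to_buffers_with_timestamp(indio_dev, sample_buf,
                                               iio_get_time_ns());
    }
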
diff --git a/include/linux/io.h b/include/linux/io.h
index b76e6e5..d5fc9b8 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -58,6 +58,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
}
#endif
+#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
+
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
unsigned long size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index de9e46e..c9d645a 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -27,43 +27,19 @@
/*
* ktime_t:
*
- * On 64-bit CPUs a single 64-bit variable is used to store the hrtimers
+ * A single 64-bit variable is used to store the hrtimers
* internal representation of time values in scalar nanoseconds. The
* design plays out best on 64-bit CPUs, where most conversions are
* NOPs and most arithmetic ktime_t operations are plain arithmetic
* operations.
*
- * On 32-bit CPUs an optimized representation of the timespec structure
- * is used to avoid expensive conversions from and to timespecs. The
- * endian-aware order of the tv struct members is chosen to allow
- * mathematical operations on the tv64 member of the union too, which
- * for certain operations produces better code.
- *
- * For architectures with efficient support for 64/32-bit conversions the
- * plain scalar nanosecond based representation can be selected by the
- * config switch CONFIG_KTIME_SCALAR.
*/
union ktime {
s64 tv64;
-#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
- struct {
-# ifdef __BIG_ENDIAN
- s32 sec, nsec;
-# else
- s32 nsec, sec;
-# endif
- } tv;
-#endif
};
typedef union ktime ktime_t; /* Kill this */
-/*
- * ktime_t definitions when using the 64-bit scalar representation:
- */
-
-#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
-
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
* @secs: seconds to set
@@ -71,13 +47,12 @@ typedef union ktime ktime_t; /* Kill this */
*
* Return: The ktime_t representation of the value.
*/
-static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
+static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
-#if (BITS_PER_LONG == 64)
if (unlikely(secs >= KTIME_SEC_MAX))
return (ktime_t){ .tv64 = KTIME_MAX };
-#endif
- return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs };
+
+ return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
}
/* Subtract two ktime_t variables. rem = lhs -rhs: */
@@ -108,6 +83,12 @@ static inline ktime_t timespec_to_ktime(struct timespec ts)
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
+/* convert a timespec64 to ktime_t format: */
+static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
+{
+ return ktime_set(ts.tv_sec, ts.tv_nsec);
+}
+
/* convert a timeval to ktime_t format: */
static inline ktime_t timeval_to_ktime(struct timeval tv)
{
@@ -117,159 +98,15 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
+/* Map the ktime_t to timespec conversion to ns_to_timespec function */
+#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64)
+
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
#define ktime_to_ns(kt) ((kt).tv64)
-#else /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
-
-/*
- * Helper macros/inlines to get the ktime_t math right in the timespec
- * representation. The macros are sometimes ugly - their actual use is
- * pretty okay-ish, given the circumstances. We do all this for
- * performance reasons. The pure scalar nsec_t based code was nice and
- * simple, but created too many 64-bit / 32-bit conversions and divisions.
- *
- * Be especially aware that negative values are represented in a way
- * that the tv.sec field is negative and the tv.nsec field is greater
- * or equal to zero but less than nanoseconds per second. This is the
- * same representation which is used by timespecs.
- *
- * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC
- */
-
-/* Set a ktime_t variable to a value in sec/nsec representation: */
-static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
-{
- return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
-}
-
-/**
- * ktime_sub - subtract two ktime_t variables
- * @lhs: minuend
- * @rhs: subtrahend
- *
- * Return: The remainder of the subtraction.
- */
-static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
-{
- ktime_t res;
-
- res.tv64 = lhs.tv64 - rhs.tv64;
- if (res.tv.nsec < 0)
- res.tv.nsec += NSEC_PER_SEC;
-
- return res;
-}
-
-/**
- * ktime_add - add two ktime_t variables
- * @add1: addend1
- * @add2: addend2
- *
- * Return: The sum of @add1 and @add2.
- */
-static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
-{
- ktime_t res;
-
- res.tv64 = add1.tv64 + add2.tv64;
- /*
- * performance trick: the (u32) -NSEC gives 0x00000000Fxxxxxxx
- * so we subtract NSEC_PER_SEC and add 1 to the upper 32 bit.
- *
- * it's equivalent to:
- * tv.nsec -= NSEC_PER_SEC
- * tv.sec ++;
- */
- if (res.tv.nsec >= NSEC_PER_SEC)
- res.tv64 += (u32)-NSEC_PER_SEC;
-
- return res;
-}
-
-/**
- * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
- * @kt: addend
- * @nsec: the scalar nsec value to add
- *
- * Return: The sum of @kt and @nsec in ktime_t format.
- */
-extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
-
-/**
- * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
- * @kt: minuend
- * @nsec: the scalar nsec value to subtract
- *
- * Return: The subtraction of @nsec from @kt in ktime_t format.
- */
-extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
-
-/**
- * timespec_to_ktime - convert a timespec to ktime_t format
- * @ts: the timespec variable to convert
- *
- * Return: A ktime_t variable with the converted timespec value.
- */
-static inline ktime_t timespec_to_ktime(const struct timespec ts)
-{
- return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
- .nsec = (s32)ts.tv_nsec } };
-}
-
-/**
- * timeval_to_ktime - convert a timeval to ktime_t format
- * @tv: the timeval variable to convert
- *
- * Return: A ktime_t variable with the converted timeval value.
- */
-static inline ktime_t timeval_to_ktime(const struct timeval tv)
-{
- return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
- .nsec = (s32)(tv.tv_usec *
- NSEC_PER_USEC) } };
-}
-
-/**
- * ktime_to_timespec - convert a ktime_t variable to timespec format
- * @kt: the ktime_t variable to convert
- *
- * Return: The timespec representation of the ktime value.
- */
-static inline struct timespec ktime_to_timespec(const ktime_t kt)
-{
- return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
- .tv_nsec = (long) kt.tv.nsec };
-}
-
-/**
- * ktime_to_timeval - convert a ktime_t variable to timeval format
- * @kt: the ktime_t variable to convert
- *
- * Return: The timeval representation of the ktime value.
- */
-static inline struct timeval ktime_to_timeval(const ktime_t kt)
-{
- return (struct timeval) {
- .tv_sec = (time_t) kt.tv.sec,
- .tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) };
-}
-
-/**
- * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
- * @kt: the ktime_t variable to convert
- *
- * Return: The scalar nanoseconds representation of @kt.
- */
-static inline s64 ktime_to_ns(const ktime_t kt)
-{
- return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
-}
-
-#endif /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
/**
* ktime_equal - Compares two ktime_t variables to see if they are equal
@@ -328,16 +165,20 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
return ktime_compare(cmp1, cmp2) < 0;
}
+#if BITS_PER_LONG < 64
+extern u64 ktime_divns(const ktime_t kt, s64 div);
+#else /* BITS_PER_LONG < 64 */
+# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
+#endif
+
static inline s64 ktime_to_us(const ktime_t kt)
{
- struct timeval tv = ktime_to_timeval(kt);
- return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
+ return ktime_divns(kt, NSEC_PER_USEC);
}
static inline s64 ktime_to_ms(const ktime_t kt)
{
- struct timeval tv = ktime_to_timeval(kt);
- return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
+ return ktime_divns(kt, NSEC_PER_MSEC);
}
static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
@@ -381,6 +222,25 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
}
}
+/**
+ * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
+ * format only if the variable contains data
+ * @kt: the ktime_t variable to convert
+ * @ts: the timespec variable to store the result in
+ *
+ * Return: %true if there was a successful conversion, %false if kt was 0.
+ */
+static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
+ struct timespec64 *ts)
+{
+ if (kt.tv64) {
+ *ts = ktime_to_timespec64(kt);
+ return true;
+ } else {
+ return false;
+ }
+}
+
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -390,12 +250,6 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
#define LOW_RES_NSEC TICK_NSEC
#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC }
-/* Get the monotonic time in timespec format: */
-extern void ktime_get_ts(struct timespec *ts);
-
-/* Get the real (wall-) time in timespec format: */
-#define ktime_get_real_ts(ts) getnstimeofday(ts)
-
static inline ktime_t ns_to_ktime(u64 ns)
{
static const ktime_t ktime_zero = { .tv64 = 0 };
@@ -410,4 +264,6 @@ static inline ktime_t ms_to_ktime(u64 ms)
return ktime_add_ms(ktime_zero, ms);
}
+# include <linux/timekeeping.h>
+
#endif
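
With the union reduced to the scalar tv64 member on all architectures, ktime arithmetic is plain 64-bit math everywhere. A minimal, hedged sketch of the kind of code this simplifies (the function is illustrative):

    #include <linux/ktime.h>
    #include <linux/printk.h>

    static void example_measure(void)
    {
            ktime_t start, delta;

            start = ktime_get();
            /* ... the work being timed ... */
            delta = ktime_sub(ktime_get(), start);

            /* ktime_to_us() is now a 64-bit divide via ktime_divns() */
            pr_info("section took %lld us\n", ktime_to_us(delta));
    }
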
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2bce4aa..52d631c 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -604,8 +604,8 @@ struct mlx5_cmd_work_ent {
int page_queue;
u8 status;
u8 token;
- struct timespec ts1;
- struct timespec ts2;
+ u64 ts1;
+ u64 ts2;
u16 op;
};
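
A hedged sketch of what the plain u64 stamps buy (the helper is illustrative, not the driver's code): with ts1/ts2 filled from a nanosecond clock such as ktime_get_ns(), command latency accounting is a single subtraction instead of timespec math.

    #include <linux/mlx5/driver.h>

    static u64 example_cmd_latency_ns(const struct mlx5_cmd_work_ent *ent)
    {
            return ent->ts2 - ent->ts1;     /* nanoseconds, no normalization */
    }
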
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index c13b878..fb7b722 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -109,7 +109,12 @@ static inline bool of_dma_is_coherent(struct device_node *np)
extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
void __iomem *of_iomap(struct device_node *node, int index);
+void __iomem *of_io_request_and_map(struct device_node *device,
+ int index, char *name);
#else
+
+#include <linux/io.h>
+
static inline int of_address_to_resource(struct device_node *dev, int index,
struct resource *r)
{
@@ -120,6 +125,12 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
{
return NULL;
}
+
+static inline void __iomem *of_io_request_and_map(struct device_node *device,
+ int index, char *name)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
#endif
#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
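
A hedged probe-path sketch for the new helper (the device-node argument, index and "example-dev" name are illustrative): of_io_request_and_map() requests the memory region and maps it in one call, reporting failure through an error-encoded pointer, which is also what the !CONFIG_OF_ADDRESS stub above returns via IOMEM_ERR_PTR().

    #include <linux/of_address.h>
    #include <linux/err.h>

    static int example_probe_map(struct device_node *np, void __iomem **basep)
    {
            void __iomem *base = of_io_request_and_map(np, 0, "example-dev");

            if (IS_ERR(base))
                    return PTR_ERR(base);

            *basep = base;
            return 0;
    }
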
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 42cac4d..66124d6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -813,7 +813,7 @@ struct task_delay_info {
* associated with the operation is added to XXX_delay.
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
- struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
+ u64 blkio_start; /* Shared by blkio, swapin */
u64 blkio_delay; /* wait for sync block io completion */
u64 swapin_delay; /* wait for swapin block io completion */
u32 blkio_count; /* total count of the number of sync block */
@@ -821,7 +821,7 @@ struct task_delay_info {
u32 swapin_count; /* total count of the number of swapin block */
/* io operations performed */
- struct timespec freepages_start, freepages_end;
+ u64 freepages_start;
u64 freepages_delay; /* wait for memory reclaim */
u32 freepages_count; /* total count of memory reclaim */
};
@@ -1364,8 +1364,8 @@ struct task_struct {
} vtime_snap_whence;
#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
- struct timespec start_time; /* monotonic time */
- struct timespec real_start_time; /* boot based time */
+ u64 start_time; /* monotonic time in nsec */
+ u64 real_start_time; /* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
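
A hedged sketch (helper name is illustrative) of what the nanosecond start_time enables: assuming start_time is taken from the monotonic nanosecond clock, a task's age falls out of one subtraction, with no timespec normalization.

    #include <linux/sched.h>
    #include <linux/ktime.h>

    static u64 example_task_age_ns(const struct task_struct *p)
    {
            return ktime_get_ns() - p->start_time;
    }
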
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 8cf3503..cc35963 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -117,6 +117,22 @@ repeat:
}
/**
+ * raw_read_seqcount - Read the raw seqcount
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * raw_read_seqcount opens a read critical section of the given
+ * seqcount without any lockdep checking and without checking or
+ * masking the LSB. Calling code is responsible for handling that.
+ */
+static inline unsigned raw_read_seqcount(const seqcount_t *s)
+{
+ unsigned ret = ACCESS_ONCE(s->sequence);
+ smp_rmb();
+ return ret;
+}
+
+/**
* raw_read_seqcount_begin - start seq-read critical section w/o lockdep
* @s: pointer to seqcount_t
* Returns: count to be passed to read_seqcount_retry
@@ -218,6 +234,17 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
}
/*
+ * raw_write_seqcount_latch - redirect readers to even/odd copy
+ * @s: pointer to seqcount_t
+ */
+static inline void raw_write_seqcount_latch(seqcount_t *s)
+{
+ smp_wmb(); /* prior stores before incrementing "sequence" */
+ s->sequence++;
+ smp_wmb(); /* increment "sequence" before following stores */
+}
+
+/*
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
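
raw_write_seqcount_latch() and the unmasked raw_read_seqcount() together support the classic latch pattern: the writer keeps two copies of the data and flips readers between them, so readers never spin on a writer in progress. A minimal, hedged sketch (the structure and helpers are illustrative, not kernel code):

    #include <linux/seqlock.h>

    struct latch_u64 {
            seqcount_t seq;
            u64 copy[2];
    };

    static void latch_write(struct latch_u64 *lv, u64 val)
    {
            raw_write_seqcount_latch(&lv->seq);   /* odd: readers use copy[1] */
            lv->copy[0] = val;
            raw_write_seqcount_latch(&lv->seq);   /* even: readers use copy[0] */
            lv->copy[1] = val;
    }

    static u64 latch_read(struct latch_u64 *lv)
    {
            unsigned int seq;
            u64 val;

            do {
                    seq = raw_read_seqcount(&lv->seq);  /* LSB selects the copy */
                    val = lv->copy[seq & 1];
            } while (read_seqcount_retry(&lv->seq, seq));

            return val;
    }
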
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
index 8e1e036..64638b0 100644
--- a/include/linux/sh_timer.h
+++ b/include/linux/sh_timer.h
@@ -2,11 +2,6 @@
#define __SH_TIMER_H__
struct sh_timer_config {
- char *name;
- long channel_offset;
- int timer_bit;
- unsigned long clockevent_rating;
- unsigned long clocksource_rating;
unsigned int channels_mask;
};
diff --git a/include/linux/time.h b/include/linux/time.h
index d5d229b..8c42cf8 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -4,19 +4,10 @@
# include <linux/cache.h>
# include <linux/seqlock.h>
# include <linux/math64.h>
-#include <uapi/linux/time.h>
+# include <linux/time64.h>
extern struct timezone sys_tz;
-/* Parameters used to convert the timespec values: */
-#define MSEC_PER_SEC 1000L
-#define USEC_PER_MSEC 1000L
-#define NSEC_PER_USEC 1000L
-#define NSEC_PER_MSEC 1000000L
-#define USEC_PER_SEC 1000000L
-#define NSEC_PER_SEC 1000000000L
-#define FSEC_PER_SEC 1000000000000000LL
-
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
static inline int timespec_equal(const struct timespec *a,
@@ -84,13 +75,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
return ts_delta;
}
-#define KTIME_MAX ((s64)~((u64)1 << 63))
-#if (BITS_PER_LONG == 64)
-# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
-#else
-# define KTIME_SEC_MAX LONG_MAX
-#endif
-
/*
* Returns true if the timespec is norm, false if denorm:
*/
@@ -115,27 +99,7 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
return true;
}
-extern bool persistent_clock_exist;
-
-static inline bool has_persistent_clock(void)
-{
- return persistent_clock_exist;
-}
-
-extern void read_persistent_clock(struct timespec *ts);
-extern void read_boot_clock(struct timespec *ts);
-extern int persistent_clock_is_local;
-extern int update_persistent_clock(struct timespec now);
-void timekeeping_init(void);
-extern int timekeeping_suspended;
-
-unsigned long get_seconds(void);
-struct timespec current_kernel_time(void);
-struct timespec __current_kernel_time(void); /* does not take xtime_lock */
-struct timespec get_monotonic_coarse(void);
-void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
- struct timespec *wtom, struct timespec *sleep);
-void timekeeping_inject_sleeptime(struct timespec *delta);
+extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
#define CURRENT_TIME (current_kernel_time())
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
@@ -153,33 +117,14 @@ void timekeeping_inject_sleeptime(struct timespec *delta);
extern u32 (*arch_gettimeoffset)(void);
#endif
-extern void do_gettimeofday(struct timeval *tv);
-extern int do_settimeofday(const struct timespec *tv);
-extern int do_sys_settimeofday(const struct timespec *tv,
- const struct timezone *tz);
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
-extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
struct itimerval;
extern int do_setitimer(int which, struct itimerval *value,
struct itimerval *ovalue);
-extern unsigned int alarm_setitimer(unsigned int seconds);
extern int do_getitimer(int which, struct itimerval *value);
-extern int __getnstimeofday(struct timespec *tv);
-extern void getnstimeofday(struct timespec *tv);
-extern void getrawmonotonic(struct timespec *ts);
-extern void getnstime_raw_and_real(struct timespec *ts_raw,
- struct timespec *ts_real);
-extern void getboottime(struct timespec *ts);
-extern void monotonic_to_bootbased(struct timespec *ts);
-extern void get_monotonic_boottime(struct timespec *ts);
-extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
-extern int timekeeping_valid_for_hres(void);
-extern u64 timekeeping_max_deferment(void);
-extern int timekeeping_inject_offset(struct timespec *ts);
-extern s32 timekeeping_get_tai_offset(void);
-extern void timekeeping_set_tai_offset(s32 tai_offset);
-extern void timekeeping_clocktai(struct timespec *ts);
+extern unsigned int alarm_setitimer(unsigned int seconds);
+
+extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
struct tms;
extern void do_sys_times(struct tms *);
diff --git a/include/linux/time64.h b/include/linux/time64.h
new file mode 100644
index 0000000..a383147
--- /dev/null
+++ b/include/linux/time64.h
@@ -0,0 +1,190 @@
+#ifndef _LINUX_TIME64_H
+#define _LINUX_TIME64_H
+
+#include <uapi/linux/time.h>
+
+typedef __s64 time64_t;
+
+/*
+ * This wants to go into uapi/linux/time.h once we agreed about the
+ * userspace interfaces.
+ */
+#if __BITS_PER_LONG == 64
+# define timespec64 timespec
+#else
+struct timespec64 {
+ time64_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+#endif
+
+/* Parameters used to convert the timespec values: */
+#define MSEC_PER_SEC 1000L
+#define USEC_PER_MSEC 1000L
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_MSEC 1000000L
+#define USEC_PER_SEC 1000000L
+#define NSEC_PER_SEC 1000000000L
+#define FSEC_PER_SEC 1000000000000000LL
+
+/* Located here for timespec[64]_valid_strict */
+#define KTIME_MAX ((s64)~((u64)1 << 63))
+#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+
+#if __BITS_PER_LONG == 64
+
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+ return ts64;
+}
+
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+ return ts;
+}
+
+# define timespec64_equal timespec_equal
+# define timespec64_compare timespec_compare
+# define set_normalized_timespec64 set_normalized_timespec
+# define timespec64_add_safe timespec_add_safe
+# define timespec64_add timespec_add
+# define timespec64_sub timespec_sub
+# define timespec64_valid timespec_valid
+# define timespec64_valid_strict timespec_valid_strict
+# define timespec64_to_ns timespec_to_ns
+# define ns_to_timespec64 ns_to_timespec
+# define timespec64_add_ns timespec_add_ns
+
+#else
+
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+ struct timespec ret;
+
+ ret.tv_sec = (time_t)ts64.tv_sec;
+ ret.tv_nsec = ts64.tv_nsec;
+ return ret;
+}
+
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+ struct timespec64 ret;
+
+ ret.tv_sec = ts.tv_sec;
+ ret.tv_nsec = ts.tv_nsec;
+ return ret;
+}
+
+static inline int timespec64_equal(const struct timespec64 *a,
+ const struct timespec64 *b)
+{
+ return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
+}
+
+/*
+ * lhs < rhs: return <0
+ * lhs == rhs: return 0
+ * lhs > rhs: return >0
+ */
+static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
+{
+ if (lhs->tv_sec < rhs->tv_sec)
+ return -1;
+ if (lhs->tv_sec > rhs->tv_sec)
+ return 1;
+ return lhs->tv_nsec - rhs->tv_nsec;
+}
+
+extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
+
+/*
+ * timespec64_add_safe assumes both values are positive and checks for
+ * overflow. It will return TIME_T_MAX if the returned value would be
+ * smaller then either of the arguments.
+ */
+extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
+ const struct timespec64 rhs);
+
+
+static inline struct timespec64 timespec64_add(struct timespec64 lhs,
+ struct timespec64 rhs)
+{
+ struct timespec64 ts_delta;
+ set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
+ lhs.tv_nsec + rhs.tv_nsec);
+ return ts_delta;
+}
+
+/*
+ * sub = lhs - rhs, in normalized form
+ */
+static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
+ struct timespec64 rhs)
+{
+ struct timespec64 ts_delta;
+ set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
+ lhs.tv_nsec - rhs.tv_nsec);
+ return ts_delta;
+}
+
+/*
+ * Returns true if the timespec64 is norm, false if denorm:
+ */
+static inline bool timespec64_valid(const struct timespec64 *ts)
+{
+ /* Dates before 1970 are bogus */
+ if (ts->tv_sec < 0)
+ return false;
+ /* Can't have more nanoseconds then a second */
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return false;
+ return true;
+}
+
+static inline bool timespec64_valid_strict(const struct timespec64 *ts)
+{
+ if (!timespec64_valid(ts))
+ return false;
+ /* Disallow values that could overflow ktime_t */
+ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+ return false;
+ return true;
+}
+
+/**
+ * timespec64_to_ns - Convert timespec64 to nanoseconds
+ * @ts: pointer to the timespec64 variable to be converted
+ *
+ * Returns the scalar nanosecond representation of the timespec64
+ * parameter.
+ */
+static inline s64 timespec64_to_ns(const struct timespec64 *ts)
+{
+ return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
+/**
+ * ns_to_timespec64 - Convert nanoseconds to timespec64
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timespec64 representation of the nsec parameter.
+ */
+extern struct timespec64 ns_to_timespec64(const s64 nsec);
+
+/**
+ * timespec64_add_ns - Adds nanoseconds to a timespec64
+ * @a: pointer to timespec64 to be incremented
+ * @ns: unsigned nanoseconds value to be added
+ *
+ * This must always be inlined because its used from the x86-64 vdso,
+ * which cannot call other kernel functions.
+ */
+static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
+{
+ a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
+ a->tv_nsec = ns;
+}
+
+#endif
+
+#endif /* _LINUX_TIME64_H */
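
A hedged sketch of bridging between the legacy timespec and the new timespec64 (the helper is illustrative). On 64-bit builds every call below is an alias or a pass-through; on 32-bit builds the real conversion and normalization helpers defined above are used:

    #include <linux/time.h>

    static struct timespec example_bump(struct timespec legacy, u64 ns)
    {
            struct timespec64 ts = timespec_to_timespec64(legacy);

            timespec64_add_ns(&ts, ns);             /* re-normalizes tv_nsec */
            return timespec64_to_timespec(ts);
    }
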
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index c1825eb..95640dc 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -10,77 +10,100 @@
#include <linux/jiffies.h>
#include <linux/time.h>
-/* Structure holding internal timekeeping values. */
-struct timekeeper {
- /* Current clocksource used for timekeeping. */
+/**
+ * struct tk_read_base - base structure for timekeeping readout
+ * @clock: Current clocksource used for timekeeping.
+ * @read: Read function of @clock
+ * @mask: Bitmask for two's complement subtraction of non 64bit clocks
+ * @cycle_last: @clock cycle value at last update
+ * @mult: NTP adjusted multiplier for scaled math conversion
+ * @shift: Shift value for scaled math conversion
+ * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
+ * @base_mono: ktime_t (nanoseconds) base time for readout
+ *
+ * This struct has size 56 byte on 64 bit. Together with a seqcount it
+ * occupies a single 64byte cache line.
+ *
+ * The struct is separate from struct timekeeper as it is also used
+ * for a fast NMI safe accessor to clock monotonic.
+ */
+struct tk_read_base {
struct clocksource *clock;
- /* NTP adjusted clock multiplier */
+ cycle_t (*read)(struct clocksource *cs);
+ cycle_t mask;
+ cycle_t cycle_last;
u32 mult;
- /* The shift value of the current clocksource. */
u32 shift;
- /* Number of clock cycles in one NTP interval. */
+ u64 xtime_nsec;
+ ktime_t base_mono;
+};
+
+/**
+ * struct timekeeper - Structure holding internal timekeeping values.
+ * @tkr: The readout base structure
+ * @xtime_sec: Current CLOCK_REALTIME time in seconds
+ * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
+ * @offs_real: Offset clock monotonic -> clock realtime
+ * @offs_boot: Offset clock monotonic -> clock boottime
+ * @offs_tai: Offset clock monotonic -> clock tai
+ * @tai_offset: The current UTC to TAI offset in seconds
+ * @base_raw: Monotonic raw base time in ktime_t format
+ * @raw_time: Monotonic raw base time in timespec64 format
+ * @cycle_interval: Number of clock cycles in one NTP interval
+ * @xtime_interval: Number of clock shifted nano seconds in one NTP
+ * interval.
+ * @xtime_remainder: Shifted nano seconds left over when rounding
+ * @cycle_interval
+ * @raw_interval: Raw nano seconds accumulated per NTP interval.
+ * @ntp_error: Difference between accumulated time and NTP time in ntp
+ * shifted nano seconds.
+ * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
+ * ntp shifted nano seconds.
+ *
+ * Note: For timespec(64) based interfaces wall_to_monotonic is what
+ * we need to add to xtime (or xtime corrected for sub jiffie times)
+ * to get to monotonic time. Monotonic is pegged at zero at system
+ * boot time, so wall_to_monotonic will be negative, however, we will
+ * ALWAYS keep the tv_nsec part positive so we can use the usual
+ * normalization.
+ *
+ * wall_to_monotonic is moved after resume from suspend for the
+ * monotonic time not to jump. We need to add total_sleep_time to
+ * wall_to_monotonic to get the real boot based time offset.
+ *
+ * wall_to_monotonic is no longer the boot time, getboottime must be
+ * used instead.
+ */
+struct timekeeper {
+ struct tk_read_base tkr;
+ u64 xtime_sec;
+ struct timespec64 wall_to_monotonic;
+ ktime_t offs_real;
+ ktime_t offs_boot;
+ ktime_t offs_tai;
+ s32 tai_offset;
+ ktime_t base_raw;
+ struct timespec64 raw_time;
+
+ /* The following members are for timekeeping internal use */
cycle_t cycle_interval;
- /* Last cycle value (also stored in clock->cycle_last) */
- cycle_t cycle_last;
- /* Number of clock shifted nano seconds in one NTP interval. */
u64 xtime_interval;
- /* shifted nano seconds left over when rounding cycle_interval */
s64 xtime_remainder;
- /* Raw nano seconds accumulated per NTP interval. */
u32 raw_interval;
-
- /* Current CLOCK_REALTIME time in seconds */
- u64 xtime_sec;
- /* Clock shifted nano seconds */
- u64 xtime_nsec;
-
+ /* The ntp_tick_length() value currently being used.
+ * This cached copy ensures we consistently apply the tick
+ * length for an entire tick, as ntp_tick_length may change
+ * mid-tick, and we don't want to apply that new value to
+ * the tick in progress.
+ */
+ u64 ntp_tick;
/* Difference between accumulated time and NTP time in ntp
* shifted nano seconds. */
s64 ntp_error;
- /* Shift conversion between clock shifted nano seconds and
- * ntp shifted nano seconds. */
u32 ntp_error_shift;
-
- /*
- * wall_to_monotonic is what we need to add to xtime (or xtime corrected
- * for sub jiffie times) to get to monotonic time. Monotonic is pegged
- * at zero at system boot time, so wall_to_monotonic will be negative,
- * however, we will ALWAYS keep the tv_nsec part positive so we can use
- * the usual normalization.
- *
- * wall_to_monotonic is moved after resume from suspend for the
- * monotonic time not to jump. We need to add total_sleep_time to
- * wall_to_monotonic to get the real boot based time offset.
- *
- * - wall_to_monotonic is no longer the boot time, getboottime must be
- * used instead.
- */
- struct timespec wall_to_monotonic;
- /* Offset clock monotonic -> clock realtime */
- ktime_t offs_real;
- /* time spent in suspend */
- struct timespec total_sleep_time;
- /* Offset clock monotonic -> clock boottime */
- ktime_t offs_boot;
- /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
- struct timespec raw_time;
- /* The current UTC to TAI offset in seconds */
- s32 tai_offset;
- /* Offset clock monotonic -> clock tai */
- ktime_t offs_tai;
-
+ u32 ntp_err_mult;
};
-static inline struct timespec tk_xtime(struct timekeeper *tk)
-{
- struct timespec ts;
-
- ts.tv_sec = tk->xtime_sec;
- ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
- return ts;
-}
-
-
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timekeeper *tk);
@@ -89,17 +112,10 @@ extern void update_vsyscall_tz(void);
#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
- struct clocksource *c, u32 mult);
+ struct clocksource *c, u32 mult,
+ cycle_t cycle_last);
extern void update_vsyscall_tz(void);
-static inline void update_vsyscall(struct timekeeper *tk)
-{
- struct timespec xt;
-
- xt = tk_xtime(tk);
- update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
-}
-
#else
static inline void update_vsyscall(struct timekeeper *tk)
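
A simplified, hedged sketch of the readout path struct tk_read_base is laid out for (the real code lives in kernel/time/timekeeping.c; the seqcount is passed in here only to keep the sketch self-contained). Everything a monotonic read touches sits in the one structure, so it can share a cache line with the protecting sequence counter:

    #include <linux/timekeeper_internal.h>
    #include <linux/ktime.h>

    static u64 example_read_mono_ns(struct tk_read_base *tkr, seqcount_t *seq)
    {
            unsigned int s;
            cycle_t delta;
            u64 nsec;

            do {
                    s = read_seqcount_begin(seq);
                    /* masked two's complement subtraction handles counter wrap */
                    delta = (tkr->read(tkr->clock) - tkr->cycle_last) & tkr->mask;
                    nsec = (tkr->xtime_nsec + delta * tkr->mult) >> tkr->shift;
                    nsec += ktime_to_ns(tkr->base_mono);
            } while (read_seqcount_retry(seq, s));

            return nsec;
    }
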
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
new file mode 100644
index 0000000..1caa6b0
--- /dev/null
+++ b/include/linux/timekeeping.h
@@ -0,0 +1,209 @@
+#ifndef _LINUX_TIMEKEEPING_H
+#define _LINUX_TIMEKEEPING_H
+
+/* Included from linux/ktime.h */
+
+void timekeeping_init(void);
+extern int timekeeping_suspended;
+
+/*
+ * Get and set timeofday
+ */
+extern void do_gettimeofday(struct timeval *tv);
+extern int do_settimeofday(const struct timespec *tv);
+extern int do_sys_settimeofday(const struct timespec *tv,
+ const struct timezone *tz);
+
+/*
+ * Kernel time accessors
+ */
+unsigned long get_seconds(void);
+struct timespec current_kernel_time(void);
+/* does not take xtime_lock */
+struct timespec __current_kernel_time(void);
+
+/*
+ * timespec based interfaces
+ */
+struct timespec get_monotonic_coarse(void);
+extern void getrawmonotonic(struct timespec *ts);
+extern void ktime_get_ts64(struct timespec64 *ts);
+
+extern int __getnstimeofday64(struct timespec64 *tv);
+extern void getnstimeofday64(struct timespec64 *tv);
+
+#if BITS_PER_LONG == 64
+static inline int __getnstimeofday(struct timespec *ts)
+{
+ return __getnstimeofday64(ts);
+}
+
+static inline void getnstimeofday(struct timespec *ts)
+{
+ getnstimeofday64(ts);
+}
+
+static inline void ktime_get_ts(struct timespec *ts)
+{
+ ktime_get_ts64(ts);
+}
+
+static inline void ktime_get_real_ts(struct timespec *ts)
+{
+ getnstimeofday64(ts);
+}
+
+#else
+static inline int __getnstimeofday(struct timespec *ts)
+{
+ struct timespec64 ts64;
+ int ret = __getnstimeofday64(&ts64);
+
+ *ts = timespec64_to_timespec(ts64);
+ return ret;
+}
+
+static inline void getnstimeofday(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ getnstimeofday64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+
+static inline void ktime_get_ts(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ ktime_get_ts64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+
+static inline void ktime_get_real_ts(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ getnstimeofday64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+#endif
+
+extern void getboottime(struct timespec *ts);
+
+#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
+#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
+
+/*
+ * ktime_t based interfaces
+ */
+
+enum tk_offsets {
+ TK_OFFS_REAL,
+ TK_OFFS_BOOT,
+ TK_OFFS_TAI,
+ TK_OFFS_MAX,
+};
+
+extern ktime_t ktime_get(void);
+extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
+extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
+extern ktime_t ktime_get_raw(void);
+
+/**
+ * ktime_get_real - get the real (wall-) time in ktime_t format
+ */
+static inline ktime_t ktime_get_real(void)
+{
+ return ktime_get_with_offset(TK_OFFS_REAL);
+}
+
+/**
+ * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
+ *
+ * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the
+ * time spent in suspend.
+ */
+static inline ktime_t ktime_get_boottime(void)
+{
+ return ktime_get_with_offset(TK_OFFS_BOOT);
+}
+
+/**
+ * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
+ */
+static inline ktime_t ktime_get_clocktai(void)
+{
+ return ktime_get_with_offset(TK_OFFS_TAI);
+}
+
+/**
+ * ktime_mono_to_real - Convert monotonic time to clock realtime
+ */
+static inline ktime_t ktime_mono_to_real(ktime_t mono)
+{
+ return ktime_mono_to_any(mono, TK_OFFS_REAL);
+}
+
+static inline u64 ktime_get_ns(void)
+{
+ return ktime_to_ns(ktime_get());
+}
+
+static inline u64 ktime_get_real_ns(void)
+{
+ return ktime_to_ns(ktime_get_real());
+}
+
+static inline u64 ktime_get_boot_ns(void)
+{
+ return ktime_to_ns(ktime_get_boottime());
+}
+
+static inline u64 ktime_get_raw_ns(void)
+{
+ return ktime_to_ns(ktime_get_raw());
+}
+
+extern u64 ktime_get_mono_fast_ns(void);
+
+/*
+ * Timespec interfaces utilizing the ktime based ones
+ */
+static inline void get_monotonic_boottime(struct timespec *ts)
+{
+ *ts = ktime_to_timespec(ktime_get_boottime());
+}
+
+static inline void timekeeping_clocktai(struct timespec *ts)
+{
+ *ts = ktime_to_timespec(ktime_get_clocktai());
+}
+
+/*
+ * RTC specific
+ */
+extern void timekeeping_inject_sleeptime(struct timespec *delta);
+
+/*
+ * PPS accessor
+ */
+extern void getnstime_raw_and_real(struct timespec *ts_raw,
+ struct timespec *ts_real);
+
+/*
+ * Persistent clock related interfaces
+ */
+extern bool persistent_clock_exist;
+extern int persistent_clock_is_local;
+
+static inline bool has_persistent_clock(void)
+{
+ return persistent_clock_exist;
+}
+
+extern void read_persistent_clock(struct timespec *ts);
+extern void read_boot_clock(struct timespec *ts);
+extern int update_persistent_clock(struct timespec now);
+
+
+#endif
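
A hedged sketch of picking a clock domain with the new scalar-nanosecond accessors (the event structure is illustrative): ktime_get_real_ns(), ktime_get_boot_ns() and the TAI variant all funnel through ktime_get_with_offset(), while ktime_get_ns() reads plain CLOCK_MONOTONIC.

    #include <linux/ktime.h>

    /* Illustrative event record, not any real subsystem's struct. */
    struct example_event {
            u64 mono_ns;    /* CLOCK_MONOTONIC: stops while suspended */
            u64 boot_ns;    /* CLOCK_BOOTTIME:  includes suspended time */
            u64 real_ns;    /* CLOCK_REALTIME:  wall clock */
    };

    static void example_stamp(struct example_event *ev)
    {
            ev->mono_ns = ktime_get_ns();
            ev->boot_ns = ktime_get_boot_ns();
            ev->real_ns = ktime_get_real_ns();
    }
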
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h
index d3b57fa..bd36ce4 100644
--- a/include/linux/timerfd.h
+++ b/include/linux/timerfd.h
@@ -11,6 +11,9 @@
/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h>
+/* For _IO helpers */
+#include <linux/ioctl.h>
+
/*
* CAREFUL: Check include/asm-generic/fcntl.h when defining
* new flags, since they might collide with O_* ones. We want
@@ -29,4 +32,6 @@
/* Flags for timerfd_settime. */
#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
+#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
+
#endif /* _LINUX_TIMERFD_H */
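
A hedged userspace sketch of the new ioctl (the header above is kernel-internal, so the request code is re-derived locally here; error handling trimmed). The assumption is that a checkpoint/restore style tool uses it to re-arm the expiration count a restored timerfd reports:

    #include <stdint.h>
    #include <sys/ioctl.h>

    /* Mirrors the kernel definition above; not exported via uapi at this point. */
    #define TFD_IOC_SET_TICKS _IOW('T', 0, uint64_t)

    static int example_set_ticks(int tfd, uint64_t ticks)
    {
            return ioctl(tfd, TFD_IOC_SET_TICKS, &ticks);
    }
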