/* linux/include/linux/clockchips.h
 *
 * This file contains the structure definitions for clockchips.
 *
 * If you are not a clockchip, or the time of day code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKCHIPS_H
#define _LINUX_CLOCKCHIPS_H

#ifdef CONFIG_GENERIC_CLOCKEVENTS

# include <linux/clocksource.h>
# include <linux/cpumask.h>
# include <linux/ktime.h>
# include <linux/notifier.h>

struct clock_event_device;
struct module;

/*
 * Possible states of a clock event device.
 *
 * DETACHED:	Device is not used by clockevents core. Initial state or can be
 *		reached from SHUTDOWN.
 * SHUTDOWN:	Device is powered-off. Can be reached from PERIODIC or ONESHOT.
 * PERIODIC:	Device is programmed to generate events periodically. Can be
 *		reached from DETACHED or SHUTDOWN.
 * ONESHOT:	Device is programmed to generate event only once. Can be reached
 *		from DETACHED or SHUTDOWN.
 * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
 *		    stopped.
 */
enum clock_event_state {
	CLOCK_EVT_STATE_DETACHED,
	CLOCK_EVT_STATE_SHUTDOWN,
	CLOCK_EVT_STATE_PERIODIC,
	CLOCK_EVT_STATE_ONESHOT,
	CLOCK_EVT_STATE_ONESHOT_STOPPED,
};
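
/*
 * Illustrative sketch (not part of this header), assuming a hypothetical
 * "foo_timer" driver: the clockevents core performs the transitions above
 * by calling the set_state_* callbacks of struct clock_event_device
 * (defined below); the driver only reacts to the requested state.
 */
#if 0	/* example only */
static int foo_timer_shutdown(struct clock_event_device *evt)
{
	/* SHUTDOWN: quiesce the hardware, no further events */
	writel(0, foo_timer_base + FOO_TIMER_CTRL);
	return 0;
}

static int foo_timer_set_oneshot(struct clock_event_device *evt)
{
	/* ONESHOT: enable the timer; set_next_event() arms each event */
	writel(FOO_TIMER_CTRL_IRQEN, foo_timer_base + FOO_TIMER_CTRL);
	return 0;
}
#endif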

/*
 * Clock event features
 */
# define CLOCK_EVT_FEAT_PERIODIC	0x000001
# define CLOCK_EVT_FEAT_ONESHOT		0x000002
# define CLOCK_EVT_FEAT_KTIME		0x000004

/*
 * x86(64) specific (mis)features:
 *
 * - Clockevent source stops in C3 State and needs broadcast support.
 * - Local APIC timer is used as a dummy device.
 */
# define CLOCK_EVT_FEAT_C3STOP		0x000008
# define CLOCK_EVT_FEAT_DUMMY		0x000010

/*
 * Core shall set the interrupt affinity dynamically in broadcast mode
 */
# define CLOCK_EVT_FEAT_DYNIRQ		0x000020
# define CLOCK_EVT_FEAT_PERCPU		0x000040

/*
 * Clockevent device is based on a hrtimer for broadcast
 */
# define CLOCK_EVT_FEAT_HRTIMER		0x000080

/**
 * struct clock_event_device - clock event device descriptor
 * @event_handler:	Assigned by the framework to be called by the low
 *			level handler of the event source
 * @set_next_event:	set next event function using a clocksource delta
 * @set_next_ktime:	set next event function using a direct ktime value
 * @next_event:		local storage for the next event in oneshot mode
 * @max_delta_ns:	maximum delta value in ns
 * @min_delta_ns:	minimum delta value in ns
 * @mult:		nanosecond to cycles multiplier
 * @shift:		nanoseconds to cycles divisor (power of two)
 * @state_use_accessors: current state of the device, assigned by the core code
 * @features:		features
 * @retries:		number of forced programming retries
 * @set_state_periodic:	switch state to periodic
 * @set_state_oneshot:	switch state to oneshot
 * @set_state_oneshot_stopped: switch state to oneshot_stopped
 * @set_state_shutdown:	switch state to shutdown
 * @tick_resume:	resume clkevt device
 * @broadcast:		function to broadcast events
 * @suspend:		suspend callback, optional
 * @resume:		resume callback, optional
 * @min_delta_ticks:	minimum delta value in ticks stored for reconfiguration
 * @max_delta_ticks:	maximum delta value in ticks stored for reconfiguration
 * @name:		ptr to clock event name
 * @rating:		variable to rate clock event devices
 * @irq:		IRQ number (only for non CPU local devices)
 * @bound_on:		Bound on CPU
 * @cpumask:		cpumask to indicate for which CPUs this device works
 * @list:		list head for the management code
 * @owner:		module reference
 */
struct clock_event_device {
	void			(*event_handler)(struct clock_event_device *);
	int			(*set_next_event)(unsigned long evt, struct clock_event_device *);
	int			(*set_next_ktime)(ktime_t expires, struct clock_event_device *);
	ktime_t			next_event;
	u64			max_delta_ns;
	u64			min_delta_ns;
	u32			mult;
	u32			shift;
	enum clock_event_state	state_use_accessors;
	unsigned int		features;
	unsigned long		retries;

	int			(*set_state_periodic)(struct clock_event_device *);
	int			(*set_state_oneshot)(struct clock_event_device *);
	int			(*set_state_oneshot_stopped)(struct clock_event_device *);
	int			(*set_state_shutdown)(struct clock_event_device *);
	int			(*tick_resume)(struct clock_event_device *);

	void			(*broadcast)(const struct cpumask *mask);
	void			(*suspend)(struct clock_event_device *);
	void			(*resume)(struct clock_event_device *);
	unsigned long		min_delta_ticks;
	unsigned long		max_delta_ticks;

	const char		*name;
	int			rating;
	int			irq;
	int			bound_on;
	const struct cpumask	*cpumask;
	struct list_head	list;
	struct module		*owner;
} ____cacheline_aligned;
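
/*
 * Illustrative sketch only, continuing the hypothetical "foo_timer" example:
 * a per-CPU tick device description.  The rating, callbacks and feature
 * flags are invented; @event_handler is filled in by the framework and
 * @cpumask is normally set at registration time.
 */
#if 0	/* example only */
static DEFINE_PER_CPU(struct clock_event_device, foo_timer_evt) = {
	.name			= "foo_timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_next_event		= foo_timer_set_next_event,
	.set_state_oneshot	= foo_timer_set_oneshot,
	.set_state_shutdown	= foo_timer_shutdown,
};
#endif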

/* Helpers to verify state of a clockevent device */
static inline bool clockevent_state_detached(struct clock_event_device *dev)
{
	return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
}

static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
{
	return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
}

static inline bool clockevent_state_periodic(struct clock_event_device *dev)
{
	return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC;
}

static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
{
	return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT;
}

static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
{
	return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
}
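
/*
 * Illustrative sketch only (assumes <linux/interrupt.h>): drivers commonly
 * use the helpers above in their interrupt handler, for instance to re-arm
 * hardware that emulates periodic mode.  "foo_timer" is hypothetical.
 */
#if 0	/* example only */
static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	foo_timer_ack_irq();

	/* Emulated periodic mode: immediately re-arm with the period */
	if (clockevent_state_periodic(evt))
		foo_timer_start(foo_timer_period);

	evt->event_handler(evt);
	return IRQ_HANDLED;
}
#endif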

/*
 * Calculate a multiplication factor for scaled math, which is used to convert
 * nanoseconds based values to clock ticks:
 *
 *	clock_ticks = (nanoseconds * factor) >> shift.
 *
 * div_sc is the rearranged equation to calculate a factor from a given clock
 * ticks / nanoseconds ratio:
 *
 *	factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long
div_sc(unsigned long ticks, unsigned long nsec, int shift)
{
	u64 tmp = ((u64)ticks) << shift;

	do_div(tmp, nsec);

	return (unsigned long) tmp;
}
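
/*
 * Worked example (illustrative, values invented): a 1 MHz timer produces one
 * tick per 1000 ns.  With shift = 32,
 *
 *	factor = div_sc(1000000, 1000000000, 32)
 *	       = (1000000 << 32) / 1000000000 = 4294967
 *
 * and a 1 ms (1000000 ns) delta converts to (1000000 * 4294967) >> 32 = 999
 * ticks, i.e. the expected ~1000 minus the rounding lost when the factor was
 * truncated.
 */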

/* Clock event layer functions */
extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt);
extern void clockevents_register_device(struct clock_event_device *dev);
extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);

extern void clockevents_config(struct clock_event_device *dev, u32 freq);
extern void clockevents_config_and_register(struct clock_event_device *dev,
					    u32 freq, unsigned long min_delta,
					    unsigned long max_delta);
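
/*
 * Illustrative sketch only, completing the hypothetical "foo_timer" example:
 * configuring and registering the per-CPU device for a 1 MHz clock with an
 * invented 0xf..0xffffffff programmable tick range.
 */
#if 0	/* example only */
static int foo_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&foo_timer_evt);

	evt->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(evt, 1000000, 0xf, 0xffffffff);
	return 0;
}
#endif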

extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);

static inline void
clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
{
	return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
}

extern void clockevents_suspend(void);
extern void clockevents_resume(void);

# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
#  ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
extern void tick_broadcast(const struct cpumask *mask);
#  else
#   define tick_broadcast	NULL
#  endif
extern int tick_receive_broadcast(void);
# endif

# if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void tick_setup_hrtimer_broadcast(void);
extern int tick_check_broadcast_expired(void);
# else
static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif

#else /* !CONFIG_GENERIC_CLOCKEVENTS: */

static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
static inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }

#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

#endif /* _LINUX_CLOCKCHIPS_H */