#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		2
/* The full zone was compacted */
#define COMPACT_COMPLETE	3

#ifdef CONFIG_COMPACTION
/* sysctl trigger that forces compaction when written, and its handler. */
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
/* sysctl tunable for the external-fragmentation threshold, and its handler. */
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			bool sync, bool *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_limit compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
2012-03-22 03:33:52 +04:00
|
|
|
static inline void defer_compaction(struct zone *zone, int order)
|
2010-05-25 01:32:32 +04:00
|
|
|
{
|
|
|
|
zone->compact_considered = 0;
|
|
|
|
zone->compact_defer_shift++;
|
|
|
|
|
2012-03-22 03:33:52 +04:00
|
|
|
if (order < zone->compact_order_failed)
|
|
|
|
zone->compact_order_failed = order;
|
|
|
|
|
2010-05-25 01:32:32 +04:00
|
|
|
if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
|
|
|
|
zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns true if compaction should be skipped this time */
|
2012-03-22 03:33:52 +04:00
|
|
|
static inline bool compaction_deferred(struct zone *zone, int order)
|
2010-05-25 01:32:32 +04:00
|
|
|
{
|
|
|
|
unsigned long defer_limit = 1UL << zone->compact_defer_shift;
|
|
|
|
|
2012-03-22 03:33:52 +04:00
|
|
|
if (order < zone->compact_order_failed)
|
|
|
|
return false;
|
|
|
|
|
2010-05-25 01:32:32 +04:00
|
|
|
/* Avoid possible overflow */
|
|
|
|
if (++zone->compact_considered > defer_limit)
|
|
|
|
zone->compact_considered = defer_limit;
|
|
|
|
|
2012-08-01 03:42:49 +04:00
|
|
|
return zone->compact_considered < defer_limit;
|
2010-05-25 01:32:32 +04:00
|
|
|
}
|
|
|
|
|
2012-10-09 03:32:47 +04:00
|
|
|
/* Returns true if restarting compaction after many failures */
|
|
|
|
static inline bool compaction_restarting(struct zone *zone, int order)
|
|
|
|
{
|
|
|
|
if (order < zone->compact_order_failed)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
|
|
|
|
zone->compact_considered >= 1UL << zone->compact_defer_shift;
|
|
|
|
}
|
|
|
|
|
#else
/* !CONFIG_COMPACTION stub: does no work and reports COMPACT_CONTINUE. */
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	return COMPACT_CONTINUE;
}
/* !CONFIG_COMPACTION stub: no-op. */
static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}
/* !CONFIG_COMPACTION stub: no-op. */
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
/* !CONFIG_COMPACTION stub: compaction is never suitable. */
static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}
/* !CONFIG_COMPACTION stub: nothing to defer. */
static inline void defer_compaction(struct zone *zone, int order)
{
}
/* !CONFIG_COMPACTION stub: always report "skip compaction". */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
/* Per-NUMA-node registration of compaction controls (sysfs builds only). */
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else
/* Stub when compaction/sysfs/NUMA support is absent: always succeeds. */
static inline int compaction_register_node(struct node *node)
{
	return 0;
}

/* Stub when compaction/sysfs/NUMA support is absent: no-op. */
static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */