#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
#else
#define vm_committed_as_batch 0
#endif

unsigned long vm_memory_committed(void);

static inline void vm_acct_memory(long pages)
{
	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
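
/*
 * Usage sketch (illustrative only, not part of this header): callers
 * charge pages up front and undo the charge if the policy check fails.
 * The policy_rejects() name below is hypothetical; the real check is
 * done by __vm_enough_memory() in mm/, which follows this pattern.
 *
 *	vm_acct_memory(pages);
 *	if (policy_rejects(pages)) {
 *		vm_unacct_memory(pages);
 *		return -ENOMEM;
 *	}
 */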

/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline bool arch_validate_prot(unsigned long prot)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
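
/*
 * Override sketch (illustrative only; PROT_ARCH_SPECIAL is an invented
 * name): an architecture that accepts an extra protection bit supplies
 * its own checker and defines the macro so the generic fallback above
 * is skipped, along these lines:
 *
 *	static inline bool arch_validate_prot(unsigned long prot)
 *	{
 *		return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC |
 *				 PROT_SEM | PROT_ARCH_SPECIAL)) == 0;
 *	}
 *	#define arch_validate_prot arch_validate_prot
 */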

/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
   : ((x) & (bit1)) / ((bit1) / (bit2)))
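
/*
 * Worked example (illustrative bit values): with bit1 == 0x4 and
 * bit2 == 0x10 the first branch is taken and the macro expands to
 *	((x) & 0x4) * (0x10 / 0x4)  ==  ((x) & 0x4) * 4
 * which is 0x10 when the 0x4 bit is set in x and 0 otherwise, i.e.
 * exactly (x & 0x4) ? 0x10 : 0, with no branch.  With the bits
 * swapped (bit1 == 0x10, bit2 == 0x4) the second branch divides:
 *	((x) & 0x10) / (0x10 / 0x4)  ==  ((x) & 0x10) / 4
 */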

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot, pkey);
}

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
}
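
/*
 * Combination sketch (how a typical caller uses the two helpers above;
 * the exact surrounding code lives in mm/mmap.c, this is only a rough
 * outline):
 *
 *	vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
 *		   mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 *
 * so, for example, PROT_READ|PROT_WRITE with MAP_LOCKED translates to
 * VM_READ|VM_WRITE|VM_LOCKED plus the defaults above.
 */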

unsigned long vm_commit_limit(void);
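
/*
 * Commit limit sketch: the limit is derived from the sysctls declared
 * above (the real definition is in mm/util.c); roughly, in pages,
 *
 *	if (sysctl_overcommit_kbytes)
 *		limit = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
 *	else
 *		limit = (totalram_pages - hugetlb_total_pages()) *
 *			sysctl_overcommit_ratio / 100;
 *	limit += total_swap_pages;
 *
 * The OVERCOMMIT_NEVER policy compares vm_committed_as against this limit.
 */
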
#endif /* _LINUX_MMAN_H */