Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6: (71 commits)
  SELinux: inode_doinit_with_dentry drop no dentry printk
  SELinux: new permission between tty audit and audit socket
  SELinux: open perm for sock files
  smack: fixes for unlabeled host support
  keys: make procfiles per-user-namespace
  keys: skip keys from another user namespace
  keys: consider user namespace in key_permission
  keys: distinguish per-uid keys in different namespaces
  integrity: ima iint radix_tree_lookup locking fix
  TOMOYO: Do not call tomoyo_realpath_init unless registered.
  integrity: ima scatterlist bug fix
  smack: fix lots of kernel-doc notation
  TOMOYO: Don't create securityfs entries unless registered.
  TOMOYO: Fix exception policy read failure.
  SELinux: convert the avc cache hash list to an hlist
  SELinux: code readability with avc_cache
  SELinux: remove unused av.decided field
  SELinux: more careful use of avd in avc_has_perm_noaudit
  SELinux: remove the unused ae.used
  SELinux: check seqno when updating an avc_node
  ...
Linus Torvalds 2009-03-26 11:03:39 -07:00
Parents: 1646df40bb 703a3cd728
Commit: 8d80ce80e1
63 changed files with 8825 additions and 633 deletions


@ -0,0 +1,61 @@
What: security/ima/policy
Date: May 2008
Contact: Mimi Zohar <zohar@us.ibm.com>
Description:
The Trusted Computing Group (TCG) runtime Integrity
Measurement Architecture (IMA) maintains a list of hash
values of executables and other sensitive system files
loaded into the running system. At runtime, the policy
can be constrained based on LSM-specific data.
Policies are loaded into the securityfs file ima/policy
by opening the file, writing the rules one at a time, and
then closing the file; a short loader sketch follows the
examples below. The new policy takes effect once the
file ima/policy is closed.
rule format: action [condition ...]
action: measure | dont_measure
condition:= base | lsm
base: [[func=] [mask=] [fsmagic=] [uid=]]
lsm: [[subj_user=] [subj_role=] [subj_type=]
[obj_user=] [obj_role=] [obj_type=]]
base: func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION]
mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
fsmagic:= hex value
uid:= decimal value
lsm: are LSM specific
default policy:
# PROC_SUPER_MAGIC
dont_measure fsmagic=0x9fa0
# SYSFS_MAGIC
dont_measure fsmagic=0x62656572
# DEBUGFS_MAGIC
dont_measure fsmagic=0x64626720
# TMPFS_MAGIC
dont_measure fsmagic=0x01021994
# SECURITYFS_MAGIC
dont_measure fsmagic=0x73636673
measure func=BPRM_CHECK
measure func=FILE_MMAP mask=MAY_EXEC
measure func=INODE_PERM mask=MAY_READ uid=0
The default policy measures all executables in bprm_check,
all files mmapped executable in file_mmap, and all files
open for read by root in inode_permission.
Examples of LSM specific definitions:
SELinux:
# SELINUX_MAGIC
dont_measure fsmagic=0xF97CFF8C
dont_measure obj_type=var_log_t
dont_measure obj_type=auditd_log_t
measure subj_user=system_u func=INODE_PERM mask=MAY_READ
measure subj_role=system_r func=INODE_PERM mask=MAY_READ
Smack:
measure subj_user=_ func=INODE_PERM mask=MAY_READ
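
The rule-loading sequence described above (open ima/policy, write the rules one at a time, close the file) can be driven from a small userspace helper. A minimal sketch, assuming securityfs is mounted at /sys/kernel/security; the mount point and the sample rules are illustrative, not part of this ABI file:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical loader: each write(2) carries exactly one rule; the new
 * policy takes effect when the file is closed. */
int main(void)
{
        const char *rules[] = {
                "dont_measure fsmagic=0x9fa0",          /* PROC_SUPER_MAGIC */
                "measure func=BPRM_CHECK",
                "measure func=FILE_MMAP mask=MAY_EXEC",
        };
        unsigned int i;
        int fd = open("/sys/kernel/security/ima/policy", O_WRONLY);

        if (fd < 0) {
                perror("open ima/policy");
                return 1;
        }
        for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
                if (write(fd, rules[i], strlen(rules[i])) < 0)
                        perror("write rule");
        return close(fd) ? 1 : 0;
}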


@ -44,6 +44,7 @@ parameter is applicable:
FB The frame buffer device is enabled.
HW Appropriate hardware is enabled.
IA-64 IA-64 architecture is enabled.
IMA Integrity measurement architecture is enabled.
IOSCHED More than one I/O scheduler is enabled.
IP_PNP IP DHCP, BOOTP, or RARP is enabled.
ISAPNP ISA PnP code is enabled.
@ -902,6 +903,15 @@ and is between 256 and 4096 characters. It is defined in the file
ihash_entries= [KNL]
Set number of hash buckets for inode cache.
ima_audit= [IMA]
Format: { "0" | "1" }
0 -- disable informational integrity auditing messages. (Default)
1 -- enable informational integrity auditing messages.
ima_hash= [IMA]
Format: { "sha1" | "md5" }
default: "sha1"
in2000= [HW,SCSI]
See header of drivers/scsi/in2000.c.


@ -2216,6 +2216,11 @@ M: stefanr@s5r6.in-berlin.de
L: linux1394-devel@lists.sourceforge.net
S: Maintained
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
P: Mimi Zohar
M: zohar@us.ibm.com
S: Supported
IMS TWINTURBO FRAMEBUFFER DRIVER
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
S: Orphan
@ -3844,6 +3849,7 @@ M: jmorris@namei.org
L: linux-kernel@vger.kernel.org
L: linux-security-module@vger.kernel.org (suggested Cc:)
T: git kernel.org:pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
W: http://security.wiki.kernel.org/
S: Supported
SECURITY CONTACT
@ -4285,6 +4291,19 @@ L: tlan-devel@lists.sourceforge.net (subscribers-only)
W: http://sourceforge.net/projects/tlan/
S: Maintained
TOMOYO SECURITY MODULE
P: Kentaro Takeda
M: takedakn@nttdata.co.jp
P: Tetsuo Handa
M: penguin-kernel@I-love.SAKURA.ne.jp
L: linux-kernel@vger.kernel.org (kernel issues)
L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for developers and users in English)
L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
W: http://tomoyo.sourceforge.jp/
T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.2.x/tomoyo-lsm/patches/
S: Maintained
TOSHIBA ACPI EXTRAS DRIVER
P: John Belmonte
M: toshiba_acpi@memebeam.org


@ -429,134 +429,148 @@ out:
#define TPM_DIGEST_SIZE 20
#define TPM_ERROR_SIZE 10
#define TPM_RET_CODE_IDX 6
#define TPM_GET_CAP_RET_SIZE_IDX 10
#define TPM_GET_CAP_RET_UINT32_1_IDX 14
#define TPM_GET_CAP_RET_UINT32_2_IDX 18
#define TPM_GET_CAP_RET_UINT32_3_IDX 22
#define TPM_GET_CAP_RET_UINT32_4_IDX 26
#define TPM_GET_CAP_PERM_DISABLE_IDX 16
#define TPM_GET_CAP_PERM_INACTIVE_IDX 18
#define TPM_GET_CAP_RET_BOOL_1_IDX 14
#define TPM_GET_CAP_TEMP_INACTIVE_IDX 16
#define TPM_CAP_IDX 13
#define TPM_CAP_SUBCAP_IDX 21
enum tpm_capabilities {
TPM_CAP_FLAG = 4,
TPM_CAP_PROP = 5,
TPM_CAP_FLAG = cpu_to_be32(4),
TPM_CAP_PROP = cpu_to_be32(5),
CAP_VERSION_1_1 = cpu_to_be32(0x06),
CAP_VERSION_1_2 = cpu_to_be32(0x1A)
};
enum tpm_sub_capabilities {
TPM_CAP_PROP_PCR = 0x1,
TPM_CAP_PROP_MANUFACTURER = 0x3,
TPM_CAP_FLAG_PERM = 0x8,
TPM_CAP_FLAG_VOL = 0x9,
TPM_CAP_PROP_OWNER = 0x11,
TPM_CAP_PROP_TIS_TIMEOUT = 0x15,
TPM_CAP_PROP_TIS_DURATION = 0x20,
TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
};
/*
* This is a semi generic GetCapability command for use
* with the capability type TPM_CAP_PROP or TPM_CAP_FLAG
* and their associated sub_capabilities.
*/
static const u8 tpm_cap[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 22, /* length */
0, 0, 0, 101, /* TPM_ORD_GetCapability */
0, 0, 0, 0, /* TPM_CAP_<TYPE> */
0, 0, 0, 4, /* TPM_CAP_SUB_<TYPE> size */
0, 0, 1, 0 /* TPM_CAP_SUB_<TYPE> */
};
static ssize_t transmit_cmd(struct tpm_chip *chip, u8 *data, int len,
char *desc)
static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
int len, const char *desc)
{
int err;
len = tpm_transmit(chip, data, len);
len = tpm_transmit(chip,(u8 *) cmd, len);
if (len < 0)
return len;
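/* a reply of exactly TPM_ERROR_SIZE (10) bytes is a bare output header:
 * tag, length and return code, with no payload */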
if (len == TPM_ERROR_SIZE) {
err = be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX)));
err = be32_to_cpu(cmd->header.out.return_code);
dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
return err;
}
return 0;
}
#define TPM_INTERNAL_RESULT_SIZE 200
#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
#define TPM_ORD_GET_CAP cpu_to_be32(101)
static const struct tpm_input_header tpm_getcap_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(22),
.ordinal = TPM_ORD_GET_CAP
};
ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
const char *desc)
{
struct tpm_cmd_t tpm_cmd;
int rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_cmd.header.in = tpm_getcap_header;
if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
tpm_cmd.params.getcap_in.cap = subcap_id;
/*subcap field not necessary */
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
} else {
if (subcap_id == TPM_CAP_FLAG_PERM ||
subcap_id == TPM_CAP_FLAG_VOL)
tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
else
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = subcap_id;
}
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
if (!rc)
*cap = tpm_cmd.params.getcap_out.cap;
return rc;
}
void tpm_gen_interrupt(struct tpm_chip *chip)
{
u8 data[max_t(int, ARRAY_SIZE(tpm_cap), 30)];
struct tpm_cmd_t tpm_cmd;
ssize_t rc;
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_TIS_TIMEOUT;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = transmit_cmd(chip, data, sizeof(data),
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
void tpm_get_timeouts(struct tpm_chip *chip)
{
u8 data[max_t(int, ARRAY_SIZE(tpm_cap), 30)];
struct tpm_cmd_t tpm_cmd;
struct timeout_t *timeout_cap;
struct duration_t *duration_cap;
ssize_t rc;
u32 timeout;
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_TIS_TIMEOUT;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = transmit_cmd(chip, data, sizeof(data),
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
if (rc)
goto duration;
if (be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_SIZE_IDX)))
if (be32_to_cpu(tpm_cmd.header.out.length)
!= 4 * sizeof(u32))
goto duration;
timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
/* Don't overwrite default if value is 0 */
timeout =
be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_1_IDX)));
timeout = be32_to_cpu(timeout_cap->a);
if (timeout)
chip->vendor.timeout_a = usecs_to_jiffies(timeout);
timeout =
be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_2_IDX)));
timeout = be32_to_cpu(timeout_cap->b);
if (timeout)
chip->vendor.timeout_b = usecs_to_jiffies(timeout);
timeout =
be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_3_IDX)));
timeout = be32_to_cpu(timeout_cap->c);
if (timeout)
chip->vendor.timeout_c = usecs_to_jiffies(timeout);
timeout =
be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_4_IDX)));
timeout = be32_to_cpu(timeout_cap->d);
if (timeout)
chip->vendor.timeout_d = usecs_to_jiffies(timeout);
duration:
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_TIS_DURATION;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
rc = transmit_cmd(chip, data, sizeof(data),
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the durations");
if (rc)
return;
if (be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_SIZE_IDX)))
if (be32_to_cpu(tpm_cmd.header.out.return_code)
!= 3 * sizeof(u32))
return;
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu
(*((__be32 *) (data +
TPM_GET_CAP_RET_UINT32_1_IDX))));
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
@ -565,13 +579,9 @@ duration:
chip->vendor.duration[TPM_SHORT] = HZ;
chip->vendor.duration[TPM_MEDIUM] =
usecs_to_jiffies(be32_to_cpu
(*((__be32 *) (data +
TPM_GET_CAP_RET_UINT32_2_IDX))));
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
chip->vendor.duration[TPM_LONG] =
usecs_to_jiffies(be32_to_cpu
(*((__be32 *) (data +
TPM_GET_CAP_RET_UINT32_3_IDX))));
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
@ -587,36 +597,18 @@ void tpm_continue_selftest(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm_continue_selftest);
#define TPM_INTERNAL_RESULT_SIZE 200
ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
char *buf)
{
u8 *data;
cap_t cap;
ssize_t rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_FLAG;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM;
rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attemtping to determine the permanent enabled state");
if (rc) {
kfree(data);
rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent enabled state");
if (rc)
return 0;
}
rc = sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_DISABLE_IDX]);
kfree(data);
rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_enabled);
@ -624,31 +616,15 @@ EXPORT_SYMBOL_GPL(tpm_show_enabled);
ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
char *buf)
{
u8 *data;
cap_t cap;
ssize_t rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_FLAG;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_PERM;
rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attemtping to determine the permanent active state");
if (rc) {
kfree(data);
rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent active state");
if (rc)
return 0;
}
rc = sprintf(buf, "%d\n", !data[TPM_GET_CAP_PERM_INACTIVE_IDX]);
kfree(data);
rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_active);
@ -656,31 +632,15 @@ EXPORT_SYMBOL_GPL(tpm_show_active);
ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
char *buf)
{
u8 *data;
cap_t cap;
ssize_t rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_OWNER;
rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the owner state");
if (rc) {
kfree(data);
rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
"attempting to determine the owner state");
if (rc)
return 0;
}
rc = sprintf(buf, "%d\n", data[TPM_GET_CAP_RET_BOOL_1_IDX]);
kfree(data);
rc = sprintf(buf, "%d\n", cap.owned);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_owned);
@ -688,116 +648,180 @@ EXPORT_SYMBOL_GPL(tpm_show_owned);
ssize_t tpm_show_temp_deactivated(struct device * dev,
struct device_attribute * attr, char *buf)
{
u8 *data;
cap_t cap;
ssize_t rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_FLAG;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_FLAG_VOL;
rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the temporary state");
if (rc) {
kfree(data);
rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
"attempting to determine the temporary state");
if (rc)
return 0;
}
rc = sprintf(buf, "%d\n", data[TPM_GET_CAP_TEMP_INACTIVE_IDX]);
kfree(data);
rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
static const u8 pcrread[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 14, /* length */
0, 0, 0, 21, /* TPM_ORD_PcrRead */
0, 0, 0, 0 /* PCR index */
/*
* tpm_chip_find_get - return tpm_chip for given chip number
*/
static struct tpm_chip *tpm_chip_find_get(int chip_num)
{
struct tpm_chip *pos, *chip = NULL;
rcu_read_lock();
list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
continue;
if (try_module_get(pos->dev->driver->owner)) {
chip = pos;
break;
}
}
rcu_read_unlock();
return chip;
}
#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
#define READ_PCR_RESULT_SIZE 30
static struct tpm_input_header pcrread_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(14),
.ordinal = TPM_ORDINAL_PCRREAD
};
int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
struct tpm_cmd_t cmd;
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
BUILD_BUG_ON(cmd.header.in.length > READ_PCR_RESULT_SIZE);
rc = transmit_cmd(chip, &cmd, cmd.header.in.length,
"attempting to read a pcr value");
if (rc == 0)
memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
TPM_DIGEST_SIZE);
return rc;
}
/**
* tpm_pcr_read - read a pcr value
* @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to retrieve
* @res_buf: TPM_PCR value
* size of res_buf is 20 bytes (or NULL if you don't care)
*
* The TPM driver should be built in, but in case it is not, protect
* against the chip disappearing by incrementing the module usage count.
*/
int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
{
struct tpm_chip *chip;
int rc;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
rc = __tpm_pcr_read(chip, pcr_idx, res_buf);
module_put(chip->dev->driver->owner);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
/**
* tpm_pcr_extend - extend pcr value with hash
* @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to extend
* @hash: hash value used to extend pcr value
*
* The TPM driver should be built in, but in case it is not, protect
* against the chip disappearing by incrementing the module usage count.
*/
#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
#define EXTEND_PCR_SIZE 34
static struct tpm_input_header pcrextend_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(34),
.ordinal = TPM_ORD_PCR_EXTEND
};
int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
{
struct tpm_cmd_t cmd;
int rc;
struct tpm_chip *chip;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
cmd.header.in = pcrextend_header;
BUILD_BUG_ON(be32_to_cpu(cmd.header.in.length) > EXTEND_PCR_SIZE);
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
rc = transmit_cmd(chip, &cmd, cmd.header.in.length,
"attempting extend a PCR value");
module_put(chip->dev->driver->owner);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
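
tpm_pcr_read() and tpm_pcr_extend() are the new kernel-internal entry points for other subsystems (IMA is the intended consumer). A hedged usage sketch; the function name, PCR index and digest handling below are illustrative, not taken from this patch:

#include <linux/tpm.h>
#include <linux/types.h>

#define DEMO_PCR_IDX            10      /* illustrative; IMA uses CONFIG_IMA_MEASURE_PCR_IDX */
#define DEMO_DIGEST_SIZE        20      /* SHA1 digest size, matches TPM_DIGEST_SIZE */

/* Hypothetical caller: read the current PCR value, then extend the same
 * PCR with a SHA1 digest.  TPM_ANY_NUM picks whichever chip is registered. */
static int demo_measure(const u8 *sha1_digest)
{
        u8 pcr_value[DEMO_DIGEST_SIZE];
        int rc;

        rc = tpm_pcr_read(TPM_ANY_NUM, DEMO_PCR_IDX, pcr_value);
        if (rc)
                return rc;      /* e.g. -ENODEV when no TPM chip is available */

        return tpm_pcr_extend(TPM_ANY_NUM, DEMO_PCR_IDX, sha1_digest);
}
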
ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 *data;
cap_t cap;
u8 digest[TPM_DIGEST_SIZE];
ssize_t rc;
int i, j, num_pcrs;
__be32 index;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_PCR;
rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
"attempting to determine the number of PCRS");
if (rc) {
kfree(data);
if (rc)
return 0;
}
num_pcrs = be32_to_cpu(*((__be32 *) (data + 14)));
num_pcrs = be32_to_cpu(cap.num_pcrs);
for (i = 0; i < num_pcrs; i++) {
memcpy(data, pcrread, sizeof(pcrread));
index = cpu_to_be32(i);
memcpy(data + 10, &index, 4);
rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to read a PCR");
rc = __tpm_pcr_read(chip, i, digest);
if (rc)
goto out;
break;
str += sprintf(str, "PCR-%02d: ", i);
for (j = 0; j < TPM_DIGEST_SIZE; j++)
str += sprintf(str, "%02X ", *(data + 10 + j));
str += sprintf(str, "%02X ", digest[j]);
str += sprintf(str, "\n");
}
out:
kfree(data);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_pcrs);
#define READ_PUBEK_RESULT_SIZE 314
static const u8 readpubek[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 30, /* length */
0, 0, 0, 124, /* TPM_ORD_ReadPubek */
#define TPM_ORD_READPUBEK cpu_to_be32(124)
struct tpm_input_header tpm_readpubek_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(30),
.ordinal = TPM_ORD_READPUBEK
};
ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 *data;
struct tpm_cmd_t tpm_cmd;
ssize_t err;
int i, rc;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
data = kzalloc(READ_PUBEK_RESULT_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy(data, readpubek, sizeof(readpubek));
err = transmit_cmd(chip, data, READ_PUBEK_RESULT_SIZE,
tpm_cmd.header.in = tpm_readpubek_header;
err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
"attempting to read the PUBEK");
if (err)
goto out;
@ -812,7 +836,7 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
256 byte modulus
ignore checksum 20 bytes
*/
data = tpm_cmd.params.readpubek_out_buffer;
str +=
sprintf(str,
"Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
@ -832,65 +856,33 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
}
out:
rc = str - buf;
kfree(data);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_pubek);
#define CAP_VERSION_1_1 6
#define CAP_VERSION_1_2 0x1A
#define CAP_VERSION_IDX 13
static const u8 cap_version[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 18, /* length */
0, 0, 0, 101, /* TPM_ORD_GetCapability */
0, 0, 0, 0,
0, 0, 0, 0
};
ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 *data;
cap_t cap;
ssize_t rc;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER;
rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
if (rc) {
kfree(data);
return 0;
}
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_1_IDX))));
memcpy(data, cap_version, sizeof(cap_version));
data[CAP_VERSION_IDX] = CAP_VERSION_1_1;
rc = transmit_cmd(chip, data, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the 1.1 version");
if (rc)
goto out;
return 0;
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version");
if (rc)
return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
(int) data[14], (int) data[15], (int) data[16],
(int) data[17]);
out:
kfree(data);
cap.tpm_version.Major, cap.tpm_version.Minor,
cap.tpm_version.revMajor, cap.tpm_version.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps);
@ -898,51 +890,25 @@ EXPORT_SYMBOL_GPL(tpm_show_caps);
ssize_t tpm_show_caps_1_2(struct device * dev,
struct device_attribute * attr, char *buf)
{
u8 *data;
ssize_t len;
cap_t cap;
ssize_t rc;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
data = kzalloc(TPM_INTERNAL_RESULT_SIZE, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy(data, tpm_cap, sizeof(tpm_cap));
data[TPM_CAP_IDX] = TPM_CAP_PROP;
data[TPM_CAP_SUBCAP_IDX] = TPM_CAP_PROP_MANUFACTURER;
len = tpm_transmit(chip, data, TPM_INTERNAL_RESULT_SIZE);
if (len <= TPM_ERROR_SIZE) {
dev_dbg(chip->dev, "A TPM error (%d) occurred "
"attempting to determine the manufacturer\n",
be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX))));
kfree(data);
rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
if (rc)
return 0;
}
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(*((__be32 *) (data + TPM_GET_CAP_RET_UINT32_1_IDX))));
memcpy(data, cap_version, sizeof(cap_version));
data[CAP_VERSION_IDX] = CAP_VERSION_1_2;
len = tpm_transmit(chip, data, TPM_INTERNAL_RESULT_SIZE);
if (len <= TPM_ERROR_SIZE) {
dev_err(chip->dev, "A TPM error (%d) occurred "
"attempting to determine the 1.2 version\n",
be32_to_cpu(*((__be32 *) (data + TPM_RET_CODE_IDX))));
goto out;
}
be32_to_cpu(cap.manufacturer_id));
rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
if (rc)
return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
(int) data[16], (int) data[17], (int) data[18],
(int) data[19]);
out:
kfree(data);
cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
cap.tpm_version_1_2.revMajor,
cap.tpm_version_1_2.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);


@ -26,6 +26,7 @@
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/tpm.h>
enum tpm_timeout {
TPM_TIMEOUT = 5, /* msecs */
@ -123,6 +124,147 @@ static inline void tpm_write_index(int base, int index, int value)
outb(index, base);
outb(value & 0xFF, base+1);
}
struct tpm_input_header {
__be16 tag;
__be32 length;
__be32 ordinal;
}__attribute__((packed));
struct tpm_output_header {
__be16 tag;
__be32 length;
__be32 return_code;
}__attribute__((packed));
struct stclear_flags_t {
__be16 tag;
u8 deactivated;
u8 disableForceClear;
u8 physicalPresence;
u8 physicalPresenceLock;
u8 bGlobalLock;
}__attribute__((packed));
struct tpm_version_t {
u8 Major;
u8 Minor;
u8 revMajor;
u8 revMinor;
}__attribute__((packed));
struct tpm_version_1_2_t {
__be16 tag;
u8 Major;
u8 Minor;
u8 revMajor;
u8 revMinor;
}__attribute__((packed));
struct timeout_t {
__be32 a;
__be32 b;
__be32 c;
__be32 d;
}__attribute__((packed));
struct duration_t {
__be32 tpm_short;
__be32 tpm_medium;
__be32 tpm_long;
}__attribute__((packed));
struct permanent_flags_t {
__be16 tag;
u8 disable;
u8 ownership;
u8 deactivated;
u8 readPubek;
u8 disableOwnerClear;
u8 allowMaintenance;
u8 physicalPresenceLifetimeLock;
u8 physicalPresenceHWEnable;
u8 physicalPresenceCMDEnable;
u8 CEKPUsed;
u8 TPMpost;
u8 TPMpostLock;
u8 FIPS;
u8 operator;
u8 enableRevokeEK;
u8 nvLocked;
u8 readSRKPub;
u8 tpmEstablished;
u8 maintenanceDone;
u8 disableFullDALogicInfo;
}__attribute__((packed));
typedef union {
struct permanent_flags_t perm_flags;
struct stclear_flags_t stclear_flags;
bool owned;
__be32 num_pcrs;
struct tpm_version_t tpm_version;
struct tpm_version_1_2_t tpm_version_1_2;
__be32 manufacturer_id;
struct timeout_t timeout;
struct duration_t duration;
} cap_t;
struct tpm_getcap_params_in {
__be32 cap;
__be32 subcap_size;
__be32 subcap;
}__attribute__((packed));
struct tpm_getcap_params_out {
__be32 cap_size;
cap_t cap;
}__attribute__((packed));
struct tpm_readpubek_params_out {
u8 algorithm[4];
u8 encscheme[2];
u8 sigscheme[2];
u8 parameters[12]; /*assuming RSA*/
__be32 keysize;
u8 modulus[256];
u8 checksum[20];
}__attribute__((packed));
typedef union {
struct tpm_input_header in;
struct tpm_output_header out;
} tpm_cmd_header;
#define TPM_DIGEST_SIZE 20
struct tpm_pcrread_out {
u8 pcr_result[TPM_DIGEST_SIZE];
}__attribute__((packed));
struct tpm_pcrread_in {
__be32 pcr_idx;
}__attribute__((packed));
struct tpm_pcrextend_in {
__be32 pcr_idx;
u8 hash[TPM_DIGEST_SIZE];
}__attribute__((packed));
typedef union {
struct tpm_getcap_params_out getcap_out;
struct tpm_readpubek_params_out readpubek_out;
u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
struct tpm_getcap_params_in getcap_in;
struct tpm_pcrread_in pcrread_in;
struct tpm_pcrread_out pcrread_out;
struct tpm_pcrextend_in pcrextend_in;
} tpm_cmd_params;
struct tpm_cmd_t {
tpm_cmd_header header;
tpm_cmd_params params;
}__attribute__((packed));
ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
extern void tpm_get_timeouts(struct tpm_chip *);
extern void tpm_gen_interrupt(struct tpm_chip *);


@ -1402,6 +1402,7 @@ int compat_do_execve(char * filename,
retval = mutex_lock_interruptible(&current->cred_exec_mutex);
if (retval < 0)
goto out_free;
current->in_execve = 1;
retval = -ENOMEM;
bprm->cred = prepare_exec_creds();
@ -1454,6 +1455,7 @@ int compat_do_execve(char * filename,
goto out;
/* execve succeeded */
current->in_execve = 0;
mutex_unlock(&current->cred_exec_mutex);
acct_update_integrals(current);
free_bprm(bprm);
@ -1470,6 +1472,7 @@ out_file:
}
out_unlock:
current->in_execve = 0;
mutex_unlock(&current->cred_exec_mutex);
out_free:


@ -45,6 +45,7 @@
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
@ -127,6 +128,9 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
MAY_READ | MAY_EXEC | MAY_OPEN);
if (error)
goto exit;
error = ima_path_check(&nd.path, MAY_READ | MAY_EXEC | MAY_OPEN);
if (error)
goto exit;
file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
error = PTR_ERR(file);
@ -672,6 +676,9 @@ struct file *open_exec(const char *name)
goto out_path_put;
err = inode_permission(nd.path.dentry->d_inode, MAY_EXEC | MAY_OPEN);
if (err)
goto out_path_put;
err = ima_path_check(&nd.path, MAY_EXEC | MAY_OPEN);
if (err)
goto out_path_put;
@ -1182,6 +1189,9 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
struct linux_binfmt *fmt;
retval = security_bprm_check(bprm);
if (retval)
return retval;
retval = ima_bprm_check(bprm);
if (retval)
return retval;
@ -1284,6 +1294,7 @@ int do_execve(char * filename,
retval = mutex_lock_interruptible(&current->cred_exec_mutex);
if (retval < 0)
goto out_free;
current->in_execve = 1;
retval = -ENOMEM;
bprm->cred = prepare_exec_creds();
@ -1337,6 +1348,7 @@ int do_execve(char * filename,
goto out;
/* execve succeeded */
current->in_execve = 0;
mutex_unlock(&current->cred_exec_mutex);
acct_update_integrals(current);
free_bprm(bprm);
@ -1355,6 +1367,7 @@ out_file:
}
out_unlock:
current->in_execve = 0;
mutex_unlock(&current->cred_exec_mutex);
out_free:


@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
@ -279,6 +280,7 @@ void __fput(struct file *file)
if (file->f_op && file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
ima_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
cdev_put(inode->i_cdev);
fops_put(file->f_op);


@ -17,6 +17,7 @@
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
@ -147,13 +148,13 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_cdev = NULL;
inode->i_rdev = 0;
inode->dirtied_when = 0;
if (security_inode_alloc(inode)) {
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
else
kmem_cache_free(inode_cachep, (inode));
return NULL;
}
if (security_inode_alloc(inode))
goto out_free_inode;
/* allocate and initialize an i_integrity */
if (ima_inode_alloc(inode))
goto out_free_security;
spin_lock_init(&inode->i_lock);
lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
@ -189,6 +190,15 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_mapping = mapping;
return inode;
out_free_security:
security_inode_free(inode);
out_free_inode:
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
else
kmem_cache_free(inode_cachep, (inode));
return NULL;
}
EXPORT_SYMBOL(inode_init_always);


@ -24,6 +24,7 @@
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
@ -850,6 +851,8 @@ static int __link_path_walk(const char *name, struct nameidata *nd)
if (err == -EAGAIN)
err = inode_permission(nd->path.dentry->d_inode,
MAY_EXEC);
if (!err)
err = ima_path_check(&nd->path, MAY_EXEC);
if (err)
break;
@ -1509,6 +1512,11 @@ int may_open(struct path *path, int acc_mode, int flag)
error = inode_permission(inode, acc_mode);
if (error)
return error;
error = ima_path_check(path,
acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
if (error)
return error;
/*
* An append-only file must be opened in append mode for writing.
*/


@ -36,7 +36,8 @@
* 1500 - 1599 kernel LSPP events
* 1600 - 1699 kernel crypto events
* 1700 - 1799 kernel anomaly records
* 1800 - 1999 future kernel use (maybe integrity labels and related events)
* 1800 - 1899 kernel integrity events
* 1900 - 1999 future kernel use
* 2000 is for otherwise unclassified kernel audit messages (legacy)
* 2001 - 2099 unused (kernel)
* 2100 - 2199 user space anomaly records
@ -125,6 +126,12 @@
#define AUDIT_LAST_KERN_ANOM_MSG 1799
#define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */
#define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */
#define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */
#define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */
#define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */
#define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */
#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */

include/linux/ima.h (new file, 61 lines)

@ -0,0 +1,61 @@
/*
* Copyright (C) 2008 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*/
#ifndef _LINUX_IMA_H
#define _LINUX_IMA_H
#include <linux/fs.h>
struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_inode_alloc(struct inode *inode);
extern void ima_inode_free(struct inode *inode);
extern int ima_path_check(struct path *path, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern void ima_shm_check(struct file *file);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
{
return 0;
}
static inline int ima_inode_alloc(struct inode *inode)
{
return 0;
}
static inline void ima_inode_free(struct inode *inode)
{
return;
}
static inline int ima_path_check(struct path *path, int mask)
{
return 0;
}
static inline void ima_file_free(struct file *file)
{
return;
}
static inline int ima_file_mmap(struct file *file, unsigned long prot)
{
return 0;
}
static inline void ima_shm_check(struct file *file)
{
return;
}
#endif /* CONFIG_IMA */
#endif /* _LINUX_IMA_H */


@ -1175,6 +1175,8 @@ struct task_struct {
/* ??? */
unsigned int personality;
unsigned did_exec:1;
unsigned in_execve:1; /* Tell the LSMs that the process is doing an
* execve */
pid_t pid;
pid_t tgid;

include/linux/tpm.h (new file, 35 lines)

@ -0,0 +1,35 @@
/*
* Copyright (C) 2004,2007,2008 IBM Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
* Dave Safford <safford@watson.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
* Debora Velarde <dvelarde@us.ibm.com>
*
* Maintained by: <tpmdd_devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
* Specifications at www.trustedcomputinggroup.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
*/
#ifndef __LINUX_TPM_H__
#define __LINUX_TPM_H__
/*
* Chip num is this value or a valid tpm idx
*/
#define TPM_ANY_NUM 0xFFFF
#if defined(CONFIG_TCG_TPM)
extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
#endif
#endif


@ -39,6 +39,7 @@
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/ima.h>
#include <asm/uaccess.h>
@ -383,6 +384,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
error = PTR_ERR(file);
if (IS_ERR(file))
goto no_file;
ima_shm_check(file);
id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
if (id < 0) {
@ -887,6 +889,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
if (!file)
goto out_free;
ima_shm_check(file);
file->private_data = sfd;
file->f_mapping = shp->shm_file->f_mapping;


@ -20,7 +20,7 @@
struct user_namespace init_user_ns = {
.kref = {
.refcount = ATOMIC_INIT(1),
.refcount = ATOMIC_INIT(2),
},
.creator = &root_user,
};


@ -20,6 +20,7 @@
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/module.h>
@ -1047,6 +1048,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
}
error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
if (error)
return error;
error = ima_file_mmap(file, prot);
if (error)
return error;


@ -28,6 +28,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/ima.h>
static struct vfsmount *shm_mnt;
@ -2665,6 +2666,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
if (IS_ERR(file))
return PTR_ERR(file);
ima_shm_check(file);
if (vma->vm_file)
fput(vma->vm_file);
vma->vm_file = file;


@ -55,7 +55,8 @@ config SECURITYFS
bool "Enable the securityfs filesystem"
help
This will build the securityfs filesystem. It is currently used by
the TPM bios character driver. It is not used by SELinux or SMACK.
the TPM bios character driver and IMA, an integrity provider. It is
not used by SELinux or SMACK.
If you are unsure how to answer this question, answer N.
@ -134,6 +135,9 @@ config SECURITY_DEFAULT_MMAP_MIN_ADDR
source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
source security/integrity/ima/Kconfig
endmenu


@ -5,6 +5,7 @@
obj-$(CONFIG_KEYS) += keys/
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
subdir-$(CONFIG_SECURITY_SMACK) += smack
subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
# always enable default capabilities
obj-y += commoncap.o
@ -15,5 +16,10 @@ obj-$(CONFIG_SECURITYFS) += inode.o
# Must precede capability.o in order to stack properly.
obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
subdir-$(CONFIG_IMA) += integrity/ima
obj-$(CONFIG_IMA) += integrity/ima/built-in.o


@ -202,12 +202,11 @@ static int create_by_name(const char *name, mode_t mode,
* This function returns a pointer to a dentry if it succeeds. This
* pointer must be passed to the securityfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
* you are responsible here). If an error occurs, %NULL is returned.
* you are responsible here). If an error occurs, the function will return
* the error value (via ERR_PTR).
*
* If securityfs is not enabled in the kernel, the value %-ENODEV is
* returned. It is not wise to check for this value, but rather, check for
* %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
* code.
* returned.
*/
struct dentry *securityfs_create_file(const char *name, mode_t mode,
struct dentry *parent, void *data,
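
With this change securityfs_create_file() reports failure via ERR_PTR() instead of NULL, so callers are expected to test the result with IS_ERR(). A hedged sketch of the updated calling convention; the entry name and the file operations are placeholders:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>

static const struct file_operations demo_fops = { };   /* placeholder fops */
static struct dentry *demo_dentry;

static int __init demo_securityfs_init(void)
{
        demo_dentry = securityfs_create_file("demo", S_IRUSR | S_IRGRP,
                                             NULL, NULL, &demo_fops);
        if (IS_ERR(demo_dentry))        /* also covers -ENODEV when securityfs is off */
                return PTR_ERR(demo_dentry);
        return 0;
}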


@ -0,0 +1,55 @@
# IBM Integrity Measurement Architecture
#
config IMA
bool "Integrity Measurement Architecture(IMA)"
depends on ACPI
select SECURITYFS
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
select CRYPTO_SHA1
select TCG_TPM
select TCG_TIS
help
The Trusted Computing Group(TCG) runtime Integrity
Measurement Architecture(IMA) maintains a list of hash
values of executables and other sensitive system files,
as they are read or executed. If an attacker manages
to change the contents of an important system file
being measured, we can tell.
If your system has a TPM chip, then IMA also maintains
an aggregate integrity value over this list inside the
TPM hardware, so that the TPM can prove to a third party
whether or not critical system files have been modified.
Read <http://www.usenix.org/events/sec04/tech/sailer.html>
to learn more about IMA.
If unsure, say N.
config IMA_MEASURE_PCR_IDX
int
depends on IMA
range 8 14
default 10
help
IMA_MEASURE_PCR_IDX determines the TPM PCR register index
that IMA uses to maintain the integrity aggregate of the
measurement list. If unsure, use the default 10.
config IMA_AUDIT
bool
depends on IMA
default y
help
This option adds a kernel parameter 'ima_audit', which
allows informational auditing messages to be enabled
at boot. If this option is selected, informational integrity
auditing messages can be enabled with 'ima_audit=1' on
the kernel command line.
config IMA_LSM_RULES
bool
depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
default y
help
Disabling this option will disregard LSM based policy rules.


@ -0,0 +1,9 @@
#
# Makefile for building Trusted Computing Group's(TCG) runtime Integrity
# Measurement Architecture(IMA).
#
obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
ima_policy.o ima_iint.o ima_audit.o


@ -0,0 +1,166 @@
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
* Authors:
* Reiner Sailer <sailer@watson.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* File: ima.h
* internal Integrity Measurement Architecture (IMA) definitions
*/
#ifndef __LINUX_IMA_H
#define __LINUX_IMA_H
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/security.h>
#include <linux/hash.h>
#include <linux/tpm.h>
#include <linux/audit.h>
enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
/* digest size for IMA, fits SHA1 or MD5 */
#define IMA_DIGEST_SIZE 20
#define IMA_EVENT_NAME_LEN_MAX 255
#define IMA_HASH_BITS 9
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
/* set during initialization */
extern int ima_initialized;
extern int ima_used_chip;
extern char *ima_hash;
/* IMA inode template definition */
struct ima_template_data {
u8 digest[IMA_DIGEST_SIZE]; /* sha1/md5 measurement hash */
char file_name[IMA_EVENT_NAME_LEN_MAX + 1]; /* name + \0 */
};
struct ima_template_entry {
u8 digest[IMA_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
const char *template_name;
int template_len;
struct ima_template_data template;
};
struct ima_queue_entry {
struct hlist_node hnext; /* place in hash collision list */
struct list_head later; /* place in ima_measurements list */
struct ima_template_entry *entry;
};
extern struct list_head ima_measurements; /* list of all measurements */
/* declarations */
void integrity_audit_msg(int audit_msgno, struct inode *inode,
const unsigned char *fname, const char *op,
const char *cause, int result, int info);
/* Internal IMA function definitions */
void ima_iintcache_init(void);
int ima_init(void);
void ima_cleanup(void);
int ima_fs_init(void);
void ima_fs_cleanup(void);
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
const char *op, struct inode *inode);
int ima_calc_hash(struct file *file, char *digest);
int ima_calc_template_hash(int template_len, void *template, char *digest);
int ima_calc_boot_aggregate(char *digest);
void ima_add_violation(struct inode *inode, const unsigned char *filename,
const char *op, const char *cause);
/*
* used to protect h_table and sha_table
*/
extern spinlock_t ima_queue_lock;
struct ima_h_table {
atomic_long_t len; /* number of stored measurements in the list */
atomic_long_t violations;
struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
};
extern struct ima_h_table ima_htable;
static inline unsigned long ima_hash_key(u8 *digest)
{
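/* only the first byte of the digest selects one of the hash table buckets */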
return hash_long(*digest, IMA_HASH_BITS);
}
/* iint cache flags */
#define IMA_MEASURED 1
#define IMA_IINT_DUMP_STACK 512
/* integrity data associated with an inode */
struct ima_iint_cache {
u64 version; /* track inode changes */
unsigned long flags;
u8 digest[IMA_DIGEST_SIZE];
struct mutex mutex; /* protects: version, flags, digest */
long readcount; /* measured files readcount */
long writecount; /* measured files writecount */
long opencount; /* opens reference count */
struct kref refcount; /* ima_iint_cache reference count */
struct rcu_head rcu;
};
/* LIM API function definitions */
int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode,
int mask, int function);
int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file);
void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
const unsigned char *filename);
int ima_store_template(struct ima_template_entry *entry, int violation,
struct inode *inode);
void ima_template_show(struct seq_file *m, void *e,
enum ima_show_type show);
/* radix tree calls to lookup, insert, delete
* integrity data associated with an inode.
*/
struct ima_iint_cache *ima_iint_insert(struct inode *inode);
struct ima_iint_cache *ima_iint_find_get(struct inode *inode);
struct ima_iint_cache *ima_iint_find_insert_get(struct inode *inode);
void ima_iint_delete(struct inode *inode);
void iint_free(struct kref *kref);
void iint_rcu_free(struct rcu_head *rcu);
/* IMA policy related functions */
enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK };
int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
void ima_init_policy(void);
void ima_update_policy(void);
int ima_parse_add_rule(char *);
void ima_delete_rules(void);
/* LSM based policy rules require audit */
#ifdef CONFIG_IMA_LSM_RULES
#define security_filter_rule_init security_audit_rule_init
#define security_filter_rule_match security_audit_rule_match
#else
static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr,
void **lsmrule)
{
return -EINVAL;
}
static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
void *lsmrule,
struct audit_context *actx)
{
return -EINVAL;
}
#endif /* CONFIG_IMA_LSM_RULES */
#endif
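
Taken together, the declarations above describe the per-inode measurement flow: look up or create the iint, then decide, collect and store the measurement under iint->mutex, and drop the reference when done. A hedged sketch of that sequence, roughly what a hook such as ima_path_check is expected to do (ima_main.c is not part of this excerpt, so details may differ):

#include "ima.h"

/* Sketch only: error handling and the open/read/write counters are omitted.
 * Assumption: a NULL return from ima_iint_find_insert_get() means no iint
 * could be allocated. */
static void demo_measure_file(struct file *file, const unsigned char *filename,
                              int mask, int function)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
        int rc;

        iint = ima_iint_find_insert_get(inode);
        if (!iint)
                return;

        mutex_lock(&iint->mutex);
        rc = ima_must_measure(iint, inode, mask, function);
        if (rc != 0)                    /* already measured, or policy says skip */
                goto out;
        rc = ima_collect_measurement(iint, file);
        if (!rc)
                ima_store_measurement(iint, file, filename);
out:
        mutex_unlock(&iint->mutex);
        kref_put(&iint->refcount, iint_free);
}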


@ -0,0 +1,190 @@
/*
* Copyright (C) 2008 IBM Corporation
*
* Author: Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* File: ima_api.c
* Implements must_measure, collect_measurement, store_measurement,
* and store_template.
*/
#include <linux/module.h>
#include "ima.h"
static const char *IMA_TEMPLATE_NAME = "ima";
/*
* ima_store_template - store ima template measurements
*
* Calculate the hash of a template entry, add the template entry
* to an ordered list of measurement entries maintained inside the kernel,
* and also update the aggregate integrity value (maintained inside the
* configured TPM PCR) over the hashes of the current list of measurement
* entries.
*
* Applications retrieve the current kernel-held measurement list through
* the securityfs entries in /sys/kernel/security/ima. The signed aggregate
* TPM PCR (called quote) can be retrieved using a TPM user space library
* and is used to validate the measurement list.
*
* Returns 0 on success, error code otherwise
*/
int ima_store_template(struct ima_template_entry *entry,
int violation, struct inode *inode)
{
const char *op = "add_template_measure";
const char *audit_cause = "hashing_error";
int result;
memset(entry->digest, 0, sizeof(entry->digest));
entry->template_name = IMA_TEMPLATE_NAME;
entry->template_len = sizeof(entry->template);
if (!violation) {
result = ima_calc_template_hash(entry->template_len,
&entry->template,
entry->digest);
if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
entry->template_name, op,
audit_cause, result, 0);
return result;
}
}
result = ima_add_template_entry(entry, violation, op, inode);
return result;
}
/*
* ima_add_violation - add violation to measurement list.
*
* Violations are flagged in the measurement list with zero hash values.
* By extending the PCR with 0xFF's instead of with zeroes, the PCR
* value is invalidated.
*/
void ima_add_violation(struct inode *inode, const unsigned char *filename,
const char *op, const char *cause)
{
struct ima_template_entry *entry;
int violation = 1;
int result;
/* can overflow, only indicator */
atomic_long_inc(&ima_htable.violations);
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
result = -ENOMEM;
goto err_out;
}
memset(&entry->template, 0, sizeof(entry->template));
strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
result = ima_store_template(entry, violation, inode);
if (result < 0)
kfree(entry);
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, cause, result, 0);
}
/**
* ima_must_measure - measure decision based on policy.
* @inode: pointer to inode to measure
* @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
* @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP)
*
* The policy is defined in terms of keypairs:
* subj=, obj=, type=, func=, mask=, fsmagic=
* subj,obj, and type: are LSM specific.
* func: PATH_CHECK | BPRM_CHECK | FILE_MMAP
* mask: contains the permission mask
* fsmagic: hex value
*
* Must be called with iint->mutex held.
*
* Return 0 to measure. Return 1 if already measured.
* For matching a DONT_MEASURE policy, no policy, or other
* error, return an error code.
*/
int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode,
int mask, int function)
{
int must_measure;
if (iint->flags & IMA_MEASURED)
return 1;
must_measure = ima_match_policy(inode, function, mask);
return must_measure ? 0 : -EACCES;
}
/*
* ima_collect_measurement - collect file measurement
*
* Calculate the file hash, if it doesn't already exist,
* storing the measurement and i_version in the iint.
*
* Must be called with iint->mutex held.
*
* Return 0 on success, error code otherwise
*/
int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file)
{
int result = -EEXIST;
if (!(iint->flags & IMA_MEASURED)) {
u64 i_version = file->f_dentry->d_inode->i_version;
memset(iint->digest, 0, IMA_DIGEST_SIZE);
result = ima_calc_hash(file, iint->digest);
if (!result)
iint->version = i_version;
}
return result;
}
/*
* ima_store_measurement - store file measurement
*
* Create an "ima" template and then store the template by calling
* ima_store_template.
*
* We only get here if the inode has not already been measured,
* but the measurement could already exist:
* - multiple copies of the same file on either the same or
* different filesystems.
* - the inode was previously flushed as well as the iint info,
* containing the hashing info.
*
* Must be called with iint->mutex held.
*/
void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
const unsigned char *filename)
{
const char *op = "add_template_measure";
const char *audit_cause = "ENOMEM";
int result = -ENOMEM;
struct inode *inode = file->f_dentry->d_inode;
struct ima_template_entry *entry;
int violation = 0;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, 0);
return;
}
memset(&entry->template, 0, sizeof(entry->template));
memcpy(entry->template.digest, iint->digest, IMA_DIGEST_SIZE);
strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
result = ima_store_template(entry, violation, inode);
if (!result)
iint->flags |= IMA_MEASURED;
else
kfree(entry);
}


@ -0,0 +1,81 @@
/*
* Copyright (C) 2008 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
* File: integrity_audit.c
* Audit calls for the integrity subsystem
*/
#include <linux/fs.h>
#include <linux/audit.h>
#include "ima.h"
static int ima_audit;
#ifdef CONFIG_IMA_AUDIT
/* ima_audit_setup - enable informational auditing messages */
static int __init ima_audit_setup(char *str)
{
unsigned long audit;
int rc, result = 0;
char *op = "ima_audit";
char *cause;
rc = strict_strtoul(str, 0, &audit);
if (rc || audit > 1)
result = 1;
else
ima_audit = audit;
cause = ima_audit ? "enabled" : "not_enabled";
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
op, cause, result, 0);
return 1;
}
__setup("ima_audit=", ima_audit_setup);
#endif
void integrity_audit_msg(int audit_msgno, struct inode *inode,
const unsigned char *fname, const char *op,
const char *cause, int result, int audit_info)
{
struct audit_buffer *ab;
if (!ima_audit && audit_info == 1) /* Skip informational messages */
return;
ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
audit_log_format(ab, "integrity: pid=%d uid=%u auid=%u ses=%u",
current->pid, current->cred->uid,
audit_get_loginuid(current),
audit_get_sessionid(current));
audit_log_task_context(ab);
switch (audit_msgno) {
case AUDIT_INTEGRITY_DATA:
case AUDIT_INTEGRITY_METADATA:
case AUDIT_INTEGRITY_PCR:
case AUDIT_INTEGRITY_STATUS:
audit_log_format(ab, " op=%s cause=%s", op, cause);
break;
case AUDIT_INTEGRITY_HASH:
audit_log_format(ab, " op=%s hash=%s", op, cause);
break;
default:
audit_log_format(ab, " op=%s", op);
}
audit_log_format(ab, " comm=");
audit_log_untrustedstring(ab, current->comm);
if (fname) {
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, fname);
}
if (inode)
audit_log_format(ab, " dev=%s ino=%lu",
inode->i_sb->s_id, inode->i_ino);
audit_log_format(ab, " res=%d", !result ? 0 : 1);
audit_log_end(ab);
}


@ -0,0 +1,140 @@
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
* Authors:
* Mimi Zohar <zohar@us.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
* File: ima_crypto.c
* Calculates md5/sha1 file hash, template hash, boot-aggregate hash
*/
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include "ima.h"
static int init_desc(struct hash_desc *desc)
{
int rc;
desc->tfm = crypto_alloc_hash(ima_hash, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc->tfm)) {
pr_info("failed to load %s transform: %ld\n",
ima_hash, PTR_ERR(desc->tfm));
rc = PTR_ERR(desc->tfm);
return rc;
}
desc->flags = 0;
rc = crypto_hash_init(desc);
if (rc)
crypto_free_hash(desc->tfm);
return rc;
}
/*
* Calculate the MD5/SHA1 file digest
*/
int ima_calc_hash(struct file *file, char *digest)
{
struct hash_desc desc;
struct scatterlist sg[1];
loff_t i_size;
char *rbuf;
int rc, offset = 0;
rc = init_desc(&desc);
if (rc != 0)
return rc;
rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!rbuf) {
rc = -ENOMEM;
goto out;
}
i_size = i_size_read(file->f_dentry->d_inode);
while (offset < i_size) {
int rbuf_len;
rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
if (rbuf_len < 0) {
rc = rbuf_len;
break;
}
offset += rbuf_len;
sg_init_one(sg, rbuf, rbuf_len);
rc = crypto_hash_update(&desc, sg, rbuf_len);
if (rc)
break;
}
kfree(rbuf);
if (!rc)
rc = crypto_hash_final(&desc, digest);
out:
crypto_free_hash(desc.tfm);
return rc;
}
/*
* Calculate the hash of a given template
*/
int ima_calc_template_hash(int template_len, void *template, char *digest)
{
struct hash_desc desc;
struct scatterlist sg[1];
int rc;
rc = init_desc(&desc);
if (rc != 0)
return rc;
sg_init_one(sg, template, template_len);
rc = crypto_hash_update(&desc, sg, template_len);
if (!rc)
rc = crypto_hash_final(&desc, digest);
crypto_free_hash(desc.tfm);
return rc;
}
static void ima_pcrread(int idx, u8 *pcr)
{
if (!ima_used_chip)
return;
if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
pr_err("Error Communicating to TPM chip\n");
}
/*
* Calculate the boot aggregate hash
*/
int ima_calc_boot_aggregate(char *digest)
{
struct hash_desc desc;
struct scatterlist sg;
u8 pcr_i[IMA_DIGEST_SIZE];
int rc, i;
rc = init_desc(&desc);
if (rc != 0)
return rc;
/* cumulative sha1 over tpm registers 0-7 */
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
ima_pcrread(i, pcr_i);
/* now accumulate with current aggregate */
sg_init_one(&sg, pcr_i, IMA_DIGEST_SIZE);
rc = crypto_hash_update(&desc, &sg, IMA_DIGEST_SIZE);
}
if (!rc)
crypto_hash_final(&desc, digest);
crypto_free_hash(desc.tfm);
return rc;
}
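
For comparison, the chunked-update loop in ima_calc_hash() above maps directly onto a userspace file-hash routine. The sketch below is illustrative only; it assumes OpenSSL's SHA1_* API and a fixed 4096-byte buffer, neither of which comes from this patch.

#include <stdio.h>
#include <openssl/sha.h>

/* Hash a file in fixed-size chunks, mirroring the loop in ima_calc_hash(). */
static int sha1_file(const char *path, unsigned char digest[SHA_DIGEST_LENGTH])
{
	unsigned char buf[4096];
	SHA_CTX ctx;
	size_t n;
	FILE *f = fopen(path, "rb");

	if (!f)
		return -1;
	SHA1_Init(&ctx);
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		SHA1_Update(&ctx, buf, n);
	SHA1_Final(digest, &ctx);
	fclose(f);
	return 0;
}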


@ -0,0 +1,376 @@
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
* Authors:
* Kylene Hall <kjhall@us.ibm.com>
* Reiner Sailer <sailer@us.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* File: ima_fs.c
* implements the security filesystem for reporting the
* current measurement list and IMA statistics
*/
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/parser.h>
#include "ima.h"
static int valid_policy = 1;
#define TMPBUFLEN 12
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
loff_t *ppos, atomic_long_t *val)
{
char tmpbuf[TMPBUFLEN];
ssize_t len;
len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
}
static ssize_t ima_show_htable_violations(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
{
return ima_show_htable_value(buf, count, ppos, &ima_htable.violations);
}
static struct file_operations ima_htable_violations_ops = {
.read = ima_show_htable_violations
};
static ssize_t ima_show_measurements_count(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
{
return ima_show_htable_value(buf, count, ppos, &ima_htable.len);
}
static struct file_operations ima_measurements_count_ops = {
.read = ima_show_measurements_count
};
/* returns pointer to an ima_queue_entry (the seq_file iterator position) */
static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
{
loff_t l = *pos;
struct ima_queue_entry *qe;
/* we need a lock since pos could point beyond last element */
rcu_read_lock();
list_for_each_entry_rcu(qe, &ima_measurements, later) {
if (!l--) {
rcu_read_unlock();
return qe;
}
}
rcu_read_unlock();
return NULL;
}
static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ima_queue_entry *qe = v;
/* lock protects when reading beyond last element
* against concurrent list-extension
*/
rcu_read_lock();
qe = list_entry(rcu_dereference(qe->later.next),
struct ima_queue_entry, later);
rcu_read_unlock();
(*pos)++;
return (&qe->later == &ima_measurements) ? NULL : qe;
}
static void ima_measurements_stop(struct seq_file *m, void *v)
{
}
static void ima_putc(struct seq_file *m, void *data, int datalen)
{
while (datalen--)
seq_putc(m, *(char *)data++);
}
/* print format:
* 32bit-le=pcr#
* char[20]=template digest
* 32bit-le=template name size
* char[n]=template name
* eventdata[n]=template specific data
*/
static int ima_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
int namelen;
u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
/* get entry */
e = qe->entry;
if (e == NULL)
return -1;
/*
* 1st: PCRIndex
* PCR used is always the same (config option) in
* little-endian format
*/
ima_putc(m, &pcr, sizeof pcr);
/* 2nd: template digest */
ima_putc(m, e->digest, IMA_DIGEST_SIZE);
/* 3rd: template name size */
namelen = strlen(e->template_name);
ima_putc(m, &namelen, sizeof namelen);
/* 4th: template name */
ima_putc(m, (void *)e->template_name, namelen);
/* 5th: template specific data */
ima_template_show(m, (struct ima_template_data *)&e->template,
IMA_SHOW_BINARY);
return 0;
}
static struct seq_operations ima_measurments_seqops = {
.start = ima_measurements_start,
.next = ima_measurements_next,
.stop = ima_measurements_stop,
.show = ima_measurements_show
};
static int ima_measurements_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ima_measurments_seqops);
}
static struct file_operations ima_measurements_ops = {
.open = ima_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void ima_print_digest(struct seq_file *m, u8 *digest)
{
int i;
for (i = 0; i < IMA_DIGEST_SIZE; i++)
seq_printf(m, "%02x", *(digest + i));
}
void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show)
{
struct ima_template_data *entry = e;
int namelen;
switch (show) {
case IMA_SHOW_ASCII:
ima_print_digest(m, entry->digest);
seq_printf(m, " %s\n", entry->file_name);
break;
case IMA_SHOW_BINARY:
ima_putc(m, entry->digest, IMA_DIGEST_SIZE);
namelen = strlen(entry->file_name);
ima_putc(m, &namelen, sizeof namelen);
ima_putc(m, entry->file_name, namelen);
default:
break;
}
}
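
Taken together with ima_measurements_show() above, one binary_runtime_measurements record is: a 32-bit PCR index, the 20-byte template digest, a 32-bit name length plus the template name, then the template data, which for the default "ima" template (per the IMA_SHOW_BINARY branch) is a 20-byte file digest, a 32-bit length, and the file name. A rough userspace reader under those assumptions (host-endian lengths, default template only; the helper name and buffer sizes are invented for the sketch):

#include <stdio.h>
#include <stdint.h>

/* Parse one default-template record from binary_runtime_measurements. */
static int read_ima_record(FILE *f)
{
	uint32_t pcr, name_len, file_len;
	unsigned char template_digest[20], file_digest[20];
	char name[64], fname[256];

	if (fread(&pcr, sizeof(pcr), 1, f) != 1)
		return -1;			/* end of list */
	if (fread(template_digest, 1, 20, f) != 20 ||
	    fread(&name_len, sizeof(name_len), 1, f) != 1 ||
	    name_len >= sizeof(name) ||
	    fread(name, 1, name_len, f) != name_len ||
	    fread(file_digest, 1, 20, f) != 20 ||
	    fread(&file_len, sizeof(file_len), 1, f) != 1 ||
	    file_len >= sizeof(fname) ||
	    fread(fname, 1, file_len, f) != file_len)
		return -1;			/* truncated or unexpected record */
	fname[file_len] = '\0';
	printf("pcr=%u file=%s\n", pcr, fname);
	return 0;
}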
/* print in ascii */
static int ima_ascii_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
/* get entry */
e = qe->entry;
if (e == NULL)
return -1;
/* 1st: PCR used (config option) */
seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX);
/* 2nd: SHA1 template hash */
ima_print_digest(m, e->digest);
/* 3rd: template name */
seq_printf(m, " %s ", e->template_name);
/* 4th: template specific data */
ima_template_show(m, (struct ima_template_data *)&e->template,
IMA_SHOW_ASCII);
return 0;
}
static struct seq_operations ima_ascii_measurements_seqops = {
.start = ima_measurements_start,
.next = ima_measurements_next,
.stop = ima_measurements_stop,
.show = ima_ascii_measurements_show
};
static int ima_ascii_measurements_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ima_ascii_measurements_seqops);
}
static struct file_operations ima_ascii_measurements_ops = {
.open = ima_ascii_measurements_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static ssize_t ima_write_policy(struct file *file, const char __user *buf,
size_t datalen, loff_t *ppos)
{
char *data;
int rc;
if (datalen >= PAGE_SIZE)
return -ENOMEM;
if (*ppos != 0) {
/* No partial writes. */
return -EINVAL;
}
data = kmalloc(datalen + 1, GFP_KERNEL);
if (!data)
return -ENOMEM;
if (copy_from_user(data, buf, datalen)) {
kfree(data);
return -EFAULT;
}
*(data + datalen) = '\0';
rc = ima_parse_add_rule(data);
if (rc < 0) {
datalen = -EINVAL;
valid_policy = 0;
}
kfree(data);
return datalen;
}
static struct dentry *ima_dir;
static struct dentry *binary_runtime_measurements;
static struct dentry *ascii_runtime_measurements;
static struct dentry *runtime_measurements_count;
static struct dentry *violations;
static struct dentry *ima_policy;
static atomic_t policy_opencount = ATOMIC_INIT(1);
/*
* ima_open_policy: sequentialize access to the policy file
*/
int ima_open_policy(struct inode * inode, struct file * filp)
{
if (atomic_dec_and_test(&policy_opencount))
return 0;
return -EBUSY;
}
/*
* ima_release_policy - start using the new measure policy rules.
*
* Initially, ima_measure points to the default policy rules; now
* point it at the new policy rules and remove the securityfs policy file,
* assuming a valid policy.
*/
static int ima_release_policy(struct inode *inode, struct file *file)
{
if (!valid_policy) {
ima_delete_rules();
valid_policy = 1;
atomic_set(&policy_opencount, 1);
return 0;
}
ima_update_policy();
securityfs_remove(ima_policy);
ima_policy = NULL;
return 0;
}
static struct file_operations ima_measure_policy_ops = {
.open = ima_open_policy,
.write = ima_write_policy,
.release = ima_release_policy
};
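
From userspace, the open/write/release sequence enforced by these file operations amounts to: open the policy file once, write one rule per write() call, and close it to activate (or, on a parse error, discard) the new policy. A minimal sketch, assuming securityfs is mounted at /sys/kernel/security:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Load an IMA measurement policy: one rule per write(), close() activates it. */
static int load_ima_policy(const char *rules[], int nr_rules)
{
	int i, fd;

	fd = open("/sys/kernel/security/ima/policy", O_WRONLY);
	if (fd < 0)
		return -1;	/* -EBUSY once a policy is being, or has been, loaded */
	for (i = 0; i < nr_rules; i++)
		if (write(fd, rules[i], strlen(rules[i])) < 0)
			break;	/* invalid rule: the whole policy is dropped on close */
	return close(fd);
}

After a successful load, ima_release_policy() removes the securityfs policy file, so the default policy can only be replaced once per boot.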
int ima_fs_init(void)
{
ima_dir = securityfs_create_dir("ima", NULL);
if (IS_ERR(ima_dir))
return -1;
binary_runtime_measurements =
securityfs_create_file("binary_runtime_measurements",
S_IRUSR | S_IRGRP, ima_dir, NULL,
&ima_measurements_ops);
if (IS_ERR(binary_runtime_measurements))
goto out;
ascii_runtime_measurements =
securityfs_create_file("ascii_runtime_measurements",
S_IRUSR | S_IRGRP, ima_dir, NULL,
&ima_ascii_measurements_ops);
if (IS_ERR(ascii_runtime_measurements))
goto out;
runtime_measurements_count =
securityfs_create_file("runtime_measurements_count",
S_IRUSR | S_IRGRP, ima_dir, NULL,
&ima_measurements_count_ops);
if (IS_ERR(runtime_measurements_count))
goto out;
violations =
securityfs_create_file("violations", S_IRUSR | S_IRGRP,
ima_dir, NULL, &ima_htable_violations_ops);
if (IS_ERR(violations))
goto out;
ima_policy = securityfs_create_file("policy",
S_IRUSR | S_IRGRP | S_IWUSR,
ima_dir, NULL,
&ima_measure_policy_ops);
if (IS_ERR(ima_policy))
goto out;
return 0;
out:
securityfs_remove(runtime_measurements_count);
securityfs_remove(ascii_runtime_measurements);
securityfs_remove(binary_runtime_measurements);
securityfs_remove(ima_dir);
securityfs_remove(ima_policy);
return -1;
}
void __exit ima_fs_cleanup(void)
{
securityfs_remove(violations);
securityfs_remove(runtime_measurements_count);
securityfs_remove(ascii_runtime_measurements);
securityfs_remove(binary_runtime_measurements);
securityfs_remove(ima_dir);
securityfs_remove(ima_policy);
}


@ -0,0 +1,204 @@
/*
* Copyright (C) 2008 IBM Corporation
*
* Authors:
* Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* File: ima_iint.c
* - implements the IMA hooks: ima_inode_alloc, ima_inode_free
* - cache integrity information associated with an inode
* using a radix tree.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/radix-tree.h>
#include "ima.h"
#define ima_iint_delete ima_inode_free
RADIX_TREE(ima_iint_store, GFP_ATOMIC);
DEFINE_SPINLOCK(ima_iint_lock);
static struct kmem_cache *iint_cache __read_mostly;
/* ima_iint_find_get - return the iint associated with an inode
*
* ima_iint_find_get gets a reference to the iint. Caller must
* remember to put the iint reference.
*/
struct ima_iint_cache *ima_iint_find_get(struct inode *inode)
{
struct ima_iint_cache *iint;
rcu_read_lock();
iint = radix_tree_lookup(&ima_iint_store, (unsigned long)inode);
if (!iint)
goto out;
kref_get(&iint->refcount);
out:
rcu_read_unlock();
return iint;
}
/* Allocate memory for the iint associated with the inode
* from the iint_cache slab, initialize the iint, and
* insert it into the radix tree.
*
* On success return a pointer to the iint; on failure return NULL.
*/
struct ima_iint_cache *ima_iint_insert(struct inode *inode)
{
struct ima_iint_cache *iint = NULL;
int rc = 0;
if (!ima_initialized)
return iint;
iint = kmem_cache_alloc(iint_cache, GFP_KERNEL);
if (!iint)
return iint;
rc = radix_tree_preload(GFP_KERNEL);
if (rc < 0)
goto out;
spin_lock(&ima_iint_lock);
rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
spin_unlock(&ima_iint_lock);
out:
if (rc < 0) {
kmem_cache_free(iint_cache, iint);
if (rc == -EEXIST) {
spin_lock(&ima_iint_lock);
iint = radix_tree_lookup(&ima_iint_store,
(unsigned long)inode);
spin_unlock(&ima_iint_lock);
} else
iint = NULL;
}
radix_tree_preload_end();
return iint;
}
/**
* ima_inode_alloc - allocate an iint associated with an inode
* @inode: pointer to the inode
*
* Return 0 on success, 1 on failure.
*/
int ima_inode_alloc(struct inode *inode)
{
struct ima_iint_cache *iint;
if (!ima_initialized)
return 0;
iint = ima_iint_insert(inode);
if (!iint)
return 1;
return 0;
}
/* ima_iint_find_insert_get - get the iint associated with an inode
*
* Most insertions are done at inode_alloc, except those allocated
* before late_initcall. When the iint does not exist, allocate it,
* initialize and insert it, and increment the iint refcount.
*
* (We can't initialize at security_initcall before any inodes are
* allocated; we have to wait at least until proc_init.)
*
* Return the iint.
*/
struct ima_iint_cache *ima_iint_find_insert_get(struct inode *inode)
{
struct ima_iint_cache *iint = NULL;
iint = ima_iint_find_get(inode);
if (iint)
return iint;
iint = ima_iint_insert(inode);
if (iint)
kref_get(&iint->refcount);
return iint;
}
EXPORT_SYMBOL_GPL(ima_iint_find_insert_get);
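
The calling convention the functions above expect, and which ima_main.c later in this patch follows, is find-or-insert, work under the iint mutex, then drop the reference. This is a usage fragment of the API just defined, not standalone code:

	struct ima_iint_cache *iint;

	iint = ima_iint_find_insert_get(inode);
	if (!iint)
		return;
	mutex_lock(&iint->mutex);
	/* ... inspect or update iint->flags, counters, etc. ... */
	mutex_unlock(&iint->mutex);
	kref_put(&iint->refcount, iint_free);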
/* iint_free - called when the iint refcount goes to zero */
void iint_free(struct kref *kref)
{
struct ima_iint_cache *iint = container_of(kref, struct ima_iint_cache,
refcount);
iint->version = 0;
iint->flags = 0UL;
if (iint->readcount != 0) {
printk(KERN_INFO "%s: readcount: %ld\n", __FUNCTION__,
iint->readcount);
iint->readcount = 0;
}
if (iint->writecount != 0) {
printk(KERN_INFO "%s: writecount: %ld\n", __FUNCTION__,
iint->writecount);
iint->writecount = 0;
}
if (iint->opencount != 0) {
printk(KERN_INFO "%s: opencount: %ld\n", __FUNCTION__,
iint->opencount);
iint->opencount = 0;
}
kref_set(&iint->refcount, 1);
kmem_cache_free(iint_cache, iint);
}
void iint_rcu_free(struct rcu_head *rcu_head)
{
struct ima_iint_cache *iint = container_of(rcu_head,
struct ima_iint_cache, rcu);
kref_put(&iint->refcount, iint_free);
}
/**
* ima_iint_delete - called on integrity_inode_free
* @inode: pointer to the inode
*
* Free the integrity information (iint) associated with an inode.
*/
void ima_iint_delete(struct inode *inode)
{
struct ima_iint_cache *iint;
if (!ima_initialized)
return;
spin_lock(&ima_iint_lock);
iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
spin_unlock(&ima_iint_lock);
if (iint)
call_rcu(&iint->rcu, iint_rcu_free);
}
static void init_once(void *foo)
{
struct ima_iint_cache *iint = foo;
memset(iint, 0, sizeof *iint);
iint->version = 0;
iint->flags = 0UL;
mutex_init(&iint->mutex);
iint->readcount = 0;
iint->writecount = 0;
iint->opencount = 0;
kref_set(&iint->refcount, 1);
}
void ima_iintcache_init(void)
{
iint_cache =
kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
SLAB_PANIC, init_once);
}


@ -0,0 +1,96 @@
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
* Authors:
* Reiner Sailer <sailer@watson.ibm.com>
* Leendert van Doorn <leendert@watson.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* File: ima_init.c
* initialization and cleanup functions
*/
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include "ima.h"
/* name for boot aggregate entry */
static const char *boot_aggregate_name = "boot_aggregate";
int ima_used_chip;
/* Add the boot aggregate to the IMA measurement list and extend
* the PCR register.
*
* Calculate the boot aggregate, a SHA1 hash over TPM registers 0-7
* if a TPM chip exists, or all zeroes if it does not.
* Add the boot aggregate measurement to the measurement
* list and extend the PCR register.
*
* If a tpm chip does not exist, indicate the core root of trust is
* not hardware based by invalidating the aggregate PCR value.
* (The aggregate PCR value is invalidated by adding one value to
* the measurement list and extending the aggregate PCR value with
* a different value.) Violations add a zero entry to the measurement
* list and extend the aggregate PCR value with ff...ff's.
*/
static void ima_add_boot_aggregate(void)
{
struct ima_template_entry *entry;
const char *op = "add_boot_aggregate";
const char *audit_cause = "ENOMEM";
int result = -ENOMEM;
int violation = 1;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto err_out;
memset(&entry->template, 0, sizeof(entry->template));
strncpy(entry->template.file_name, boot_aggregate_name,
IMA_EVENT_NAME_LEN_MAX);
if (ima_used_chip) {
violation = 0;
result = ima_calc_boot_aggregate(entry->template.digest);
if (result < 0) {
audit_cause = "hashing_error";
kfree(entry);
goto err_out;
}
}
result = ima_store_template(entry, violation, NULL);
if (result < 0)
kfree(entry);
return;
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
audit_cause, result, 0);
}
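
To check the value produced here offline, the boot aggregate can be recomputed from the PCR contents alone: it is a SHA1 over the concatenation of PCRs 0-7, as implemented by ima_calc_boot_aggregate() in ima_crypto.c above. A hedged userspace sketch (OpenSSL assumed, PCR values supplied by the caller):

#include <openssl/sha.h>

/* boot_aggregate = SHA1(PCR0 || PCR1 || ... || PCR7), 20 bytes per PCR */
static void calc_boot_aggregate(const unsigned char pcr[8][20],
				unsigned char digest[SHA_DIGEST_LENGTH])
{
	SHA_CTX ctx;
	int i;

	SHA1_Init(&ctx);
	for (i = 0; i < 8; i++)
		SHA1_Update(&ctx, pcr[i], 20);
	SHA1_Final(digest, &ctx);
}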
int ima_init(void)
{
u8 pcr_i[IMA_DIGEST_SIZE];
int rc;
ima_used_chip = 0;
rc = tpm_pcr_read(TPM_ANY_NUM, 0, pcr_i);
if (rc == 0)
ima_used_chip = 1;
if (!ima_used_chip)
pr_info("No TPM chip found, activating TPM-bypass!\n");
ima_add_boot_aggregate(); /* boot aggregate must be first entry */
ima_init_policy();
return ima_fs_init();
}
void __exit ima_cleanup(void)
{
ima_fs_cleanup();
}


@ -0,0 +1,327 @@
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
* Authors:
* Reiner Sailer <sailer@watson.ibm.com>
* Serge Hallyn <serue@us.ibm.com>
* Kylene Hall <kylene@us.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* File: ima_main.c
* implements the IMA hooks: ima_bprm_check, ima_file_mmap,
* and ima_path_check.
*/
#include <linux/module.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/mount.h>
#include <linux/mman.h>
#include "ima.h"
int ima_initialized;
char *ima_hash = "sha1";
static int __init hash_setup(char *str)
{
const char *op = "hash_setup";
const char *hash = "sha1";
int result = 0;
int audit_info = 0;
if (strncmp(str, "md5", 3) == 0) {
hash = "md5";
ima_hash = str;
} else if (strncmp(str, "sha1", 4) != 0) {
hash = "invalid_hash_type";
result = 1;
}
integrity_audit_msg(AUDIT_INTEGRITY_HASH, NULL, NULL, op, hash,
result, audit_info);
return 1;
}
__setup("ima_hash=", hash_setup);
/**
* ima_file_free - called on __fput()
* @file: pointer to file structure being freed
*
* Flag files that changed, based on i_version;
* and decrement the iint readcount/writecount.
*/
void ima_file_free(struct file *file)
{
struct inode *inode = file->f_dentry->d_inode;
struct ima_iint_cache *iint;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return;
iint = ima_iint_find_get(inode);
if (!iint)
return;
mutex_lock(&iint->mutex);
if (iint->opencount <= 0) {
printk(KERN_INFO
"%s: %s open/free imbalance (r:%ld w:%ld o:%ld f:%ld)\n",
__FUNCTION__, file->f_dentry->d_name.name,
iint->readcount, iint->writecount,
iint->opencount, atomic_long_read(&file->f_count));
if (!(iint->flags & IMA_IINT_DUMP_STACK)) {
dump_stack();
iint->flags |= IMA_IINT_DUMP_STACK;
}
}
iint->opencount--;
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
iint->readcount--;
if (file->f_mode & FMODE_WRITE) {
iint->writecount--;
if (iint->writecount == 0) {
if (iint->version != inode->i_version)
iint->flags &= ~IMA_MEASURED;
}
}
mutex_unlock(&iint->mutex);
kref_put(&iint->refcount, iint_free);
}
/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
*
* When opening a file for read, if the file is already open for write,
* the file could change, resulting in a file measurement error.
*
* Opening a file for write, if the file is already open for read, results
* in a time of measure, time of use (ToMToU) error.
*
* In either case invalidate the PCR.
*/
enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
static void ima_read_write_check(enum iint_pcr_error error,
struct ima_iint_cache *iint,
struct inode *inode,
const unsigned char *filename)
{
switch (error) {
case TOMTOU:
if (iint->readcount > 0)
ima_add_violation(inode, filename, "invalid_pcr",
"ToMToU");
break;
case OPEN_WRITERS:
if (iint->writecount > 0)
ima_add_violation(inode, filename, "invalid_pcr",
"open_writers");
break;
}
}
static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
const unsigned char *filename)
{
int rc = 0;
if (IS_ERR(file)) {
pr_info("%s dentry_open failed\n", filename);
return rc;
}
iint->opencount++;
iint->readcount++;
rc = ima_collect_measurement(iint, file);
if (!rc)
ima_store_measurement(iint, file, filename);
return rc;
}
/**
* ima_path_check - based on policy, collect/store measurement.
* @path: contains a pointer to the path to be measured
* @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
*
* Measure the file being open for readonly, based on the
* ima_must_measure() policy decision.
*
* Keep read/write counters for all files, but only
* invalidate the PCR for measured files:
* - Opening a file for write when already open for read,
* results in a time of measure, time of use (ToMToU) error.
* - Opening a file for read when already open for write,
* could result in a file measurement error.
*
* Return 0 on success, an error code on failure.
* (Based on the results of appraise_measurement().)
*/
int ima_path_check(struct path *path, int mask)
{
struct inode *inode = path->dentry->d_inode;
struct ima_iint_cache *iint;
struct file *file = NULL;
int rc;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
iint = ima_iint_find_insert_get(inode);
if (!iint)
return 0;
mutex_lock(&iint->mutex);
iint->opencount++;
if ((mask & MAY_WRITE) || (mask == 0))
iint->writecount++;
else if (mask & (MAY_READ | MAY_EXEC))
iint->readcount++;
rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
if (rc < 0)
goto out;
if ((mask & MAY_WRITE) || (mask == 0))
ima_read_write_check(TOMTOU, iint, inode,
path->dentry->d_name.name);
if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ)
goto out;
ima_read_write_check(OPEN_WRITERS, iint, inode,
path->dentry->d_name.name);
if (!(iint->flags & IMA_MEASURED)) {
struct dentry *dentry = dget(path->dentry);
struct vfsmount *mnt = mntget(path->mnt);
file = dentry_open(dentry, mnt, O_RDONLY, current->cred);
rc = get_path_measurement(iint, file, dentry->d_name.name);
}
out:
mutex_unlock(&iint->mutex);
if (file)
fput(file);
kref_put(&iint->refcount, iint_free);
return 0;
}
static int process_measurement(struct file *file, const unsigned char *filename,
int mask, int function)
{
struct inode *inode = file->f_dentry->d_inode;
struct ima_iint_cache *iint;
int rc;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
iint = ima_iint_find_insert_get(inode);
if (!iint)
return -ENOMEM;
mutex_lock(&iint->mutex);
rc = ima_must_measure(iint, inode, mask, function);
if (rc != 0)
goto out;
rc = ima_collect_measurement(iint, file);
if (!rc)
ima_store_measurement(iint, file, filename);
out:
mutex_unlock(&iint->mutex);
kref_put(&iint->refcount, iint_free);
return rc;
}
static void opencount_get(struct file *file)
{
struct inode *inode = file->f_dentry->d_inode;
struct ima_iint_cache *iint;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return;
iint = ima_iint_find_insert_get(inode);
if (!iint)
return;
mutex_lock(&iint->mutex);
iint->opencount++;
mutex_unlock(&iint->mutex);
}
/**
* ima_file_mmap - based on policy, collect/store measurement.
* @file: pointer to the file to be measured (May be NULL)
* @prot: contains the protection that will be applied by the kernel.
*
* Measure files being mmapped executable based on the ima_must_measure()
* policy decision.
*
* Return 0 on success, an error code on failure.
* (Based on the results of appraise_measurement().)
*/
int ima_file_mmap(struct file *file, unsigned long prot)
{
int rc;
if (!file)
return 0;
if (prot & PROT_EXEC)
rc = process_measurement(file, file->f_dentry->d_name.name,
MAY_EXEC, FILE_MMAP);
return 0;
}
/*
* ima_shm_check - IPC shm and shmat create/fput a file
*
* Maintain the opencount for these files to prevent unnecessary
* imbalance messages.
*/
void ima_shm_check(struct file *file)
{
opencount_get(file);
return;
}
/**
* ima_bprm_check - based on policy, collect/store measurement.
* @bprm: contains the linux_binprm structure
*
* The OS prevents an executable file, already open for write,
* from being executed in deny_write_access(), and an executable file,
* already open for execute, from being modified in get_write_access().
* So we can be certain that what we verify and measure here is actually
* what is being executed.
*
* Return 0 on success, an error code on failure.
* (Based on the results of appraise_measurement().)
*/
int ima_bprm_check(struct linux_binprm *bprm)
{
int rc;
rc = process_measurement(bprm->file, bprm->filename,
MAY_EXEC, BPRM_CHECK);
return 0;
}
static int __init init_ima(void)
{
int error;
ima_iintcache_init();
error = ima_init();
ima_initialized = 1;
return error;
}
static void __exit cleanup_ima(void)
{
ima_cleanup();
}
late_initcall(init_ima); /* Start IMA after the TPM is available */
MODULE_DESCRIPTION("Integrity Measurement Architecture");
MODULE_LICENSE("GPL");


@ -0,0 +1,414 @@
/*
* Copyright (C) 2008 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
* ima_policy.c
* - initialize default measure policy rules
*
*/
#include <linux/module.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/parser.h>
#include "ima.h"
/* flags definitions */
#define IMA_FUNC 0x0001
#define IMA_MASK 0x0002
#define IMA_FSMAGIC 0x0004
#define IMA_UID 0x0008
enum ima_action { UNKNOWN = -1, DONT_MEASURE = 0, MEASURE };
#define MAX_LSM_RULES 6
enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
};
struct ima_measure_rule_entry {
struct list_head list;
enum ima_action action;
unsigned int flags;
enum ima_hooks func;
int mask;
unsigned long fsmagic;
uid_t uid;
struct {
void *rule; /* LSM file metadata specific */
int type; /* audit type */
} lsm[MAX_LSM_RULES];
};
/* Without LSM specific knowledge, the default policy can only be
* written in terms of .action, .func, .mask, .fsmagic, and .uid
*/
static struct ima_measure_rule_entry default_rules[] = {
{.action = DONT_MEASURE,.fsmagic = PROC_SUPER_MAGIC,
.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,
.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = 0xF97CFF8C,.flags = IMA_FSMAGIC},
{.action = MEASURE,.func = FILE_MMAP,.mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
{.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
{.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0,
.flags = IMA_FUNC | IMA_MASK | IMA_UID}
};
static LIST_HEAD(measure_default_rules);
static LIST_HEAD(measure_policy_rules);
static struct list_head *ima_measure;
static DEFINE_MUTEX(ima_measure_mutex);
/**
* ima_match_rules - determine whether an inode matches the measure rule.
* @rule: a pointer to a rule
* @inode: a pointer to an inode
* @func: LIM hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
*
* Returns true on rule match, false on failure.
*/
static bool ima_match_rules(struct ima_measure_rule_entry *rule,
struct inode *inode, enum ima_hooks func, int mask)
{
struct task_struct *tsk = current;
int i;
if ((rule->flags & IMA_FUNC) && rule->func != func)
return false;
if ((rule->flags & IMA_MASK) && rule->mask != mask)
return false;
if ((rule->flags & IMA_FSMAGIC)
&& rule->fsmagic != inode->i_sb->s_magic)
return false;
if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid)
return false;
for (i = 0; i < MAX_LSM_RULES; i++) {
int rc;
u32 osid, sid;
if (!rule->lsm[i].rule)
continue;
switch (i) {
case LSM_OBJ_USER:
case LSM_OBJ_ROLE:
case LSM_OBJ_TYPE:
security_inode_getsecid(inode, &osid);
rc = security_filter_rule_match(osid,
rule->lsm[i].type,
AUDIT_EQUAL,
rule->lsm[i].rule,
NULL);
break;
case LSM_SUBJ_USER:
case LSM_SUBJ_ROLE:
case LSM_SUBJ_TYPE:
security_task_getsecid(tsk, &sid);
rc = security_filter_rule_match(sid,
rule->lsm[i].type,
AUDIT_EQUAL,
rule->lsm[i].rule,
NULL);
default:
break;
}
if (!rc)
return false;
}
return true;
}
/**
* ima_match_policy - decision based on LSM and other conditions
* @inode: pointer to an inode for which the policy decision is being made
* @func: IMA hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
*
* Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
* conditions.
*
* (There is no need for locking when walking the policy list,
* as elements in the list are never deleted, nor does the list
* change.)
*/
int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask)
{
struct ima_measure_rule_entry *entry;
list_for_each_entry(entry, ima_measure, list) {
bool rc;
rc = ima_match_rules(entry, inode, func, mask);
if (rc)
return entry->action;
}
return 0;
}
/**
* ima_init_policy - initialize the default measure rules.
*
* ima_measure points to either the measure_default_rules or
* the new measure_policy_rules.
*/
void ima_init_policy(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(default_rules); i++)
list_add_tail(&default_rules[i].list, &measure_default_rules);
ima_measure = &measure_default_rules;
}
/**
* ima_update_policy - update default_rules with new measure rules
*
* Called on file .release to update the default rules with a complete new
* policy. Once updated, the policy is locked; no additional rules can be
* added to the policy.
*/
void ima_update_policy(void)
{
const char *op = "policy_update";
const char *cause = "already exists";
int result = 1;
int audit_info = 0;
if (ima_measure == &measure_default_rules) {
ima_measure = &measure_policy_rules;
cause = "complete";
result = 0;
}
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
NULL, op, cause, result, audit_info);
}
enum {
Opt_err = -1,
Opt_measure = 1, Opt_dont_measure,
Opt_obj_user, Opt_obj_role, Opt_obj_type,
Opt_subj_user, Opt_subj_role, Opt_subj_type,
Opt_func, Opt_mask, Opt_fsmagic, Opt_uid
};
static match_table_t policy_tokens = {
{Opt_measure, "measure"},
{Opt_dont_measure, "dont_measure"},
{Opt_obj_user, "obj_user=%s"},
{Opt_obj_role, "obj_role=%s"},
{Opt_obj_type, "obj_type=%s"},
{Opt_subj_user, "subj_user=%s"},
{Opt_subj_role, "subj_role=%s"},
{Opt_subj_type, "subj_type=%s"},
{Opt_func, "func=%s"},
{Opt_mask, "mask=%s"},
{Opt_fsmagic, "fsmagic=%s"},
{Opt_uid, "uid=%s"},
{Opt_err, NULL}
};
static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
char *args, int lsm_rule, int audit_type)
{
int result;
entry->lsm[lsm_rule].type = audit_type;
result = security_filter_rule_init(entry->lsm[lsm_rule].type,
AUDIT_EQUAL, args,
&entry->lsm[lsm_rule].rule);
return result;
}
static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
{
struct audit_buffer *ab;
char *p;
int result = 0;
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);
entry->action = -1;
while ((p = strsep(&rule, " \n")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
unsigned long lnum;
if (result < 0)
break;
if (!*p)
continue;
token = match_token(p, policy_tokens, args);
switch (token) {
case Opt_measure:
audit_log_format(ab, "%s ", "measure");
entry->action = MEASURE;
break;
case Opt_dont_measure:
audit_log_format(ab, "%s ", "dont_measure");
entry->action = DONT_MEASURE;
break;
case Opt_func:
audit_log_format(ab, "func=%s ", args[0].from);
if (strcmp(args[0].from, "PATH_CHECK") == 0)
entry->func = PATH_CHECK;
else if (strcmp(args[0].from, "FILE_MMAP") == 0)
entry->func = FILE_MMAP;
else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
entry->func = BPRM_CHECK;
else
result = -EINVAL;
if (!result)
entry->flags |= IMA_FUNC;
break;
case Opt_mask:
audit_log_format(ab, "mask=%s ", args[0].from);
if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
entry->mask = MAY_EXEC;
else if (strcmp(args[0].from, "MAY_WRITE") == 0)
entry->mask = MAY_WRITE;
else if (strcmp(args[0].from, "MAY_READ") == 0)
entry->mask = MAY_READ;
else if (strcmp(args[0].from, "MAY_APPEND") == 0)
entry->mask = MAY_APPEND;
else
result = -EINVAL;
if (!result)
entry->flags |= IMA_MASK;
break;
case Opt_fsmagic:
audit_log_format(ab, "fsmagic=%s ", args[0].from);
result = strict_strtoul(args[0].from, 16,
&entry->fsmagic);
if (!result)
entry->flags |= IMA_FSMAGIC;
break;
case Opt_uid:
audit_log_format(ab, "uid=%s ", args[0].from);
result = strict_strtoul(args[0].from, 10, &lnum);
if (!result) {
entry->uid = (uid_t) lnum;
if (entry->uid != lnum)
result = -EINVAL;
else
entry->flags |= IMA_UID;
}
break;
case Opt_obj_user:
audit_log_format(ab, "obj_user=%s ", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_OBJ_USER,
AUDIT_OBJ_USER);
break;
case Opt_obj_role:
audit_log_format(ab, "obj_role=%s ", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_OBJ_ROLE,
AUDIT_OBJ_ROLE);
break;
case Opt_obj_type:
audit_log_format(ab, "obj_type=%s ", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_OBJ_TYPE,
AUDIT_OBJ_TYPE);
break;
case Opt_subj_user:
audit_log_format(ab, "subj_user=%s ", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_SUBJ_USER,
AUDIT_SUBJ_USER);
break;
case Opt_subj_role:
audit_log_format(ab, "subj_role=%s ", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_SUBJ_ROLE,
AUDIT_SUBJ_ROLE);
break;
case Opt_subj_type:
audit_log_format(ab, "subj_type=%s ", args[0].from);
result = ima_lsm_rule_init(entry, args[0].from,
LSM_SUBJ_TYPE,
AUDIT_SUBJ_TYPE);
break;
case Opt_err:
audit_log_format(ab, "UNKNOWN=%s ", p);
break;
}
}
if (entry->action == UNKNOWN)
result = -EINVAL;
audit_log_format(ab, "res=%d", !result ? 0 : 1);
audit_log_end(ab);
return result;
}
/**
* ima_parse_add_rule - add a rule to measure_policy_rules
* @rule - ima measurement policy rule
*
* Uses a mutex to protect the policy list from multiple concurrent writers.
* Returns 0 on success, an error code on failure.
*/
int ima_parse_add_rule(char *rule)
{
const char *op = "update_policy";
struct ima_measure_rule_entry *entry;
int result = 0;
int audit_info = 0;
/* Prevent installed policy from changing */
if (ima_measure != &measure_default_rules) {
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
NULL, op, "already exists",
-EACCES, audit_info);
return -EACCES;
}
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
NULL, op, "-ENOMEM", -ENOMEM, audit_info);
return -ENOMEM;
}
INIT_LIST_HEAD(&entry->list);
result = ima_parse_rule(rule, entry);
if (!result) {
mutex_lock(&ima_measure_mutex);
list_add_tail(&entry->list, &measure_policy_rules);
mutex_unlock(&ima_measure_mutex);
} else {
kfree(entry);
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
NULL, op, "invalid policy", result,
audit_info);
}
return result;
}
/* ima_delete_rules - called to clean up an invalid policy */
void ima_delete_rules(void)
{
struct ima_measure_rule_entry *entry, *tmp;
mutex_lock(&ima_measure_mutex);
list_for_each_entry_safe(entry, tmp, &measure_policy_rules, list) {
list_del(&entry->list);
kfree(entry);
}
mutex_unlock(&ima_measure_mutex);
}


@ -0,0 +1,140 @@
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
* Authors:
* Serge Hallyn <serue@us.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* File: ima_queue.c
* Implements queues that store template measurements and
* maintain an aggregate over the stored measurements
* in the pre-configured TPM PCR (if available).
* The measurement list is append-only. No entry is
* ever removed or changed during the boot-cycle.
*/
#include <linux/module.h>
#include <linux/rculist.h>
#include "ima.h"
LIST_HEAD(ima_measurements); /* list of all measurements */
/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
.len = ATOMIC_LONG_INIT(0),
.violations = ATOMIC_LONG_INIT(0),
.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};
/* This mutex protects the atomicity of extending the measurement list
* and extending the TPM PCR aggregate. Since tpm_pcr_extend can take a
* long time (and the tpm driver uses a mutex), we can't use a spinlock.
*/
static DEFINE_MUTEX(ima_extend_list_mutex);
/* look up the digest value in the hash table, and return the entry */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
{
struct ima_queue_entry *qe, *ret = NULL;
unsigned int key;
struct hlist_node *pos;
int rc;
key = ima_hash_key(digest_value);
rcu_read_lock();
hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) {
rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
if (rc == 0) {
ret = qe;
break;
}
}
rcu_read_unlock();
return ret;
}
/* ima_add_template_entry helper function:
* - Add template entry to measurement list and hash table.
*
* (Called with ima_extend_list_mutex held.)
*/
static int ima_add_digest_entry(struct ima_template_entry *entry)
{
struct ima_queue_entry *qe;
unsigned int key;
qe = kmalloc(sizeof(*qe), GFP_KERNEL);
if (qe == NULL) {
pr_err("OUT OF MEMORY ERROR creating queue entry.\n");
return -ENOMEM;
}
qe->entry = entry;
INIT_LIST_HEAD(&qe->later);
list_add_tail_rcu(&qe->later, &ima_measurements);
atomic_long_inc(&ima_htable.len);
key = ima_hash_key(entry->digest);
hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
return 0;
}
static int ima_pcr_extend(const u8 *hash)
{
int result = 0;
if (!ima_used_chip)
return result;
result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
if (result != 0)
pr_err("Error Communicating to TPM chip\n");
return result;
}
/* Add template entry to the measurement list and hash table,
* and extend the pcr.
*/
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
const char *op, struct inode *inode)
{
u8 digest[IMA_DIGEST_SIZE];
const char *audit_cause = "hash_added";
int audit_info = 1;
int result = 0;
mutex_lock(&ima_extend_list_mutex);
if (!violation) {
memcpy(digest, entry->digest, sizeof digest);
if (ima_lookup_digest_entry(digest)) {
audit_cause = "hash_exists";
goto out;
}
}
result = ima_add_digest_entry(entry);
if (result < 0) {
audit_cause = "ENOMEM";
audit_info = 0;
goto out;
}
if (violation) /* invalidate pcr */
memset(digest, 0xff, sizeof digest);
result = ima_pcr_extend(digest);
if (result != 0) {
audit_cause = "TPM error";
audit_info = 0;
}
out:
mutex_unlock(&ima_extend_list_mutex);
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, entry->template_name,
op, audit_cause, result, audit_info);
return result;
}
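
A verifier can replay the measurement list against the TPM by repeating the extend operation in software: starting from a zeroed PCR, each entry extends with its template digest, and violation entries extend with 20 bytes of 0xff, exactly as in ima_add_template_entry() above. A sketch of the extend step, assuming a SHA1-based TPM 1.2 style extend and OpenSSL:

#include <openssl/sha.h>

/* Software PCR extend: new_pcr = SHA1(old_pcr || value) */
static void soft_pcr_extend(unsigned char pcr[SHA_DIGEST_LENGTH],
			    const unsigned char value[20])
{
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, pcr, SHA_DIGEST_LENGTH);
	SHA1_Update(&ctx, value, 20);
	SHA1_Final(pcr, &ctx);
}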


@ -53,6 +53,7 @@ struct key_user {
atomic_t nkeys; /* number of keys */
atomic_t nikeys; /* number of instantiated keys */
uid_t uid;
struct user_namespace *user_ns;
int qnkeys; /* number of keys allocated to this user */
int qnbytes; /* number of bytes allocated to this user */
};
@ -61,7 +62,8 @@ extern struct rb_root key_user_tree;
extern spinlock_t key_user_lock;
extern struct key_user root_key_user;
extern struct key_user *key_user_lookup(uid_t uid);
extern struct key_user *key_user_lookup(uid_t uid,
struct user_namespace *user_ns);
extern void key_user_put(struct key_user *user);
/*


@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/user_namespace.h>
#include "internal.h"
static struct kmem_cache *key_jar;
@ -60,7 +61,7 @@ void __key_check(const struct key *key)
* get the key quota record for a user, allocating a new record if one doesn't
* already exist
*/
struct key_user *key_user_lookup(uid_t uid)
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
struct key_user *candidate = NULL, *user;
struct rb_node *parent = NULL;
@ -79,6 +80,10 @@ struct key_user *key_user_lookup(uid_t uid)
p = &(*p)->rb_left;
else if (uid > user->uid)
p = &(*p)->rb_right;
else if (user_ns < user->user_ns)
p = &(*p)->rb_left;
else if (user_ns > user->user_ns)
p = &(*p)->rb_right;
else
goto found;
}
@ -106,6 +111,7 @@ struct key_user *key_user_lookup(uid_t uid)
atomic_set(&candidate->nkeys, 0);
atomic_set(&candidate->nikeys, 0);
candidate->uid = uid;
candidate->user_ns = get_user_ns(user_ns);
candidate->qnkeys = 0;
candidate->qnbytes = 0;
spin_lock_init(&candidate->lock);
@ -136,6 +142,7 @@ void key_user_put(struct key_user *user)
if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
rb_erase(&user->node, &key_user_tree);
spin_unlock(&key_user_lock);
put_user_ns(user->user_ns);
kfree(user);
}
@ -234,7 +241,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
quotalen = desclen + type->def_datalen;
/* get hold of the key tracking for this user */
user = key_user_lookup(uid);
user = key_user_lookup(uid, cred->user->user_ns);
if (!user)
goto no_memory_1;


@ -726,7 +726,7 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
/* change the UID */
if (uid != (uid_t) -1 && uid != key->uid) {
ret = -ENOMEM;
newowner = key_user_lookup(uid);
newowner = key_user_lookup(uid, current_user_ns());
if (!newowner)
goto error_put;


@ -539,6 +539,9 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
&keyring_name_hash[bucket],
type_data.link
) {
if (keyring->user->user_ns != current_user_ns())
continue;
if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
continue;


@ -35,6 +35,9 @@ int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
key = key_ref_to_ptr(key_ref);
if (key->user->user_ns != cred->user->user_ns)
goto use_other_perms;
/* use the second 8-bits of permissions for keys the caller owns */
if (key->uid == cred->fsuid) {
kperm = key->perm >> 16;
@ -56,6 +59,8 @@ int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
}
}
use_other_perms:
/* otherwise use the least-significant 8-bits */
kperm = key->perm;
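
For context on the shifts above: key->perm packs four 8-bit permission sets, one each for possessor, user, group and other, which is why the owner branch uses key->perm >> 16 and the fallthrough uses the low byte. A small illustration (the layout is the long-standing keys API convention, not something introduced by this hunk, and the helper names are invented):

#include <stdint.h>

/* key->perm layout: [possessor][user][group][other], one byte each */
static inline uint8_t perm_possessor(uint32_t perm) { return perm >> 24; }
static inline uint8_t perm_user(uint32_t perm)      { return (perm >> 16) & 0xff; }
static inline uint8_t perm_group(uint32_t perm)     { return (perm >> 8) & 0xff; }
static inline uint8_t perm_other(uint32_t perm)     { return perm & 0xff; }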


@ -91,6 +91,28 @@ __initcall(key_proc_init);
*/
#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
static struct rb_node *__key_serial_next(struct rb_node *n)
{
while (n) {
struct key *key = rb_entry(n, struct key, serial_node);
if (key->user->user_ns == current_user_ns())
break;
n = rb_next(n);
}
return n;
}
static struct rb_node *key_serial_next(struct rb_node *n)
{
return __key_serial_next(rb_next(n));
}
static struct rb_node *key_serial_first(struct rb_root *r)
{
struct rb_node *n = rb_first(r);
return __key_serial_next(n);
}
static int proc_keys_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_keys_ops);
@ -104,10 +126,10 @@ static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
spin_lock(&key_serial_lock);
_p = rb_first(&key_serial_tree);
_p = key_serial_first(&key_serial_tree);
while (pos > 0 && _p) {
pos--;
_p = rb_next(_p);
_p = key_serial_next(_p);
}
return _p;
@ -117,7 +139,7 @@ static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
{
(*_pos)++;
return rb_next((struct rb_node *) v);
return key_serial_next((struct rb_node *) v);
}
@ -203,6 +225,27 @@ static int proc_keys_show(struct seq_file *m, void *v)
#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
static struct rb_node *__key_user_next(struct rb_node *n)
{
while (n) {
struct key_user *user = rb_entry(n, struct key_user, node);
if (user->user_ns == current_user_ns())
break;
n = rb_next(n);
}
return n;
}
static struct rb_node *key_user_next(struct rb_node *n)
{
return __key_user_next(rb_next(n));
}
static struct rb_node *key_user_first(struct rb_root *r)
{
struct rb_node *n = rb_first(r);
return __key_user_next(n);
}
/*****************************************************************************/
/*
* implement "/proc/key-users" to provide a list of the key users
@ -220,10 +263,10 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
spin_lock(&key_user_lock);
_p = rb_first(&key_user_tree);
_p = key_user_first(&key_user_tree);
while (pos > 0 && _p) {
pos--;
_p = rb_next(_p);
_p = key_user_next(_p);
}
return _p;
@ -233,7 +276,7 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
{
(*_pos)++;
return rb_next((struct rb_node *) v);
return key_user_next((struct rb_node *) v);
}


@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/user_namespace.h>
#include <asm/uaccess.h>
#include "internal.h"
@ -34,6 +35,7 @@ struct key_user root_key_user = {
.nkeys = ATOMIC_INIT(2),
.nikeys = ATOMIC_INIT(2),
.uid = 0,
.user_ns = &init_user_ns,
};
/*****************************************************************************/


@ -365,7 +365,7 @@ static struct key *construct_key_and_link(struct key_type *type,
kenter("");
user = key_user_lookup(current_fsuid());
user = key_user_lookup(current_fsuid(), current_user_ns());
if (!user)
return ERR_PTR(-ENOMEM);


@ -88,17 +88,16 @@ struct avc_entry {
u32 tsid;
u16 tclass;
struct av_decision avd;
atomic_t used; /* used recently */
};
struct avc_node {
struct avc_entry ae;
struct list_head list;
struct hlist_node list; /* anchored in avc_cache->slots[i] */
struct rcu_head rhead;
};
struct avc_cache {
struct list_head slots[AVC_CACHE_SLOTS];
struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
atomic_t lru_hint; /* LRU hint for reclaim scan */
atomic_t active_nodes;
@ -234,7 +233,7 @@ void __init avc_init(void)
int i;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
INIT_LIST_HEAD(&avc_cache.slots[i]);
INIT_HLIST_HEAD(&avc_cache.slots[i]);
spin_lock_init(&avc_cache.slots_lock[i]);
}
atomic_set(&avc_cache.active_nodes, 0);
@ -250,16 +249,20 @@ int avc_get_hash_stats(char *page)
{
int i, chain_len, max_chain_len, slots_used;
struct avc_node *node;
struct hlist_head *head;
rcu_read_lock();
slots_used = 0;
max_chain_len = 0;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
if (!list_empty(&avc_cache.slots[i])) {
head = &avc_cache.slots[i];
if (!hlist_empty(head)) {
struct hlist_node *next;
slots_used++;
chain_len = 0;
list_for_each_entry_rcu(node, &avc_cache.slots[i], list)
hlist_for_each_entry_rcu(node, next, head, list)
chain_len++;
if (chain_len > max_chain_len)
max_chain_len = chain_len;
@ -283,7 +286,7 @@ static void avc_node_free(struct rcu_head *rhead)
static void avc_node_delete(struct avc_node *node)
{
list_del_rcu(&node->list);
hlist_del_rcu(&node->list);
call_rcu(&node->rhead, avc_node_free);
atomic_dec(&avc_cache.active_nodes);
}
@ -297,7 +300,7 @@ static void avc_node_kill(struct avc_node *node)
static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
list_replace_rcu(&old->list, &new->list);
hlist_replace_rcu(&old->list, &new->list);
call_rcu(&old->rhead, avc_node_free);
atomic_dec(&avc_cache.active_nodes);
}
@ -307,29 +310,31 @@ static inline int avc_reclaim_node(void)
struct avc_node *node;
int hvalue, try, ecx;
unsigned long flags;
struct hlist_head *head;
struct hlist_node *next;
spinlock_t *lock;
for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
head = &avc_cache.slots[hvalue];
lock = &avc_cache.slots_lock[hvalue];
if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
if (!spin_trylock_irqsave(lock, flags))
continue;
rcu_read_lock();
list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
if (atomic_dec_and_test(&node->ae.used)) {
/* Recently Unused */
avc_node_delete(node);
avc_cache_stats_incr(reclaims);
ecx++;
if (ecx >= AVC_CACHE_RECLAIM) {
rcu_read_unlock();
spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
goto out;
}
hlist_for_each_entry(node, next, head, list) {
avc_node_delete(node);
avc_cache_stats_incr(reclaims);
ecx++;
if (ecx >= AVC_CACHE_RECLAIM) {
rcu_read_unlock();
spin_unlock_irqrestore(lock, flags);
goto out;
}
}
rcu_read_unlock();
spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
spin_unlock_irqrestore(lock, flags);
}
out:
return ecx;
@ -344,8 +349,7 @@ static struct avc_node *avc_alloc_node(void)
goto out;
INIT_RCU_HEAD(&node->rhead);
INIT_LIST_HEAD(&node->list);
atomic_set(&node->ae.used, 1);
INIT_HLIST_NODE(&node->list);
avc_cache_stats_incr(allocations);
if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
@ -355,21 +359,24 @@ out:
return node;
}
static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
{
node->ae.ssid = ssid;
node->ae.tsid = tsid;
node->ae.tclass = tclass;
memcpy(&node->ae.avd, &ae->avd, sizeof(node->ae.avd));
memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
}
static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
{
struct avc_node *node, *ret = NULL;
int hvalue;
struct hlist_head *head;
struct hlist_node *next;
hvalue = avc_hash(ssid, tsid, tclass);
list_for_each_entry_rcu(node, &avc_cache.slots[hvalue], list) {
head = &avc_cache.slots[hvalue];
hlist_for_each_entry_rcu(node, next, head, list) {
if (ssid == node->ae.ssid &&
tclass == node->ae.tclass &&
tsid == node->ae.tsid) {
@ -378,15 +385,6 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
}
}
if (ret == NULL) {
/* cache miss */
goto out;
}
/* cache hit */
if (atomic_read(&ret->ae.used) != 1)
atomic_set(&ret->ae.used, 1);
out:
return ret;
}
@ -395,30 +393,25 @@ out:
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
*
* Look up an AVC entry that is valid for the
* @requested permissions between the SID pair
* (@ssid, @tsid), interpreting the permissions
* based on @tclass. If a valid AVC entry exists,
* then this function returns the avc_node.
* Otherwise, this function returns NULL.
*/
static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested)
static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
{
struct avc_node *node;
avc_cache_stats_incr(lookups);
node = avc_search_node(ssid, tsid, tclass);
if (node && ((node->ae.avd.decided & requested) == requested)) {
if (node)
avc_cache_stats_incr(hits);
goto out;
}
else
avc_cache_stats_incr(misses);
node = NULL;
avc_cache_stats_incr(misses);
out:
return node;
}
@ -449,34 +442,41 @@ static int avc_latest_notif_update(int seqno, int is_insert)
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @ae: AVC entry
* @avd: resulting av decision
*
* Insert an AVC entry for the SID pair
* (@ssid, @tsid) and class @tclass.
* The access vectors and the sequence number are
* normally provided by the security server in
* response to a security_compute_av() call. If the
* sequence number @ae->avd.seqno is not less than the latest
* sequence number @avd->seqno is not less than the latest
* revocation notification, then the function copies
* the access vectors into a cache entry and returns
* the inserted avc_node. Otherwise, this function returns NULL.
*/
static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
{
struct avc_node *pos, *node = NULL;
int hvalue;
unsigned long flag;
if (avc_latest_notif_update(ae->avd.seqno, 1))
if (avc_latest_notif_update(avd->seqno, 1))
goto out;
node = avc_alloc_node();
if (node) {
hvalue = avc_hash(ssid, tsid, tclass);
avc_node_populate(node, ssid, tsid, tclass, ae);
struct hlist_head *head;
struct hlist_node *next;
spinlock_t *lock;
spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
hvalue = avc_hash(ssid, tsid, tclass);
avc_node_populate(node, ssid, tsid, tclass, avd);
head = &avc_cache.slots[hvalue];
lock = &avc_cache.slots_lock[hvalue];
spin_lock_irqsave(lock, flag);
hlist_for_each_entry(pos, next, head, list) {
if (pos->ae.ssid == ssid &&
pos->ae.tsid == tsid &&
pos->ae.tclass == tclass) {
@ -484,9 +484,9 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_en
goto found;
}
}
list_add_rcu(&node->list, &avc_cache.slots[hvalue]);
hlist_add_head_rcu(&node->list, head);
found:
spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
spin_unlock_irqrestore(lock, flag);
}
out:
return node;
@ -742,17 +742,22 @@ static inline int avc_sidcmp(u32 x, u32 y)
* @event : Updating event
* @perms : Permission mask bits
* @ssid,@tsid,@tclass : identifier of an AVC entry
* @seqno : sequence number when decision was made
*
* If a valid AVC entry doesn't exist, this function returns -ENOENT.
* If the internal kmalloc() returns NULL, this function returns -ENOMEM.
* Otherwise, this function updates the AVC entry. The original AVC entry
* will be released later by RCU.
*/
static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
u32 seqno)
{
int hvalue, rc = 0;
unsigned long flag;
struct avc_node *pos, *node, *orig = NULL;
struct hlist_head *head;
struct hlist_node *next;
spinlock_t *lock;
node = avc_alloc_node();
if (!node) {
@ -762,12 +767,17 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
/* Lock the target slot */
hvalue = avc_hash(ssid, tsid, tclass);
spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
head = &avc_cache.slots[hvalue];
lock = &avc_cache.slots_lock[hvalue];
spin_lock_irqsave(lock, flag);
hlist_for_each_entry(pos, next, head, list) {
if (ssid == pos->ae.ssid &&
tsid == pos->ae.tsid &&
tclass == pos->ae.tclass){
tclass == pos->ae.tclass &&
seqno == pos->ae.avd.seqno){
orig = pos;
break;
}
@ -783,7 +793,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
* Copy and replace original node.
*/
avc_node_populate(node, ssid, tsid, tclass, &orig->ae);
avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
switch (event) {
case AVC_CALLBACK_GRANT:
@ -808,7 +818,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
}
avc_node_replace(node, orig);
out_unlock:
spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
spin_unlock_irqrestore(lock, flag);
out:
return rc;
}
@ -823,18 +833,24 @@ int avc_ss_reset(u32 seqno)
int i, rc = 0, tmprc;
unsigned long flag;
struct avc_node *node;
struct hlist_head *head;
struct hlist_node *next;
spinlock_t *lock;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
head = &avc_cache.slots[i];
lock = &avc_cache.slots_lock[i];
spin_lock_irqsave(lock, flag);
/*
* With preemptable RCU, the outer spinlock does not
* prevent RCU grace periods from ending.
*/
rcu_read_lock();
list_for_each_entry(node, &avc_cache.slots[i], list)
hlist_for_each_entry(node, next, head, list)
avc_node_delete(node);
rcu_read_unlock();
spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
spin_unlock_irqrestore(lock, flag);
}
for (c = avc_callbacks; c; c = c->next) {
@ -875,10 +891,10 @@ int avc_ss_reset(u32 seqno)
int avc_has_perm_noaudit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
unsigned flags,
struct av_decision *avd)
struct av_decision *in_avd)
{
struct avc_node *node;
struct avc_entry entry, *p_ae;
struct av_decision avd_entry, *avd;
int rc = 0;
u32 denied;
@ -886,29 +902,34 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
rcu_read_lock();
node = avc_lookup(ssid, tsid, tclass, requested);
node = avc_lookup(ssid, tsid, tclass);
if (!node) {
rcu_read_unlock();
rc = security_compute_av(ssid, tsid, tclass, requested, &entry.avd);
if (in_avd)
avd = in_avd;
else
avd = &avd_entry;
rc = security_compute_av(ssid, tsid, tclass, requested, avd);
if (rc)
goto out;
rcu_read_lock();
node = avc_insert(ssid, tsid, tclass, &entry);
node = avc_insert(ssid, tsid, tclass, avd);
} else {
if (in_avd)
memcpy(in_avd, &node->ae.avd, sizeof(*in_avd));
avd = &node->ae.avd;
}
p_ae = node ? &node->ae : &entry;
if (avd)
memcpy(avd, &p_ae->avd, sizeof(*avd));
denied = requested & ~(p_ae->avd.allowed);
denied = requested & ~(avd->allowed);
if (denied) {
if (flags & AVC_STRICT)
rc = -EACCES;
else if (!selinux_enforcing || security_permissive_sid(ssid))
avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
tsid, tclass);
tsid, tclass, avd->seqno);
else
rc = -EACCES;
}


@ -89,7 +89,7 @@
#define XATTR_SELINUX_SUFFIX "selinux"
#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX
#define NUM_SEL_MNT_OPTS 4
#define NUM_SEL_MNT_OPTS 5
extern unsigned int policydb_loaded_version;
extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
@ -353,6 +353,7 @@ enum {
Opt_fscontext = 2,
Opt_defcontext = 3,
Opt_rootcontext = 4,
Opt_labelsupport = 5,
};
static const match_table_t tokens = {
@ -360,6 +361,7 @@ static const match_table_t tokens = {
{Opt_fscontext, FSCONTEXT_STR "%s"},
{Opt_defcontext, DEFCONTEXT_STR "%s"},
{Opt_rootcontext, ROOTCONTEXT_STR "%s"},
{Opt_labelsupport, LABELSUPP_STR},
{Opt_error, NULL},
};
@ -431,7 +433,7 @@ static int sb_finish_set_opts(struct super_block *sb)
}
}
sbsec->initialized = 1;
sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP);
if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
@ -441,6 +443,12 @@ static int sb_finish_set_opts(struct super_block *sb)
sb->s_id, sb->s_type->name,
labeling_behaviors[sbsec->behavior-1]);
if (sbsec->behavior == SECURITY_FS_USE_GENFS ||
sbsec->behavior == SECURITY_FS_USE_MNTPOINT ||
sbsec->behavior == SECURITY_FS_USE_NONE ||
sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
sbsec->flags &= ~SE_SBLABELSUPP;
/* Initialize the root inode. */
rc = inode_doinit_with_dentry(root_inode, root);
@ -487,23 +495,22 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
security_init_mnt_opts(opts);
if (!sbsec->initialized)
if (!(sbsec->flags & SE_SBINITIALIZED))
return -EINVAL;
if (!ss_initialized)
return -EINVAL;
/*
* if we ever use sbsec flags for anything other than tracking mount
* settings this is going to need a mask
*/
tmp = sbsec->flags;
tmp = sbsec->flags & SE_MNTMASK;
/* count the number of mount options for this sb */
for (i = 0; i < 8; i++) {
if (tmp & 0x01)
opts->num_mnt_opts++;
tmp >>= 1;
}
/* Check if the Label support flag is set */
if (sbsec->flags & SE_SBLABELSUPP)
opts->num_mnt_opts++;
opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
if (!opts->mnt_opts) {
@ -549,6 +556,10 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT;
}
if (sbsec->flags & SE_SBLABELSUPP) {
opts->mnt_opts[i] = NULL;
opts->mnt_opts_flags[i++] = SE_SBLABELSUPP;
}
BUG_ON(i != opts->num_mnt_opts);
@ -562,8 +573,10 @@ out_free:
static int bad_option(struct superblock_security_struct *sbsec, char flag,
u32 old_sid, u32 new_sid)
{
char mnt_flags = sbsec->flags & SE_MNTMASK;
/* check if the old mount command had the same options */
if (sbsec->initialized)
if (sbsec->flags & SE_SBINITIALIZED)
if (!(sbsec->flags & flag) ||
(old_sid != new_sid))
return 1;
@ -571,8 +584,8 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag,
/* check if we were passed the same options twice,
* aka someone passed context=a,context=b
*/
if (!sbsec->initialized)
if (sbsec->flags & flag)
if (!(sbsec->flags & SE_SBINITIALIZED))
if (mnt_flags & flag)
return 1;
return 0;
}
@ -626,7 +639,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
* this sb does not set any security options. (The first options
* will be used for both mounts)
*/
if (sbsec->initialized && (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)
if ((sbsec->flags & SE_SBINITIALIZED) && (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)
&& (num_opts == 0))
goto out;
@ -637,6 +650,9 @@ static int selinux_set_mnt_opts(struct super_block *sb,
*/
for (i = 0; i < num_opts; i++) {
u32 sid;
if (flags[i] == SE_SBLABELSUPP)
continue;
rc = security_context_to_sid(mount_options[i],
strlen(mount_options[i]), &sid);
if (rc) {
@ -690,19 +706,19 @@ static int selinux_set_mnt_opts(struct super_block *sb,
}
}
if (sbsec->initialized) {
if (sbsec->flags & SE_SBINITIALIZED) {
/* previously mounted with options, but not on this attempt? */
if (sbsec->flags && !num_opts)
if ((sbsec->flags & SE_MNTMASK) && !num_opts)
goto out_double_mount;
rc = 0;
goto out;
}
if (strcmp(sb->s_type->name, "proc") == 0)
sbsec->proc = 1;
sbsec->flags |= SE_SBPROC;
/* Determine the labeling behavior to use for this filesystem type. */
rc = security_fs_use(sbsec->proc ? "proc" : sb->s_type->name, &sbsec->behavior, &sbsec->sid);
rc = security_fs_use((sbsec->flags & SE_SBPROC) ? "proc" : sb->s_type->name, &sbsec->behavior, &sbsec->sid);
if (rc) {
printk(KERN_WARNING "%s: security_fs_use(%s) returned %d\n",
__func__, sb->s_type->name, rc);
@ -806,10 +822,10 @@ static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
}
/* how can we clone if the old one wasn't set up?? */
BUG_ON(!oldsbsec->initialized);
BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
/* if fs is reusing a sb, just let its options stand... */
if (newsbsec->initialized)
if (newsbsec->flags & SE_SBINITIALIZED)
return;
mutex_lock(&newsbsec->lock);
@ -917,7 +933,8 @@ static int selinux_parse_opts_str(char *options,
goto out_err;
}
break;
case Opt_labelsupport:
break;
default:
rc = -EINVAL;
printk(KERN_WARNING "SELinux: unknown mount option\n");
@ -999,7 +1016,12 @@ static void selinux_write_opts(struct seq_file *m,
char *prefix;
for (i = 0; i < opts->num_mnt_opts; i++) {
char *has_comma = strchr(opts->mnt_opts[i], ',');
char *has_comma;
if (opts->mnt_opts[i])
has_comma = strchr(opts->mnt_opts[i], ',');
else
has_comma = NULL;
switch (opts->mnt_opts_flags[i]) {
case CONTEXT_MNT:
@ -1014,6 +1036,10 @@ static void selinux_write_opts(struct seq_file *m,
case DEFCONTEXT_MNT:
prefix = DEFCONTEXT_STR;
break;
case SE_SBLABELSUPP:
seq_putc(m, ',');
seq_puts(m, LABELSUPP_STR);
continue;
default:
BUG();
};
@ -1209,7 +1235,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
goto out_unlock;
sbsec = inode->i_sb->s_security;
if (!sbsec->initialized) {
if (!(sbsec->flags & SE_SBINITIALIZED)) {
/* Defer initialization until selinux_complete_init,
after the initial policy is loaded and the security
server is ready to handle calls. */
@ -1237,19 +1263,26 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
dentry = d_find_alias(inode);
}
if (!dentry) {
printk(KERN_WARNING "SELinux: %s: no dentry for dev=%s "
"ino=%ld\n", __func__, inode->i_sb->s_id,
inode->i_ino);
/*
* this can be hit on boot when a file is accessed
* before the policy is loaded. When we load policy we
* may find inodes that have no dentry on the
* sbsec->isec_head list. No reason to complain as these
* will get fixed up the next time we go through
* inode_doinit with a dentry, before these inodes could
* be used again by userspace.
*/
goto out_unlock;
}
len = INITCONTEXTLEN;
context = kmalloc(len, GFP_NOFS);
context = kmalloc(len+1, GFP_NOFS);
if (!context) {
rc = -ENOMEM;
dput(dentry);
goto out_unlock;
}
context[len] = '\0';
rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX,
context, len);
if (rc == -ERANGE) {
@ -1262,12 +1295,13 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
}
kfree(context);
len = rc;
context = kmalloc(len, GFP_NOFS);
context = kmalloc(len+1, GFP_NOFS);
if (!context) {
rc = -ENOMEM;
dput(dentry);
goto out_unlock;
}
context[len] = '\0';
rc = inode->i_op->getxattr(dentry,
XATTR_NAME_SELINUX,
context, len);
@ -1289,10 +1323,19 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
sbsec->def_sid,
GFP_NOFS);
if (rc) {
printk(KERN_WARNING "SELinux: %s: context_to_sid(%s) "
"returned %d for dev=%s ino=%ld\n",
__func__, context, -rc,
inode->i_sb->s_id, inode->i_ino);
char *dev = inode->i_sb->s_id;
unsigned long ino = inode->i_ino;
if (rc == -EINVAL) {
if (printk_ratelimit())
printk(KERN_NOTICE "SELinux: inode=%lu on dev=%s was found to have an invalid "
"context=%s. This indicates you may need to relabel the inode or the "
"filesystem in question.\n", ino, dev, context);
} else {
printk(KERN_WARNING "SELinux: %s: context_to_sid(%s) "
"returned %d for dev=%s ino=%ld\n",
__func__, context, -rc, dev, ino);
}
kfree(context);
/* Leave with the unlabeled SID */
rc = 0;
@ -1326,7 +1369,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
/* Default to the fs superblock SID. */
isec->sid = sbsec->sid;
if (sbsec->proc && !S_ISLNK(inode->i_mode)) {
if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
struct proc_inode *proci = PROC_I(inode);
if (proci->pde) {
isec->sclass = inode_mode_to_security_class(inode->i_mode);
@ -1587,7 +1630,7 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
if (!newsid || sbsec->behavior == SECURITY_FS_USE_MNTPOINT) {
if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
rc = security_transition_sid(sid, dsec->sid, tclass, &newsid);
if (rc)
return rc;
@ -1801,6 +1844,8 @@ static inline u32 open_file_to_av(struct file *file)
av |= FIFO_FILE__OPEN;
else if (S_ISDIR(mode))
av |= DIR__OPEN;
else if (S_ISSOCK(mode))
av |= SOCK_FILE__OPEN;
else
printk(KERN_ERR "SELinux: WARNING: inside %s with "
"unknown mode:%o\n", __func__, mode);
@ -1815,7 +1860,7 @@ static int selinux_ptrace_may_access(struct task_struct *child,
{
int rc;
rc = secondary_ops->ptrace_may_access(child, mode);
rc = cap_ptrace_may_access(child, mode);
if (rc)
return rc;
@ -1832,7 +1877,7 @@ static int selinux_ptrace_traceme(struct task_struct *parent)
{
int rc;
rc = secondary_ops->ptrace_traceme(parent);
rc = cap_ptrace_traceme(parent);
if (rc)
return rc;
@ -1848,7 +1893,7 @@ static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
if (error)
return error;
return secondary_ops->capget(target, effective, inheritable, permitted);
return cap_capget(target, effective, inheritable, permitted);
}
static int selinux_capset(struct cred *new, const struct cred *old,
@ -1858,7 +1903,7 @@ static int selinux_capset(struct cred *new, const struct cred *old,
{
int error;
error = secondary_ops->capset(new, old,
error = cap_capset(new, old,
effective, inheritable, permitted);
if (error)
return error;
@ -1866,12 +1911,22 @@ static int selinux_capset(struct cred *new, const struct cred *old,
return cred_has_perm(old, new, PROCESS__SETCAP);
}
/*
* (This comment used to live with the selinux_task_setuid hook,
* which was removed).
*
* Since setuid only affects the current process, and since the SELinux
* controls are not based on the Linux identity attributes, SELinux does not
* need to control this operation. However, SELinux does control the use of
* the CAP_SETUID and CAP_SETGID capabilities using the capable hook.
*/
static int selinux_capable(struct task_struct *tsk, const struct cred *cred,
int cap, int audit)
{
int rc;
rc = secondary_ops->capable(tsk, cred, cap, audit);
rc = cap_capable(tsk, cred, cap, audit);
if (rc)
return rc;
@ -1997,7 +2052,7 @@ static int selinux_syslog(int type)
{
int rc;
rc = secondary_ops->syslog(type);
rc = cap_syslog(type);
if (rc)
return rc;
@ -2028,10 +2083,6 @@ static int selinux_syslog(int type)
* mapping. 0 means there is enough memory for the allocation to
* succeed and -ENOMEM implies there is not.
*
* Note that secondary_ops->capable and task_has_perm_noaudit return 0
* if the capability is granted, but __vm_enough_memory requires 1 if
* the capability is granted.
*
* Do not audit the selinux permission check, as this is applied to all
* processes that allocate mappings.
*/
@ -2058,7 +2109,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
struct inode *inode = bprm->file->f_path.dentry->d_inode;
int rc;
rc = secondary_ops->bprm_set_creds(bprm);
rc = cap_bprm_set_creds(bprm);
if (rc)
return rc;
@ -2156,11 +2207,6 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
return 0;
}
static int selinux_bprm_check_security(struct linux_binprm *bprm)
{
return secondary_ops->bprm_check_security(bprm);
}
static int selinux_bprm_secureexec(struct linux_binprm *bprm)
{
const struct cred *cred = current_cred();
@ -2180,7 +2226,7 @@ static int selinux_bprm_secureexec(struct linux_binprm *bprm)
PROCESS__NOATSECURE, NULL);
}
return (atsecure || secondary_ops->bprm_secureexec(bprm));
return (atsecure || cap_bprm_secureexec(bprm));
}
extern struct vfsmount *selinuxfs_mount;
@ -2290,8 +2336,6 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
struct rlimit *rlim, *initrlim;
int rc, i;
secondary_ops->bprm_committing_creds(bprm);
new_tsec = bprm->cred->security;
if (new_tsec->sid == new_tsec->osid)
return;
@ -2337,8 +2381,6 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
int rc, i;
unsigned long flags;
secondary_ops->bprm_committed_creds(bprm);
osid = tsec->osid;
sid = tsec->sid;
@ -2400,7 +2442,8 @@ static inline int selinux_option(char *option, int len)
return (match_prefix(CONTEXT_STR, sizeof(CONTEXT_STR)-1, option, len) ||
match_prefix(FSCONTEXT_STR, sizeof(FSCONTEXT_STR)-1, option, len) ||
match_prefix(DEFCONTEXT_STR, sizeof(DEFCONTEXT_STR)-1, option, len) ||
match_prefix(ROOTCONTEXT_STR, sizeof(ROOTCONTEXT_STR)-1, option, len));
match_prefix(ROOTCONTEXT_STR, sizeof(ROOTCONTEXT_STR)-1, option, len) ||
match_prefix(LABELSUPP_STR, sizeof(LABELSUPP_STR)-1, option, len));
}
static inline void take_option(char **to, char *from, int *first, int len)
@ -2513,11 +2556,6 @@ static int selinux_mount(char *dev_name,
void *data)
{
const struct cred *cred = current_cred();
int rc;
rc = secondary_ops->sb_mount(dev_name, path, type, flags, data);
if (rc)
return rc;
if (flags & MS_REMOUNT)
return superblock_has_perm(cred, path->mnt->mnt_sb,
@ -2530,11 +2568,6 @@ static int selinux_mount(char *dev_name,
static int selinux_umount(struct vfsmount *mnt, int flags)
{
const struct cred *cred = current_cred();
int rc;
rc = secondary_ops->sb_umount(mnt, flags);
if (rc)
return rc;
return superblock_has_perm(cred, mnt->mnt_sb,
FILESYSTEM__UNMOUNT, NULL);
@ -2570,7 +2603,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
sid = tsec->sid;
newsid = tsec->create_sid;
if (!newsid || sbsec->behavior == SECURITY_FS_USE_MNTPOINT) {
if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
rc = security_transition_sid(sid, dsec->sid,
inode_mode_to_security_class(inode->i_mode),
&newsid);
@ -2585,14 +2618,14 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
}
/* Possibly defer initialization to selinux_complete_init. */
if (sbsec->initialized) {
if (sbsec->flags & SE_SBINITIALIZED) {
struct inode_security_struct *isec = inode->i_security;
isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
isec->initialized = 1;
}
if (!ss_initialized || sbsec->behavior == SECURITY_FS_USE_MNTPOINT)
if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP))
return -EOPNOTSUPP;
if (name) {
@ -2622,21 +2655,11 @@ static int selinux_inode_create(struct inode *dir, struct dentry *dentry, int ma
static int selinux_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
int rc;
rc = secondary_ops->inode_link(old_dentry, dir, new_dentry);
if (rc)
return rc;
return may_link(dir, old_dentry, MAY_LINK);
}
static int selinux_inode_unlink(struct inode *dir, struct dentry *dentry)
{
int rc;
rc = secondary_ops->inode_unlink(dir, dentry);
if (rc)
return rc;
return may_link(dir, dentry, MAY_UNLINK);
}
@ -2657,12 +2680,6 @@ static int selinux_inode_rmdir(struct inode *dir, struct dentry *dentry)
static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
int rc;
rc = secondary_ops->inode_mknod(dir, dentry, mode, dev);
if (rc)
return rc;
return may_create(dir, dentry, inode_mode_to_security_class(mode));
}
@ -2682,22 +2699,13 @@ static int selinux_inode_readlink(struct dentry *dentry)
static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *nameidata)
{
const struct cred *cred = current_cred();
int rc;
rc = secondary_ops->inode_follow_link(dentry, nameidata);
if (rc)
return rc;
return dentry_has_perm(cred, NULL, dentry, FILE__READ);
}
static int selinux_inode_permission(struct inode *inode, int mask)
{
const struct cred *cred = current_cred();
int rc;
rc = secondary_ops->inode_permission(inode, mask);
if (rc)
return rc;
if (!mask) {
/* No permission to check. Existence test. */
@ -2711,11 +2719,6 @@ static int selinux_inode_permission(struct inode *inode, int mask)
static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
const struct cred *cred = current_cred();
int rc;
rc = secondary_ops->inode_setattr(dentry, iattr);
if (rc)
return rc;
if (iattr->ia_valid & ATTR_FORCE)
return 0;
@ -2769,7 +2772,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return selinux_inode_setotherxattr(dentry, name);
sbsec = inode->i_sb->s_security;
if (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)
if (!(sbsec->flags & SE_SBLABELSUPP))
return -EOPNOTSUPP;
if (!is_owner_or_cap(inode))
@ -2931,16 +2934,6 @@ static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t
return len;
}
static int selinux_inode_need_killpriv(struct dentry *dentry)
{
return secondary_ops->inode_need_killpriv(dentry);
}
static int selinux_inode_killpriv(struct dentry *dentry)
{
return secondary_ops->inode_killpriv(dentry);
}
static void selinux_inode_getsecid(const struct inode *inode, u32 *secid)
{
struct inode_security_struct *isec = inode->i_security;
@ -3078,18 +3071,13 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
unsigned long prot)
{
const struct cred *cred = current_cred();
int rc;
rc = secondary_ops->file_mprotect(vma, reqprot, prot);
if (rc)
return rc;
if (selinux_checkreqprot)
prot = reqprot;
#ifndef CONFIG_PPC32
if ((prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
rc = 0;
int rc = 0;
if (vma->vm_start >= vma->vm_mm->start_brk &&
vma->vm_end <= vma->vm_mm->brk) {
rc = cred_has_perm(cred, cred, PROCESS__EXECHEAP);
@ -3239,12 +3227,6 @@ static int selinux_dentry_open(struct file *file, const struct cred *cred)
static int selinux_task_create(unsigned long clone_flags)
{
int rc;
rc = secondary_ops->task_create(clone_flags);
if (rc)
return rc;
return current_has_perm(current, PROCESS__FORK);
}
@ -3277,14 +3259,6 @@ static int selinux_cred_prepare(struct cred *new, const struct cred *old,
return 0;
}
/*
* commit new credentials
*/
static void selinux_cred_commit(struct cred *new, const struct cred *old)
{
secondary_ops->cred_commit(new, old);
}
/*
* set the security data for a kernel service
* - all the creation contexts are set to unlabelled
@ -3329,29 +3303,6 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
return 0;
}
static int selinux_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
{
/* Since setuid only affects the current process, and
since the SELinux controls are not based on the Linux
identity attributes, SELinux does not need to control
this operation. However, SELinux does control the use
of the CAP_SETUID and CAP_SETGID capabilities using the
capable hook. */
return 0;
}
static int selinux_task_fix_setuid(struct cred *new, const struct cred *old,
int flags)
{
return secondary_ops->task_fix_setuid(new, old, flags);
}
static int selinux_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags)
{
/* See the comment for setuid above. */
return 0;
}
static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
{
return current_has_perm(p, PROCESS__SETPGID);
@ -3372,17 +3323,11 @@ static void selinux_task_getsecid(struct task_struct *p, u32 *secid)
*secid = task_sid(p);
}
static int selinux_task_setgroups(struct group_info *group_info)
{
/* See the comment for setuid above. */
return 0;
}
static int selinux_task_setnice(struct task_struct *p, int nice)
{
int rc;
rc = secondary_ops->task_setnice(p, nice);
rc = cap_task_setnice(p, nice);
if (rc)
return rc;
@ -3393,7 +3338,7 @@ static int selinux_task_setioprio(struct task_struct *p, int ioprio)
{
int rc;
rc = secondary_ops->task_setioprio(p, ioprio);
rc = cap_task_setioprio(p, ioprio);
if (rc)
return rc;
@ -3408,11 +3353,6 @@ static int selinux_task_getioprio(struct task_struct *p)
static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
{
struct rlimit *old_rlim = current->signal->rlim + resource;
int rc;
rc = secondary_ops->task_setrlimit(resource, new_rlim);
if (rc)
return rc;
/* Control the ability to change the hard limit (whether
lowering or raising it), so that the hard limit can
@ -3428,7 +3368,7 @@ static int selinux_task_setscheduler(struct task_struct *p, int policy, struct s
{
int rc;
rc = secondary_ops->task_setscheduler(p, policy, lp);
rc = cap_task_setscheduler(p, policy, lp);
if (rc)
return rc;
@ -3451,10 +3391,6 @@ static int selinux_task_kill(struct task_struct *p, struct siginfo *info,
u32 perm;
int rc;
rc = secondary_ops->task_kill(p, info, sig, secid);
if (rc)
return rc;
if (!sig)
perm = PROCESS__SIGNULL; /* null signal; existence test */
else
@ -3467,18 +3403,6 @@ static int selinux_task_kill(struct task_struct *p, struct siginfo *info,
return rc;
}
static int selinux_task_prctl(int option,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5)
{
/* The current prctl operations do not appear to require
any SELinux controls since they merely observe or modify
the state of the current process. */
return secondary_ops->task_prctl(option, arg2, arg3, arg4, arg5);
}
static int selinux_task_wait(struct task_struct *p)
{
return task_has_perm(p, current, PROCESS__SIGCHLD);
@ -4047,10 +3971,6 @@ static int selinux_socket_unix_stream_connect(struct socket *sock,
struct avc_audit_data ad;
int err;
err = secondary_ops->unix_stream_connect(sock, other, newsk);
if (err)
return err;
isec = SOCK_INODE(sock)->i_security;
other_isec = SOCK_INODE(other)->i_security;
@ -4844,7 +4764,7 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
{
int err;
err = secondary_ops->netlink_send(sk, skb);
err = cap_netlink_send(sk, skb);
if (err)
return err;
@ -4859,7 +4779,7 @@ static int selinux_netlink_recv(struct sk_buff *skb, int capability)
int err;
struct avc_audit_data ad;
err = secondary_ops->netlink_recv(skb, capability);
err = cap_netlink_recv(skb, capability);
if (err)
return err;
@ -5167,11 +5087,6 @@ static int selinux_shm_shmat(struct shmid_kernel *shp,
char __user *shmaddr, int shmflg)
{
u32 perms;
int rc;
rc = secondary_ops->shm_shmat(shp, shmaddr, shmflg);
if (rc)
return rc;
if (shmflg & SHM_RDONLY)
perms = SHM__READ;
@ -5581,7 +5496,6 @@ static struct security_operations selinux_ops = {
.netlink_recv = selinux_netlink_recv,
.bprm_set_creds = selinux_bprm_set_creds,
.bprm_check_security = selinux_bprm_check_security,
.bprm_committing_creds = selinux_bprm_committing_creds,
.bprm_committed_creds = selinux_bprm_committed_creds,
.bprm_secureexec = selinux_bprm_secureexec,
@ -5623,8 +5537,6 @@ static struct security_operations selinux_ops = {
.inode_getsecurity = selinux_inode_getsecurity,
.inode_setsecurity = selinux_inode_setsecurity,
.inode_listsecurity = selinux_inode_listsecurity,
.inode_need_killpriv = selinux_inode_need_killpriv,
.inode_killpriv = selinux_inode_killpriv,
.inode_getsecid = selinux_inode_getsecid,
.file_permission = selinux_file_permission,
@ -5644,17 +5556,12 @@ static struct security_operations selinux_ops = {
.task_create = selinux_task_create,
.cred_free = selinux_cred_free,
.cred_prepare = selinux_cred_prepare,
.cred_commit = selinux_cred_commit,
.kernel_act_as = selinux_kernel_act_as,
.kernel_create_files_as = selinux_kernel_create_files_as,
.task_setuid = selinux_task_setuid,
.task_fix_setuid = selinux_task_fix_setuid,
.task_setgid = selinux_task_setgid,
.task_setpgid = selinux_task_setpgid,
.task_getpgid = selinux_task_getpgid,
.task_getsid = selinux_task_getsid,
.task_getsecid = selinux_task_getsecid,
.task_setgroups = selinux_task_setgroups,
.task_setnice = selinux_task_setnice,
.task_setioprio = selinux_task_setioprio,
.task_getioprio = selinux_task_getioprio,
@ -5664,7 +5571,6 @@ static struct security_operations selinux_ops = {
.task_movememory = selinux_task_movememory,
.task_kill = selinux_task_kill,
.task_wait = selinux_task_wait,
.task_prctl = selinux_task_prctl,
.task_to_inode = selinux_task_to_inode,
.ipc_permission = selinux_ipc_permission,
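Earlier in these hooks.c hunks, open_file_to_av() gains an S_ISSOCK branch so that opening a socket file maps to the new SOCK_FILE__OPEN permission. A standalone sketch of that mode-to-permission dispatch; the enum values are stand-ins (only SOCK_FILE__OPEN's real value, 0x00020000UL, appears in these hunks):

#include <sys/stat.h>

enum example_open_av { EX_FIFO_OPEN, EX_DIR_OPEN, EX_SOCK_OPEN, EX_UNKNOWN };

/* Pick the "open" permission class from the inode mode, as the hunk does. */
static enum example_open_av classify_open_av(mode_t mode)
{
    if (S_ISFIFO(mode))
        return EX_FIFO_OPEN;
    if (S_ISDIR(mode))
        return EX_DIR_OPEN;
    if (S_ISSOCK(mode))         /* the branch added by these hunks */
        return EX_SOCK_OPEN;
    return EX_UNKNOWN;          /* the kernel logs a warning in this case */
}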

View file

@ -24,6 +24,7 @@
S_(SECCLASS_CHR_FILE, CHR_FILE__EXECMOD, "execmod")
S_(SECCLASS_CHR_FILE, CHR_FILE__OPEN, "open")
S_(SECCLASS_BLK_FILE, BLK_FILE__OPEN, "open")
S_(SECCLASS_SOCK_FILE, SOCK_FILE__OPEN, "open")
S_(SECCLASS_FIFO_FILE, FIFO_FILE__OPEN, "open")
S_(SECCLASS_FD, FD__USE, "use")
S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__CONNECTTO, "connectto")
@ -152,6 +153,7 @@
S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_WRITE, "nlmsg_write")
S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_RELAY, "nlmsg_relay")
S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_READPRIV, "nlmsg_readpriv")
S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT, "nlmsg_tty_audit")
S_(SECCLASS_NETLINK_IP6FW_SOCKET, NETLINK_IP6FW_SOCKET__NLMSG_READ, "nlmsg_read")
S_(SECCLASS_NETLINK_IP6FW_SOCKET, NETLINK_IP6FW_SOCKET__NLMSG_WRITE, "nlmsg_write")
S_(SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, "sendto")

View file

@ -174,6 +174,7 @@
#define SOCK_FILE__SWAPON 0x00004000UL
#define SOCK_FILE__QUOTAON 0x00008000UL
#define SOCK_FILE__MOUNTON 0x00010000UL
#define SOCK_FILE__OPEN 0x00020000UL
#define FIFO_FILE__IOCTL 0x00000001UL
#define FIFO_FILE__READ 0x00000002UL
#define FIFO_FILE__WRITE 0x00000004UL
@ -707,6 +708,7 @@
#define NETLINK_AUDIT_SOCKET__NLMSG_WRITE 0x00800000UL
#define NETLINK_AUDIT_SOCKET__NLMSG_RELAY 0x01000000UL
#define NETLINK_AUDIT_SOCKET__NLMSG_READPRIV 0x02000000UL
#define NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT 0x04000000UL
#define NETLINK_IP6FW_SOCKET__IOCTL 0x00000001UL
#define NETLINK_IP6FW_SOCKET__READ 0x00000002UL
#define NETLINK_IP6FW_SOCKET__WRITE 0x00000004UL

View file

@ -60,9 +60,7 @@ struct superblock_security_struct {
u32 def_sid; /* default SID for labeling */
u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
unsigned int behavior; /* labeling behavior */
unsigned char initialized; /* initialization flag */
unsigned char flags; /* which mount options were specified */
unsigned char proc; /* proc fs */
struct mutex lock;
struct list_head isec_head;
spinlock_t isec_lock;

View file

@ -37,15 +37,23 @@
#define POLICYDB_VERSION_MAX POLICYDB_VERSION_BOUNDARY
#endif
/* Mask for just the mount related flags */
#define SE_MNTMASK 0x0f
/* Super block security struct flags for mount options */
#define CONTEXT_MNT 0x01
#define FSCONTEXT_MNT 0x02
#define ROOTCONTEXT_MNT 0x04
#define DEFCONTEXT_MNT 0x08
/* Non-mount related flags */
#define SE_SBINITIALIZED 0x10
#define SE_SBPROC 0x20
#define SE_SBLABELSUPP 0x40
#define CONTEXT_STR "context="
#define FSCONTEXT_STR "fscontext="
#define ROOTCONTEXT_STR "rootcontext="
#define DEFCONTEXT_STR "defcontext="
#define LABELSUPP_STR "seclabel"
struct netlbl_lsm_secattr;
@ -80,7 +88,6 @@ int security_policycap_supported(unsigned int req_cap);
#define SEL_VEC_MAX 32
struct av_decision {
u32 allowed;
u32 decided;
u32 auditallow;
u32 auditdeny;
u32 seqno;
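The security.h hunk above replaces the separate initialized/proc bytes in the superblock security struct with bit flags: the four mount-option bits sit below SE_MNTMASK, while SE_SBINITIALIZED, SE_SBPROC and SE_SBLABELSUPP live above it, so masking with SE_MNTMASK still yields only the mount options. A small standalone sketch of that masking, with the values copied from the hunk:

#include <assert.h>

#define SE_MNTMASK       0x0f
#define CONTEXT_MNT      0x01
#define FSCONTEXT_MNT    0x02
#define ROOTCONTEXT_MNT  0x04
#define DEFCONTEXT_MNT   0x08
#define SE_SBINITIALIZED 0x10
#define SE_SBPROC        0x20
#define SE_SBLABELSUPP   0x40

int main(void)
{
    unsigned char flags = CONTEXT_MNT | SE_SBINITIALIZED | SE_SBLABELSUPP;

    /* Only the mount-option bits survive the mask... */
    assert((flags & SE_MNTMASK) == CONTEXT_MNT);
    /* ...while the state bits are tested individually. */
    assert(flags & SE_SBINITIALIZED);
    assert(!(flags & SE_SBPROC));
    return 0;
}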

View file

@ -113,7 +113,7 @@ static struct nlmsg_perm nlmsg_audit_perms[] =
{ AUDIT_USER, NETLINK_AUDIT_SOCKET__NLMSG_RELAY },
{ AUDIT_SIGNAL_INFO, NETLINK_AUDIT_SOCKET__NLMSG_READ },
{ AUDIT_TTY_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ },
{ AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
{ AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT },
};

View file

@ -595,7 +595,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT,
"%x %x %x %x %u",
avd.allowed, avd.decided,
avd.allowed, 0xffffffff,
avd.auditallow, avd.auditdeny,
avd.seqno);
out2:

View file

@ -407,7 +407,6 @@ static int context_struct_compute_av(struct context *scontext,
* Initialize the access vectors to the default values.
*/
avd->allowed = 0;
avd->decided = 0xffffffff;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
avd->seqno = latest_granting;
@ -743,7 +742,6 @@ int security_compute_av(u32 ssid,
if (!ss_initialized) {
avd->allowed = 0xffffffff;
avd->decided = 0xffffffff;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
avd->seqno = latest_granting;

View file

@ -162,8 +162,8 @@ int smk_access(char *subject_label, char *object_label, int request)
/**
* smk_curacc - determine if current has a specific access to an object
* @object_label: a pointer to the object's Smack label
* @request: the access requested, in "MAY" format
* @obj_label: a pointer to the object's Smack label
* @mode: the access requested, in "MAY" format
*
* This function checks the current subject label/object label pair
* in the access rule list and returns 0 if the access is permitted,

View file

@ -91,6 +91,7 @@ struct inode_smack *new_inode_smack(char *smack)
/**
* smack_ptrace_may_access - Smack approval on PTRACE_ATTACH
* @ctp: child task pointer
* @mode: ptrace attachment mode
*
* Returns 0 if access is OK, an error code otherwise
*
@ -203,9 +204,8 @@ static void smack_sb_free_security(struct super_block *sb)
/**
* smack_sb_copy_data - copy mount options data for processing
* @type: file system type
* @orig: where to start
* @smackopts
* @smackopts: mount options string
*
* Returns 0 on success or -ENOMEM on error.
*
@ -331,7 +331,7 @@ static int smack_sb_statfs(struct dentry *dentry)
/**
* smack_sb_mount - Smack check for mounting
* @dev_name: unused
* @nd: mount point
* @path: mount point
* @type: unused
* @flags: unused
* @data: unused
@ -370,7 +370,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
/**
* smack_inode_alloc_security - allocate an inode blob
* @inode - the inode in need of a blob
* @inode: the inode in need of a blob
*
* Returns 0 if it gets a blob, -ENOMEM otherwise
*/
@ -384,7 +384,7 @@ static int smack_inode_alloc_security(struct inode *inode)
/**
* smack_inode_free_security - free an inode blob
* @inode - the inode with a blob
* @inode: the inode with a blob
*
* Clears the blob pointer in inode
*/
@ -538,7 +538,6 @@ static int smack_inode_rename(struct inode *old_inode,
* smack_inode_permission - Smack version of permission()
* @inode: the inode in question
* @mask: the access requested
* @nd: unused
*
* This is the important Smack hook.
*
@ -701,8 +700,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
* @inode: the object
* @name: attribute name
* @buffer: where to put the result
* @size: size of the buffer
* @err: unused
* @alloc: unused
*
* Returns the size of the attribute or an error code
*/
@ -864,7 +862,7 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
/**
* smack_file_lock - Smack check on file locking
* @file: the object
* @cmd unused
* @cmd: unused
*
* Returns 0 if current has write access, error code otherwise
*/
@ -1003,8 +1001,8 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
return 0;
}
/*
* commit new credentials
/**
* smack_cred_commit - commit new credentials
* @new: the new credentials
* @old: the original credentials
*/
@ -1014,8 +1012,8 @@ static void smack_cred_commit(struct cred *new, const struct cred *old)
/**
* smack_kernel_act_as - Set the subjective context in a set of credentials
* @new points to the set of credentials to be modified.
* @secid specifies the security ID to be set
* @new: points to the set of credentials to be modified.
* @secid: specifies the security ID to be set
*
* Set the security data for a kernel service.
*/
@ -1032,8 +1030,8 @@ static int smack_kernel_act_as(struct cred *new, u32 secid)
/**
* smack_kernel_create_files_as - Set the file creation label in a set of creds
* @new points to the set of credentials to be modified
* @inode points to the inode to use as a reference
* @new: points to the set of credentials to be modified
* @inode: points to the inode to use as a reference
*
* Set the file creation context in a set of credentials to the same
* as the objective context of the specified inode
@ -1242,7 +1240,7 @@ static int smack_task_wait(struct task_struct *p)
/**
* smack_task_to_inode - copy task smack into the inode blob
* @p: task to copy from
* inode: inode to copy to
* @inode: inode to copy to
*
* Sets the smack pointer in the inode security blob
*/
@ -1260,7 +1258,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
* smack_sk_alloc_security - Allocate a socket blob
* @sk: the socket
* @family: unused
* @priority: memory allocation priority
* @gfp_flags: memory allocation flags
*
* Assign Smack pointers to current
*
@ -1974,7 +1972,7 @@ static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
/**
* smack_ipc_getsecid - Extract smack security id
* @ipcp: the object permissions
* @ipp: the object permissions
* @secid: where result will be saved
*/
static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid)
@ -2251,7 +2249,7 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other)
/**
* smack_socket_sendmsg - Smack check based on destination host
* @sock: the socket
* @msghdr: the message
* @msg: the message
* @size: the size of the message
*
* Return 0 if the current subject can write to the destination
@ -2292,8 +2290,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
/**
* smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat
* pair to smack
* smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack
* @sap: netlabel secattr
* @sip: where to put the result
*
@ -2414,7 +2411,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
* @sock: the socket
* @optval: user's destination
* @optlen: size thereof
* @len: max thereoe
* @len: max thereof
*
* returns zero on success, an error code otherwise
*/
@ -2749,7 +2746,7 @@ static void smack_audit_rule_free(void *vrule)
#endif /* CONFIG_AUDIT */
/*
/**
* smack_secid_to_secctx - return the smack label for a secid
* @secid: incoming integer
* @secdata: destination
@ -2766,7 +2763,7 @@ static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
return 0;
}
/*
/**
* smack_secctx_to_secid - return the secid for a smack label
* @secdata: smack label
* @seclen: how long result is
@ -2780,11 +2777,10 @@ static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
return 0;
}
/*
/**
* smack_release_secctx - don't do anything.
* @key_ref: unused
* @context: unused
* @perm: unused
* @secdata: unused
* @seclen: unused
*
* Exists to make sure nothing gets done, and properly
*/

View file

@ -245,7 +245,7 @@ out:
/**
* smk_write_load - write() for /smack/load
* @filp: file pointer, not actually used
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start - must be 0
@ -402,6 +402,7 @@ static void smk_cipso_doi(void)
/**
* smk_unlbl_ambient - initialize the unlabeled domain
* @oldambient: previous domain string
*/
static void smk_unlbl_ambient(char *oldambient)
{
@ -513,7 +514,7 @@ static int smk_open_cipso(struct inode *inode, struct file *file)
/**
* smk_write_cipso - write() for /smack/cipso
* @filp: file pointer, not actually used
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start
@ -733,7 +734,7 @@ static void smk_netlbladdr_insert(struct smk_netlbladdr *new)
/**
* smk_write_netlbladdr - write() for /smack/netlabel
* @filp: file pointer, not actually used
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start
@ -884,7 +885,7 @@ static ssize_t smk_read_doi(struct file *filp, char __user *buf,
/**
* smk_write_doi - write() for /smack/doi
* @filp: file pointer, not actually used
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start
@ -949,7 +950,7 @@ static ssize_t smk_read_direct(struct file *filp, char __user *buf,
/**
* smk_write_direct - write() for /smack/direct
* @filp: file pointer, not actually used
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start
@ -1024,7 +1025,7 @@ static ssize_t smk_read_ambient(struct file *filp, char __user *buf,
/**
* smk_write_ambient - write() for /smack/ambient
* @filp: file pointer, not actually used
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start
@ -1099,7 +1100,7 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf,
/**
* smk_write_onlycap - write() for /smack/onlycap
* @filp: file pointer, not actually used
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start

11
security/tomoyo/Kconfig Normal file
View file

@ -0,0 +1,11 @@
config SECURITY_TOMOYO
bool "TOMOYO Linux Support"
depends on SECURITY
select SECURITYFS
select SECURITY_PATH
default n
help
This selects TOMOYO Linux, pathname-based access control.
Required userspace tools and further information may be
found at <http://tomoyo.sourceforge.jp/>.
If you are unsure how to answer this question, answer N.

1
security/tomoyo/Makefile Normal file
View file

@ -0,0 +1 @@
obj-y = common.o realpath.o tomoyo.o domain.o file.o

2206
security/tomoyo/common.c Normal file

The diff for this file is not shown because of its large size.

359
security/tomoyo/common.h Normal file
View file

@ -0,0 +1,359 @@
/*
* security/tomoyo/common.h
*
* Common functions for TOMOYO.
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
* Version: 2.2.0-pre 2009/02/01
*
*/
#ifndef _SECURITY_TOMOYO_COMMON_H
#define _SECURITY_TOMOYO_COMMON_H
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/kmod.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/list.h>
struct dentry;
struct vfsmount;
/* Temporary buffer for holding pathnames. */
struct tomoyo_page_buffer {
char buffer[4096];
};
/* Structure for holding a token. */
struct tomoyo_path_info {
const char *name;
u32 hash; /* = full_name_hash(name, strlen(name)) */
u16 total_len; /* = strlen(name) */
u16 const_len; /* = tomoyo_const_part_length(name) */
bool is_dir; /* = tomoyo_strendswith(name, "/") */
bool is_patterned; /* = tomoyo_path_contains_pattern(name) */
u16 depth; /* = tomoyo_path_depth(name) */
};
/*
* This is the max length of a token.
*
* A token consists of only ASCII printable characters.
* Non-printable characters in a token are represented as \ooo style
* octal strings. Thus, \ itself is represented as \\.
*/
#define TOMOYO_MAX_PATHNAME_LEN 4000
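/*
 * Illustrative sketch only (not part of the original header): one way a
 * single byte could be encoded under the rule described above.  The
 * helper name is hypothetical.
 */
static inline int tomoyo_example_encode_byte(const unsigned char c, char *out)
{
    if (c == '\\') {            /* "\" itself is written as "\\" */
        out[0] = '\\';
        out[1] = '\\';
        return 2;
    }
    if (c > ' ' && c < 127) {   /* printable ASCII is copied as-is */
        out[0] = c;
        return 1;
    }
    out[0] = '\\';              /* everything else becomes \ooo */
    out[1] = '0' + ((c >> 6) & 7);
    out[2] = '0' + ((c >> 3) & 7);
    out[3] = '0' + (c & 7);
    return 4;
}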
/* Structure for holding requested pathname. */
struct tomoyo_path_info_with_data {
/* Keep "head" first, for this pointer is passed to tomoyo_free(). */
struct tomoyo_path_info head;
char barrier1[16]; /* Safeguard for overrun. */
char body[TOMOYO_MAX_PATHNAME_LEN];
char barrier2[16]; /* Safeguard for overrun. */
};
/*
* Common header for holding ACL entries.
*
* Packing "struct tomoyo_acl_info" allows
* "struct tomoyo_single_path_acl_record" to embed "u16" and
* "struct tomoyo_double_path_acl_record" to embed "u8"
* without enlarging their structure size.
*/
struct tomoyo_acl_info {
struct list_head list;
/*
* Type of this ACL entry.
*
* MSB is the is_deleted flag.
*/
u8 type;
} __packed;
/* This ACL entry is deleted. */
#define TOMOYO_ACL_DELETED 0x80
/* Structure for domain information. */
struct tomoyo_domain_info {
struct list_head list;
struct list_head acl_info_list;
/* Name of this domain. Never NULL. */
const struct tomoyo_path_info *domainname;
u8 profile; /* Profile number to use. */
u8 is_deleted; /* Delete flag.
0 = active.
1 = deleted but undeletable.
255 = deleted and no longer undeletable. */
bool quota_warned; /* Quota warning flag. */
/* DOMAIN_FLAGS_*. Use tomoyo_set_domain_flag() to modify. */
u8 flags;
};
/* Profile number is an integer between 0 and 255. */
#define TOMOYO_MAX_PROFILES 256
/* Ignore "allow_read" directive in exception policy. */
#define TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ 1
/*
* This domain was unable to create a new domain at tomoyo_find_next_domain()
* because the name of the domain to be created was too long or
* it could not allocate memory.
* More than one process continued execve() without domain transition.
*/
#define TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED 2
/*
* Structure for "allow_read/write", "allow_execute", "allow_read",
* "allow_write", "allow_create", "allow_unlink", "allow_mkdir", "allow_rmdir",
* "allow_mkfifo", "allow_mksock", "allow_mkblock", "allow_mkchar",
* "allow_truncate", "allow_symlink" and "allow_rewrite" directive.
*/
struct tomoyo_single_path_acl_record {
struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_SINGLE_PATH_ACL */
u16 perm;
/* Pointer to single pathname. */
const struct tomoyo_path_info *filename;
};
/* Structure for "allow_rename" and "allow_link" directive. */
struct tomoyo_double_path_acl_record {
struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_DOUBLE_PATH_ACL */
u8 perm;
/* Pointer to single pathname. */
const struct tomoyo_path_info *filename1;
/* Pointer to single pathname. */
const struct tomoyo_path_info *filename2;
};
/* Keywords for ACLs. */
#define TOMOYO_KEYWORD_ALIAS "alias "
#define TOMOYO_KEYWORD_ALLOW_READ "allow_read "
#define TOMOYO_KEYWORD_DELETE "delete "
#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite "
#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern "
#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain "
#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain "
#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain "
#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain "
#define TOMOYO_KEYWORD_SELECT "select "
#define TOMOYO_KEYWORD_UNDELETE "undelete "
#define TOMOYO_KEYWORD_USE_PROFILE "use_profile "
#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read"
/* A domain definition starts with <kernel>. */
#define TOMOYO_ROOT_NAME "<kernel>"
#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1)
/* Index numbers for Access Controls. */
#define TOMOYO_MAC_FOR_FILE 0 /* domain_policy.conf */
#define TOMOYO_MAX_ACCEPT_ENTRY 1
#define TOMOYO_VERBOSE 2
#define TOMOYO_MAX_CONTROL_INDEX 3
/* Structure for reading/writing policy via securityfs interfaces. */
struct tomoyo_io_buffer {
int (*read) (struct tomoyo_io_buffer *);
int (*write) (struct tomoyo_io_buffer *);
/* Exclusive lock for this structure. */
struct mutex io_sem;
/* The position currently reading from. */
struct list_head *read_var1;
/* Extra variables for reading. */
struct list_head *read_var2;
/* The position currently writing to. */
struct tomoyo_domain_info *write_var1;
/* The step for reading. */
int read_step;
/* Buffer for reading. */
char *read_buf;
/* EOF flag for reading. */
bool read_eof;
/* Read domain ACL of specified PID? */
bool read_single_domain;
/* Extra variable for reading. */
u8 read_bit;
/* Bytes available for reading. */
int read_avail;
/* Size of read buffer. */
int readbuf_size;
/* Buffer for writing. */
char *write_buf;
/* Bytes available for writing. */
int write_avail;
/* Size of write buffer. */
int writebuf_size;
};
/* Check whether the domain has too many ACL entries to hold. */
bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain);
/* Transactional sprintf() for policy dump. */
bool tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
/* Check whether the domainname is correct. */
bool tomoyo_is_correct_domain(const unsigned char *domainname,
const char *function);
/* Check whether the token is correct. */
bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
const s8 pattern_type, const s8 end_type,
const char *function);
/* Check whether the token can be a domainname. */
bool tomoyo_is_domain_def(const unsigned char *buffer);
/* Check whether the given filename matches the given pattern. */
bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
const struct tomoyo_path_info *pattern);
/* Read "alias" entry in exception policy. */
bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head);
/*
* Read "initialize_domain" and "no_initialize_domain" entry
* in exception policy.
*/
bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head);
/* Read "keep_domain" and "no_keep_domain" entry in exception policy. */
bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head);
/* Read "file_pattern" entry in exception policy. */
bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head);
/* Read "allow_read" entry in exception policy. */
bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head);
/* Read "deny_rewrite" entry in exception policy. */
bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head);
/* Write domain policy violation warning message to console? */
bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain);
/* Convert double path operation to operation name. */
const char *tomoyo_dp2keyword(const u8 operation);
/* Get the last component of the given domainname. */
const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain);
/* Get warning message. */
const char *tomoyo_get_msg(const bool is_enforce);
/* Convert single path operation to operation name. */
const char *tomoyo_sp2keyword(const u8 operation);
/* Delete a domain. */
int tomoyo_delete_domain(char *data);
/* Create "alias" entry in exception policy. */
int tomoyo_write_alias_policy(char *data, const bool is_delete);
/*
* Create "initialize_domain" and "no_initialize_domain" entry
* in exception policy.
*/
int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
const bool is_delete);
/* Create "keep_domain" and "no_keep_domain" entry in exception policy. */
int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
const bool is_delete);
/*
* Create "allow_read/write", "allow_execute", "allow_read", "allow_write",
* "allow_create", "allow_unlink", "allow_mkdir", "allow_rmdir",
* "allow_mkfifo", "allow_mksock", "allow_mkblock", "allow_mkchar",
* "allow_truncate", "allow_symlink", "allow_rewrite", "allow_rename" and
* "allow_link" entry in domain policy.
*/
int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
const bool is_delete);
/* Create "allow_read" entry in exception policy. */
int tomoyo_write_globally_readable_policy(char *data, const bool is_delete);
/* Create "deny_rewrite" entry in exception policy. */
int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete);
/* Create "file_pattern" entry in exception policy. */
int tomoyo_write_pattern_policy(char *data, const bool is_delete);
/* Find a domain by the given name. */
struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname);
/* Find or create a domain by the given name. */
struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
domainname,
const u8 profile);
/* Undelete a domain. */
struct tomoyo_domain_info *tomoyo_undelete_domain(const char *domainname);
/* Check mode for specified functionality. */
unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
const u8 index);
/* Allocate memory for structures. */
void *tomoyo_alloc_acl_element(const u8 acl_type);
/* Fill in "struct tomoyo_path_info" members. */
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
/* Run policy loader when /sbin/init starts. */
void tomoyo_load_policy(const char *filename);
/* Change "struct tomoyo_domain_info"->flags. */
void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
const bool is_delete, const u8 flags);
/* strcmp() for "struct tomoyo_path_info" structure. */
static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a,
const struct tomoyo_path_info *b)
{
return a->hash != b->hash || strcmp(a->name, b->name);
}
/* Get type of an ACL entry. */
static inline u8 tomoyo_acl_type1(struct tomoyo_acl_info *ptr)
{
return ptr->type & ~TOMOYO_ACL_DELETED;
}
/* Get type of an ACL entry. */
static inline u8 tomoyo_acl_type2(struct tomoyo_acl_info *ptr)
{
return ptr->type;
}
/**
* tomoyo_is_valid - Check whether the character is a valid char.
*
* @c: The character to check.
*
* Returns true if @c is a valid character, false otherwise.
*/
static inline bool tomoyo_is_valid(const unsigned char c)
{
return c > ' ' && c < 127;
}
/**
* tomoyo_is_invalid - Check whether the character is an invalid char.
*
* @c: The character to check.
*
* Returns true if @c is an invalid character, false otherwise.
*/
static inline bool tomoyo_is_invalid(const unsigned char c)
{
return c && (c <= ' ' || c >= 127);
}
/* The list for "struct tomoyo_domain_info". */
extern struct list_head tomoyo_domain_list;
extern struct rw_semaphore tomoyo_domain_list_lock;
/* Lock for domain->acl_info_list. */
extern struct rw_semaphore tomoyo_domain_acl_info_list_lock;
/* Has /sbin/init started? */
extern bool tomoyo_policy_loaded;
/* The kernel's domain. */
extern struct tomoyo_domain_info tomoyo_kernel_domain;
/**
* list_for_each_cookie - iterate over a list with cookie.
* @pos: the &struct list_head to use as a loop cursor.
* @cookie: the &struct list_head to use as a cookie.
* @head: the head for your list.
*
* Same as list_for_each() except that this primitive uses @cookie
* so that we can continue iteration.
* @cookie must be NULL when iteration starts, and @cookie will become
* NULL when iteration finishes.
*/
#define list_for_each_cookie(pos, cookie, head) \
for (({ if (!cookie) \
cookie = head; }), \
pos = (cookie)->next; \
prefetch(pos->next), pos != (head) || ((cookie) = NULL); \
(cookie) = pos, pos = pos->next)
#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
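A hedged usage sketch of the list_for_each_cookie() iterator documented above: the cookie starts out NULL, the walk can be abandoned mid-way (for example when the read buffer fills up), and a later call that passes the same, now non-NULL cookie resumes at the saved position. The reader structure and names below are hypothetical; the real callers keep the cookie in tomoyo_io_buffer's read_var1/read_var2 fields.

/* Hypothetical reader state, mirroring how head->read_var2 is used above. */
struct demo_reader {
    struct list_head *cookie;   /* must be NULL before the first call */
    int budget;                 /* how many entries fit this time */
};

static bool demo_read(struct demo_reader *r, struct list_head *demo_list)
{
    struct list_head *pos;

    list_for_each_cookie(pos, r->cookie, demo_list) {
        if (r->budget-- <= 0)
            return false;       /* r->cookie records where to resume */
        /* ... format and emit the entry at "pos" here ... */
    }
    return true;                /* r->cookie is NULL again: walk finished */
}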

878
security/tomoyo/domain.c Normal file
View file

@ -0,0 +1,878 @@
/*
* security/tomoyo/domain.c
*
* Implementation of the Domain-Based Mandatory Access Control.
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
* Version: 2.2.0-pre 2009/02/01
*
*/
#include "common.h"
#include "tomoyo.h"
#include "realpath.h"
#include <linux/binfmts.h>
/* Variable definitions. */
/* The initial domain. */
struct tomoyo_domain_info tomoyo_kernel_domain;
/* The list for "struct tomoyo_domain_info". */
LIST_HEAD(tomoyo_domain_list);
DECLARE_RWSEM(tomoyo_domain_list_lock);
/* Structure for "initialize_domain" and "no_initialize_domain" keyword. */
struct tomoyo_domain_initializer_entry {
struct list_head list;
const struct tomoyo_path_info *domainname; /* This may be NULL */
const struct tomoyo_path_info *program;
bool is_deleted;
bool is_not; /* True if this entry is "no_initialize_domain". */
/* True if the domainname is tomoyo_get_last_name(). */
bool is_last_name;
};
/* Structure for "keep_domain" and "no_keep_domain" keyword. */
struct tomoyo_domain_keeper_entry {
struct list_head list;
const struct tomoyo_path_info *domainname;
const struct tomoyo_path_info *program; /* This may be NULL */
bool is_deleted;
bool is_not; /* True if this entry is "no_keep_domain". */
/* True if the domainname is tomoyo_get_last_name(). */
bool is_last_name;
};
/* Structure for "alias" keyword. */
struct tomoyo_alias_entry {
struct list_head list;
const struct tomoyo_path_info *original_name;
const struct tomoyo_path_info *aliased_name;
bool is_deleted;
};
/**
* tomoyo_set_domain_flag - Set or clear domain's attribute flags.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @is_delete: True if it is a delete request.
* @flags: Flags to set or clear.
*
* Returns nothing.
*/
void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
const bool is_delete, const u8 flags)
{
/* We need to serialize because this is a bitfield operation. */
static DEFINE_SPINLOCK(lock);
/***** CRITICAL SECTION START *****/
spin_lock(&lock);
if (!is_delete)
domain->flags |= flags;
else
domain->flags &= ~flags;
spin_unlock(&lock);
/***** CRITICAL SECTION END *****/
}
/**
* tomoyo_get_last_name - Get last component of a domainname.
*
* @domain: Pointer to "struct tomoyo_domain_info".
*
* Returns the last component of the domainname.
*/
const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain)
{
const char *cp0 = domain->domainname->name;
const char *cp1 = strrchr(cp0, ' ');
if (cp1)
return cp1 + 1;
return cp0;
}
/* The list for "struct tomoyo_domain_initializer_entry". */
static LIST_HEAD(tomoyo_domain_initializer_list);
static DECLARE_RWSEM(tomoyo_domain_initializer_list_lock);
/**
* tomoyo_update_domain_initializer_entry - Update "struct tomoyo_domain_initializer_entry" list.
*
* @domainname: The name of domain. May be NULL.
* @program: The name of program.
* @is_not: True if it is "no_initialize_domain" entry.
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_update_domain_initializer_entry(const char *domainname,
const char *program,
const bool is_not,
const bool is_delete)
{
struct tomoyo_domain_initializer_entry *new_entry;
struct tomoyo_domain_initializer_entry *ptr;
const struct tomoyo_path_info *saved_program;
const struct tomoyo_path_info *saved_domainname = NULL;
int error = -ENOMEM;
bool is_last_name = false;
if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__))
return -EINVAL; /* No patterns allowed. */
if (domainname) {
if (!tomoyo_is_domain_def(domainname) &&
tomoyo_is_correct_path(domainname, 1, -1, -1, __func__))
is_last_name = true;
else if (!tomoyo_is_correct_domain(domainname, __func__))
return -EINVAL;
saved_domainname = tomoyo_save_name(domainname);
if (!saved_domainname)
return -ENOMEM;
}
saved_program = tomoyo_save_name(program);
if (!saved_program)
return -ENOMEM;
/***** EXCLUSIVE SECTION START *****/
down_write(&tomoyo_domain_initializer_list_lock);
list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
if (ptr->is_not != is_not ||
ptr->domainname != saved_domainname ||
ptr->program != saved_program)
continue;
ptr->is_deleted = is_delete;
error = 0;
goto out;
}
if (is_delete) {
error = -ENOENT;
goto out;
}
new_entry = tomoyo_alloc_element(sizeof(*new_entry));
if (!new_entry)
goto out;
new_entry->domainname = saved_domainname;
new_entry->program = saved_program;
new_entry->is_not = is_not;
new_entry->is_last_name = is_last_name;
list_add_tail(&new_entry->list, &tomoyo_domain_initializer_list);
error = 0;
out:
up_write(&tomoyo_domain_initializer_list_lock);
/***** EXCLUSIVE SECTION END *****/
return error;
}
/**
* tomoyo_read_domain_initializer_policy - Read "struct tomoyo_domain_initializer_entry" list.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
*/
bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
down_read(&tomoyo_domain_initializer_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_domain_initializer_list) {
const char *no;
const char *from = "";
const char *domain = "";
struct tomoyo_domain_initializer_entry *ptr;
ptr = list_entry(pos, struct tomoyo_domain_initializer_entry,
list);
if (ptr->is_deleted)
continue;
no = ptr->is_not ? "no_" : "";
if (ptr->domainname) {
from = " from ";
domain = ptr->domainname->name;
}
if (!tomoyo_io_printf(head,
"%s" TOMOYO_KEYWORD_INITIALIZE_DOMAIN
"%s%s%s\n", no, ptr->program->name, from,
domain)) {
done = false;
break;
}
}
up_read(&tomoyo_domain_initializer_list_lock);
return done;
}
/**
* tomoyo_write_domain_initializer_policy - Write "struct tomoyo_domain_initializer_entry" list.
*
* @data: String to parse.
* @is_not: True if it is "no_initialize_domain" entry.
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
const bool is_delete)
{
char *cp = strstr(data, " from ");
if (cp) {
*cp = '\0';
return tomoyo_update_domain_initializer_entry(cp + 6, data,
is_not,
is_delete);
}
return tomoyo_update_domain_initializer_entry(NULL, data, is_not,
is_delete);
}
/**
* tomoyo_is_domain_initializer - Check whether the given program causes domainname reinitialization.
*
* @domainname: The name of domain.
* @program: The name of program.
* @last_name: The last component of @domainname.
*
* Returns true if executing @program reinitializes domain transition,
* false otherwise.
*/
static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
domainname,
const struct tomoyo_path_info *program,
const struct tomoyo_path_info *
last_name)
{
struct tomoyo_domain_initializer_entry *ptr;
bool flag = false;
down_read(&tomoyo_domain_initializer_list_lock);
list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
if (ptr->is_deleted)
continue;
if (ptr->domainname) {
if (!ptr->is_last_name) {
if (ptr->domainname != domainname)
continue;
} else {
if (tomoyo_pathcmp(ptr->domainname, last_name))
continue;
}
}
if (tomoyo_pathcmp(ptr->program, program))
continue;
if (ptr->is_not) {
flag = false;
break;
}
flag = true;
}
up_read(&tomoyo_domain_initializer_list_lock);
return flag;
}
/* The list for "struct tomoyo_domain_keeper_entry". */
static LIST_HEAD(tomoyo_domain_keeper_list);
static DECLARE_RWSEM(tomoyo_domain_keeper_list_lock);
/**
* tomoyo_update_domain_keeper_entry - Update "struct tomoyo_domain_keeper_entry" list.
*
* @domainname: The name of domain.
* @program: The name of program. May be NULL.
* @is_not: True if it is "no_keep_domain" entry.
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_update_domain_keeper_entry(const char *domainname,
const char *program,
const bool is_not,
const bool is_delete)
{
struct tomoyo_domain_keeper_entry *new_entry;
struct tomoyo_domain_keeper_entry *ptr;
const struct tomoyo_path_info *saved_domainname;
const struct tomoyo_path_info *saved_program = NULL;
static DEFINE_MUTEX(lock);
int error = -ENOMEM;
bool is_last_name = false;
if (!tomoyo_is_domain_def(domainname) &&
tomoyo_is_correct_path(domainname, 1, -1, -1, __func__))
is_last_name = true;
else if (!tomoyo_is_correct_domain(domainname, __func__))
return -EINVAL;
if (program) {
if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__))
return -EINVAL;
saved_program = tomoyo_save_name(program);
if (!saved_program)
return -ENOMEM;
}
saved_domainname = tomoyo_save_name(domainname);
if (!saved_domainname)
return -ENOMEM;
/***** EXCLUSIVE SECTION START *****/
down_write(&tomoyo_domain_keeper_list_lock);
list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
if (ptr->is_not != is_not ||
ptr->domainname != saved_domainname ||
ptr->program != saved_program)
continue;
ptr->is_deleted = is_delete;
error = 0;
goto out;
}
if (is_delete) {
error = -ENOENT;
goto out;
}
new_entry = tomoyo_alloc_element(sizeof(*new_entry));
if (!new_entry)
goto out;
new_entry->domainname = saved_domainname;
new_entry->program = saved_program;
new_entry->is_not = is_not;
new_entry->is_last_name = is_last_name;
list_add_tail(&new_entry->list, &tomoyo_domain_keeper_list);
error = 0;
out:
up_write(&tomoyo_domain_keeper_list_lock);
/***** EXCLUSIVE SECTION END *****/
return error;
}
/**
* tomoyo_write_domain_keeper_policy - Write "struct tomoyo_domain_keeper_entry" list.
*
* @data: String to parse.
* @is_not: True if it is "no_keep_domain" entry.
* @is_delete: True if it is a delete request.
 *
 * Returns 0 on success, negative value otherwise.
 */
int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
const bool is_delete)
{
char *cp = strstr(data, " from ");
if (cp) {
*cp = '\0';
return tomoyo_update_domain_keeper_entry(cp + 6, data, is_not,
is_delete);
}
return tomoyo_update_domain_keeper_entry(data, NULL, is_not, is_delete);
}
/**
* tomoyo_read_domain_keeper_policy - Read "struct tomoyo_domain_keeper_entry" list.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
*/
bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
down_read(&tomoyo_domain_keeper_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_domain_keeper_list) {
struct tomoyo_domain_keeper_entry *ptr;
const char *no;
const char *from = "";
const char *program = "";
ptr = list_entry(pos, struct tomoyo_domain_keeper_entry, list);
if (ptr->is_deleted)
continue;
no = ptr->is_not ? "no_" : "";
if (ptr->program) {
from = " from ";
program = ptr->program->name;
}
if (!tomoyo_io_printf(head,
"%s" TOMOYO_KEYWORD_KEEP_DOMAIN
"%s%s%s\n", no, program, from,
ptr->domainname->name)) {
done = false;
break;
}
}
up_read(&tomoyo_domain_keeper_list_lock);
return done;
}
/**
* tomoyo_is_domain_keeper - Check whether the given program causes domain transition suppression.
*
* @domainname: The name of domain.
* @program: The name of program.
* @last_name: The last component of @domainname.
*
 * Returns true if executing @program suppresses domain transition,
* false otherwise.
*/
static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
const struct tomoyo_path_info *program,
const struct tomoyo_path_info *last_name)
{
struct tomoyo_domain_keeper_entry *ptr;
bool flag = false;
down_read(&tomoyo_domain_keeper_list_lock);
list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
if (ptr->is_deleted)
continue;
if (!ptr->is_last_name) {
if (ptr->domainname != domainname)
continue;
} else {
if (tomoyo_pathcmp(ptr->domainname, last_name))
continue;
}
if (ptr->program && tomoyo_pathcmp(ptr->program, program))
continue;
if (ptr->is_not) {
flag = false;
break;
}
flag = true;
}
up_read(&tomoyo_domain_keeper_list_lock);
return flag;
}
/* The list for "struct tomoyo_alias_entry". */
static LIST_HEAD(tomoyo_alias_list);
static DECLARE_RWSEM(tomoyo_alias_list_lock);
/**
* tomoyo_update_alias_entry - Update "struct tomoyo_alias_entry" list.
*
* @original_name: The original program's real name.
* @aliased_name: The symbolic program's symbolic link's name.
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_update_alias_entry(const char *original_name,
const char *aliased_name,
const bool is_delete)
{
struct tomoyo_alias_entry *new_entry;
struct tomoyo_alias_entry *ptr;
const struct tomoyo_path_info *saved_original_name;
const struct tomoyo_path_info *saved_aliased_name;
int error = -ENOMEM;
if (!tomoyo_is_correct_path(original_name, 1, -1, -1, __func__) ||
!tomoyo_is_correct_path(aliased_name, 1, -1, -1, __func__))
return -EINVAL; /* No patterns allowed. */
saved_original_name = tomoyo_save_name(original_name);
saved_aliased_name = tomoyo_save_name(aliased_name);
if (!saved_original_name || !saved_aliased_name)
return -ENOMEM;
/***** EXCLUSIVE SECTION START *****/
down_write(&tomoyo_alias_list_lock);
list_for_each_entry(ptr, &tomoyo_alias_list, list) {
if (ptr->original_name != saved_original_name ||
ptr->aliased_name != saved_aliased_name)
continue;
ptr->is_deleted = is_delete;
error = 0;
goto out;
}
if (is_delete) {
error = -ENOENT;
goto out;
}
new_entry = tomoyo_alloc_element(sizeof(*new_entry));
if (!new_entry)
goto out;
new_entry->original_name = saved_original_name;
new_entry->aliased_name = saved_aliased_name;
list_add_tail(&new_entry->list, &tomoyo_alias_list);
error = 0;
out:
up_write(&tomoyo_alias_list_lock);
/***** EXCLUSIVE SECTION END *****/
return error;
}
/**
* tomoyo_read_alias_policy - Read "struct tomoyo_alias_entry" list.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
*/
bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
down_read(&tomoyo_alias_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_alias_list) {
struct tomoyo_alias_entry *ptr;
ptr = list_entry(pos, struct tomoyo_alias_entry, list);
if (ptr->is_deleted)
continue;
if (!tomoyo_io_printf(head, TOMOYO_KEYWORD_ALIAS "%s %s\n",
ptr->original_name->name,
ptr->aliased_name->name)) {
done = false;
break;
}
}
up_read(&tomoyo_alias_list_lock);
return done;
}
/**
* tomoyo_write_alias_policy - Write "struct tomoyo_alias_entry" list.
*
* @data: String to parse.
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_write_alias_policy(char *data, const bool is_delete)
{
char *cp = strchr(data, ' ');
if (!cp)
return -EINVAL;
*cp++ = '\0';
return tomoyo_update_alias_entry(data, cp, is_delete);
}
/* Domain create/delete/undelete handler. */
/* #define TOMOYO_DEBUG_DOMAIN_UNDELETE */
/**
* tomoyo_delete_domain - Delete a domain.
*
* @domainname: The name of domain.
*
* Returns 0.
*/
int tomoyo_delete_domain(char *domainname)
{
struct tomoyo_domain_info *domain;
struct tomoyo_path_info name;
name.name = domainname;
tomoyo_fill_path_info(&name);
/***** EXCLUSIVE SECTION START *****/
down_write(&tomoyo_domain_list_lock);
#ifdef TOMOYO_DEBUG_DOMAIN_UNDELETE
printk(KERN_DEBUG "tomoyo_delete_domain %s\n", domainname);
list_for_each_entry(domain, &tomoyo_domain_list, list) {
if (tomoyo_pathcmp(domain->domainname, &name))
continue;
printk(KERN_DEBUG "List: %p %u\n", domain, domain->is_deleted);
}
#endif
/* Is there an active domain? */
list_for_each_entry(domain, &tomoyo_domain_list, list) {
struct tomoyo_domain_info *domain2;
/* Never delete tomoyo_kernel_domain */
if (domain == &tomoyo_kernel_domain)
continue;
if (domain->is_deleted ||
tomoyo_pathcmp(domain->domainname, &name))
continue;
		/* Mark already-deleted domains as non-undeletable. */
list_for_each_entry(domain2, &tomoyo_domain_list, list) {
if (!domain2->is_deleted ||
tomoyo_pathcmp(domain2->domainname, &name))
continue;
#ifdef TOMOYO_DEBUG_DOMAIN_UNDELETE
if (domain2->is_deleted != 255)
printk(KERN_DEBUG
"Marked %p as non undeletable\n",
domain2);
#endif
domain2->is_deleted = 255;
}
/* Delete and mark active domain as undeletable. */
domain->is_deleted = 1;
#ifdef TOMOYO_DEBUG_DOMAIN_UNDELETE
printk(KERN_DEBUG "Marked %p as undeletable\n", domain);
#endif
break;
}
up_write(&tomoyo_domain_list_lock);
/***** EXCLUSIVE SECTION END *****/
return 0;
}
/**
* tomoyo_undelete_domain - Undelete a domain.
*
* @domainname: The name of domain.
*
* Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
*/
struct tomoyo_domain_info *tomoyo_undelete_domain(const char *domainname)
{
struct tomoyo_domain_info *domain;
struct tomoyo_domain_info *candidate_domain = NULL;
struct tomoyo_path_info name;
name.name = domainname;
tomoyo_fill_path_info(&name);
/***** EXCLUSIVE SECTION START *****/
down_write(&tomoyo_domain_list_lock);
#ifdef TOMOYO_DEBUG_DOMAIN_UNDELETE
printk(KERN_DEBUG "tomoyo_undelete_domain %s\n", domainname);
list_for_each_entry(domain, &tomoyo_domain_list, list) {
if (tomoyo_pathcmp(domain->domainname, &name))
continue;
printk(KERN_DEBUG "List: %p %u\n", domain, domain->is_deleted);
}
#endif
list_for_each_entry(domain, &tomoyo_domain_list, list) {
if (tomoyo_pathcmp(&name, domain->domainname))
continue;
if (!domain->is_deleted) {
/* This domain is active. I can't undelete. */
candidate_domain = NULL;
#ifdef TOMOYO_DEBUG_DOMAIN_UNDELETE
printk(KERN_DEBUG "%p is active. I can't undelete.\n",
domain);
#endif
break;
}
/* Is this domain undeletable? */
if (domain->is_deleted == 1)
candidate_domain = domain;
}
if (candidate_domain) {
candidate_domain->is_deleted = 0;
#ifdef TOMOYO_DEBUG_DOMAIN_UNDELETE
printk(KERN_DEBUG "%p was undeleted.\n", candidate_domain);
#endif
}
up_write(&tomoyo_domain_list_lock);
/***** EXCLUSIVE SECTION END *****/
return candidate_domain;
}
/**
* tomoyo_find_or_assign_new_domain - Create a domain.
*
* @domainname: The name of domain.
* @profile: Profile number to assign if the domain was newly created.
*
* Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
*/
struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
domainname,
const u8 profile)
{
struct tomoyo_domain_info *domain = NULL;
const struct tomoyo_path_info *saved_domainname;
/***** EXCLUSIVE SECTION START *****/
down_write(&tomoyo_domain_list_lock);
domain = tomoyo_find_domain(domainname);
if (domain)
goto out;
if (!tomoyo_is_correct_domain(domainname, __func__))
goto out;
saved_domainname = tomoyo_save_name(domainname);
if (!saved_domainname)
goto out;
/* Can I reuse memory of deleted domain? */
list_for_each_entry(domain, &tomoyo_domain_list, list) {
struct task_struct *p;
struct tomoyo_acl_info *ptr;
bool flag;
if (!domain->is_deleted ||
domain->domainname != saved_domainname)
continue;
flag = false;
/***** CRITICAL SECTION START *****/
read_lock(&tasklist_lock);
for_each_process(p) {
if (tomoyo_real_domain(p) != domain)
continue;
flag = true;
break;
}
read_unlock(&tasklist_lock);
/***** CRITICAL SECTION END *****/
if (flag)
continue;
#ifdef TOMOYO_DEBUG_DOMAIN_UNDELETE
printk(KERN_DEBUG "Reusing %p %s\n", domain,
domain->domainname->name);
#endif
list_for_each_entry(ptr, &domain->acl_info_list, list) {
ptr->type |= TOMOYO_ACL_DELETED;
}
tomoyo_set_domain_flag(domain, true, domain->flags);
domain->profile = profile;
domain->quota_warned = false;
mb(); /* Avoid out-of-order execution. */
domain->is_deleted = 0;
goto out;
}
/* No memory reusable. Create using new memory. */
domain = tomoyo_alloc_element(sizeof(*domain));
if (domain) {
INIT_LIST_HEAD(&domain->acl_info_list);
domain->domainname = saved_domainname;
domain->profile = profile;
list_add_tail(&domain->list, &tomoyo_domain_list);
}
out:
up_write(&tomoyo_domain_list_lock);
/***** EXCLUSIVE SECTION END *****/
return domain;
}
/**
* tomoyo_find_next_domain - Find a domain.
*
* @bprm: Pointer to "struct linux_binprm".
* @next_domain: Pointer to pointer to "struct tomoyo_domain_info".
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_find_next_domain(struct linux_binprm *bprm,
struct tomoyo_domain_info **next_domain)
{
/*
	 * This function assumes that the size of the buffer returned by
	 * tomoyo_realpath() is TOMOYO_MAX_PATHNAME_LEN.
*/
struct tomoyo_page_buffer *tmp = tomoyo_alloc(sizeof(*tmp));
struct tomoyo_domain_info *old_domain = tomoyo_domain();
struct tomoyo_domain_info *domain = NULL;
const char *old_domain_name = old_domain->domainname->name;
const char *original_name = bprm->filename;
char *new_domain_name = NULL;
char *real_program_name = NULL;
char *symlink_program_name = NULL;
const u8 mode = tomoyo_check_flags(old_domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
int retval = -ENOMEM;
struct tomoyo_path_info r; /* real name */
struct tomoyo_path_info s; /* symlink name */
struct tomoyo_path_info l; /* last name */
static bool initialized;
if (!tmp)
goto out;
if (!initialized) {
/*
* Built-in initializers. This is needed because policies are
* not loaded until starting /sbin/init.
*/
tomoyo_update_domain_initializer_entry(NULL, "/sbin/hotplug",
false, false);
tomoyo_update_domain_initializer_entry(NULL, "/sbin/modprobe",
false, false);
initialized = true;
}
/* Get tomoyo_realpath of program. */
retval = -ENOENT;
/* I hope tomoyo_realpath() won't fail with -ENOMEM. */
real_program_name = tomoyo_realpath(original_name);
if (!real_program_name)
goto out;
/* Get tomoyo_realpath of symbolic link. */
symlink_program_name = tomoyo_realpath_nofollow(original_name);
if (!symlink_program_name)
goto out;
r.name = real_program_name;
tomoyo_fill_path_info(&r);
s.name = symlink_program_name;
tomoyo_fill_path_info(&s);
l.name = tomoyo_get_last_name(old_domain);
tomoyo_fill_path_info(&l);
/* Check 'alias' directive. */
if (tomoyo_pathcmp(&r, &s)) {
struct tomoyo_alias_entry *ptr;
/* Is this program allowed to be called via symbolic links? */
down_read(&tomoyo_alias_list_lock);
list_for_each_entry(ptr, &tomoyo_alias_list, list) {
if (ptr->is_deleted ||
tomoyo_pathcmp(&r, ptr->original_name) ||
tomoyo_pathcmp(&s, ptr->aliased_name))
continue;
memset(real_program_name, 0, TOMOYO_MAX_PATHNAME_LEN);
strncpy(real_program_name, ptr->aliased_name->name,
TOMOYO_MAX_PATHNAME_LEN - 1);
tomoyo_fill_path_info(&r);
break;
}
up_read(&tomoyo_alias_list_lock);
}
/* Check execute permission. */
retval = tomoyo_check_exec_perm(old_domain, &r, tmp);
if (retval < 0)
goto out;
new_domain_name = tmp->buffer;
if (tomoyo_is_domain_initializer(old_domain->domainname, &r, &l)) {
/* Transit to the child of tomoyo_kernel_domain domain. */
snprintf(new_domain_name, TOMOYO_MAX_PATHNAME_LEN + 1,
TOMOYO_ROOT_NAME " " "%s", real_program_name);
} else if (old_domain == &tomoyo_kernel_domain &&
!tomoyo_policy_loaded) {
/*
		 * There is no need to transit from the kernel domain before
		 * /sbin/init starts. But do transit from the kernel domain
		 * when executing initializers, because they might start
		 * before /sbin/init.
*/
domain = old_domain;
} else if (tomoyo_is_domain_keeper(old_domain->domainname, &r, &l)) {
/* Keep current domain. */
domain = old_domain;
} else {
/* Normal domain transition. */
snprintf(new_domain_name, TOMOYO_MAX_PATHNAME_LEN + 1,
"%s %s", old_domain_name, real_program_name);
}
if (domain || strlen(new_domain_name) >= TOMOYO_MAX_PATHNAME_LEN)
goto done;
down_read(&tomoyo_domain_list_lock);
domain = tomoyo_find_domain(new_domain_name);
up_read(&tomoyo_domain_list_lock);
if (domain)
goto done;
if (is_enforce)
goto done;
domain = tomoyo_find_or_assign_new_domain(new_domain_name,
old_domain->profile);
done:
if (domain)
goto out;
printk(KERN_WARNING "TOMOYO-ERROR: Domain '%s' not defined.\n",
new_domain_name);
if (is_enforce)
retval = -EPERM;
else
tomoyo_set_domain_flag(old_domain, false,
TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED);
out:
tomoyo_free(real_program_name);
tomoyo_free(symlink_program_name);
*next_domain = domain ? domain : old_domain;
tomoyo_free(tmp);
return retval;
}
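/*
 * Illustration (not part of the kernel sources): how the candidate
 * domainname is composed for the three outcomes above, using fixed sample
 * strings instead of the realpath machinery. "<kernel>" is assumed to be
 * the value of TOMOYO_ROOT_NAME.
 */
#include <stdio.h>

#define ROOT_NAME "<kernel>"

int main(void)
{
	const char *old_domain = ROOT_NAME " /sbin/init";
	const char *program = "/usr/sbin/sshd";
	char name[256];

	/* Normal transition: child of the current domain. */
	snprintf(name, sizeof(name), "%s %s", old_domain, program);
	printf("normal     : %s\n", name);

	/* "initialize_domain" match: child of the kernel domain. */
	snprintf(name, sizeof(name), ROOT_NAME " %s", program);
	printf("initializer: %s\n", name);

	/* "keep_domain" match: stay in the current domain. */
	printf("keeper     : %s\n", old_domain);
	return 0;
}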

1241
security/tomoyo/file.c Normal file

The diff for this file is not shown because of its large size.

482
security/tomoyo/realpath.c Normal file

@ -0,0 +1,482 @@
/*
* security/tomoyo/realpath.c
*
* Get the canonicalized absolute pathnames. The basis for TOMOYO.
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
* Version: 2.2.0-pre 2009/02/01
*
*/
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/mnt_namespace.h>
#include "common.h"
#include "realpath.h"
/**
 * tomoyo_encode - Convert binary string to ASCII string.
*
* @buffer: Buffer for ASCII string.
* @buflen: Size of @buffer.
* @str: Binary string.
*
* Returns 0 on success, -ENOMEM otherwise.
*/
int tomoyo_encode(char *buffer, int buflen, const char *str)
{
while (1) {
const unsigned char c = *(unsigned char *) str++;
if (tomoyo_is_valid(c)) {
if (--buflen <= 0)
break;
*buffer++ = (char) c;
if (c != '\\')
continue;
if (--buflen <= 0)
break;
*buffer++ = (char) c;
continue;
}
if (!c) {
if (--buflen <= 0)
break;
*buffer = '\0';
return 0;
}
buflen -= 4;
if (buflen <= 0)
break;
*buffer++ = '\\';
*buffer++ = (c >> 6) + '0';
*buffer++ = ((c >> 3) & 7) + '0';
*buffer++ = (c & 7) + '0';
}
return -ENOMEM;
}
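/*
 * Illustration (not part of the kernel sources): a stand-alone user-space
 * approximation of the encoding above. It assumes that valid characters
 * are printable ASCII other than space; the in-kernel tomoyo_is_valid()
 * helper is not reproduced here.
 */
#include <stdio.h>

static void demo_encode(const char *str)
{
	while (*str) {
		unsigned char c = (unsigned char) *str++;

		if (c > ' ' && c < 127 && c != '\\')
			putchar(c);
		else if (c == '\\')
			printf("\\\\");		/* \ becomes \\ */
		else
			printf("\\%c%c%c", (c >> 6) + '0',
			       ((c >> 3) & 7) + '0', (c & 7) + '0');
	}
	putchar('\n');
}

int main(void)
{
	demo_encode("/tmp/file name");	/* prints /tmp/file\040name */
	demo_encode("back\\slash");	/* prints back\\slash */
	return 0;
}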
/**
* tomoyo_realpath_from_path2 - Returns realpath(3) of the given dentry but ignores chroot'ed root.
*
* @path: Pointer to "struct path".
* @newname: Pointer to buffer to return value in.
* @newname_len: Size of @newname.
*
* Returns 0 on success, negative value otherwise.
*
 * If dentry is a directory, a trailing '/' is appended.
 * Characters outside the 0x20 < c < 0x7F range are converted to
 * \ooo style octal strings.
 * The character \ is converted to the \\ string.
*/
int tomoyo_realpath_from_path2(struct path *path, char *newname,
int newname_len)
{
int error = -ENOMEM;
struct dentry *dentry = path->dentry;
char *sp;
if (!dentry || !path->mnt || !newname || newname_len <= 2048)
return -EINVAL;
if (dentry->d_op && dentry->d_op->d_dname) {
/* For "socket:[\$]" and "pipe:[\$]". */
static const int offset = 1536;
sp = dentry->d_op->d_dname(dentry, newname + offset,
newname_len - offset);
} else {
/* Taken from d_namespace_path(). */
struct path root;
struct path ns_root = { };
struct path tmp;
read_lock(&current->fs->lock);
root = current->fs->root;
path_get(&root);
read_unlock(&current->fs->lock);
spin_lock(&vfsmount_lock);
if (root.mnt && root.mnt->mnt_ns)
ns_root.mnt = mntget(root.mnt->mnt_ns->root);
if (ns_root.mnt)
ns_root.dentry = dget(ns_root.mnt->mnt_root);
spin_unlock(&vfsmount_lock);
spin_lock(&dcache_lock);
tmp = ns_root;
sp = __d_path(path, &tmp, newname, newname_len);
spin_unlock(&dcache_lock);
path_put(&root);
path_put(&ns_root);
}
if (IS_ERR(sp))
error = PTR_ERR(sp);
else
error = tomoyo_encode(newname, sp - newname, sp);
/* Append trailing '/' if dentry is a directory. */
if (!error && dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)
&& *newname) {
sp = newname + strlen(newname);
if (*(sp - 1) != '/') {
if (sp < newname + newname_len - 4) {
*sp++ = '/';
*sp = '\0';
} else {
error = -ENOMEM;
}
}
}
if (error)
printk(KERN_WARNING "tomoyo_realpath: Pathname too long.\n");
return error;
}
/**
* tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
*
* @path: Pointer to "struct path".
*
* Returns the realpath of the given @path on success, NULL otherwise.
*
 * This function uses tomoyo_alloc(), so the caller must call tomoyo_free()
 * if this function didn't return NULL.
*/
char *tomoyo_realpath_from_path(struct path *path)
{
char *buf = tomoyo_alloc(sizeof(struct tomoyo_page_buffer));
BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer)
<= TOMOYO_MAX_PATHNAME_LEN - 1);
if (!buf)
return NULL;
if (tomoyo_realpath_from_path2(path, buf,
TOMOYO_MAX_PATHNAME_LEN - 1) == 0)
return buf;
tomoyo_free(buf);
return NULL;
}
/**
* tomoyo_realpath - Get realpath of a pathname.
*
 * @pathname: The pathname to resolve.
*
* Returns the realpath of @pathname on success, NULL otherwise.
*/
char *tomoyo_realpath(const char *pathname)
{
struct nameidata nd;
if (pathname && path_lookup(pathname, LOOKUP_FOLLOW, &nd) == 0) {
char *buf = tomoyo_realpath_from_path(&nd.path);
path_put(&nd.path);
return buf;
}
return NULL;
}
/**
* tomoyo_realpath_nofollow - Get realpath of a pathname.
*
 * @pathname: The pathname to resolve.
*
* Returns the realpath of @pathname on success, NULL otherwise.
*/
char *tomoyo_realpath_nofollow(const char *pathname)
{
struct nameidata nd;
if (pathname && path_lookup(pathname, 0, &nd) == 0) {
char *buf = tomoyo_realpath_from_path(&nd.path);
path_put(&nd.path);
return buf;
}
return NULL;
}
/* Memory allocated for non-string data. */
static unsigned int tomoyo_allocated_memory_for_elements;
/* Quota for holding non-string data. */
static unsigned int tomoyo_quota_for_elements;
/**
* tomoyo_alloc_element - Allocate permanent memory for structures.
*
* @size: Size in bytes.
*
* Returns pointer to allocated memory on success, NULL otherwise.
*
 * The returned memory is zeroed.
* The RAM is chunked, so NEVER try to kfree() the returned pointer.
*/
void *tomoyo_alloc_element(const unsigned int size)
{
static char *buf;
static DEFINE_MUTEX(lock);
static unsigned int buf_used_len = PATH_MAX;
char *ptr = NULL;
	/* Assumes sizeof(void *) >= sizeof(long) is true. */
const unsigned int word_aligned_size
= roundup(size, max(sizeof(void *), sizeof(long)));
if (word_aligned_size > PATH_MAX)
return NULL;
/***** EXCLUSIVE SECTION START *****/
mutex_lock(&lock);
if (buf_used_len + word_aligned_size > PATH_MAX) {
if (!tomoyo_quota_for_elements ||
tomoyo_allocated_memory_for_elements
+ PATH_MAX <= tomoyo_quota_for_elements)
ptr = kzalloc(PATH_MAX, GFP_KERNEL);
if (!ptr) {
printk(KERN_WARNING "ERROR: Out of memory "
"for tomoyo_alloc_element().\n");
if (!tomoyo_policy_loaded)
panic("MAC Initialization failed.\n");
} else {
buf = ptr;
tomoyo_allocated_memory_for_elements += PATH_MAX;
buf_used_len = word_aligned_size;
ptr = buf;
}
} else if (word_aligned_size) {
int i;
ptr = buf + buf_used_len;
buf_used_len += word_aligned_size;
for (i = 0; i < word_aligned_size; i++) {
if (!ptr[i])
continue;
printk(KERN_ERR "WARNING: Reserved memory was tainted! "
"The system might go wrong.\n");
ptr[i] = '\0';
}
}
mutex_unlock(&lock);
/***** EXCLUSIVE SECTION END *****/
return ptr;
}
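/*
 * Illustration (not part of the kernel sources): how the word alignment
 * above carves requests out of a PATH_MAX sized chunk. A local ROUNDUP
 * macro stands in for the kernel's roundup().
 */
#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	const unsigned int align = sizeof(void *) > sizeof(long) ?
				   sizeof(void *) : sizeof(long);
	const unsigned int sizes[] = { 1, 7, 8, 13, 24 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("request %2u -> carves %2u bytes\n",
		       sizes[i], ROUNDUP(sizes[i], align));
	return 0;
}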
/* Memory allocated for string data in bytes. */
static unsigned int tomoyo_allocated_memory_for_savename;
/* Quota for holding string data in bytes. */
static unsigned int tomoyo_quota_for_savename;
/*
* TOMOYO uses this hash only when appending a string into the string
 * table. The frequency of appending strings is very low, so we don't need
 * a large (e.g. 64k) hash size. 256 will be sufficient.
*/
#define TOMOYO_MAX_HASH 256
/* Structure for string data. */
struct tomoyo_name_entry {
struct list_head list;
struct tomoyo_path_info entry;
};
/* Structure for available memory region. */
struct tomoyo_free_memory_block_list {
struct list_head list;
char *ptr; /* Pointer to a free area. */
int len; /* Length of the area. */
};
/*
* The list for "struct tomoyo_name_entry".
*
 * This list is updated only inside tomoyo_save_name(), which serializes
 * updates with its own local mutex, so no global lock is needed.
*/
static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
/**
* tomoyo_save_name - Allocate permanent memory for string data.
*
 * @name: The string to store into the permanent memory.
*
* Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
*
* The RAM is shared, so NEVER try to modify or kfree() the returned name.
*/
const struct tomoyo_path_info *tomoyo_save_name(const char *name)
{
static LIST_HEAD(fmb_list);
static DEFINE_MUTEX(lock);
struct tomoyo_name_entry *ptr;
unsigned int hash;
	/*
	 * fmb contains available size in bytes.
	 * fmb is removed from the fmb_list when fmb->len becomes 0.
	 */
struct tomoyo_free_memory_block_list *fmb;
int len;
char *cp;
if (!name)
return NULL;
len = strlen(name) + 1;
if (len > TOMOYO_MAX_PATHNAME_LEN) {
printk(KERN_WARNING "ERROR: Name too long "
"for tomoyo_save_name().\n");
return NULL;
}
hash = full_name_hash((const unsigned char *) name, len - 1);
/***** EXCLUSIVE SECTION START *****/
mutex_lock(&lock);
list_for_each_entry(ptr, &tomoyo_name_list[hash % TOMOYO_MAX_HASH],
list) {
if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name))
goto out;
}
list_for_each_entry(fmb, &fmb_list, list) {
if (len <= fmb->len)
goto ready;
}
if (!tomoyo_quota_for_savename ||
tomoyo_allocated_memory_for_savename + PATH_MAX
<= tomoyo_quota_for_savename)
cp = kzalloc(PATH_MAX, GFP_KERNEL);
else
cp = NULL;
fmb = kzalloc(sizeof(*fmb), GFP_KERNEL);
if (!cp || !fmb) {
kfree(cp);
kfree(fmb);
printk(KERN_WARNING "ERROR: Out of memory "
"for tomoyo_save_name().\n");
if (!tomoyo_policy_loaded)
panic("MAC Initialization failed.\n");
ptr = NULL;
goto out;
}
tomoyo_allocated_memory_for_savename += PATH_MAX;
list_add(&fmb->list, &fmb_list);
fmb->ptr = cp;
fmb->len = PATH_MAX;
ready:
ptr = tomoyo_alloc_element(sizeof(*ptr));
if (!ptr)
goto out;
ptr->entry.name = fmb->ptr;
memmove(fmb->ptr, name, len);
tomoyo_fill_path_info(&ptr->entry);
fmb->ptr += len;
fmb->len -= len;
list_add_tail(&ptr->list, &tomoyo_name_list[hash % TOMOYO_MAX_HASH]);
if (fmb->len == 0) {
list_del(&fmb->list);
kfree(fmb);
}
out:
mutex_unlock(&lock);
/***** EXCLUSIVE SECTION END *****/
return ptr ? &ptr->entry : NULL;
}
/**
* tomoyo_realpath_init - Initialize realpath related code.
*/
void __init tomoyo_realpath_init(void)
{
int i;
BUILD_BUG_ON(TOMOYO_MAX_PATHNAME_LEN > PATH_MAX);
for (i = 0; i < TOMOYO_MAX_HASH; i++)
INIT_LIST_HEAD(&tomoyo_name_list[i]);
INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
tomoyo_kernel_domain.domainname = tomoyo_save_name(TOMOYO_ROOT_NAME);
list_add_tail(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
down_read(&tomoyo_domain_list_lock);
if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain)
panic("Can't register tomoyo_kernel_domain");
up_read(&tomoyo_domain_list_lock);
}
/* Memory allocated for temporary purpose. */
static atomic_t tomoyo_dynamic_memory_size;
/**
* tomoyo_alloc - Allocate memory for temporary purpose.
*
* @size: Size in bytes.
*
* Returns pointer to allocated memory on success, NULL otherwise.
*/
void *tomoyo_alloc(const size_t size)
{
void *p = kzalloc(size, GFP_KERNEL);
if (p)
atomic_add(ksize(p), &tomoyo_dynamic_memory_size);
return p;
}
/**
* tomoyo_free - Release memory allocated by tomoyo_alloc().
*
* @p: Pointer returned by tomoyo_alloc(). May be NULL.
*
* Returns nothing.
*/
void tomoyo_free(const void *p)
{
if (p) {
atomic_sub(ksize(p), &tomoyo_dynamic_memory_size);
kfree(p);
}
}
/**
* tomoyo_read_memory_counter - Check for memory usage in bytes.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns memory usage.
*/
int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head)
{
if (!head->read_eof) {
const unsigned int shared
= tomoyo_allocated_memory_for_savename;
const unsigned int private
= tomoyo_allocated_memory_for_elements;
const unsigned int dynamic
= atomic_read(&tomoyo_dynamic_memory_size);
char buffer[64];
memset(buffer, 0, sizeof(buffer));
if (tomoyo_quota_for_savename)
snprintf(buffer, sizeof(buffer) - 1,
" (Quota: %10u)",
tomoyo_quota_for_savename);
else
buffer[0] = '\0';
tomoyo_io_printf(head, "Shared: %10u%s\n", shared, buffer);
if (tomoyo_quota_for_elements)
snprintf(buffer, sizeof(buffer) - 1,
" (Quota: %10u)",
tomoyo_quota_for_elements);
else
buffer[0] = '\0';
tomoyo_io_printf(head, "Private: %10u%s\n", private, buffer);
tomoyo_io_printf(head, "Dynamic: %10u\n", dynamic);
tomoyo_io_printf(head, "Total: %10u\n",
shared + private + dynamic);
head->read_eof = true;
}
return 0;
}
/**
* tomoyo_write_memory_quota - Set memory quota.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0.
*/
int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
unsigned int size;
if (sscanf(data, "Shared: %u", &size) == 1)
tomoyo_quota_for_savename = size;
else if (sscanf(data, "Private: %u", &size) == 1)
tomoyo_quota_for_elements = size;
return 0;
}
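/*
 * Illustration (not part of the kernel sources): feeding the parser above
 * from user space. The path assumes the memory counter is exposed as
 * /sys/kernel/security/tomoyo/meminfo once securityfs is mounted; adjust
 * it to whatever interface file the running kernel actually registers.
 */
#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/sys/kernel/security/tomoyo/meminfo", "w");

	if (!fp) {
		perror("meminfo");
		return 1;
	}
	/* Limit the "Shared:" (string data) pool to 2 MB. */
	fprintf(fp, "Shared: %u\n", 2u * 1024 * 1024);
	fclose(fp);
	return 0;
}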

66
security/tomoyo/realpath.h Normal file

@ -0,0 +1,66 @@
/*
* security/tomoyo/realpath.h
*
* Get the canonicalized absolute pathnames. The basis for TOMOYO.
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
* Version: 2.2.0-pre 2009/02/01
*
*/
#ifndef _SECURITY_TOMOYO_REALPATH_H
#define _SECURITY_TOMOYO_REALPATH_H
struct path;
struct tomoyo_path_info;
struct tomoyo_io_buffer;
/* Convert binary string to ASCII string. */
int tomoyo_encode(char *buffer, int buflen, const char *str);
/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */
int tomoyo_realpath_from_path2(struct path *path, char *newname,
int newname_len);
/*
* Returns realpath(3) of the given pathname but ignores chroot'ed root.
* These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
* if these functions didn't return NULL.
*/
char *tomoyo_realpath(const char *pathname);
/*
 * Same as tomoyo_realpath() except that it doesn't follow the final symlink.
*/
char *tomoyo_realpath_nofollow(const char *pathname);
/* Same as tomoyo_realpath() except that the pathname is already resolved. */
char *tomoyo_realpath_from_path(struct path *path);
/*
* Allocate memory for ACL entry.
* The RAM is chunked, so NEVER try to kfree() the returned pointer.
*/
void *tomoyo_alloc_element(const unsigned int size);
/*
* Keep the given name on the RAM.
* The RAM is shared, so NEVER try to modify or kfree() the returned name.
*/
const struct tomoyo_path_info *tomoyo_save_name(const char *name);
/* Allocate memory for temporary use (e.g. permission checks). */
void *tomoyo_alloc(const size_t size);
/* Free memory allocated by tomoyo_alloc(). */
void tomoyo_free(const void *p);
/* Check for memory usage. */
int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head);
/* Set memory quota. */
int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head);
/* Initialize realpath related code. */
void __init tomoyo_realpath_init(void);
#endif /* !defined(_SECURITY_TOMOYO_REALPATH_H) */

294
security/tomoyo/tomoyo.c Normal file

@ -0,0 +1,294 @@
/*
* security/tomoyo/tomoyo.c
*
* LSM hooks for TOMOYO Linux.
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
* Version: 2.2.0-pre 2009/02/01
*
*/
#include <linux/security.h>
#include "common.h"
#include "tomoyo.h"
#include "realpath.h"
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
/*
* Since "struct tomoyo_domain_info *" is a sharable pointer,
* we don't need to duplicate.
*/
new->security = old->security;
return 0;
}
static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
{
/*
	 * Do this only when this function is called for the first time of an
	 * execve operation.
*/
if (bprm->cred_prepared)
return 0;
/*
* Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
* for the first time.
*/
if (!tomoyo_policy_loaded)
tomoyo_load_policy(bprm->filename);
/*
	 * Tell tomoyo_bprm_check_security() that it is called for the first
	 * time of an execve operation.
*/
bprm->cred->security = NULL;
return 0;
}
static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
{
struct tomoyo_domain_info *domain = bprm->cred->security;
/*
* Execute permission is checked against pathname passed to do_execve()
* using current domain.
*/
if (!domain) {
struct tomoyo_domain_info *next_domain = NULL;
int retval = tomoyo_find_next_domain(bprm, &next_domain);
if (!retval)
bprm->cred->security = next_domain;
return retval;
}
/*
* Read permission is checked against interpreters using next domain.
* '1' is the result of open_to_namei_flags(O_RDONLY).
*/
return tomoyo_check_open_permission(domain, &bprm->file->f_path, 1);
}
#ifdef CONFIG_SYSCTL
static int tomoyo_prepend(char **buffer, int *buflen, const char *str)
{
int namelen = strlen(str);
if (*buflen < namelen)
return -ENOMEM;
*buflen -= namelen;
*buffer -= namelen;
memcpy(*buffer, str, namelen);
return 0;
}
/**
 * tomoyo_sysctl_path - Return the realpath of a ctl_table.
 *
 * @table: Pointer to "struct ctl_table".
*
* Returns realpath(3) of the @table on success.
* Returns NULL on failure.
*
* This function uses tomoyo_alloc(), so the caller must call tomoyo_free()
* if this function didn't return NULL.
*/
static char *tomoyo_sysctl_path(struct ctl_table *table)
{
int buflen = TOMOYO_MAX_PATHNAME_LEN;
char *buf = tomoyo_alloc(buflen);
char *end = buf + buflen;
int error = -ENOMEM;
if (!buf)
return NULL;
*--end = '\0';
buflen--;
while (table) {
char num[32];
const char *sp = table->procname;
if (!sp) {
memset(num, 0, sizeof(num));
snprintf(num, sizeof(num) - 1, "=%d=", table->ctl_name);
sp = num;
}
if (tomoyo_prepend(&end, &buflen, sp) ||
tomoyo_prepend(&end, &buflen, "/"))
goto out;
table = table->parent;
}
if (tomoyo_prepend(&end, &buflen, "/proc/sys"))
goto out;
error = tomoyo_encode(buf, end - buf, end);
out:
if (!error)
return buf;
tomoyo_free(buf);
return NULL;
}
static int tomoyo_sysctl(struct ctl_table *table, int op)
{
int error;
char *name;
op &= MAY_READ | MAY_WRITE;
if (!op)
return 0;
name = tomoyo_sysctl_path(table);
if (!name)
return -ENOMEM;
error = tomoyo_check_file_perm(tomoyo_domain(), name, op);
tomoyo_free(name);
return error;
}
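/*
 * Illustration (not part of the kernel sources): the right-to-left
 * construction used by tomoyo_sysctl_path(). Components are written into
 * the tail of the buffer while walking from the leaf table towards the
 * root, then "/proc/sys" is prepended last.
 */
#include <stdio.h>
#include <string.h>

static int prepend(char **buffer, int *buflen, const char *str)
{
	int namelen = strlen(str);

	if (*buflen < namelen)
		return -1;
	*buflen -= namelen;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

int main(void)
{
	/* Leaf-to-root order, as table->parent is followed in the kernel. */
	const char *components[] = { "hostname", "kernel" };
	char buf[64];
	char *end = buf + sizeof(buf);
	int buflen = sizeof(buf);
	unsigned int i;

	*--end = '\0';
	buflen--;
	for (i = 0; i < sizeof(components) / sizeof(components[0]); i++)
		if (prepend(&end, &buflen, components[i]) ||
		    prepend(&end, &buflen, "/"))
			return 1;
	if (prepend(&end, &buflen, "/proc/sys"))
		return 1;
	printf("%s\n", end);	/* /proc/sys/kernel/hostname */
	return 0;
}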
#endif
static int tomoyo_path_truncate(struct path *path, loff_t length,
unsigned int time_attrs)
{
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_TRUNCATE_ACL,
path);
}
static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_UNLINK_ACL,
&path);
}
static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
int mode)
{
struct path path = { parent->mnt, dentry };
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_MKDIR_ACL,
&path);
}
static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_RMDIR_ACL,
&path);
}
static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
const char *old_name)
{
struct path path = { parent->mnt, dentry };
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_SYMLINK_ACL,
&path);
}
static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
int mode, unsigned int dev)
{
struct path path = { parent->mnt, dentry };
int type = TOMOYO_TYPE_CREATE_ACL;
switch (mode & S_IFMT) {
case S_IFCHR:
type = TOMOYO_TYPE_MKCHAR_ACL;
break;
case S_IFBLK:
type = TOMOYO_TYPE_MKBLOCK_ACL;
break;
case S_IFIFO:
type = TOMOYO_TYPE_MKFIFO_ACL;
break;
case S_IFSOCK:
type = TOMOYO_TYPE_MKSOCK_ACL;
break;
}
return tomoyo_check_1path_perm(tomoyo_domain(),
type, &path);
}
static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
struct dentry *new_dentry)
{
struct path path1 = { new_dir->mnt, old_dentry };
struct path path2 = { new_dir->mnt, new_dentry };
return tomoyo_check_2path_perm(tomoyo_domain(),
TOMOYO_TYPE_LINK_ACL,
&path1, &path2);
}
static int tomoyo_path_rename(struct path *old_parent,
struct dentry *old_dentry,
struct path *new_parent,
struct dentry *new_dentry)
{
struct path path1 = { old_parent->mnt, old_dentry };
struct path path2 = { new_parent->mnt, new_dentry };
return tomoyo_check_2path_perm(tomoyo_domain(),
TOMOYO_TYPE_RENAME_ACL,
&path1, &path2);
}
static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))
return tomoyo_check_rewrite_permission(tomoyo_domain(), file);
return 0;
}
static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
{
int flags = f->f_flags;
if ((flags + 1) & O_ACCMODE)
flags++;
flags |= f->f_flags & (O_APPEND | O_TRUNC);
/* Don't check read permission here if called from do_execve(). */
if (current->in_execve)
return 0;
return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags);
}
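/*
 * Illustration (not part of the kernel sources): what the
 * "(flags + 1) & O_ACCMODE" conversion above does. The 0/1/2 access mode
 * of open(2) becomes the 1/2/3 read/write bitmask that the permission
 * check expects (compare the '1' noted in tomoyo_bprm_check_security()).
 */
#include <stdio.h>
#include <fcntl.h>

int main(void)
{
	const int modes[] = { O_RDONLY, O_WRONLY, O_RDWR };
	const char *names[] = { "O_RDONLY", "O_WRONLY", "O_RDWR" };
	int i;

	for (i = 0; i < 3; i++) {
		int flags = modes[i];

		if ((flags + 1) & O_ACCMODE)
			flags++;
		printf("%-8s -> %d (bit 0 = read, bit 1 = write)\n",
		       names[i], flags & O_ACCMODE);
	}
	return 0;
}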
static struct security_operations tomoyo_security_ops = {
.name = "tomoyo",
.cred_prepare = tomoyo_cred_prepare,
.bprm_set_creds = tomoyo_bprm_set_creds,
.bprm_check_security = tomoyo_bprm_check_security,
#ifdef CONFIG_SYSCTL
.sysctl = tomoyo_sysctl,
#endif
.file_fcntl = tomoyo_file_fcntl,
.dentry_open = tomoyo_dentry_open,
.path_truncate = tomoyo_path_truncate,
.path_unlink = tomoyo_path_unlink,
.path_mkdir = tomoyo_path_mkdir,
.path_rmdir = tomoyo_path_rmdir,
.path_symlink = tomoyo_path_symlink,
.path_mknod = tomoyo_path_mknod,
.path_link = tomoyo_path_link,
.path_rename = tomoyo_path_rename,
};
static int __init tomoyo_init(void)
{
struct cred *cred = (struct cred *) current_cred();
if (!security_module_enable(&tomoyo_security_ops))
return 0;
/* register ourselves with the security framework */
if (register_security(&tomoyo_security_ops))
panic("Failure registering TOMOYO Linux");
printk(KERN_INFO "TOMOYO Linux initialized\n");
cred->security = &tomoyo_kernel_domain;
tomoyo_realpath_init();
return 0;
}
security_initcall(tomoyo_init);

106
security/tomoyo/tomoyo.h Normal file

@ -0,0 +1,106 @@
/*
* security/tomoyo/tomoyo.h
*
* Implementation of the Domain-Based Mandatory Access Control.
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
* Version: 2.2.0-pre 2009/02/01
*
*/
#ifndef _SECURITY_TOMOYO_TOMOYO_H
#define _SECURITY_TOMOYO_TOMOYO_H
struct tomoyo_path_info;
struct path;
struct inode;
struct linux_binprm;
struct pt_regs;
struct tomoyo_page_buffer;
int tomoyo_check_file_perm(struct tomoyo_domain_info *domain,
const char *filename, const u8 perm);
int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
const struct tomoyo_path_info *filename,
struct tomoyo_page_buffer *buf);
int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct path *path, const int flag);
int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
const u8 operation, struct path *path);
int tomoyo_check_2path_perm(struct tomoyo_domain_info *domain,
const u8 operation, struct path *path1,
struct path *path2);
int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
struct file *filp);
int tomoyo_find_next_domain(struct linux_binprm *bprm,
struct tomoyo_domain_info **next_domain);
/* Index numbers for Access Controls. */
#define TOMOYO_TYPE_SINGLE_PATH_ACL 0
#define TOMOYO_TYPE_DOUBLE_PATH_ACL 1
/* Index numbers for File Controls. */
/*
* TYPE_READ_WRITE_ACL is special. TYPE_READ_WRITE_ACL is automatically set
* if both TYPE_READ_ACL and TYPE_WRITE_ACL are set. Both TYPE_READ_ACL and
* TYPE_WRITE_ACL are automatically set if TYPE_READ_WRITE_ACL is set.
* TYPE_READ_WRITE_ACL is automatically cleared if either TYPE_READ_ACL or
* TYPE_WRITE_ACL is cleared. Both TYPE_READ_ACL and TYPE_WRITE_ACL are
* automatically cleared if TYPE_READ_WRITE_ACL is cleared.
*/
#define TOMOYO_TYPE_READ_WRITE_ACL 0
#define TOMOYO_TYPE_EXECUTE_ACL 1
#define TOMOYO_TYPE_READ_ACL 2
#define TOMOYO_TYPE_WRITE_ACL 3
#define TOMOYO_TYPE_CREATE_ACL 4
#define TOMOYO_TYPE_UNLINK_ACL 5
#define TOMOYO_TYPE_MKDIR_ACL 6
#define TOMOYO_TYPE_RMDIR_ACL 7
#define TOMOYO_TYPE_MKFIFO_ACL 8
#define TOMOYO_TYPE_MKSOCK_ACL 9
#define TOMOYO_TYPE_MKBLOCK_ACL 10
#define TOMOYO_TYPE_MKCHAR_ACL 11
#define TOMOYO_TYPE_TRUNCATE_ACL 12
#define TOMOYO_TYPE_SYMLINK_ACL 13
#define TOMOYO_TYPE_REWRITE_ACL 14
#define TOMOYO_MAX_SINGLE_PATH_OPERATION 15
#define TOMOYO_TYPE_LINK_ACL 0
#define TOMOYO_TYPE_RENAME_ACL 1
#define TOMOYO_MAX_DOUBLE_PATH_OPERATION 2
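/*
 * Illustration (not part of the kernel sources): the READ_WRITE coupling
 * described in the comment above, modelled with a plain bitmask. The
 * in-kernel representation differs; only the set/clear rules are shown.
 */
#include <stdio.h>

#define DEMO_READ_WRITE	(1u << 0)	/* stands for TYPE_READ_WRITE_ACL */
#define DEMO_READ	(1u << 2)	/* stands for TYPE_READ_ACL */
#define DEMO_WRITE	(1u << 3)	/* stands for TYPE_WRITE_ACL */

static unsigned int set_perm(unsigned int perm, unsigned int bit)
{
	perm |= bit;
	if ((perm & DEMO_READ) && (perm & DEMO_WRITE))
		perm |= DEMO_READ_WRITE;	/* both halves present */
	if (perm & DEMO_READ_WRITE)
		perm |= DEMO_READ | DEMO_WRITE;	/* READ_WRITE implies both */
	return perm;
}

static unsigned int clear_perm(unsigned int perm, unsigned int bit)
{
	perm &= ~bit;
	if (bit & (DEMO_READ | DEMO_WRITE))
		perm &= ~DEMO_READ_WRITE;	/* one half was cleared */
	if (bit & DEMO_READ_WRITE)
		perm &= ~(DEMO_READ | DEMO_WRITE);
	return perm;
}

int main(void)
{
	unsigned int perm = 0;

	perm = set_perm(perm, DEMO_READ);
	perm = set_perm(perm, DEMO_WRITE);
	printf("read+write set: %#x\n", perm);	/* READ_WRITE appears */
	perm = clear_perm(perm, DEMO_READ);
	printf("read cleared  : %#x\n", perm);	/* READ_WRITE disappears */
	return 0;
}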
#define TOMOYO_DOMAINPOLICY 0
#define TOMOYO_EXCEPTIONPOLICY 1
#define TOMOYO_DOMAIN_STATUS 2
#define TOMOYO_PROCESS_STATUS 3
#define TOMOYO_MEMINFO 4
#define TOMOYO_SELFDOMAIN 5
#define TOMOYO_VERSION 6
#define TOMOYO_PROFILE 7
#define TOMOYO_MANAGER 8
extern struct tomoyo_domain_info tomoyo_kernel_domain;
static inline struct tomoyo_domain_info *tomoyo_domain(void)
{
return current_cred()->security;
}
/* Caller holds tasklist_lock spinlock. */
static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
*task)
{
/***** CRITICAL SECTION START *****/
const struct cred *cred = get_task_cred(task);
struct tomoyo_domain_info *domain = cred->security;
put_cred(cred);
return domain;
/***** CRITICAL SECTION END *****/
}
#endif /* !defined(_SECURITY_TOMOYO_TOMOYO_H) */