Bug 920372 - Import Chromium seccomp-bpf compiler, rev 4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a. r=kang

Newly imported:
* sandbox/linux/seccomp-bpf/
* sandbox/linux/sandbox_export.h
* base/posix/eintr_wrapper.h

Updated:
* base/basictypes.h
* base/macros.h

At the time of this writing (see future patches for this bug) the only
things we're using from sandbox/linux/seccomp-bpf/ are codegen.cc and
basicblock.cc, and the header files they require.  However, we may use
more of this code in the future, and it seems cleaner in general to
import the entire subtree.
Jed Davis 2014-05-20 18:37:45 -07:00
Parent 8f91f2c8db
Commit 9e94aea459
33 changed files with 8438 additions and 335 deletions

@@ -0,0 +1,27 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

base/basictypes.h
@@ -1,70 +1,47 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// This file contains definitions of our old basic integral types
+// ((u)int{8,16,32,64}) and further includes. I recommend that you use the C99
+// standard types instead, and include <stdint.h>/<stddef.h>/etc. as needed.
+// Note that the macros and macro-like constructs that were formerly defined in
+// this file are now available separately in base/macros.h.
#ifndef BASE_BASICTYPES_H_
#define BASE_BASICTYPES_H_
-#include <limits.h> // So we can set the bounds of our types
-#include <stddef.h> // For size_t
-#include <string.h> // for memcpy
+#include <limits.h> // So we can set the bounds of our types.
+#include <stddef.h> // For size_t.
+#include <stdint.h> // For intptr_t.
-#include "base/port.h" // Types that only need exist on certain systems
+#include "base/macros.h"
+#include "base/port.h" // Types that only need exist on certain systems.
-#ifndef COMPILER_MSVC
-// stdint.h is part of C99 but MSVC doesn't have it.
-#include <stdint.h> // For intptr_t.
-#endif
-typedef signed char schar;
-typedef signed char int8;
-typedef short int16;
-typedef int int32;
+// DEPRECATED: Please use (u)int{8,16,32,64}_t instead (and include <stdint.h>).
+typedef int8_t int8;
+typedef uint8_t uint8;
+typedef int16_t int16;
+typedef int32_t int32;
+typedef uint16_t uint16;
+typedef uint32_t uint32;
+// TODO(vtl): Figure out what's up with the 64-bit types. Can we just define
+// them as |int64_t|/|uint64_t|?
// The NSPR system headers define 64-bit as |long| when possible, except on
// Mac OS X. In order to not have typedef mismatches, we do the same on LP64.
//
// On Mac OS X, |long long| is used for 64-bit types for compatibility with
// <inttypes.h> format macros even in the LP64 model.
-#if defined(__LP64__) && !defined(OS_MACOSX) && !defined(OS_OPENBSD)
-typedef long int64;
-#else
-typedef long long int64;
-#endif
-// NOTE: It is DANGEROUS to compare signed with unsigned types in loop
-// conditions and other conditional expressions, and it is DANGEROUS to
-// compute object/allocation sizes, indices, and offsets with signed types.
-// Integer overflow behavior for signed types is UNDEFINED in the C/C++
-// standards, but is defined for unsigned types.
-//
-// Use the unsigned types if your variable represents a bit pattern (e.g. a
-// hash value), object or allocation size, object count, offset,
-// array/vector index, etc.
-//
-// Do NOT use 'unsigned' to express "this value should always be positive";
-// use assertions for this.
-//
-// See the Chromium style guide for more information.
-// https://sites.google.com/a/chromium.org/dev/developers/coding-style
-typedef unsigned char uint8;
-typedef unsigned short uint16;
-typedef unsigned int uint32;
-// See the comment above about NSPR and 64-bit.
+#if defined(__LP64__) && !defined(OS_MACOSX) && !defined(OS_OPENBSD)
+typedef long int64;
+typedef unsigned long uint64;
+#else
+typedef long long int64;
+typedef unsigned long long uint64;
+#endif
// A type to represent a Unicode code-point value. As of Unicode 4.0,
// such values require up to 21 bits.
// (For type-checking on pointers, make this explicitly signed,
// and it should always be the signed version of whatever int32 is.)
typedef signed int char32;
+// DEPRECATED: Please use std::numeric_limits (from <limits>) instead.
const uint8 kuint8max = (( uint8) 0xFF);
const uint16 kuint16max = ((uint16) 0xFFFF);
const uint32 kuint32max = ((uint32) 0xFFFFFFFF);
@@ -78,292 +55,4 @@ const int32 kint32max = (( int32) 0x7FFFFFFF);
const int64 kint64min = (( int64) GG_LONGLONG(0x8000000000000000));
const int64 kint64max = (( int64) GG_LONGLONG(0x7FFFFFFFFFFFFFFF));
// Put this in the private: declarations for a class to be uncopyable.
#define DISALLOW_COPY(TypeName) \
TypeName(const TypeName&)
// Put this in the private: declarations for a class to be unassignable.
#define DISALLOW_ASSIGN(TypeName) \
void operator=(const TypeName&)
// A macro to disallow the copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
// An older, deprecated, politically incorrect name for the above.
// NOTE: The usage of this macro was banned from our code base, but some
// third_party libraries are still using it.
// TODO(tfarina): Figure out how to fix the usage of this macro in the
// third_party libraries and get rid of it.
#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) DISALLOW_COPY_AND_ASSIGN(TypeName)
// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
//
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName(); \
DISALLOW_COPY_AND_ASSIGN(TypeName)
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
//
// One caveat is that arraysize() doesn't accept any array of an
// anonymous type or a type defined inside a function. In these rare
// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below. This is
// due to a limitation in C++'s template system. The limitation might
// eventually be removed, but it hasn't happened yet.
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef _MSC_VER
template <typename T, size_t N>
char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
#define arraysize(array) (sizeof(ArraySizeHelper(array)))
// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
// but can be used on anonymous types or types defined inside
// functions. It's less safe than arraysize as it accepts some
// (although not all) pointers. Therefore, you should use arraysize
// whenever possible.
//
// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
// size_t.
//
// ARRAYSIZE_UNSAFE catches a few type errors. If you see a compiler error
//
// "warning: division by zero in ..."
//
// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
//
// The following comments are on the implementation details, and can
// be ignored by the users.
//
// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
// the array) and sizeof(*(arr)) (the # of bytes in one array
// element). If the former is divisible by the latter, perhaps arr is
// indeed an array, in which case the division result is the # of
// elements in the array. Otherwise, arr cannot possibly be an array,
// and we generate a compiler error to prevent the code from
// compiling.
//
// Since the size of bool is implementation-defined, we need to cast
// !(sizeof(a) % sizeof(*(a))) to size_t in order to ensure the final
// result has type size_t.
//
// This macro is not perfect as it wrongfully accepts certain
// pointers, namely where the pointer size is divisible by the pointee
// size. Since all our code has to go through a 32-bit compiler,
// where a pointer is 4 bytes, this means all pointers to a type whose
// size is 3 or greater than 4 will be (righteously) rejected.
#define ARRAYSIZE_UNSAFE(a) \
((sizeof(a) / sizeof(*(a))) / \
static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
// Use implicit_cast as a safe version of static_cast or const_cast
// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
// a const pointer to Foo).
// When you use implicit_cast, the compiler checks that the cast is safe.
// Such explicit implicit_casts are necessary in surprisingly many
// situations where C++ demands an exact type match instead of an
// argument type convertible to a target type.
//
// The From type can be inferred, so the preferred syntax for using
// implicit_cast is the same as for static_cast etc.:
//
// implicit_cast<ToType>(expr)
//
// implicit_cast would have been part of the C++ standard library,
// but the proposal was submitted too late. It will probably make
// its way into the language in the future.
template<typename To, typename From>
inline To implicit_cast(From const &f) {
return f;
}
// The COMPILE_ASSERT macro can be used to verify that a compile time
// expression is true. For example, you could use it to verify the
// size of a static array:
//
// COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
// content_type_names_incorrect_size);
//
// or to make sure a struct is smaller than a certain size:
//
// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
//
// The second argument to the macro is the name of the variable. If
// the expression is false, most compilers will issue a warning/error
// containing the name of the variable.
template <bool>
struct CompileAssert {
};
#undef COMPILE_ASSERT
#define COMPILE_ASSERT(expr, msg) \
typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
// Implementation details of COMPILE_ASSERT:
//
// - COMPILE_ASSERT works by defining an array type that has -1
// elements (and thus is invalid) when the expression is false.
//
// - The simpler definition
//
// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
//
// does not work, as gcc supports variable-length arrays whose sizes
// are determined at run-time (this is gcc's extension and not part
// of the C++ standard). As a result, gcc fails to reject the
// following code with the simple definition:
//
// int foo;
// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
// // not a compile-time constant.
//
// - By using the type CompileAssert<(bool(expr))>, we ensure that
// expr is a compile-time constant. (Template arguments must be
// determined at compile-time.)
//
// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
//
// CompileAssert<bool(expr)>
//
// instead, these compilers will refuse to compile
//
// COMPILE_ASSERT(5 > 0, some_message);
//
// (They seem to think the ">" in "5 > 0" marks the end of the
// template argument list.)
//
// - The array size is (bool(expr) ? 1 : -1), instead of simply
//
// ((expr) ? 1 : -1).
//
// This is to avoid running into a bug in MS VC 7.1, which
// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
// bit_cast<Dest,Source> is a template function that implements the
// equivalent of "*reinterpret_cast<Dest*>(&source)". We need this in
// very low-level functions like the protobuf library and fast math
// support.
//
// float f = 3.14159265358979;
// int i = bit_cast<int32>(f);
// // i = 0x40490fdb
//
// The classical address-casting method is:
//
// // WRONG
// float f = 3.14159265358979; // WRONG
// int i = * reinterpret_cast<int*>(&f); // WRONG
//
// The address-casting method actually produces undefined behavior
// according to ISO C++ specification section 3.10 -15 -. Roughly, this
// section says: if an object in memory has one type, and a program
// accesses it with a different type, then the result is undefined
// behavior for most values of "different type".
//
// This is true for any cast syntax, either *(int*)&f or
// *reinterpret_cast<int*>(&f). And it is particularly true for
// conversions between integral lvalues and floating-point lvalues.
//
// The purpose of 3.10 -15- is to allow optimizing compilers to assume
// that expressions with different types refer to different memory. gcc
// 4.0.1 has an optimizer that takes advantage of this. So a
// non-conforming program quietly produces wildly incorrect output.
//
// The problem is not the use of reinterpret_cast. The problem is type
// punning: holding an object in memory of one type and reading its bits
// back using a different type.
//
// The C++ standard is more subtle and complex than this, but that
// is the basic idea.
//
// Anyways ...
//
// bit_cast<> calls memcpy() which is blessed by the standard,
// especially by the example in section 3.9 . Also, of course,
// bit_cast<> wraps up the nasty logic in one place.
//
// Fortunately memcpy() is very fast. In optimized mode, with a
// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
// code with the minimal amount of data movement. On a 32-bit system,
// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
// compiles to two loads and two stores.
//
// I tested this code with gcc 2.95.3, gcc 4.0.1, icc 8.1, and msvc 7.1.
//
// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
// is likely to surprise you.
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
// Compile time assertion: sizeof(Dest) == sizeof(Source)
// A compile error here means your Dest and Source have different sizes.
typedef char VerifySizesAreEqual [sizeof(Dest) == sizeof(Source) ? 1 : -1];
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
// Used to explicitly mark the return value of a function as unused. If you are
// really sure you don't want to do anything with the return value of a function
// that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
//
// scoped_ptr<MyType> my_var = ...;
// if (take(my_var.get()) == SUCCESS)
// ignore_result(my_var.release());
//
template<typename T>
inline void ignore_result(const T&) {
}
// The following enum should be used only as a constructor argument to indicate
// that the variable has static storage class, and that the constructor should
// do nothing to its state. It indicates to the reader that it is legal to
// declare a static instance of the class, provided the constructor is given
// the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a
// static variable that has a constructor or a destructor because invocation
// order is undefined. However, IF the type can be initialized by filling with
// zeroes (which the loader does for static variables), AND the destructor also
// does nothing to the storage, AND there are no virtual methods, then a
// constructor declared as
// explicit MyClass(base::LinkerInitialized x) {}
// and invoked as
// static MyClass my_variable_name(base::LINKER_INITIALIZED);
namespace base {
enum LinkerInitialized { LINKER_INITIALIZED };
// Use these to declare and define a static local variable (static T;) so that
// it is leaked and its destructor is not called at exit. If you need
// thread-safe initialization, use base/lazy_instance.h instead.
#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
static type& name = *new type arguments
} // base
#endif // BASE_BASICTYPES_H_

base/macros.h
@@ -0,0 +1,313 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains macros and macro-like constructs (e.g., templates) that
// are commonly used throughout Chromium source. (It may also contain things
// that are closely related to things that are commonly used that belong in this
// file.)
#ifndef BASE_MACROS_H_
#define BASE_MACROS_H_
#include <stddef.h> // For size_t.
#include <string.h> // For memcpy.
#include "base/compiler_specific.h" // For ALLOW_UNUSED.
// Put this in the private: declarations for a class to be uncopyable.
#define DISALLOW_COPY(TypeName) \
TypeName(const TypeName&)
// Put this in the private: declarations for a class to be unassignable.
#define DISALLOW_ASSIGN(TypeName) \
void operator=(const TypeName&)
// A macro to disallow the copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
// An older, deprecated, politically incorrect name for the above.
// NOTE: The usage of this macro was banned from our code base, but some
// third_party libraries are still using it.
// TODO(tfarina): Figure out how to fix the usage of this macro in the
// third_party libraries and get rid of it.
#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) DISALLOW_COPY_AND_ASSIGN(TypeName)
// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
//
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName(); \
DISALLOW_COPY_AND_ASSIGN(TypeName)
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
//
// One caveat is that arraysize() doesn't accept any array of an
// anonymous type or a type defined inside a function. In these rare
// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below. This is
// due to a limitation in C++'s template system. The limitation might
// eventually be removed, but it hasn't happened yet.
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef _MSC_VER
template <typename T, size_t N>
char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
#define arraysize(array) (sizeof(ArraySizeHelper(array)))
// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
// but can be used on anonymous types or types defined inside
// functions. It's less safe than arraysize as it accepts some
// (although not all) pointers. Therefore, you should use arraysize
// whenever possible.
//
// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
// size_t.
//
// ARRAYSIZE_UNSAFE catches a few type errors. If you see a compiler error
//
// "warning: division by zero in ..."
//
// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
//
// The following comments are on the implementation details, and can
// be ignored by the users.
//
// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
// the array) and sizeof(*(arr)) (the # of bytes in one array
// element). If the former is divisible by the latter, perhaps arr is
// indeed an array, in which case the division result is the # of
// elements in the array. Otherwise, arr cannot possibly be an array,
// and we generate a compiler error to prevent the code from
// compiling.
//
// Since the size of bool is implementation-defined, we need to cast
// !(sizeof(a) % sizeof(*(a))) to size_t in order to ensure the final
// result has type size_t.
//
// This macro is not perfect as it wrongfully accepts certain
// pointers, namely where the pointer size is divisible by the pointee
// size. Since all our code has to go through a 32-bit compiler,
// where a pointer is 4 bytes, this means all pointers to a type whose
// size is 3 or greater than 4 will be (righteously) rejected.
#define ARRAYSIZE_UNSAFE(a) \
((sizeof(a) / sizeof(*(a))) / \
static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
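// Illustrative usage (editor's sketch, not part of the imported header;
// assumes a client file that includes base/macros.h). arraysize() is a
// compile-time constant and rejects pointers outright, while
// ARRAYSIZE_UNSAFE() accepts some pointers by mistake.
static const char* const kNames[] = {"zero", "one", "two"};
char seen_flags[arraysize(kNames)];          // OK: a 3-element array.
char seen_flags2[ARRAYSIZE_UNSAFE(kNames)];  // Also 3, but less safe.
// const char* const* p = kNames;
// arraysize(p);  // Would not compile: p is a pointer, not an array.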
// Use implicit_cast as a safe version of static_cast or const_cast
// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
// a const pointer to Foo).
// When you use implicit_cast, the compiler checks that the cast is safe.
// Such explicit implicit_casts are necessary in surprisingly many
// situations where C++ demands an exact type match instead of an
// argument type convertible to a target type.
//
// The From type can be inferred, so the preferred syntax for using
// implicit_cast is the same as for static_cast etc.:
//
// implicit_cast<ToType>(expr)
//
// implicit_cast would have been part of the C++ standard library,
// but the proposal was submitted too late. It will probably make
// its way into the language in the future.
template<typename To, typename From>
inline To implicit_cast(From const &f) {
return f;
}
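// Illustrative usage (editor's sketch, not part of the imported header;
// Shape and Circle are hypothetical types). implicit_cast compiles only
// where the conversion would already happen implicitly, so it can upcast
// but, unlike static_cast, can never downcast.
class Shape {};
class Circle : public Shape {};
inline Shape* AsShape(Circle* circle) {
  return implicit_cast<Shape*>(circle);  // OK: compiler-checked upcast.
  // implicit_cast<Circle*>(implicit_cast<Shape*>(circle)) would not
  // compile: Shape* does not implicitly convert to Circle*.
}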
// The COMPILE_ASSERT macro can be used to verify that a compile time
// expression is true. For example, you could use it to verify the
// size of a static array:
//
// COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
// content_type_names_incorrect_size);
//
// or to make sure a struct is smaller than a certain size:
//
// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
//
// The second argument to the macro is the name of the variable. If
// the expression is false, most compilers will issue a warning/error
// containing the name of the variable.
#undef COMPILE_ASSERT
#if __cplusplus >= 201103L
// Under C++11, just use static_assert.
#define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
#else
template <bool>
struct CompileAssert {
};
#define COMPILE_ASSERT(expr, msg) \
typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] ALLOW_UNUSED
// Implementation details of COMPILE_ASSERT:
//
// - COMPILE_ASSERT works by defining an array type that has -1
// elements (and thus is invalid) when the expression is false.
//
// - The simpler definition
//
// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
//
// does not work, as gcc supports variable-length arrays whose sizes
// are determined at run-time (this is gcc's extension and not part
// of the C++ standard). As a result, gcc fails to reject the
// following code with the simple definition:
//
// int foo;
// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
// // not a compile-time constant.
//
// - By using the type CompileAssert<(bool(expr))>, we ensure that
// expr is a compile-time constant. (Template arguments must be
// determined at compile-time.)
//
// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
//
// CompileAssert<bool(expr)>
//
// instead, these compilers will refuse to compile
//
// COMPILE_ASSERT(5 > 0, some_message);
//
// (They seem to think the ">" in "5 > 0" marks the end of the
// template argument list.)
//
// - The array size is (bool(expr) ? 1 : -1), instead of simply
//
// ((expr) ? 1 : -1).
//
// This is to avoid running into a bug in MS VC 7.1, which
// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
#endif
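// Illustrative usage (editor's sketch, not part of the imported header;
// WireHeader is a hypothetical struct). In C++11 builds the macro expands
// to static_assert; otherwise the negative array size in the typedef above
// makes the compiler reject a false expression.
struct WireHeader {
  char magic[4];
  char version[4];
};
COMPILE_ASSERT(sizeof(WireHeader) == 8, WireHeader_must_be_eight_bytes);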
// bit_cast<Dest,Source> is a template function that implements the
// equivalent of "*reinterpret_cast<Dest*>(&source)". We need this in
// very low-level functions like the protobuf library and fast math
// support.
//
// float f = 3.14159265358979;
// int i = bit_cast<int32>(f);
// // i = 0x40490fdb
//
// The classical address-casting method is:
//
// // WRONG
// float f = 3.14159265358979; // WRONG
// int i = * reinterpret_cast<int*>(&f); // WRONG
//
// The address-casting method actually produces undefined behavior
// according to ISO C++ specification section 3.10 -15 -. Roughly, this
// section says: if an object in memory has one type, and a program
// accesses it with a different type, then the result is undefined
// behavior for most values of "different type".
//
// This is true for any cast syntax, either *(int*)&f or
// *reinterpret_cast<int*>(&f). And it is particularly true for
// conversions between integral lvalues and floating-point lvalues.
//
// The purpose of 3.10 -15- is to allow optimizing compilers to assume
// that expressions with different types refer to different memory. gcc
// 4.0.1 has an optimizer that takes advantage of this. So a
// non-conforming program quietly produces wildly incorrect output.
//
// The problem is not the use of reinterpret_cast. The problem is type
// punning: holding an object in memory of one type and reading its bits
// back using a different type.
//
// The C++ standard is more subtle and complex than this, but that
// is the basic idea.
//
// Anyways ...
//
// bit_cast<> calls memcpy() which is blessed by the standard,
// especially by the example in section 3.9 . Also, of course,
// bit_cast<> wraps up the nasty logic in one place.
//
// Fortunately memcpy() is very fast. In optimized mode, with a
// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
// code with the minimal amount of data movement. On a 32-bit system,
// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
// compiles to two loads and two stores.
//
// I tested this code with gcc 2.95.3, gcc 4.0.1, icc 8.1, and msvc 7.1.
//
// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
// is likely to surprise you.
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
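// Illustrative usage (editor's sketch, not part of the imported header).
// bit_cast copies the bits via memcpy(), avoiding the undefined behavior
// of the address-casting idiom criticized above; mismatched sizes fail to
// compile through the COMPILE_ASSERT inside bit_cast.
inline unsigned int FloatBits(float value) {
  // Assumes a 32-bit unsigned int, as on the platforms this code targets.
  return bit_cast<unsigned int>(value);  // For 3.14159265f: 0x40490fdb.
  // bit_cast<double>(value) would not compile: 8 bytes vs. 4 bytes.
}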
// Used to explicitly mark the return value of a function as unused. If you are
// really sure you don't want to do anything with the return value of a function
// that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
//
// scoped_ptr<MyType> my_var = ...;
// if (TakeOwnership(my_var.get()) == SUCCESS)
// ignore_result(my_var.release());
//
template<typename T>
inline void ignore_result(const T&) {
}
// The following enum should be used only as a constructor argument to indicate
// that the variable has static storage class, and that the constructor should
// do nothing to its state. It indicates to the reader that it is legal to
// declare a static instance of the class, provided the constructor is given
// the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a
// static variable that has a constructor or a destructor because invocation
// order is undefined. However, IF the type can be initialized by filling with
// zeroes (which the loader does for static variables), AND the destructor also
// does nothing to the storage, AND there are no virtual methods, then a
// constructor declared as
// explicit MyClass(base::LinkerInitialized x) {}
// and invoked as
// static MyClass my_variable_name(base::LINKER_INITIALIZED);
namespace base {
enum LinkerInitialized { LINKER_INITIALIZED };
// Use these to declare and define a static local variable (static T;) so that
// it is leaked and its destructor is not called at exit. If you need
// thread-safe initialization, use base/lazy_instance.h instead.
#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
static type& name = *new type arguments
} // base
#endif // BASE_MACROS_H_
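// Illustrative usage (editor's sketch, not part of the imported header;
// GetCachedHostname is a hypothetical function). CR_DEFINE_STATIC_LOCAL
// leaks the object on purpose: the reference binds to a heap allocation,
// so no destructor runs at exit and the instance stays usable even in
// code that runs during shutdown.
#include <string>
inline std::string& GetCachedHostname() {
  CR_DEFINE_STATIC_LOCAL(std::string, hostname, ("localhost"));
  return hostname;  // Same leaked instance on every call.
}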

base/posix/eintr_wrapper.h
@@ -0,0 +1,67 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This provides a wrapper around system calls which may be interrupted by a
// signal and return EINTR. See man 7 signal.
// To prevent long-lasting loops (which would likely be a bug, such as a signal
// that should be masked) from going unnoticed, there is a limit after which the
// caller will nonetheless see an EINTR in Debug builds.
//
// On Windows, this wrapper macro does nothing.
//
// Don't wrap close calls in HANDLE_EINTR. Use IGNORE_EINTR if the return
// value of close is significant. See http://crbug.com/269623.
#ifndef BASE_POSIX_EINTR_WRAPPER_H_
#define BASE_POSIX_EINTR_WRAPPER_H_
#include "build/build_config.h"
#if defined(OS_POSIX)
#include <errno.h>
#if defined(NDEBUG)
#define HANDLE_EINTR(x) ({ \
typeof(x) eintr_wrapper_result; \
do { \
eintr_wrapper_result = (x); \
} while (eintr_wrapper_result == -1 && errno == EINTR); \
eintr_wrapper_result; \
})
#else
#define HANDLE_EINTR(x) ({ \
int eintr_wrapper_counter = 0; \
typeof(x) eintr_wrapper_result; \
do { \
eintr_wrapper_result = (x); \
} while (eintr_wrapper_result == -1 && errno == EINTR && \
eintr_wrapper_counter++ < 100); \
eintr_wrapper_result; \
})
#endif // NDEBUG
#define IGNORE_EINTR(x) ({ \
typeof(x) eintr_wrapper_result; \
do { \
eintr_wrapper_result = (x); \
if (eintr_wrapper_result == -1 && errno == EINTR) { \
eintr_wrapper_result = 0; \
} \
} while (0); \
eintr_wrapper_result; \
})
#else
#define HANDLE_EINTR(x) (x)
#define IGNORE_EINTR(x) (x)
#endif // OS_POSIX
#endif // BASE_POSIX_EINTR_WRAPPER_H_
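// Illustrative usage (editor's sketch, not part of the imported header;
// ReadSome and CloseQuietly are hypothetical helpers). HANDLE_EINTR
// retries the wrapped call while it fails with EINTR; IGNORE_EINTR turns
// an EINTR "failure" into success, which is the correct handling for
// close() per the comment at the top of this file.
#include <unistd.h>
inline ssize_t ReadSome(int fd, char* buf, size_t len) {
  return HANDLE_EINTR(read(fd, buf, len));  // Retried if a signal lands.
}
inline int CloseQuietly(int fd) {
  return IGNORE_EINTR(close(fd));  // Never wrap close() in HANDLE_EINTR.
}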

sandbox/linux/sandbox_export.h
@@ -0,0 +1,23 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SANDBOX_EXPORT_H_
#define SANDBOX_LINUX_SANDBOX_EXPORT_H_
#if defined(COMPONENT_BUILD)
#if defined(SANDBOX_IMPLEMENTATION)
#define SANDBOX_EXPORT __attribute__((visibility("default")))
#define SANDBOX_EXPORT_PRIVATE __attribute__((visibility("default")))
#else
#define SANDBOX_EXPORT
#define SANDBOX_EXPORT_PRIVATE
#endif // defined(SANDBOX_IMPLEMENTATION)
#else // defined(COMPONENT_BUILD)
#define SANDBOX_EXPORT
#define SANDBOX_EXPORT_PRIVATE
#endif // defined(COMPONENT_BUILD)
#endif // SANDBOX_LINUX_SANDBOX_EXPORT_H_
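// Illustrative usage (editor's sketch, not part of the imported header;
// PolicyCompiler is a hypothetical class). In a component build with
// SANDBOX_IMPLEMENTATION defined, the annotation expands to default ELF
// visibility so the symbol is exported from the shared library; in every
// other configuration it expands to nothing.
class SANDBOX_EXPORT PolicyCompiler {
 public:
  void Compile();
};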

sandbox/linux/seccomp-bpf/basicblock.cc
@@ -0,0 +1,13 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sandbox/linux/seccomp-bpf/basicblock.h"
namespace sandbox {
BasicBlock::BasicBlock() {}
BasicBlock::~BasicBlock() {}
} // namespace sandbox

sandbox/linux/seccomp-bpf/basicblock.h
@@ -0,0 +1,49 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_BASICBLOCK_H__
#define SANDBOX_LINUX_SECCOMP_BPF_BASICBLOCK_H__
#include <vector>
#include "sandbox/linux/seccomp-bpf/instruction.h"
namespace sandbox {
struct BasicBlock {
BasicBlock();
~BasicBlock();
// Our implementation of the code generator uses a "Less" operator to
// identify common sequences of basic blocks. This would normally be
// really easy to do, but STL requires us to wrap the comparator into
// a class. We begrudgingly add some code here that provides this wrapping.
template <class T>
class Less {
public:
Less(const T& data,
int (*cmp)(const BasicBlock*, const BasicBlock*, const T& data))
: data_(data), cmp_(cmp) {}
bool operator()(const BasicBlock* a, const BasicBlock* b) const {
return cmp_(a, b, data_) < 0;
}
private:
const T& data_;
int (*cmp_)(const BasicBlock*, const BasicBlock*, const T&);
};
// Basic blocks are essentially nothing more than a set of instructions.
std::vector<Instruction*> instructions;
// In order to compute relative branch offsets we need to keep track of
// how far our block is away from the very last basic block. The "offset"
// is measured in number of BPF instructions.
int offset;
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_BASICBLOCK_H__
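// Illustrative sketch (editor's addition, not part of the imported header),
// mirroring how codegen.cc uses Less: it binds a three-argument comparison
// function plus its auxiliary data into the two-argument comparator that
// std::set expects, so blocks with identical instruction sequences collapse
// onto a single set entry.
//   BasicBlock::Less<TargetsToBlocks> less(blocks, PointerCompare);
//   std::set<BasicBlock*, BasicBlock::Less<TargetsToBlocks> > seen(less);
//   if (seen.find(bb) == seen.end()) seen.insert(bb);  // else: merge bb.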

sandbox/linux/seccomp-bpf/bpf_tests.h
@@ -0,0 +1,116 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
#define SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "build/build_config.h"
#include "sandbox/linux/tests/unit_tests.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
namespace sandbox {
// A BPF_DEATH_TEST is just the same as a BPF_TEST, but it assumes that the
// test will fail with a particular known error condition. Use the DEATH_XXX()
// macros from unit_tests.h to specify the expected error condition.
// A BPF_DEATH_TEST is always disabled under ThreadSanitizer, see
// crbug.com/243968.
#define BPF_DEATH_TEST(test_case_name, test_name, death, policy, aux...) \
void BPF_TEST_##test_name(sandbox::BPFTests<aux>::AuxType& BPF_AUX); \
TEST(test_case_name, DISABLE_ON_TSAN(test_name)) { \
sandbox::BPFTests<aux>::TestArgs arg(BPF_TEST_##test_name, policy); \
sandbox::BPFTests<aux>::RunTestInProcess( \
sandbox::BPFTests<aux>::TestWrapper, &arg, death); \
} \
void BPF_TEST_##test_name(sandbox::BPFTests<aux>::AuxType& BPF_AUX)
// BPF_TEST() is a special version of SANDBOX_TEST(). It turns into a no-op
// if the host does not have kernel support for running BPF filters.
// Also, it takes advantage of the Die class to avoid calling LOG(FATAL) from
// inside our tests, as we don't need or even want all the error handling that
// LOG(FATAL) would do.
// BPF_TEST() takes a C++ data type as an optional fourth parameter. If
// present, this sets up a variable that can be accessed as "BPF_AUX". This
// variable will be passed as an argument to the "policy" function. Policies
// would typically use it as an argument to SandboxBPF::Trap(), if they want to
// communicate data between the BPF_TEST() and a Trap() function.
#define BPF_TEST(test_case_name, test_name, policy, aux...) \
BPF_DEATH_TEST(test_case_name, test_name, DEATH_SUCCESS(), policy, aux)
// Assertions are handled exactly the same as with a normal SANDBOX_TEST()
#define BPF_ASSERT SANDBOX_ASSERT
// The "Aux" type is optional. We use an "empty" type by default, so that if
// the caller doesn't provide any type, all the BPF_AUX related data compiles
// to nothing.
template <class Aux = int[0]>
class BPFTests : public UnitTests {
public:
typedef Aux AuxType;
class TestArgs {
public:
TestArgs(void (*t)(AuxType&), sandbox::SandboxBPF::EvaluateSyscall p)
: test_(t), policy_(p), aux_() {}
void (*test() const)(AuxType&) { return test_; }
sandbox::SandboxBPF::EvaluateSyscall policy() const { return policy_; }
private:
friend class BPFTests;
void (*test_)(AuxType&);
sandbox::SandboxBPF::EvaluateSyscall policy_;
AuxType aux_;
};
static void TestWrapper(void* void_arg) {
TestArgs* arg = reinterpret_cast<TestArgs*>(void_arg);
sandbox::Die::EnableSimpleExit();
if (sandbox::SandboxBPF::SupportsSeccompSandbox(-1) ==
sandbox::SandboxBPF::STATUS_AVAILABLE) {
// Ensure that the sandbox is actually available at this time
int proc_fd;
BPF_ASSERT((proc_fd = open("/proc", O_RDONLY | O_DIRECTORY)) >= 0);
BPF_ASSERT(sandbox::SandboxBPF::SupportsSeccompSandbox(proc_fd) ==
sandbox::SandboxBPF::STATUS_AVAILABLE);
// Initialize and then start the sandbox with our custom policy
sandbox::SandboxBPF sandbox;
sandbox.set_proc_fd(proc_fd);
sandbox.SetSandboxPolicyDeprecated(arg->policy(), &arg->aux_);
BPF_ASSERT(sandbox.StartSandbox(
sandbox::SandboxBPF::PROCESS_SINGLE_THREADED));
arg->test()(arg->aux_);
} else {
printf("This BPF test is not fully running in this configuration!\n");
// Android and Valgrind are the only configurations where we accept not
// having kernel BPF support.
if (!IsAndroid() && !IsRunningOnValgrind()) {
const bool seccomp_bpf_is_supported = false;
BPF_ASSERT(seccomp_bpf_is_supported);
}
// Call the compiler and verify the policy. That's the least we can do,
// if we don't have kernel support.
sandbox::SandboxBPF sandbox;
sandbox.SetSandboxPolicyDeprecated(arg->policy(), &arg->aux_);
sandbox::SandboxBPF::Program* program =
sandbox.AssembleFilter(true /* force_verification */);
delete program;
sandbox::UnitTests::IgnoreThisTest();
}
}
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(BPFTests);
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
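// Illustrative sketch (editor's addition, not part of the imported header;
// the policy and test names are hypothetical, and the policy signature
// follows the SandboxBPF::EvaluateSyscall type used by TestArgs above).
//   ErrorCode AllowAllPolicy(SandboxBPF*, int sysno, void* aux) {
//     return ErrorCode(ErrorCode::ERR_ALLOWED);
//   }
//   BPF_TEST(SemanticsTest, AllowsGetpid, AllowAllPolicy) {
//     BPF_ASSERT(getpid() > 0);  // Runs in the sandboxed test process.
//   }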

sandbox/linux/seccomp-bpf/codegen.cc
@@ -0,0 +1,774 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdio.h>
#include "base/logging.h"
#include "sandbox/linux/seccomp-bpf/codegen.h"
namespace {
// Helper function for Traverse().
void TraverseRecursively(std::set<sandbox::Instruction*>* visited,
sandbox::Instruction* instruction) {
if (visited->find(instruction) == visited->end()) {
visited->insert(instruction);
switch (BPF_CLASS(instruction->code)) {
case BPF_JMP:
if (BPF_OP(instruction->code) != BPF_JA) {
TraverseRecursively(visited, instruction->jf_ptr);
}
TraverseRecursively(visited, instruction->jt_ptr);
break;
case BPF_RET:
break;
default:
TraverseRecursively(visited, instruction->next);
break;
}
}
}
} // namespace
namespace sandbox {
CodeGen::CodeGen() : compiled_(false) {}
CodeGen::~CodeGen() {
for (Instructions::iterator iter = instructions_.begin();
iter != instructions_.end();
++iter) {
delete *iter;
}
for (BasicBlocks::iterator iter = basic_blocks_.begin();
iter != basic_blocks_.end();
++iter) {
delete *iter;
}
}
void CodeGen::PrintProgram(const SandboxBPF::Program& program) {
for (SandboxBPF::Program::const_iterator iter = program.begin();
iter != program.end();
++iter) {
int ip = (int)(iter - program.begin());
fprintf(stderr, "%3d) ", ip);
switch (BPF_CLASS(iter->code)) {
case BPF_LD:
if (iter->code == BPF_LD + BPF_W + BPF_ABS) {
fprintf(stderr, "LOAD %d // ", (int)iter->k);
if (iter->k == offsetof(struct arch_seccomp_data, nr)) {
fprintf(stderr, "System call number\n");
} else if (iter->k == offsetof(struct arch_seccomp_data, arch)) {
fprintf(stderr, "Architecture\n");
} else if (iter->k ==
offsetof(struct arch_seccomp_data, instruction_pointer)) {
fprintf(stderr, "Instruction pointer (LSB)\n");
} else if (iter->k ==
offsetof(struct arch_seccomp_data, instruction_pointer) +
4) {
fprintf(stderr, "Instruction pointer (MSB)\n");
} else if (iter->k >= offsetof(struct arch_seccomp_data, args) &&
iter->k < offsetof(struct arch_seccomp_data, args) + 48 &&
(iter->k - offsetof(struct arch_seccomp_data, args)) % 4 ==
0) {
fprintf(
stderr,
"Argument %d (%cSB)\n",
(int)(iter->k - offsetof(struct arch_seccomp_data, args)) / 8,
(iter->k - offsetof(struct arch_seccomp_data, args)) % 8 ? 'M'
: 'L');
} else {
fprintf(stderr, "???\n");
}
} else {
fprintf(stderr, "LOAD ???\n");
}
break;
case BPF_JMP:
if (BPF_OP(iter->code) == BPF_JA) {
fprintf(stderr, "JMP %d\n", ip + iter->k + 1);
} else {
fprintf(stderr, "if A %s 0x%x; then JMP %d else JMP %d\n",
BPF_OP(iter->code) == BPF_JSET ? "&" :
BPF_OP(iter->code) == BPF_JEQ ? "==" :
BPF_OP(iter->code) == BPF_JGE ? ">=" :
BPF_OP(iter->code) == BPF_JGT ? ">" : "???",
(int)iter->k,
ip + iter->jt + 1, ip + iter->jf + 1);
}
break;
case BPF_RET:
fprintf(stderr, "RET 0x%x // ", iter->k);
if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP) {
fprintf(stderr, "Trap #%d\n", iter->k & SECCOMP_RET_DATA);
} else if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
fprintf(stderr, "errno = %d\n", iter->k & SECCOMP_RET_DATA);
} else if (iter->k == SECCOMP_RET_ALLOW) {
fprintf(stderr, "Allowed\n");
} else {
fprintf(stderr, "???\n");
}
break;
case BPF_ALU:
fprintf(stderr, BPF_OP(iter->code) == BPF_NEG
? "A := -A\n" : "A := A %s 0x%x\n",
BPF_OP(iter->code) == BPF_ADD ? "+" :
BPF_OP(iter->code) == BPF_SUB ? "-" :
BPF_OP(iter->code) == BPF_MUL ? "*" :
BPF_OP(iter->code) == BPF_DIV ? "/" :
BPF_OP(iter->code) == BPF_MOD ? "%" :
BPF_OP(iter->code) == BPF_OR ? "|" :
BPF_OP(iter->code) == BPF_XOR ? "^" :
BPF_OP(iter->code) == BPF_AND ? "&" :
BPF_OP(iter->code) == BPF_LSH ? "<<" :
BPF_OP(iter->code) == BPF_RSH ? ">>" : "???",
(int)iter->k);
break;
default:
fprintf(stderr, "???\n");
break;
}
}
return;
}
Instruction* CodeGen::MakeInstruction(uint16_t code,
uint32_t k,
Instruction* next) {
// We can handle non-jumping instructions and "always" jumps. Both of
// them are followed by exactly one "next" instruction.
// We allow callers to defer specifying "next", but then they must call
// "joinInstructions" later.
if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_JA) {
SANDBOX_DIE(
"Must provide both \"true\" and \"false\" branch "
"for a BPF_JMP");
}
if (next && BPF_CLASS(code) == BPF_RET) {
SANDBOX_DIE("Cannot append instructions after a return statement");
}
if (BPF_CLASS(code) == BPF_JMP) {
// "Always" jumps use the "true" branch target, only.
Instruction* insn = new Instruction(code, 0, next, NULL);
instructions_.push_back(insn);
return insn;
} else {
// Non-jumping instructions do not use any of the branch targets.
Instruction* insn = new Instruction(code, k, next);
instructions_.push_back(insn);
return insn;
}
}
Instruction* CodeGen::MakeInstruction(uint16_t code, const ErrorCode& err) {
if (BPF_CLASS(code) != BPF_RET) {
SANDBOX_DIE("ErrorCodes can only be used in return expressions");
}
if (err.error_type_ != ErrorCode::ET_SIMPLE &&
err.error_type_ != ErrorCode::ET_TRAP) {
SANDBOX_DIE("ErrorCode is not suitable for returning from a BPF program");
}
return MakeInstruction(code, err.err_);
}
Instruction* CodeGen::MakeInstruction(uint16_t code,
uint32_t k,
Instruction* jt,
Instruction* jf) {
// We can handle all conditional jumps. They are followed by both a
// "true" and a "false" branch.
if (BPF_CLASS(code) != BPF_JMP || BPF_OP(code) == BPF_JA) {
SANDBOX_DIE("Expected a BPF_JMP instruction");
}
if (!jt && !jf) {
// We allow callers to defer specifying exactly one of the branch
// targets. It must then be set later by calling "JoinInstructions".
SANDBOX_DIE("Branches must jump to a valid instruction");
}
Instruction* insn = new Instruction(code, k, jt, jf);
instructions_.push_back(insn);
return insn;
}
void CodeGen::JoinInstructions(Instruction* head, Instruction* tail) {
// Merge two instructions, or set the branch target for an "always" jump.
// This function should be called if the caller didn't initially provide
// a value for "next" when creating the instruction.
if (BPF_CLASS(head->code) == BPF_JMP) {
if (BPF_OP(head->code) == BPF_JA) {
if (head->jt_ptr) {
SANDBOX_DIE("Cannot append instructions in the middle of a sequence");
}
head->jt_ptr = tail;
} else {
if (!head->jt_ptr && head->jf_ptr) {
head->jt_ptr = tail;
} else if (!head->jf_ptr && head->jt_ptr) {
head->jf_ptr = tail;
} else {
SANDBOX_DIE("Cannot append instructions after a jump");
}
}
} else if (BPF_CLASS(head->code) == BPF_RET) {
SANDBOX_DIE("Cannot append instructions after a return statement");
} else if (head->next) {
SANDBOX_DIE("Cannot append instructions in the middle of a sequence");
} else {
head->next = tail;
}
return;
}
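// Illustrative sketch (editor's addition, not part of the imported file):
// assembling a minimal "allow everything" program with the two functions
// above. The load instruction defers its "next" pointer, which is then
// supplied through JoinInstructions().
//   CodeGen gen;
//   Instruction* ret = gen.MakeInstruction(BPF_RET + BPF_K, SECCOMP_RET_ALLOW);
//   Instruction* load = gen.MakeInstruction(
//       BPF_LD + BPF_W + BPF_ABS, offsetof(struct arch_seccomp_data, nr));
//   gen.JoinInstructions(load, ret);  // Program: LOAD nr; RET ALLOW.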
void CodeGen::Traverse(Instruction* instruction,
void (*fnc)(Instruction*, void*),
void* aux) {
std::set<Instruction*> visited;
TraverseRecursively(&visited, instruction);
for (std::set<Instruction*>::const_iterator iter = visited.begin();
iter != visited.end();
++iter) {
fnc(*iter, aux);
}
}
void CodeGen::FindBranchTargets(const Instruction& instructions,
BranchTargets* branch_targets) {
// Follow all possible paths through the "instructions" graph and compute
// a list of branch targets. This will later be needed to compute the
// boundaries of basic blocks.
// We maintain a set of all instructions that we have previously seen. This
// set ultimately converges on all instructions in the program.
std::set<const Instruction*> seen_instructions;
Instructions stack;
for (const Instruction* insn = &instructions; insn;) {
seen_instructions.insert(insn);
if (BPF_CLASS(insn->code) == BPF_JMP) {
// Found a jump. Increase count of incoming edges for each of the jump
// targets.
++(*branch_targets)[insn->jt_ptr];
if (BPF_OP(insn->code) != BPF_JA) {
++(*branch_targets)[insn->jf_ptr];
stack.push_back(const_cast<Instruction*>(insn));
}
// Start a recursive descent for depth-first traversal.
if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) {
// We haven't seen the "true" branch yet. Traverse it now. We have
// already remembered the "false" branch on the stack and will
// traverse it later.
insn = insn->jt_ptr;
continue;
} else {
// Now try traversing the "false" branch.
insn = NULL;
}
} else {
// This is a non-jump instruction, just continue to the next instruction
// (if any). It's OK if "insn" becomes NULL when reaching a return
// instruction.
if (!insn->next != (BPF_CLASS(insn->code) == BPF_RET)) {
SANDBOX_DIE(
"Internal compiler error; return instruction must be at "
"the end of the BPF program");
}
if (seen_instructions.find(insn->next) == seen_instructions.end()) {
insn = insn->next;
} else {
// We have seen this instruction before. That could happen if it is
// a branch target. No need to continue processing.
insn = NULL;
}
}
while (!insn && !stack.empty()) {
// We are done processing all the way to a leaf node, backtrack up the
// stack to any branches that we haven't processed yet. By definition,
// this has to be a "false" branch, as we always process the "true"
// branches right away.
insn = stack.back();
stack.pop_back();
if (seen_instructions.find(insn->jf_ptr) == seen_instructions.end()) {
// We haven't seen the "false" branch yet. So, that's where we'll
// go now.
insn = insn->jf_ptr;
} else {
// We have seen both the "true" and the "false" branch, continue
// up the stack.
if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) {
SANDBOX_DIE(
"Internal compiler error; cannot find all "
"branch targets");
}
insn = NULL;
}
}
}
return;
}
BasicBlock* CodeGen::MakeBasicBlock(Instruction* head, Instruction* tail) {
// Iterate over all the instructions between "head" and "tail" and
// insert them into a new basic block.
BasicBlock* bb = new BasicBlock;
for (;; head = head->next) {
bb->instructions.push_back(head);
if (head == tail) {
break;
}
if (BPF_CLASS(head->code) == BPF_JMP) {
SANDBOX_DIE("Found a jump inside of a basic block");
}
}
basic_blocks_.push_back(bb);
return bb;
}
void CodeGen::AddBasicBlock(Instruction* head,
Instruction* tail,
const BranchTargets& branch_targets,
TargetsToBlocks* basic_blocks,
BasicBlock** firstBlock) {
// Add a new basic block to "basic_blocks". Also set "firstBlock", if it
// has not been set before.
BranchTargets::const_iterator iter = branch_targets.find(head);
if ((iter == branch_targets.end()) != !*firstBlock ||
!*firstBlock != basic_blocks->empty()) {
SANDBOX_DIE(
"Only the very first basic block should have no "
"incoming jumps");
}
BasicBlock* bb = MakeBasicBlock(head, tail);
if (!*firstBlock) {
*firstBlock = bb;
}
(*basic_blocks)[head] = bb;
return;
}
BasicBlock* CodeGen::CutGraphIntoBasicBlocks(
Instruction* instructions,
const BranchTargets& branch_targets,
TargetsToBlocks* basic_blocks) {
// Textbook implementation of a basic block generator. All basic blocks
// start with a branch target and end with either a return statement or
// a jump (or are followed by an instruction that forms the beginning of a
// new block). Both conditional and "always" jumps are supported.
BasicBlock* first_block = NULL;
std::set<const Instruction*> seen_instructions;
Instructions stack;
Instruction* tail = NULL;
Instruction* head = instructions;
for (Instruction* insn = head; insn;) {
if (seen_instructions.find(insn) != seen_instructions.end()) {
// We somehow went in a circle. This should never be possible. Not even
// cyclic graphs are supposed to confuse us this much.
SANDBOX_DIE("Internal compiler error; cannot compute basic blocks");
}
seen_instructions.insert(insn);
if (tail && branch_targets.find(insn) != branch_targets.end()) {
// We reached a branch target. Start a new basic block (this means,
// flushing the previous basic block first).
AddBasicBlock(head, tail, branch_targets, basic_blocks, &first_block);
head = insn;
}
if (BPF_CLASS(insn->code) == BPF_JMP) {
// We reached a jump instruction, this completes our current basic
// block. Flush it and continue by traversing both the true and the
// false branch of the jump. We need to maintain a stack to do so.
AddBasicBlock(head, insn, branch_targets, basic_blocks, &first_block);
if (BPF_OP(insn->code) != BPF_JA) {
stack.push_back(insn->jf_ptr);
}
insn = insn->jt_ptr;
// If we are jumping to an instruction that we have previously
// processed, we are done with this branch. Continue by backtracking
// up the stack.
while (seen_instructions.find(insn) != seen_instructions.end()) {
backtracking:
if (stack.empty()) {
// We successfully traversed all reachable instructions.
return first_block;
} else {
// Going up the stack.
insn = stack.back();
stack.pop_back();
}
}
// Starting a new basic block.
tail = NULL;
head = insn;
} else {
// We found a non-jumping instruction, append it to current basic
// block.
tail = insn;
insn = insn->next;
if (!insn) {
// We reached a return statement, flush the current basic block and
// backtrack up the stack.
AddBasicBlock(head, tail, branch_targets, basic_blocks, &first_block);
goto backtracking;
}
}
}
return first_block;
}
// We define a comparator that inspects the sequence of instructions in our
// basic block and any blocks referenced by this block. This function can be
// used in a "less" comparator for the purpose of storing pointers to basic
// blocks in STL containers; this gives an easy option to use STL to find
// shared tail sequences of basic blocks.
static int PointerCompare(const BasicBlock* block1,
const BasicBlock* block2,
const TargetsToBlocks& blocks) {
// Return <0, 0, or >0 depending on the ordering of "block1" and "block2".
// If we are looking at the exact same block, this is trivial and we don't
// need to do a full comparison.
if (block1 == block2) {
return 0;
}
// We compare the sequence of instructions in both basic blocks.
const Instructions& insns1 = block1->instructions;
const Instructions& insns2 = block2->instructions;
// Basic blocks should never be empty.
CHECK(!insns1.empty());
CHECK(!insns2.empty());
Instructions::const_iterator iter1 = insns1.begin();
Instructions::const_iterator iter2 = insns2.begin();
for (;; ++iter1, ++iter2) {
// If we have reached the end of the sequence of instructions in one or
// both basic blocks, we know the relative ordering between the two blocks
// and can return.
if (iter1 == insns1.end()) {
if (iter2 == insns2.end()) {
// If the two blocks are the same length (and have elementwise-equal
// code and k fields, which is the only way we can reach this point),
// and the last instruction isn't a JMP or a RET, then we must compare
// their successors.
Instruction* const insns1_last = insns1.back();
Instruction* const insns2_last = insns2.back();
if (BPF_CLASS(insns1_last->code) != BPF_JMP &&
BPF_CLASS(insns1_last->code) != BPF_RET) {
// Non-jumping instructions will always have a valid next instruction.
CHECK(insns1_last->next);
CHECK(insns2_last->next);
return PointerCompare(blocks.find(insns1_last->next)->second,
blocks.find(insns2_last->next)->second,
blocks);
} else {
return 0;
}
}
return -1;
} else if (iter2 == insns2.end()) {
return 1;
}
// Compare the individual fields for both instructions.
const Instruction& insn1 = **iter1;
const Instruction& insn2 = **iter2;
if (insn1.code == insn2.code) {
if (insn1.k == insn2.k) {
// Only conditional jump instructions use the jt_ptr and jf_ptr
// fields.
if (BPF_CLASS(insn1.code) == BPF_JMP) {
if (BPF_OP(insn1.code) != BPF_JA) {
// Recursively compare the "true" and "false" branches.
// A well-formed BPF program can't have any cycles, so we know
// that our recursive algorithm will ultimately terminate.
// In the unlikely event that the programmer made a mistake and
// went out of the way to give us a cyclic program, we will crash
// with a stack overflow. We are OK with that.
int c = PointerCompare(blocks.find(insn1.jt_ptr)->second,
blocks.find(insn2.jt_ptr)->second,
blocks);
if (c == 0) {
c = PointerCompare(blocks.find(insn1.jf_ptr)->second,
blocks.find(insn2.jf_ptr)->second,
blocks);
if (c == 0) {
continue;
} else {
return c;
}
} else {
return c;
}
} else {
int c = PointerCompare(blocks.find(insn1.jt_ptr)->second,
blocks.find(insn2.jt_ptr)->second,
blocks);
if (c == 0) {
continue;
} else {
return c;
}
}
} else {
continue;
}
} else {
return insn1.k - insn2.k;
}
} else {
return insn1.code - insn2.code;
}
}
}
void CodeGen::MergeTails(TargetsToBlocks* blocks) {
// We enter all of our basic blocks into a set using the BasicBlock::Less()
// comparator. This naturally results in blocks with identical tails of
// instructions to map to the same entry in the set. Whenever we discover
// that a particular chain of instructions is already in the set, we merge
// the basic blocks and update the pointer in the "blocks" map.
// Returns the number of unique basic blocks.
// N.B. We don't merge instructions on a granularity that is finer than
// a basic block. In practice, this is sufficiently rare that we don't
// incur a big cost.
// Similarly, we currently don't merge anything other than tails. In
// the future, we might decide to revisit this decision and attempt to
// merge arbitrary sub-sequences of instructions.
BasicBlock::Less<TargetsToBlocks> less(*blocks, PointerCompare);
typedef std::set<BasicBlock*, BasicBlock::Less<TargetsToBlocks> > Set;
Set seen_basic_blocks(less);
for (TargetsToBlocks::iterator iter = blocks->begin(); iter != blocks->end();
++iter) {
BasicBlock* bb = iter->second;
Set::const_iterator entry = seen_basic_blocks.find(bb);
if (entry == seen_basic_blocks.end()) {
// This is the first time we see this particular sequence of
// instructions. Enter the basic block into the set of known
// basic blocks.
seen_basic_blocks.insert(bb);
} else {
// We have previously seen another basic block that defines the same
// sequence of instructions. Merge the two blocks and update the
// pointer in the "blocks" map.
iter->second = *entry;
}
}
}
void CodeGen::ComputeIncomingBranches(BasicBlock* block,
const TargetsToBlocks& targets_to_blocks,
IncomingBranches* incoming_branches) {
// We increment the number of incoming branches each time we encounter a
// basic block. But we only traverse recursively the very first time we
// encounter a new block. This is necessary to make topological sorting
// work correctly.
if (++(*incoming_branches)[block] == 1) {
Instruction* last_insn = block->instructions.back();
if (BPF_CLASS(last_insn->code) == BPF_JMP) {
ComputeIncomingBranches(targets_to_blocks.find(last_insn->jt_ptr)->second,
targets_to_blocks,
incoming_branches);
if (BPF_OP(last_insn->code) != BPF_JA) {
ComputeIncomingBranches(
targets_to_blocks.find(last_insn->jf_ptr)->second,
targets_to_blocks,
incoming_branches);
}
} else if (BPF_CLASS(last_insn->code) != BPF_RET) {
ComputeIncomingBranches(targets_to_blocks.find(last_insn->next)->second,
targets_to_blocks,
incoming_branches);
}
}
}
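// Editorial sketch: for a diamond-shaped graph
//
//        A
//       / \
//      B   C
//       \ /
//        D
//
// block D is encountered twice, so (*incoming_branches)[D] becomes 2, but
// its successors are only traversed on the first visit. These counts are
// what allows TopoSortBasicBlocks() to emit a block only after all of its
// predecessors have been emitted.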
void CodeGen::TopoSortBasicBlocks(BasicBlock* first_block,
const TargetsToBlocks& blocks,
BasicBlocks* basic_blocks) {
// Textbook implementation of a toposort. We keep looking for basic blocks
// that don't have any incoming branches (initially, this is just the
// "first_block") and add them to the topologically sorted list of
// "basic_blocks". As we do so, we remove outgoing branches. This potentially
// ends up making our descendants eligible for the sorted list. The
// sorting algorithm terminates when there are no more basic blocks that have
// no incoming branches. If we didn't move all blocks from the set of
// "unordered_blocks" to the sorted list of "basic_blocks", there must have
// been a cyclic dependency. This should never happen in a BPF program, as
// well-formed BPF programs only ever have forward branches.
IncomingBranches unordered_blocks;
ComputeIncomingBranches(first_block, blocks, &unordered_blocks);
std::set<BasicBlock*> heads;
for (;;) {
// Move block from "unordered_blocks" to "basic_blocks".
basic_blocks->push_back(first_block);
// Inspect last instruction in the basic block. This is typically either a
// jump or a return statement. But it could also be a "normal" instruction
// that is followed by a jump target.
Instruction* last_insn = first_block->instructions.back();
if (BPF_CLASS(last_insn->code) == BPF_JMP) {
// Remove outgoing branches. This might end up moving our descendants
// into set of "head" nodes that no longer have any incoming branches.
TargetsToBlocks::const_iterator iter;
if (BPF_OP(last_insn->code) != BPF_JA) {
iter = blocks.find(last_insn->jf_ptr);
if (!--unordered_blocks[iter->second]) {
heads.insert(iter->second);
}
}
iter = blocks.find(last_insn->jt_ptr);
if (!--unordered_blocks[iter->second]) {
first_block = iter->second;
continue;
}
} else if (BPF_CLASS(last_insn->code) != BPF_RET) {
// We encountered an instruction that doesn't change code flow. Try to
// pick the next "first_block" from "last_insn->next", if possible.
TargetsToBlocks::const_iterator iter;
iter = blocks.find(last_insn->next);
if (!--unordered_blocks[iter->second]) {
first_block = iter->second;
continue;
} else {
// Our basic block is supposed to be followed by "last_insn->next",
// but dependencies prevent this from happening. Insert a BPF_JA
// instruction to correct the code flow.
Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, last_insn->next);
first_block->instructions.push_back(ja);
last_insn->next = ja;
}
}
if (heads.empty()) {
if (unordered_blocks.size() != basic_blocks->size()) {
SANDBOX_DIE("Internal compiler error; cyclic graph detected");
}
return;
}
// Proceed by picking an arbitrary node from the set of basic blocks that
// do not have any incoming branches.
first_block = *heads.begin();
heads.erase(heads.begin());
}
}
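// Editorial sketch: with blocks A -> {B, C} and B -> C, the incoming-branch
// counts computed by ComputeIncomingBranches() are B:1 and C:2. The loop
// emits A, which drops B to 0 (so it is emitted next) and C to 1; emitting B
// then drops C to 0, yielding the order A, B, C with every jump pointing
// forward.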
void CodeGen::ComputeRelativeJumps(BasicBlocks* basic_blocks,
const TargetsToBlocks& targets_to_blocks) {
// While we previously used pointers in jt_ptr and jf_ptr to link jump
// instructions to their targets, we now convert these jumps to relative
// jumps that are suitable for loading the BPF program into the kernel.
int offset = 0;
// Since we just completed a toposort, all jump targets are guaranteed to
// go forward. This means, iterating over the basic blocks in reverse makes
// it trivial to compute the correct offsets.
BasicBlock* bb = NULL;
BasicBlock* last_bb = NULL;
for (BasicBlocks::reverse_iterator iter = basic_blocks->rbegin();
iter != basic_blocks->rend();
++iter) {
last_bb = bb;
bb = *iter;
Instruction* insn = bb->instructions.back();
if (BPF_CLASS(insn->code) == BPF_JMP) {
// Basic block ended in a jump instruction. We can now compute the
// appropriate offsets.
if (BPF_OP(insn->code) == BPF_JA) {
// "Always" jumps use the 32bit "k" field for the offset, instead
// of the 8bit "jt" and "jf" fields.
int jmp = offset - targets_to_blocks.find(insn->jt_ptr)->second->offset;
insn->k = jmp;
insn->jt = insn->jf = 0;
} else {
// The offset computations for conditional jumps are just the same
// as for "always" jumps.
int jt = offset - targets_to_blocks.find(insn->jt_ptr)->second->offset;
int jf = offset - targets_to_blocks.find(insn->jf_ptr)->second->offset;
// There is an added complication, because conditional relative jumps
// can only jump at most 255 instructions forward. If we have to jump
// further, insert an extra "always" jump.
Instructions::size_type jmp = bb->instructions.size();
if (jt > 255 || (jt == 255 && jf > 255)) {
Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, insn->jt_ptr);
bb->instructions.push_back(ja);
ja->k = jt;
ja->jt = ja->jf = 0;
// The newly inserted "always" jump, of course, requires us to adjust
// the jump targets in the original conditional jump.
jt = 0;
++jf;
}
if (jf > 255) {
Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, insn->jf_ptr);
bb->instructions.insert(bb->instructions.begin() + jmp, ja);
ja->k = jf;
ja->jt = ja->jf = 0;
// Again, we have to adjust the jump targets in the original
// conditional jump.
++jt;
jf = 0;
}
// Now we can finally set the relative jump targets in the conditional
// jump instruction. Afterwards, we must no longer access the jt_ptr
// and jf_ptr fields.
insn->jt = jt;
insn->jf = jf;
}
} else if (BPF_CLASS(insn->code) != BPF_RET &&
targets_to_blocks.find(insn->next)->second != last_bb) {
SANDBOX_DIE("Internal compiler error; invalid basic block encountered");
}
// Proceed to next basic block.
offset += bb->instructions.size();
bb->offset = offset;
}
return;
}
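// Worked example (editorial, not from the original source): suppose a
// conditional jump's "true" target lies 300 instructions ahead. As "jt" is
// only eight bits wide, the code above appends
//
//   ja = MakeInstruction(BPF_JMP + BPF_JA, 0, insn->jt_ptr);  // ja->k = 300
//
// to the block, rewrites the conditional jump to jt = 0 so that it merely
// falls through to the new BPF_JA instruction, and increments jf by one to
// account for the extra instruction.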
void CodeGen::ConcatenateBasicBlocks(const BasicBlocks& basic_blocks,
SandboxBPF::Program* program) {
// Our basic blocks have been sorted and relative jump offsets have been
// computed. The last remaining step is for all the instructions in our
// basic blocks to be concatenated into a BPF program.
program->clear();
for (BasicBlocks::const_iterator bb_iter = basic_blocks.begin();
bb_iter != basic_blocks.end();
++bb_iter) {
const BasicBlock& bb = **bb_iter;
for (Instructions::const_iterator insn_iter = bb.instructions.begin();
insn_iter != bb.instructions.end();
++insn_iter) {
const Instruction& insn = **insn_iter;
program->push_back(
(struct sock_filter) {insn.code, insn.jt, insn.jf, insn.k});
}
}
return;
}
void CodeGen::Compile(Instruction* instructions, SandboxBPF::Program* program) {
if (compiled_) {
SANDBOX_DIE(
"Cannot call Compile() multiple times. Create a new code "
"generator instead");
}
compiled_ = true;
BranchTargets branch_targets;
FindBranchTargets(*instructions, &branch_targets);
TargetsToBlocks all_blocks;
BasicBlock* first_block =
CutGraphIntoBasicBlocks(instructions, branch_targets, &all_blocks);
MergeTails(&all_blocks);
BasicBlocks basic_blocks;
TopoSortBasicBlocks(first_block, all_blocks, &basic_blocks);
ComputeRelativeJumps(&basic_blocks, all_blocks);
ConcatenateBasicBlocks(basic_blocks, program);
return;
}
} // namespace sandbox


@ -0,0 +1,160 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_CODEGEN_H__
#define SANDBOX_LINUX_SECCOMP_BPF_CODEGEN_H__
#include <map>
#include <set>
#include <vector>
#include "sandbox/linux/sandbox_export.h"
#include "sandbox/linux/seccomp-bpf/basicblock.h"
#include "sandbox/linux/seccomp-bpf/instruction.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
namespace sandbox {
typedef std::vector<Instruction*> Instructions;
typedef std::vector<BasicBlock*> BasicBlocks;
typedef std::map<const Instruction*, int> BranchTargets;
typedef std::map<const Instruction*, BasicBlock*> TargetsToBlocks;
typedef std::map<const BasicBlock*, int> IncomingBranches;
// The code generator instantiates a basic compiler that can convert a
// graph of BPF instructions into a well-formed stream of BPF instructions.
// Most notably, it ensures that jumps are always forward and don't exceed
// the limit of 255 instructions imposed by the instruction set.
//
// Callers would typically create a new CodeGen object and then use it to
// build a DAG of Instructions. They'll eventually call Compile() to convert
// this DAG to a SandboxBPF::Program.
//
// Instructions can be chained at the time when they are created, or they
// can be joined later by calling JoinInstructions().
//
// CodeGen gen;
// Instruction *dag, *branch;
// dag =
// gen.MakeInstruction(BPF_LD+BPF_W+BPF_ABS,
// offsetof(struct arch_seccomp_data, nr),
// branch =
//     gen.MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, __NR_getpid,
//                         gen.MakeInstruction(BPF_RET+BPF_K,
//                                             Trap(GetPidHandler, NULL)),
//                         NULL));
// gen.JoinInstructions(branch,
// gen.MakeInstruction(BPF_RET+BPF_K, ErrorCode(ErrorCode::ERR_ALLOWED)));
//
// // Simplified code follows; in practice, it is important to avoid calling
// // any C++ destructors after starting the sandbox.
// SandboxBPF::Program program;
//   gen.Compile(dag, &program);
//   const struct sock_fprog prog = {
//     static_cast<unsigned short>(program.size()), &program[0] };
// prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
//
class SANDBOX_EXPORT CodeGen {
public:
CodeGen();
~CodeGen();
// This is a helper method that can be used for debugging purposes. It is
// not normally called.
static void PrintProgram(const SandboxBPF::Program& program);
// Create a new instruction. Instructions form a DAG. The instruction objects
// are owned by the CodeGen object. They do not need to be explicitly
// deleted.
// For details on the possible parameters refer to <linux/filter.h>
Instruction* MakeInstruction(uint16_t code,
uint32_t k,
Instruction* next = NULL);
Instruction* MakeInstruction(uint16_t code, const ErrorCode& err);
Instruction* MakeInstruction(uint16_t code,
uint32_t k,
Instruction* jt,
Instruction* jf);
// Join two (sequences of) instructions. This is useful, if the "next"
// parameter had not originally been given in the call to MakeInstruction(),
// or if a (conditional) jump still has an unsatisfied target.
void JoinInstructions(Instruction* head, Instruction* tail);
// Traverse the graph of instructions and visit each instruction once.
// Traversal order is implementation-defined. It is acceptable to make
// changes to the graph from within the callback function. These changes
// do not affect traversal.
// The "fnc" function gets called with both the instruction and the opaque
// "aux" pointer.
void Traverse(Instruction*, void (*fnc)(Instruction*, void* aux), void* aux);
// Compiles the graph of instructions into a BPF program that can be passed
// to the kernel. Please note that this function modifies the graph in place
// and must therefore only be called once per graph.
void Compile(Instruction* instructions, SandboxBPF::Program* program);
private:
friend class CodeGenUnittestHelper;
// Find all the instructions that are the target of BPF_JMPs.
void FindBranchTargets(const Instruction& instructions,
BranchTargets* branch_targets);
// Combine instructions between "head" and "tail" into a new basic block.
// Basic blocks are defined as sequences of instructions whose only branch
// target is the very first instruction; furthermore, any BPF_JMP or BPF_RET
// instruction must be at the very end of the basic block.
BasicBlock* MakeBasicBlock(Instruction* head, Instruction* tail);
// Creates a basic block and adds it to "basic_blocks"; sets "first_block"
// if it is still NULL.
void AddBasicBlock(Instruction* head,
Instruction* tail,
const BranchTargets& branch_targets,
TargetsToBlocks* basic_blocks,
BasicBlock** first_block);
// Cuts the DAG of instructions into basic blocks.
BasicBlock* CutGraphIntoBasicBlocks(Instruction* instructions,
const BranchTargets& branch_targets,
TargetsToBlocks* blocks);
// Find common tail sequences of basic blocks and coalesce them.
void MergeTails(TargetsToBlocks* blocks);
// For each basic block, compute the number of incoming branches.
void ComputeIncomingBranches(BasicBlock* block,
const TargetsToBlocks& targets_to_blocks,
IncomingBranches* incoming_branches);
// Topologically sort the basic blocks so that all jumps are forward jumps.
// This is a requirement for any well-formed BPF program.
void TopoSortBasicBlocks(BasicBlock* first_block,
const TargetsToBlocks& blocks,
BasicBlocks* basic_blocks);
  // Convert the jt_ptr and jf_ptr fields in BPF_JMP instructions to valid
  // jt and jf jump offsets. This can result in BPF_JA instructions being
  // inserted, if we need to jump over more than 255 instructions.
void ComputeRelativeJumps(BasicBlocks* basic_blocks,
const TargetsToBlocks& targets_to_blocks);
// Concatenate instructions from all basic blocks into a BPF program that
// can be passed to the kernel.
void ConcatenateBasicBlocks(const BasicBlocks&, SandboxBPF::Program* program);
// We stick all instructions and basic blocks into pools that get destroyed
// when the CodeGen object is destroyed. This way, we neither need to worry
// about explicitly managing ownership, nor do we need to worry about using
// smart pointers in the presence of circular references.
Instructions instructions_;
BasicBlocks basic_blocks_;
// Compile() must only ever be called once as it makes destructive changes
// to the DAG.
bool compiled_;
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_CODEGEN_H__


@ -0,0 +1,538 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <errno.h>
#include <algorithm>
#include <set>
#include <vector>
#include "sandbox/linux/seccomp-bpf/codegen.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/tests/unit_tests.h"
namespace sandbox {
class SandboxUnittestHelper : public SandboxBPF {
public:
typedef SandboxBPF::Program Program;
};
// We want to access some of the private methods in the code generator. We
// do so by defining a "friend" that makes these methods public for us.
class CodeGenUnittestHelper : public CodeGen {
public:
void FindBranchTargets(const Instruction& instructions,
BranchTargets* branch_targets) {
CodeGen::FindBranchTargets(instructions, branch_targets);
}
BasicBlock* CutGraphIntoBasicBlocks(Instruction* insns,
const BranchTargets& branch_targets,
TargetsToBlocks* blocks) {
return CodeGen::CutGraphIntoBasicBlocks(insns, branch_targets, blocks);
}
void MergeTails(TargetsToBlocks* blocks) { CodeGen::MergeTails(blocks); }
};
enum { NO_FLAGS = 0x0000, HAS_MERGEABLE_TAILS = 0x0001, };
Instruction* SampleProgramOneInstruction(CodeGen* codegen, int* flags) {
// Create the most basic valid BPF program:
// RET ERR_ALLOWED
*flags = NO_FLAGS;
return codegen->MakeInstruction(BPF_RET + BPF_K,
ErrorCode(ErrorCode::ERR_ALLOWED));
}
Instruction* SampleProgramSimpleBranch(CodeGen* codegen, int* flags) {
// Create a program with a single branch:
// JUMP if eq 42 then $0 else $1
// 0: RET EPERM
// 1: RET ERR_ALLOWED
*flags = NO_FLAGS;
return codegen->MakeInstruction(
BPF_JMP + BPF_JEQ + BPF_K,
42,
codegen->MakeInstruction(BPF_RET + BPF_K, ErrorCode(EPERM)),
codegen->MakeInstruction(BPF_RET + BPF_K,
ErrorCode(ErrorCode::ERR_ALLOWED)));
}
Instruction* SampleProgramAtypicalBranch(CodeGen* codegen, int* flags) {
// Create a program with a single branch:
// JUMP if eq 42 then $0 else $0
// 0: RET ERR_ALLOWED
  // N.B.: As the instructions on both sides of the branch are already
  // the same object, we do not actually have any "mergeable" branches.
// This needs to be reflected in our choice of "flags".
*flags = NO_FLAGS;
Instruction* ret = codegen->MakeInstruction(
BPF_RET + BPF_K, ErrorCode(ErrorCode::ERR_ALLOWED));
return codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42, ret, ret);
}
Instruction* SampleProgramComplex(CodeGen* codegen, int* flags) {
// Creates a basic BPF program that we'll use to test some of the code:
  //    JUMP if eq 42 then $0 else $1     (insn6)
// 0: LD 23 (insn5)
// 1: JUMP if eq 42 then $2 else $4 (insn4)
// 2: JUMP to $3 (insn1)
// 3: LD 42 (insn0)
// RET ErrorCode(42) (insn2)
// 4: LD 42 (insn3)
// RET ErrorCode(42) (insn3+)
*flags = HAS_MERGEABLE_TAILS;
Instruction* insn0 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 42);
SANDBOX_ASSERT(insn0);
SANDBOX_ASSERT(insn0->code == BPF_LD + BPF_W + BPF_ABS);
SANDBOX_ASSERT(insn0->k == 42);
SANDBOX_ASSERT(insn0->next == NULL);
Instruction* insn1 = codegen->MakeInstruction(BPF_JMP + BPF_JA, 0, insn0);
SANDBOX_ASSERT(insn1);
SANDBOX_ASSERT(insn1->code == BPF_JMP + BPF_JA);
SANDBOX_ASSERT(insn1->jt_ptr == insn0);
Instruction* insn2 = codegen->MakeInstruction(BPF_RET + BPF_K, ErrorCode(42));
SANDBOX_ASSERT(insn2);
SANDBOX_ASSERT(insn2->code == BPF_RET + BPF_K);
SANDBOX_ASSERT(insn2->next == NULL);
// We explicitly duplicate instructions so that MergeTails() can coalesce
// them later.
Instruction* insn3 = codegen->MakeInstruction(
BPF_LD + BPF_W + BPF_ABS,
42,
codegen->MakeInstruction(BPF_RET + BPF_K, ErrorCode(42)));
Instruction* insn4 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42, insn1, insn3);
SANDBOX_ASSERT(insn4);
SANDBOX_ASSERT(insn4->code == BPF_JMP + BPF_JEQ + BPF_K);
SANDBOX_ASSERT(insn4->k == 42);
SANDBOX_ASSERT(insn4->jt_ptr == insn1);
SANDBOX_ASSERT(insn4->jf_ptr == insn3);
codegen->JoinInstructions(insn0, insn2);
SANDBOX_ASSERT(insn0->next == insn2);
Instruction* insn5 =
codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 23, insn4);
SANDBOX_ASSERT(insn5);
SANDBOX_ASSERT(insn5->code == BPF_LD + BPF_W + BPF_ABS);
SANDBOX_ASSERT(insn5->k == 23);
SANDBOX_ASSERT(insn5->next == insn4);
// Force a basic block that ends in neither a jump instruction nor a return
// instruction. It only contains "insn5". This exercises one of the less
// common code paths in the topo-sort algorithm.
// This also gives us a diamond-shaped pattern in our graph, which stresses
// another aspect of the topo-sort algorithm (namely, the ability to
// correctly count the incoming branches for subtrees that are not disjunct).
Instruction* insn6 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42, insn5, insn4);
return insn6;
}
Instruction* SampleProgramConfusingTails(CodeGen* codegen, int* flags) {
// This simple program demonstrates https://crbug.com/351103/
// The two "LOAD 0" instructions are blocks of their own. MergeTails() could
// be tempted to merge them since they are the same. However, they are
// not mergeable because they fall-through to non semantically equivalent
// blocks.
// Without the fix for this bug, this program should trigger the check in
// CompileAndCompare: the serialized graphs from the program and its compiled
// version will differ.
//
// 0) LOAD 1 // ???
// 1) if A == 0x1; then JMP 2 else JMP 3
// 2) LOAD 0 // System call number
// 3) if A == 0x2; then JMP 4 else JMP 5
// 4) LOAD 0 // System call number
// 5) if A == 0x1; then JMP 6 else JMP 7
// 6) RET 0x50000 // errno = 0
// 7) RET 0x50001 // errno = 1
*flags = NO_FLAGS;
Instruction* i7 = codegen->MakeInstruction(BPF_RET, ErrorCode(1));
Instruction* i6 = codegen->MakeInstruction(BPF_RET, ErrorCode(0));
Instruction* i5 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i6, i7);
Instruction* i4 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i5);
Instruction* i3 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 2, i4, i5);
Instruction* i2 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i3);
Instruction* i1 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i2, i3);
Instruction* i0 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 1, i1);
return i0;
}
Instruction* SampleProgramConfusingTailsBasic(CodeGen* codegen, int* flags) {
// Without the fix for https://crbug.com/351103/, (see
// SampleProgramConfusingTails()), this would generate a cyclic graph and
// crash as the two "LOAD 0" instructions would get merged.
//
// 0) LOAD 1 // ???
// 1) if A == 0x1; then JMP 2 else JMP 3
// 2) LOAD 0 // System call number
// 3) if A == 0x2; then JMP 4 else JMP 5
// 4) LOAD 0 // System call number
// 5) RET 0x50001 // errno = 1
*flags = NO_FLAGS;
Instruction* i5 = codegen->MakeInstruction(BPF_RET, ErrorCode(1));
Instruction* i4 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i5);
Instruction* i3 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 2, i4, i5);
Instruction* i2 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i3);
Instruction* i1 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i2, i3);
Instruction* i0 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 1, i1);
return i0;
}
Instruction* SampleProgramConfusingTailsMergeable(CodeGen* codegen,
int* flags) {
// This is similar to SampleProgramConfusingTails(), except that
// instructions 2 and 4 are now RET instructions.
// In PointerCompare(), this exercises the path where two blocks are of the
// same length and identical and the last instruction is a JMP or RET, so the
// following blocks don't need to be looked at and the blocks are mergeable.
//
// 0) LOAD 1 // ???
// 1) if A == 0x1; then JMP 2 else JMP 3
// 2) RET 0x5002a // errno = 42
// 3) if A == 0x2; then JMP 4 else JMP 5
// 4) RET 0x5002a // errno = 42
// 5) if A == 0x1; then JMP 6 else JMP 7
// 6) RET 0x50000 // errno = 0
// 7) RET 0x50001 // errno = 1
*flags = HAS_MERGEABLE_TAILS;
Instruction* i7 = codegen->MakeInstruction(BPF_RET, ErrorCode(1));
Instruction* i6 = codegen->MakeInstruction(BPF_RET, ErrorCode(0));
Instruction* i5 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i6, i7);
Instruction* i4 = codegen->MakeInstruction(BPF_RET, ErrorCode(42));
Instruction* i3 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 2, i4, i5);
Instruction* i2 = codegen->MakeInstruction(BPF_RET, ErrorCode(42));
Instruction* i1 =
codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i2, i3);
Instruction* i0 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 1, i1);
return i0;
}
void ForAllPrograms(void (*test)(CodeGenUnittestHelper*, Instruction*, int)) {
Instruction* (*function_table[])(CodeGen* codegen, int* flags) = {
SampleProgramOneInstruction,
SampleProgramSimpleBranch,
SampleProgramAtypicalBranch,
SampleProgramComplex,
SampleProgramConfusingTails,
SampleProgramConfusingTailsBasic,
SampleProgramConfusingTailsMergeable,
};
for (size_t i = 0; i < arraysize(function_table); ++i) {
CodeGenUnittestHelper codegen;
int flags = NO_FLAGS;
Instruction *prg = function_table[i](&codegen, &flags);
test(&codegen, prg, flags);
}
}
void MakeInstruction(CodeGenUnittestHelper* codegen,
Instruction* program, int) {
// Nothing to do here
}
SANDBOX_TEST(CodeGen, MakeInstruction) {
ForAllPrograms(MakeInstruction);
}
void FindBranchTargets(CodeGenUnittestHelper* codegen, Instruction* prg, int) {
BranchTargets branch_targets;
codegen->FindBranchTargets(*prg, &branch_targets);
  // Verify the general properties that should be true for every
  // well-formed BPF program.
  // Perform a depth-first traversal of the BPF program and verify that all
  // targets of BPF_JMP instructions are represented in the "branch_targets".
// At the same time, compute a set of both the branch targets and all the
// instructions in the program.
std::vector<Instruction*> stack;
std::set<Instruction*> all_instructions;
std::set<Instruction*> target_instructions;
BranchTargets::const_iterator end = branch_targets.end();
for (Instruction* insn = prg;;) {
all_instructions.insert(insn);
if (BPF_CLASS(insn->code) == BPF_JMP) {
target_instructions.insert(insn->jt_ptr);
SANDBOX_ASSERT(insn->jt_ptr != NULL);
SANDBOX_ASSERT(branch_targets.find(insn->jt_ptr) != end);
if (BPF_OP(insn->code) != BPF_JA) {
target_instructions.insert(insn->jf_ptr);
SANDBOX_ASSERT(insn->jf_ptr != NULL);
SANDBOX_ASSERT(branch_targets.find(insn->jf_ptr) != end);
stack.push_back(insn->jf_ptr);
}
insn = insn->jt_ptr;
} else if (BPF_CLASS(insn->code) == BPF_RET) {
SANDBOX_ASSERT(insn->next == NULL);
if (stack.empty()) {
break;
}
insn = stack.back();
stack.pop_back();
} else {
SANDBOX_ASSERT(insn->next != NULL);
insn = insn->next;
}
}
SANDBOX_ASSERT(target_instructions.size() == branch_targets.size());
  // We can now subtract the set of branch targets from the set of all
  // instructions. This gives us a set of the instructions that nobody
  // ever jumps to. Verify that they are not included in the
  // "branch_targets" that FindBranchTargets() computed for us.
Instructions non_target_instructions(all_instructions.size() -
target_instructions.size());
set_difference(all_instructions.begin(),
all_instructions.end(),
target_instructions.begin(),
target_instructions.end(),
non_target_instructions.begin());
for (Instructions::const_iterator iter = non_target_instructions.begin();
iter != non_target_instructions.end();
++iter) {
SANDBOX_ASSERT(branch_targets.find(*iter) == end);
}
}
SANDBOX_TEST(CodeGen, FindBranchTargets) { ForAllPrograms(FindBranchTargets); }
void CutGraphIntoBasicBlocks(CodeGenUnittestHelper* codegen,
Instruction* prg,
int) {
BranchTargets branch_targets;
codegen->FindBranchTargets(*prg, &branch_targets);
TargetsToBlocks all_blocks;
BasicBlock* first_block =
codegen->CutGraphIntoBasicBlocks(prg, branch_targets, &all_blocks);
SANDBOX_ASSERT(first_block != NULL);
SANDBOX_ASSERT(first_block->instructions.size() > 0);
Instruction* first_insn = first_block->instructions[0];
  // Basic blocks are supposed to start with a branch target and end with
  // either a jump or a return instruction. A block can also end if the next
  // instruction forms the beginning of a new basic block. There should be
  // no other jumps or return instructions in the middle of a basic block.
for (TargetsToBlocks::const_iterator bb_iter = all_blocks.begin();
bb_iter != all_blocks.end();
++bb_iter) {
BasicBlock* bb = bb_iter->second;
SANDBOX_ASSERT(bb != NULL);
SANDBOX_ASSERT(bb->instructions.size() > 0);
Instruction* insn = bb->instructions[0];
SANDBOX_ASSERT(insn == first_insn ||
branch_targets.find(insn) != branch_targets.end());
for (Instructions::const_iterator insn_iter = bb->instructions.begin();;) {
insn = *insn_iter;
if (++insn_iter != bb->instructions.end()) {
SANDBOX_ASSERT(BPF_CLASS(insn->code) != BPF_JMP);
SANDBOX_ASSERT(BPF_CLASS(insn->code) != BPF_RET);
} else {
SANDBOX_ASSERT(BPF_CLASS(insn->code) == BPF_JMP ||
BPF_CLASS(insn->code) == BPF_RET ||
branch_targets.find(insn->next) != branch_targets.end());
break;
}
SANDBOX_ASSERT(branch_targets.find(*insn_iter) == branch_targets.end());
}
}
}
SANDBOX_TEST(CodeGen, CutGraphIntoBasicBlocks) {
ForAllPrograms(CutGraphIntoBasicBlocks);
}
void MergeTails(CodeGenUnittestHelper* codegen, Instruction* prg, int flags) {
BranchTargets branch_targets;
codegen->FindBranchTargets(*prg, &branch_targets);
TargetsToBlocks all_blocks;
BasicBlock* first_block =
codegen->CutGraphIntoBasicBlocks(prg, branch_targets, &all_blocks);
// The shape of our graph and thus the function of our program should
// still be unchanged after we run MergeTails(). We verify this by
// serializing the graph and verifying that it is still the same.
// We also verify that at least some of the edges changed because of
// tail merging.
std::string graph[2];
std::string edges[2];
// The loop executes twice. After the first run, we call MergeTails() on
// our graph.
for (int i = 0;;) {
// Traverse the entire program in depth-first order.
std::vector<BasicBlock*> stack;
for (BasicBlock* bb = first_block;;) {
      // Serialize the instructions in this basic block. In general, we only
      // need to serialize "code" and "k", except for BPF_JA instructions,
      // where "k" isn't set yet.
      // The stream of instructions should be unchanged after MergeTails().
for (Instructions::const_iterator iter = bb->instructions.begin();
iter != bb->instructions.end();
++iter) {
graph[i].append(reinterpret_cast<char*>(&(*iter)->code),
sizeof((*iter)->code));
if (BPF_CLASS((*iter)->code) != BPF_JMP ||
BPF_OP((*iter)->code) != BPF_JA) {
graph[i].append(reinterpret_cast<char*>(&(*iter)->k),
sizeof((*iter)->k));
}
}
      // Also serialize the addresses of the basic blocks as we encounter
      // them. These will change as basic blocks are coalesced by MergeTails().
edges[i].append(reinterpret_cast<char*>(&bb), sizeof(bb));
// Depth-first traversal of the graph. We only ever need to look at the
// very last instruction in the basic block, as that is the only one that
// can change code flow.
Instruction* insn = bb->instructions.back();
if (BPF_CLASS(insn->code) == BPF_JMP) {
// For jump instructions, we need to remember the "false" branch while
// traversing the "true" branch. This is not necessary for BPF_JA which
// only has a single branch.
if (BPF_OP(insn->code) != BPF_JA) {
stack.push_back(all_blocks[insn->jf_ptr]);
}
bb = all_blocks[insn->jt_ptr];
} else if (BPF_CLASS(insn->code) == BPF_RET) {
// After a BPF_RET, see if we need to back track.
if (stack.empty()) {
break;
}
bb = stack.back();
stack.pop_back();
} else {
// For "normal" instructions, just follow to the next basic block.
bb = all_blocks[insn->next];
}
}
// Our loop runs exactly two times.
if (++i > 1) {
break;
}
codegen->MergeTails(&all_blocks);
}
SANDBOX_ASSERT(graph[0] == graph[1]);
if (flags & HAS_MERGEABLE_TAILS) {
SANDBOX_ASSERT(edges[0] != edges[1]);
} else {
SANDBOX_ASSERT(edges[0] == edges[1]);
}
}
SANDBOX_TEST(CodeGen, MergeTails) {
ForAllPrograms(MergeTails);
}
void CompileAndCompare(CodeGenUnittestHelper* codegen, Instruction* prg, int) {
  // TopoSortBasicBlocks() has internal checks that cause it to fail if it
  // detects a problem. Typically, if anything goes wrong, this looks to the
  // TopoSort algorithm as if there had been cycles in the input data.
  // This provides a pretty good unittest.
  // We hand-crafted the programs returned by the SampleProgram*() functions
  // to exercise several of the more interesting code-paths. See comments in
  // those functions for details.
// In addition to relying on the internal consistency checks in the compiler,
// we also serialize the graph and the resulting BPF program and compare
// them. With the exception of BPF_JA instructions that might have been
// inserted, both instruction streams should be equivalent.
// As Compile() modifies the instructions, we have to serialize the graph
// before calling Compile().
std::string source;
Instructions source_stack;
for (const Instruction* insn = prg, *next; insn; insn = next) {
if (BPF_CLASS(insn->code) == BPF_JMP) {
if (BPF_OP(insn->code) == BPF_JA) {
// Do not serialize BPF_JA instructions (see above).
next = insn->jt_ptr;
continue;
} else {
source_stack.push_back(insn->jf_ptr);
next = insn->jt_ptr;
}
} else if (BPF_CLASS(insn->code) == BPF_RET) {
if (source_stack.empty()) {
next = NULL;
} else {
next = source_stack.back();
source_stack.pop_back();
}
} else {
next = insn->next;
}
// Only serialize "code" and "k". That's all the information we need to
// compare. The rest of the information is encoded in the order of
// instructions.
source.append(reinterpret_cast<const char*>(&insn->code),
sizeof(insn->code));
source.append(reinterpret_cast<const char*>(&insn->k), sizeof(insn->k));
}
// Compile the program
SandboxUnittestHelper::Program bpf;
codegen->Compile(prg, &bpf);
// Serialize the resulting BPF instructions.
std::string assembly;
std::vector<int> assembly_stack;
for (int idx = 0; idx >= 0;) {
SANDBOX_ASSERT(idx < (int)bpf.size());
struct sock_filter& insn = bpf[idx];
if (BPF_CLASS(insn.code) == BPF_JMP) {
if (BPF_OP(insn.code) == BPF_JA) {
// Do not serialize BPF_JA instructions (see above).
idx += insn.k + 1;
continue;
} else {
assembly_stack.push_back(idx + insn.jf + 1);
idx += insn.jt + 1;
}
} else if (BPF_CLASS(insn.code) == BPF_RET) {
if (assembly_stack.empty()) {
idx = -1;
} else {
idx = assembly_stack.back();
assembly_stack.pop_back();
}
} else {
++idx;
}
// Serialize the same information that we serialized before compilation.
assembly.append(reinterpret_cast<char*>(&insn.code), sizeof(insn.code));
assembly.append(reinterpret_cast<char*>(&insn.k), sizeof(insn.k));
}
SANDBOX_ASSERT(source == assembly);
}
SANDBOX_TEST(CodeGen, All) {
ForAllPrograms(CompileAndCompare);
}
} // namespace sandbox


@ -0,0 +1,529 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <errno.h>
#include <fcntl.h>
#include <linux/unistd.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <pthread.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include "base/posix/eintr_wrapper.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/services/linux_syscalls.h"
using sandbox::ErrorCode;
using sandbox::SandboxBPF;
using sandbox::arch_seccomp_data;
#define ERR EPERM
// We don't expect our sandbox to do anything useful yet. So, we will fail
// almost immediately. For now, force the code to continue running. The
// following line should be removed as soon as the sandbox is starting to
// actually enforce restrictions in a meaningful way:
#define _exit(x) do { } while (0)
namespace {
bool SendFds(int transport, const void *buf, size_t len, ...) {
int count = 0;
va_list ap;
va_start(ap, len);
while (va_arg(ap, int) >= 0) {
++count;
}
va_end(ap);
if (!count) {
return false;
}
char cmsg_buf[CMSG_SPACE(count*sizeof(int))];
memset(cmsg_buf, 0, sizeof(cmsg_buf));
struct iovec iov[2] = { { 0 } };
struct msghdr msg = { 0 };
int dummy = 0;
iov[0].iov_base = &dummy;
iov[0].iov_len = sizeof(dummy);
if (buf && len > 0) {
iov[1].iov_base = const_cast<void *>(buf);
iov[1].iov_len = len;
}
msg.msg_iov = iov;
msg.msg_iovlen = (buf && len > 0) ? 2 : 1;
msg.msg_control = cmsg_buf;
msg.msg_controllen = CMSG_LEN(count*sizeof(int));
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(count*sizeof(int));
  va_start(ap, len);
  for (int i = 0, fd; (fd = va_arg(ap, int)) >= 0; ++i) {
    (reinterpret_cast<int *>(CMSG_DATA(cmsg)))[i] = fd;
  }
  va_end(ap);
return sendmsg(transport, &msg, 0) ==
static_cast<ssize_t>(sizeof(dummy) + ((buf && len > 0) ? len : 0));
}
bool GetFds(int transport, void *buf, size_t *len, ...) {
int count = 0;
va_list ap;
va_start(ap, len);
for (int *fd; (fd = va_arg(ap, int *)) != NULL; ++count) {
*fd = -1;
}
va_end(ap);
if (!count) {
return false;
}
char cmsg_buf[CMSG_SPACE(count*sizeof(int))];
memset(cmsg_buf, 0, sizeof(cmsg_buf));
struct iovec iov[2] = { { 0 } };
struct msghdr msg = { 0 };
int err;
iov[0].iov_base = &err;
iov[0].iov_len = sizeof(int);
if (buf && len && *len > 0) {
iov[1].iov_base = buf;
iov[1].iov_len = *len;
}
msg.msg_iov = iov;
msg.msg_iovlen = (buf && len && *len > 0) ? 2 : 1;
msg.msg_control = cmsg_buf;
msg.msg_controllen = CMSG_LEN(count*sizeof(int));
ssize_t bytes = recvmsg(transport, &msg, 0);
if (len) {
*len = bytes > static_cast<int>(sizeof(int)) ? bytes - sizeof(int) : 0;
}
if (bytes != static_cast<ssize_t>(sizeof(int) + iov[1].iov_len)) {
if (bytes >= 0) {
errno = 0;
}
return false;
}
if (err) {
// "err" is the first four bytes of the payload. If these are non-zero,
// the sender on the other side of the socketpair sent us an errno value.
// We don't expect to get any file handles in this case.
errno = err;
return false;
}
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
if ((msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) ||
!cmsg ||
cmsg->cmsg_level != SOL_SOCKET ||
cmsg->cmsg_type != SCM_RIGHTS ||
cmsg->cmsg_len != CMSG_LEN(count*sizeof(int))) {
errno = EBADF;
return false;
}
va_start(ap, len);
for (int *fd, i = 0; (fd = va_arg(ap, int *)) != NULL; ++i) {
*fd = (reinterpret_cast<int *>(CMSG_DATA(cmsg)))[i];
}
va_end(ap);
return true;
}
// POSIX doesn't define any async-signal safe function for converting
// an integer to ASCII. We'll have to define our own version.
// itoa_r() converts a (signed) integer to ASCII. It returns "buf" if the
// conversion was successful, or NULL otherwise. It never writes more than "sz"
// bytes. Output will be truncated as needed, and a NUL character is always
// appended.
char *itoa_r(int i, char *buf, size_t sz) {
// Make sure we can write at least one NUL byte.
size_t n = 1;
if (n > sz) {
return NULL;
}
// Handle negative numbers.
char *start = buf;
int minint = 0;
if (i < 0) {
// Make sure we can write the '-' character.
if (++n > sz) {
*start = '\000';
return NULL;
}
*start++ = '-';
// Turn our number positive.
if (i == -i) {
      // The most negative integer needs special treatment.
minint = 1;
i = -(i + 1);
} else {
// "Normal" negative numbers are easy.
i = -i;
}
}
// Loop until we have converted the entire number. Output at least one
// character (i.e. '0').
char *ptr = start;
do {
// Make sure there is still enough space left in our output buffer.
if (++n > sz) {
buf = NULL;
goto truncate;
}
    // Output the next digit and (if necessary) compensate for the most
    // negative integer needing special treatment. This works because, no
    // matter the bit width of the integer, the most negative integer always
    // ends in 2, 4, 6, or 8.
*ptr++ = i%10 + '0' + minint;
minint = 0;
i /= 10;
} while (i);
truncate: // Terminate the output with a NUL character.
*ptr = '\000';
// Conversion to ASCII actually resulted in the digits being in reverse
// order. We can't easily generate them in forward order, as we can't tell
// the number of characters needed until we are done converting.
// So, now, we reverse the string (except for the possible "-" sign).
while (--ptr > start) {
char ch = *ptr;
*ptr = *start;
*start++ = ch;
}
return buf;
}
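// Usage sketch (editorial, not part of the original source):
//
//   char buf[16];
//   itoa_r(-42, buf, sizeof(buf));  // returns buf, which now holds "-42"
//   itoa_r(123, buf, 2);            // returns NULL; only the low digit "3"
//                                   // fits before the NUL terminator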
// This handler gets called whenever we encounter a system call that we
// don't recognize explicitly. For the purposes of this program, we just
// log the system call and then deny it. More elaborate sandbox policies
// might try to evaluate the system call in user-space, instead.
// The only notable complication is that this function must be async-signal
// safe. This restricts the library functions that we can call.
intptr_t DefaultHandler(const struct arch_seccomp_data& data, void *) {
static const char msg0[] = "Disallowed system call #";
static const char msg1[] = "\n";
char buf[sizeof(msg0) - 1 + 25 + sizeof(msg1)];
*buf = '\000';
strncat(buf, msg0, sizeof(buf) - 1);
char *ptr = strrchr(buf, '\000');
itoa_r(data.nr, ptr, sizeof(buf) - (ptr - buf));
ptr = strrchr(ptr, '\000');
strncat(ptr, msg1, sizeof(buf) - (ptr - buf));
ptr = strrchr(ptr, '\000');
if (HANDLE_EINTR(write(2, buf, ptr - buf))) { }
return -ERR;
}
ErrorCode Evaluator(SandboxBPF* sandbox, int sysno, void *) {
switch (sysno) {
#if defined(__NR_accept)
case __NR_accept: case __NR_accept4:
#endif
case __NR_alarm:
case __NR_brk:
case __NR_clock_gettime:
case __NR_close:
case __NR_dup: case __NR_dup2:
case __NR_epoll_create: case __NR_epoll_ctl: case __NR_epoll_wait:
case __NR_exit: case __NR_exit_group:
case __NR_fcntl:
#if defined(__NR_fcntl64)
case __NR_fcntl64:
#endif
case __NR_fdatasync:
case __NR_fstat:
#if defined(__NR_fstat64)
case __NR_fstat64:
#endif
case __NR_ftruncate:
case __NR_futex:
case __NR_getdents: case __NR_getdents64:
case __NR_getegid:
#if defined(__NR_getegid32)
case __NR_getegid32:
#endif
case __NR_geteuid:
#if defined(__NR_geteuid32)
case __NR_geteuid32:
#endif
case __NR_getgid:
#if defined(__NR_getgid32)
case __NR_getgid32:
#endif
case __NR_getitimer: case __NR_setitimer:
#if defined(__NR_getpeername)
case __NR_getpeername:
#endif
case __NR_getpid: case __NR_gettid:
#if defined(__NR_getsockname)
case __NR_getsockname:
#endif
case __NR_gettimeofday:
case __NR_getuid:
#if defined(__NR_getuid32)
case __NR_getuid32:
#endif
#if defined(__NR__llseek)
case __NR__llseek:
#endif
case __NR_lseek:
case __NR_nanosleep:
case __NR_pipe: case __NR_pipe2:
case __NR_poll:
case __NR_pread64: case __NR_preadv:
case __NR_pwrite64: case __NR_pwritev:
case __NR_read: case __NR_readv:
case __NR_restart_syscall:
case __NR_set_robust_list:
case __NR_rt_sigaction:
#if defined(__NR_sigaction)
case __NR_sigaction:
#endif
#if defined(__NR_signal)
case __NR_signal:
#endif
case __NR_rt_sigprocmask:
#if defined(__NR_sigprocmask)
case __NR_sigprocmask:
#endif
#if defined(__NR_shutdown)
case __NR_shutdown:
#endif
case __NR_rt_sigreturn:
#if defined(__NR_sigreturn)
case __NR_sigreturn:
#endif
#if defined(__NR_socketpair)
case __NR_socketpair:
#endif
case __NR_time:
case __NR_uname:
case __NR_write: case __NR_writev:
return ErrorCode(ErrorCode::ERR_ALLOWED);
case __NR_prctl:
// Allow PR_SET_DUMPABLE and PR_GET_DUMPABLE. Do not allow anything else.
return sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
PR_SET_DUMPABLE,
ErrorCode(ErrorCode::ERR_ALLOWED),
sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
PR_GET_DUMPABLE,
ErrorCode(ErrorCode::ERR_ALLOWED),
sandbox->Trap(DefaultHandler, NULL)));
// The following system calls are temporarily permitted. This must be
// tightened later. But we currently don't implement enough of the sandboxing
// API to do so.
// As is, this sandbox isn't exactly safe :-/
#if defined(__NR_sendmsg)
case __NR_sendmsg: case __NR_sendto:
case __NR_recvmsg: case __NR_recvfrom:
case __NR_getsockopt: case __NR_setsockopt:
#elif defined(__NR_socketcall)
case __NR_socketcall:
#endif
#if defined(__NR_shmat)
case __NR_shmat: case __NR_shmctl: case __NR_shmdt: case __NR_shmget:
#elif defined(__NR_ipc)
case __NR_ipc:
#endif
#if defined(__NR_mmap2)
case __NR_mmap2:
#else
case __NR_mmap:
#endif
#if defined(__NR_ugetrlimit)
case __NR_ugetrlimit:
#endif
case __NR_getrlimit:
case __NR_ioctl:
case __NR_clone:
case __NR_munmap: case __NR_mprotect: case __NR_madvise:
case __NR_remap_file_pages:
return ErrorCode(ErrorCode::ERR_ALLOWED);
// Everything that isn't explicitly allowed is denied.
default:
return sandbox->Trap(DefaultHandler, NULL);
}
}
void *ThreadFnc(void *arg) {
return arg;
}
void *SendmsgStressThreadFnc(void *arg) {
if (arg) { }
static const int repetitions = 100;
static const int kNumFds = 3;
for (int rep = 0; rep < repetitions; ++rep) {
int fds[2 + kNumFds];
if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) {
perror("socketpair()");
_exit(1);
}
size_t len = 4;
char buf[4];
if (!SendFds(fds[0], "test", 4, fds[1], fds[1], fds[1], -1) ||
!GetFds(fds[1], buf, &len, fds+2, fds+3, fds+4, NULL) ||
len != 4 ||
memcmp(buf, "test", len) ||
write(fds[2], "demo", 4) != 4 ||
read(fds[0], buf, 4) != 4 ||
memcmp(buf, "demo", 4)) {
perror("sending/receiving of fds");
_exit(1);
}
for (int i = 0; i < 2+kNumFds; ++i) {
if (close(fds[i])) {
perror("close");
_exit(1);
}
}
}
return NULL;
}
} // namespace
int main(int argc, char *argv[]) {
if (argc) { }
if (argv) { }
int proc_fd = open("/proc", O_RDONLY|O_DIRECTORY);
if (SandboxBPF::SupportsSeccompSandbox(proc_fd) !=
SandboxBPF::STATUS_AVAILABLE) {
perror("sandbox");
_exit(1);
}
SandboxBPF sandbox;
sandbox.set_proc_fd(proc_fd);
sandbox.SetSandboxPolicyDeprecated(Evaluator, NULL);
if (!sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED)) {
fprintf(stderr, "StartSandbox() failed");
_exit(1);
}
// Check that we can create threads
pthread_t thr;
if (!pthread_create(&thr, NULL, ThreadFnc,
reinterpret_cast<void *>(0x1234))) {
void *ret;
pthread_join(thr, &ret);
if (ret != reinterpret_cast<void *>(0x1234)) {
perror("clone() failed");
_exit(1);
}
} else {
perror("clone() failed");
_exit(1);
}
  // Check that we handle restart_syscall() without dying. This is a little
// tricky to trigger. And I can't think of a good way to verify whether it
// actually executed.
signal(SIGALRM, SIG_IGN);
const struct itimerval tv = { { 0, 0 }, { 0, 5*1000 } };
const struct timespec tmo = { 0, 100*1000*1000 };
setitimer(ITIMER_REAL, &tv, NULL);
nanosleep(&tmo, NULL);
// Check that we can query the size of the stack, but that all other
// calls to getrlimit() fail.
if (((errno = 0), !getrlimit(RLIMIT_STACK, NULL)) || errno != EFAULT ||
((errno = 0), !getrlimit(RLIMIT_CORE, NULL)) || errno != ERR) {
perror("getrlimit()");
_exit(1);
}
// Check that we can query TCGETS and TIOCGWINSZ, but no other ioctls().
if (((errno = 0), !ioctl(2, TCGETS, NULL)) || errno != EFAULT ||
((errno = 0), !ioctl(2, TIOCGWINSZ, NULL)) || errno != EFAULT ||
((errno = 0), !ioctl(2, TCSETS, NULL)) || errno != ERR) {
perror("ioctl()");
_exit(1);
}
// Check that prctl() can manipulate the dumpable flag, but nothing else.
if (((errno = 0), !prctl(PR_GET_DUMPABLE)) || errno ||
((errno = 0), prctl(PR_SET_DUMPABLE, 1)) || errno ||
((errno = 0), !prctl(PR_SET_SECCOMP, 0)) || errno != ERR) {
perror("prctl()");
_exit(1);
}
// Check that we can send and receive file handles.
int fds[3];
if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) {
perror("socketpair()");
_exit(1);
}
size_t len = 4;
char buf[4];
if (!SendFds(fds[0], "test", 4, fds[1], -1) ||
!GetFds(fds[1], buf, &len, fds+2, NULL) ||
len != 4 ||
memcmp(buf, "test", len) ||
write(fds[2], "demo", 4) != 4 ||
read(fds[0], buf, 4) != 4 ||
memcmp(buf, "demo", 4) ||
close(fds[0]) ||
close(fds[1]) ||
close(fds[2])) {
perror("sending/receiving of fds");
_exit(1);
}
// Check whether SysV IPC works.
int shmid;
void *addr;
if ((shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT|0600)) < 0 ||
(addr = shmat(shmid, NULL, 0)) == reinterpret_cast<void *>(-1) ||
shmdt(addr) ||
shmctl(shmid, IPC_RMID, NULL)) {
perror("sysv IPC");
_exit(1);
}
// Print a message so that the user can see the sandbox is activated.
time_t tm = time(NULL);
printf("Sandbox has been started at %s", ctime(&tm));
// Stress-test the sendmsg() code
static const int kSendmsgStressNumThreads = 10;
pthread_t sendmsgStressThreads[kSendmsgStressNumThreads];
for (int i = 0; i < kSendmsgStressNumThreads; ++i) {
if (pthread_create(sendmsgStressThreads + i, NULL,
SendmsgStressThreadFnc, NULL)) {
perror("pthread_create");
_exit(1);
}
}
for (int i = 0; i < kSendmsgStressNumThreads; ++i) {
pthread_join(sendmsgStressThreads[i], NULL);
}
return 0;
}


@ -0,0 +1,85 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <errno.h>
#include <linux/unistd.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <string>
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/syscall.h"
namespace sandbox {
void Die::ExitGroup() {
// exit_group() should exit our program. After all, it is defined as a
// function that doesn't return. But things can theoretically go wrong.
  // Especially since we are dealing with system call filters. Continuing
// execution would be very bad in most cases where ExitGroup() gets called.
// So, we'll try a few other strategies too.
SandboxSyscall(__NR_exit_group, 1);
// We have no idea what our run-time environment looks like. So, signal
  // handlers might or might not do the right thing. Try to reset settings
  // to a defined state; but we have no way to verify whether we actually
  // succeeded in doing so. Nonetheless, triggering a fatal signal could help
// us terminate.
signal(SIGSEGV, SIG_DFL);
SandboxSyscall(__NR_prctl, PR_SET_DUMPABLE, (void*)0, (void*)0, (void*)0);
if (*(volatile char*)0) {
}
// If there is no way for us to ask for the program to exit, the next
// best thing we can do is to loop indefinitely. Maybe, somebody will notice
// and file a bug...
// We in fact retry the system call inside of our loop so that it will
// stand out when somebody tries to diagnose the problem by using "strace".
for (;;) {
SandboxSyscall(__NR_exit_group, 1);
}
}
void Die::SandboxDie(const char* msg, const char* file, int line) {
if (simple_exit_) {
LogToStderr(msg, file, line);
} else {
logging::LogMessage(file, line, logging::LOG_FATAL).stream() << msg;
}
ExitGroup();
}
void Die::RawSandboxDie(const char* msg) {
if (!msg)
msg = "";
RAW_LOG(FATAL, msg);
ExitGroup();
}
void Die::SandboxInfo(const char* msg, const char* file, int line) {
if (!suppress_info_) {
logging::LogMessage(file, line, logging::LOG_INFO).stream() << msg;
}
}
void Die::LogToStderr(const char* msg, const char* file, int line) {
if (msg) {
char buf[40];
snprintf(buf, sizeof(buf), "%d", line);
std::string s = std::string(file) + ":" + buf + ":" + msg + "\n";
// No need to loop. Short write()s are unlikely and if they happen we
// probably prefer them over a loop that blocks.
ignore_result(
HANDLE_EINTR(SandboxSyscall(__NR_write, 2, s.c_str(), s.length())));
}
}
bool Die::simple_exit_ = false;
bool Die::suppress_info_ = false;
} // namespace sandbox


@ -0,0 +1,68 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_DIE_H__
#define SANDBOX_LINUX_SECCOMP_BPF_DIE_H__
#include "base/basictypes.h"
#include "sandbox/linux/sandbox_export.h"
namespace sandbox {
// This is the main API for using this file. Prints an error message and
// exits with a fatal error. This is not async-signal safe.
#define SANDBOX_DIE(m) sandbox::Die::SandboxDie(m, __FILE__, __LINE__)
// An async-signal-safe version of the same API. Won't print the filename
// and line number.
#define RAW_SANDBOX_DIE(m) sandbox::Die::RawSandboxDie(m)
// Adds an informational message to the log file or stderr as appropriate.
#define SANDBOX_INFO(m) sandbox::Die::SandboxInfo(m, __FILE__, __LINE__)
class SANDBOX_EXPORT Die {
public:
// Terminate the program, even if the current sandbox policy prevents some
// of the more commonly used functions used for exiting.
// Most users would want to call SANDBOX_DIE() instead, as it logs extra
// information. But calling ExitGroup() is correct and in some rare cases
// preferable. So, we make it part of the public API.
static void ExitGroup() __attribute__((noreturn));
// This method gets called by SANDBOX_DIE(). There is normally no reason
// to call it directly unless you are defining your own exiting macro.
static void SandboxDie(const char* msg, const char* file, int line)
__attribute__((noreturn));
static void RawSandboxDie(const char* msg) __attribute__((noreturn));
// This method gets called by SANDBOX_INFO(). There is normally no reason
// to call it directly unless you are defining your own logging macro.
static void SandboxInfo(const char* msg, const char* file, int line);
// Writes a message to stderr. Used as a fall-back choice, if we don't have
// any other way to report an error.
static void LogToStderr(const char* msg, const char* file, int line);
// We generally want to run all exit handlers. This means, on SANDBOX_DIE()
// we should be calling LOG(FATAL). But there are some situations where
// we just need to print a message and then terminate. This would typically
// happen in cases where we consume the error message internally (e.g. in
  // unit tests or in the SupportsSeccompSandbox() method).
static void EnableSimpleExit() { simple_exit_ = true; }
// Sometimes we need to disable all informational messages (e.g. from within
// unittests).
static void SuppressInfoMessages(bool flag) { suppress_info_ = flag; }
private:
static bool simple_exit_;
static bool suppress_info_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Die);
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_DIE_H__


@ -0,0 +1,104 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sandbox/linux/seccomp-bpf/die.h"
#include "sandbox/linux/seccomp-bpf/errorcode.h"
namespace sandbox {
ErrorCode::ErrorCode(int err) {
switch (err) {
case ERR_ALLOWED:
err_ = SECCOMP_RET_ALLOW;
error_type_ = ET_SIMPLE;
break;
    case ERR_MIN_ERRNO ... ERR_MAX_ERRNO:
err_ = SECCOMP_RET_ERRNO + err;
error_type_ = ET_SIMPLE;
break;
default:
SANDBOX_DIE("Invalid use of ErrorCode object");
}
}
ErrorCode::ErrorCode(Trap::TrapFnc fnc, const void* aux, bool safe, uint16_t id)
: error_type_(ET_TRAP),
fnc_(fnc),
aux_(const_cast<void*>(aux)),
safe_(safe),
err_(SECCOMP_RET_TRAP + id) {}
ErrorCode::ErrorCode(int argno,
ArgType width,
Operation op,
uint64_t value,
const ErrorCode* passed,
const ErrorCode* failed)
: error_type_(ET_COND),
value_(value),
argno_(argno),
width_(width),
op_(op),
passed_(passed),
failed_(failed),
err_(SECCOMP_RET_INVALID) {
if (op < 0 || op >= OP_NUM_OPS) {
SANDBOX_DIE("Invalid opcode in BPF sandbox rules");
}
}
bool ErrorCode::Equals(const ErrorCode& err) const {
if (error_type_ == ET_INVALID || err.error_type_ == ET_INVALID) {
SANDBOX_DIE("Dereferencing invalid ErrorCode");
}
if (error_type_ != err.error_type_) {
return false;
}
if (error_type_ == ET_SIMPLE || error_type_ == ET_TRAP) {
return err_ == err.err_;
} else if (error_type_ == ET_COND) {
return value_ == err.value_ && argno_ == err.argno_ &&
width_ == err.width_ && op_ == err.op_ &&
passed_->Equals(*err.passed_) && failed_->Equals(*err.failed_);
} else {
SANDBOX_DIE("Corrupted ErrorCode");
}
}
bool ErrorCode::LessThan(const ErrorCode& err) const {
// Implementing a "LessThan()" operator allows us to use ErrorCode objects
// as keys in STL containers; most notably, it also allows us to put them
// into std::set<>. Actual ordering is not important as long as it is
// deterministic.
if (error_type_ == ET_INVALID || err.error_type_ == ET_INVALID) {
SANDBOX_DIE("Dereferencing invalid ErrorCode");
}
if (error_type_ != err.error_type_) {
return error_type_ < err.error_type_;
} else {
if (error_type_ == ET_SIMPLE || error_type_ == ET_TRAP) {
return err_ < err.err_;
} else if (error_type_ == ET_COND) {
if (value_ != err.value_) {
return value_ < err.value_;
} else if (argno_ != err.argno_) {
return argno_ < err.argno_;
} else if (width_ != err.width_) {
return width_ < err.width_;
} else if (op_ != err.op_) {
return op_ < err.op_;
} else if (!passed_->Equals(*err.passed_)) {
return passed_->LessThan(*err.passed_);
} else if (!failed_->Equals(*err.failed_)) {
return failed_->LessThan(*err.failed_);
} else {
return false;
}
} else {
SANDBOX_DIE("Corrupted ErrorCode");
}
}
}
} // namespace sandbox


@ -0,0 +1,198 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_ERRORCODE_H__
#define SANDBOX_LINUX_SECCOMP_BPF_ERRORCODE_H__
#include "sandbox/linux/sandbox_export.h"
#include "sandbox/linux/seccomp-bpf/linux_seccomp.h"
#include "sandbox/linux/seccomp-bpf/trap.h"
namespace sandbox {
struct arch_seccomp_data;
// This class holds all the possible values that can be returned by a sandbox
// policy.
// We can either wrap a symbolic ErrorCode (i.e. ERR_XXX enum values), an
// errno value (in the range 0..4095), a pointer to a TrapFnc callback
// handling a SECCOMP_RET_TRAP trap, or a complex constraint.
// All of the commonly used values are stored in the "err_" field. So, code
// that is using the ErrorCode class typically operates on a single 32bit
// field.
class SANDBOX_EXPORT ErrorCode {
public:
enum {
// Allow this system call. The value of ERR_ALLOWED is pretty much
    // completely arbitrary. But we want to pick it so that it is unlikely
    // to be passed in accidentally when the user intended to return an
    // "errno" (see below) value instead.
ERR_ALLOWED = 0x04000000,
// Deny the system call with a particular "errno" value.
// N.B.: It is also possible to return "0" here. That would normally
// indicate success, but it won't actually run the system call.
    // This is very different from returning ERR_ALLOWED.
ERR_MIN_ERRNO = 0,
// TODO(markus): Android only supports errno up to 255
// (crbug.com/181647).
ERR_MAX_ERRNO = 4095,
};
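  // Illustrative sketch (editorial, not from the original source):
  //
  //   ErrorCode allow(ErrorCode::ERR_ALLOWED);  // permits the system call
  //   ErrorCode deny(EPERM);                    // fails it with errno EPERM
  //
  // Any other value outside [ERR_MIN_ERRNO, ERR_MAX_ERRNO] makes the
  // ErrorCode(int) constructor call SANDBOX_DIE().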
// While BPF filter programs always operate on 32bit quantities, the kernel
// always sees system call arguments as 64bit values. This statement is true
// no matter whether the host system is natively operating in 32bit or 64bit.
// The BPF compiler hides the fact that BPF instructions cannot directly
// access 64bit quantities. But policies are still advised to specify whether
// a system call expects a 32bit or a 64bit quantity.
enum ArgType {
// When passed as an argument to SandboxBPF::Cond(), TP_32BIT requests that
// the conditional test should operate on the 32bit part of the system call
// argument.
// On 64bit architectures, this verifies that user space did not pass
// a 64bit value as an argument to the system call. If it did, that will be
// interpreted as an attempt at breaking the sandbox and results in the
// program getting terminated.
// In other words, only perform a 32bit test if you are sure this
// particular system call would never legitimately take a 64bit
// argument.
// Implementation detail: TP_32BIT does two things. 1) it restricts the
// conditional test to operating on the LSB only, and 2) it adds code to
// the BPF filter program verifying that the MSB the kernel received from
// user space is either 0, or 0xFFFFFFFF; the latter is acceptable iff bit
// 31 was set in the system call argument. This deals with 32bit arguments
// having been sign extended.
TP_32BIT,
// When passed as an argument to SandboxBPF::Cond(), TP_64BIT requests that
// the conditional test should operate on the full 64bit argument. It is
// generally harmless to perform a 64bit test on 32bit systems, as the
// kernel will always see the top 32 bits of all arguments as zero'd out.
// This approach has the desirable property that for tests of pointer
// values, we can always use TP_64BIT no matter the host architecture.
// But of course, that also means it is possible to write conditional
// policies that turn into no-ops on 32bit systems; this is by design.
TP_64BIT,
};
enum Operation {
// Test whether the system call argument is equal to the operand.
OP_EQUAL,
// Test whether the system call argument is greater (or equal) to the
// operand. Please note that all tests always operate on unsigned
// values. You can generally emulate signed tests, if that's what you
// need.
// TODO(markus): Check whether we should automatically emulate signed
// operations.
OP_GREATER_UNSIGNED,
OP_GREATER_EQUAL_UNSIGNED,
// Tests a system call argument against a bit mask.
// The "ALL_BITS" variant performs this test: "arg & mask == mask"
// This implies that a mask of zero always results in a passing test.
// The "ANY_BITS" variant performs this test: "arg & mask != 0"
// This implies that a mask of zero always results in a failing test.
OP_HAS_ALL_BITS,
OP_HAS_ANY_BITS,
// Total number of operations.
OP_NUM_OPS,
};
enum ErrorType {
ET_INVALID,
ET_SIMPLE,
ET_TRAP,
ET_COND,
};
// We allow the default constructor, as it makes the ErrorCode class
// much easier to use. But if we ever encounter an invalid ErrorCode
// when compiling a BPF filter, we deliberately generate an invalid
// program that will get flagged both by our Verifier class and by
// the Linux kernel.
ErrorCode() : error_type_(ET_INVALID), err_(SECCOMP_RET_INVALID) {}
explicit ErrorCode(int err);
// For all practical purposes, ErrorCodes are treated as if they were
// structs. The copy constructor and assignment operator are trivial and
// we do not need to explicitly specify them.
// Most notably, it is in fact perfectly OK to directly copy the passed_ and
// failed_ fields. They only ever get set by our private constructor, and the
// callers handle life-cycle management for these objects.
// Destructor
~ErrorCode() {}
bool Equals(const ErrorCode& err) const;
bool LessThan(const ErrorCode& err) const;
uint32_t err() const { return err_; }
ErrorType error_type() const { return error_type_; }
bool safe() const { return safe_; }
uint64_t value() const { return value_; }
int argno() const { return argno_; }
ArgType width() const { return width_; }
Operation op() const { return op_; }
const ErrorCode* passed() const { return passed_; }
const ErrorCode* failed() const { return failed_; }
struct LessThan {
bool operator()(const ErrorCode& a, const ErrorCode& b) const {
return a.LessThan(b);
}
};
private:
friend class CodeGen;
friend class SandboxBPF;
friend class Trap;
// If we are wrapping a callback, we must assign a unique id. This id is
// how the kernel tells us which one of our different SECCOMP_RET_TRAP
// cases has been triggered.
ErrorCode(Trap::TrapFnc fnc, const void* aux, bool safe, uint16_t id);
// Some system calls require inspection of arguments. This constructor
// allows us to specify additional constraints.
ErrorCode(int argno,
ArgType width,
Operation op,
uint64_t value,
const ErrorCode* passed,
const ErrorCode* failed);
ErrorType error_type_;
union {
// Fields needed for SECCOMP_RET_TRAP callbacks
struct {
Trap::TrapFnc fnc_; // Callback function and arg, if trap was
void* aux_; // triggered by the kernel's BPF filter.
bool safe_; // Keep sandbox active while calling fnc_()
};
// Fields needed when inspecting additional arguments.
struct {
uint64_t value_; // Value that we are comparing with.
int argno_; // Syscall arg number that we are inspecting.
ArgType width_; // Whether we are looking at a 32/64bit value.
Operation op_; // Comparison operation.
const ErrorCode* passed_; // Value to be returned if comparison passed,
const ErrorCode* failed_; // or if it failed.
};
};
// 32bit field used for all possible types of ErrorCode values. This is
// the value that uniquely identifies any ErrorCode and it (typically) can
// be emitted directly into a BPF filter program.
uint32_t err_;
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_ERRORCODE_H__
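To make the "single 32bit field" remark above concrete, here is a hedged sketch of how the commonly used ErrorCode flavors map onto err() values; it mirrors the ErrnoConstructor unit test below and is not part of the imported sources:

#include <errno.h>

#include "sandbox/linux/seccomp-bpf/errorcode.h"

void ErrValueSketch() {
  sandbox::ErrorCode invalid;  // err() == SECCOMP_RET_INVALID
  sandbox::ErrorCode allow(sandbox::ErrorCode::ERR_ALLOWED);  // err() == SECCOMP_RET_ALLOW
  sandbox::ErrorCode deny(EPERM);  // err() == SECCOMP_RET_ERRNO + EPERM
  // All three are represented by nothing more than that single 32bit value.
}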


@ -0,0 +1,85 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <errno.h>
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/tests/unit_tests.h"
namespace sandbox {
namespace {
SANDBOX_TEST(ErrorCode, ErrnoConstructor) {
ErrorCode e0;
SANDBOX_ASSERT(e0.err() == SECCOMP_RET_INVALID);
ErrorCode e1(ErrorCode::ERR_ALLOWED);
SANDBOX_ASSERT(e1.err() == SECCOMP_RET_ALLOW);
ErrorCode e2(EPERM);
SANDBOX_ASSERT(e2.err() == SECCOMP_RET_ERRNO + EPERM);
SandboxBPF sandbox;
ErrorCode e3 = sandbox.Trap(NULL, NULL);
SANDBOX_ASSERT((e3.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP);
}
SANDBOX_TEST(ErrorCode, Trap) {
SandboxBPF sandbox;
ErrorCode e0 = sandbox.Trap(NULL, "a");
ErrorCode e1 = sandbox.Trap(NULL, "b");
SANDBOX_ASSERT((e0.err() & SECCOMP_RET_DATA) + 1 ==
(e1.err() & SECCOMP_RET_DATA));
ErrorCode e2 = sandbox.Trap(NULL, "a");
SANDBOX_ASSERT((e0.err() & SECCOMP_RET_DATA) ==
(e2.err() & SECCOMP_RET_DATA));
}
SANDBOX_TEST(ErrorCode, Equals) {
ErrorCode e1(ErrorCode::ERR_ALLOWED);
ErrorCode e2(ErrorCode::ERR_ALLOWED);
SANDBOX_ASSERT(e1.Equals(e1));
SANDBOX_ASSERT(e1.Equals(e2));
SANDBOX_ASSERT(e2.Equals(e1));
ErrorCode e3(EPERM);
SANDBOX_ASSERT(!e1.Equals(e3));
SandboxBPF sandbox;
ErrorCode e4 = sandbox.Trap(NULL, "a");
ErrorCode e5 = sandbox.Trap(NULL, "b");
ErrorCode e6 = sandbox.Trap(NULL, "a");
SANDBOX_ASSERT(!e1.Equals(e4));
SANDBOX_ASSERT(!e3.Equals(e4));
SANDBOX_ASSERT(!e5.Equals(e4));
SANDBOX_ASSERT( e6.Equals(e4));
}
SANDBOX_TEST(ErrorCode, LessThan) {
ErrorCode e1(ErrorCode::ERR_ALLOWED);
ErrorCode e2(ErrorCode::ERR_ALLOWED);
SANDBOX_ASSERT(!e1.LessThan(e1));
SANDBOX_ASSERT(!e1.LessThan(e2));
SANDBOX_ASSERT(!e2.LessThan(e1));
ErrorCode e3(EPERM);
SANDBOX_ASSERT(!e1.LessThan(e3));
SANDBOX_ASSERT( e3.LessThan(e1));
SandboxBPF sandbox;
ErrorCode e4 = sandbox.Trap(NULL, "a");
ErrorCode e5 = sandbox.Trap(NULL, "b");
ErrorCode e6 = sandbox.Trap(NULL, "a");
SANDBOX_ASSERT(e1.LessThan(e4));
SANDBOX_ASSERT(e3.LessThan(e4));
SANDBOX_ASSERT(e4.LessThan(e5));
SANDBOX_ASSERT(!e4.LessThan(e6));
SANDBOX_ASSERT(!e6.LessThan(e4));
}
} // namespace
} // namespace sandbox


@ -0,0 +1,62 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_INSTRUCTION_H__
#define SANDBOX_LINUX_SECCOMP_BPF_INSTRUCTION_H__
#include <stdint.h>
namespace sandbox {
// The fields in this structure have the same meaning as the corresponding
// fields in "struct sock_filter". See <linux/filter.h> for a lot more
// detail.
// code -- Opcode of the instruction. This is typically a bitwise
// combination of BPF_XXX values.
// k -- Operand; BPF instructions take zero or one operands. Operands
// are 32bit-wide constants, if present. They can be immediate
// values (if BPF_K is present in "code_"), addresses (if BPF_ABS
// is present in "code_"), or relative jump offsets (if BPF_JMP
// and BPF_JA are present in "code_").
// jt, jf -- all conditional jumps have an 8bit-wide jump offset that allows
// jumps of up to 256 instructions forward. Conditional jumps are
// identified by BPF_JMP in "code_", but the lack of BPF_JA.
// Conditional jumps have a "t"rue and "f"alse branch.
struct Instruction {
// Constructor for a non-jumping instruction or for an unconditional
// "always" jump.
Instruction(uint16_t c, uint32_t parm, Instruction* n)
: code(c), next(n), k(parm) {}
// Constructor for a conditional jump instruction.
Instruction(uint16_t c, uint32_t parm, Instruction* jt, Instruction* jf)
: code(c), jt_ptr(jt), jf_ptr(jf), k(parm) {}
uint16_t code;
union {
// When code generation is complete, we will have computed relative
// branch targets that are in the range 0..255.
struct {
uint8_t jt, jf;
};
// While assembling the BPF program, we use pointers for branch targets.
// Once we have computed basic blocks, these pointers will be entered as
// keys in a TargetsToBlocks map and should no longer be dereferenced
// directly.
struct {
Instruction* jt_ptr, *jf_ptr;
};
// While assembling the BPF program, non-jumping instructions are linked
// by the "next" pointer. This field is no longer needed when we have
// computed basic blocks.
Instruction* next;
};
uint32_t k;
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_INSTRUCTION_H__
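As a rough illustration of the union above, here is a hypothetical snippet; the real compiler allocates instructions through CodeGen rather than on the stack:

#include "sandbox/linux/seccomp-bpf/instruction.h"
#include "sandbox/linux/seccomp-bpf/linux_seccomp.h"

void InstructionSketch() {
  using sandbox::Instruction;
  // Terminal "return" instructions have no successor, so "next" is NULL.
  Instruction ret_allow(BPF_RET + BPF_K, SECCOMP_RET_ALLOW, NULL);
  Instruction ret_kill(BPF_RET + BPF_K, SECCOMP_RET_KILL, NULL);
  // While assembling, a conditional jump stores pointers in jt_ptr/jf_ptr;
  // the 8bit jt/jf offsets are only computed after basic-block layout.
  Instruction jeq(BPF_JMP + BPF_JEQ + BPF_K, 42 /* hypothetical constant */,
                  &ret_allow, &ret_kill);
  (void)jeq;  // Not fed to a real code generator in this sketch.
}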


@ -0,0 +1,197 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_LINUX_SECCOMP_H__
#define SANDBOX_LINUX_SECCOMP_BPF_LINUX_SECCOMP_H__
// The Seccomp2 kernel ABI is not part of older versions of glibc.
// As we can't break compilation with these versions of the library,
// we explicitly define all missing symbols.
// If we ever decide that we can now rely on system headers, the following
// include files should be enabled:
// #include <linux/audit.h>
// #include <linux/seccomp.h>
#include <asm/unistd.h>
#include <linux/filter.h>
// For audit.h
#ifndef EM_ARM
#define EM_ARM 40
#endif
#ifndef EM_386
#define EM_386 3
#endif
#ifndef EM_X86_64
#define EM_X86_64 62
#endif
#ifndef __AUDIT_ARCH_64BIT
#define __AUDIT_ARCH_64BIT 0x80000000
#endif
#ifndef __AUDIT_ARCH_LE
#define __AUDIT_ARCH_LE 0x40000000
#endif
#ifndef AUDIT_ARCH_ARM
#define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE)
#endif
#ifndef AUDIT_ARCH_I386
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#endif
#ifndef AUDIT_ARCH_X86_64
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#endif
// For prctl.h
#ifndef PR_SET_SECCOMP
#define PR_SET_SECCOMP 22
#define PR_GET_SECCOMP 21
#endif
#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif
#ifndef IPC_64
#define IPC_64 0x0100
#endif
#ifndef BPF_MOD
#define BPF_MOD 0x90
#endif
#ifndef BPF_XOR
#define BPF_XOR 0xA0
#endif
// In order to build with older tool chains, we currently have to avoid
// including <linux/seccomp.h>. Until that can be fixed (if ever), we rely on
// our own definitions of the seccomp kernel ABI.
#ifndef SECCOMP_MODE_FILTER
#define SECCOMP_MODE_DISABLED 0
#define SECCOMP_MODE_STRICT 1
#define SECCOMP_MODE_FILTER 2 // Uses user-supplied filter
#endif
#ifndef SECCOMP_RET_KILL
// Return values supported for BPF filter programs. Please note that the
// "illegal" SECCOMP_RET_INVALID is not supported by the kernel, should only
// ever be used internally, and would result in the kernel killing our process.
#define SECCOMP_RET_KILL 0x00000000U // Kill the task immediately
#define SECCOMP_RET_INVALID 0x00010000U // Illegal return value
#define SECCOMP_RET_TRAP 0x00030000U // Disallow and force a SIGSYS
#define SECCOMP_RET_ERRNO 0x00050000U // Returns an errno
#define SECCOMP_RET_TRACE 0x7ff00000U // Pass to a tracer or disallow
#define SECCOMP_RET_ALLOW 0x7fff0000U // Allow
#define SECCOMP_RET_ACTION 0xffff0000U // Masks for the return value
#define SECCOMP_RET_DATA 0x0000ffffU // sections
#else
#define SECCOMP_RET_INVALID 0x00010000U // Illegal return value
#endif
#ifndef SYS_SECCOMP
#define SYS_SECCOMP 1
#endif
// Impose some reasonable maximum BPF program size. Realistically, the
// kernel probably has much lower limits. But by limiting to less than
// 30 bits, we can ease requirements on some of our data types.
#define SECCOMP_MAX_PROGRAM_SIZE (1<<30)
#if defined(__i386__)
#define MIN_SYSCALL 0u
#define MAX_PUBLIC_SYSCALL 1024u
#define MAX_SYSCALL MAX_PUBLIC_SYSCALL
#define SECCOMP_ARCH AUDIT_ARCH_I386
#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.gregs[(_reg)])
#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, REG_EAX)
#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, REG_EAX)
#define SECCOMP_IP(_ctx) SECCOMP_REG(_ctx, REG_EIP)
#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, REG_EBX)
#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, REG_ECX)
#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, REG_EDX)
#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, REG_ESI)
#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, REG_EDI)
#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, REG_EBP)
#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr))
#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch))
#define SECCOMP_IP_MSB_IDX (offsetof(struct arch_seccomp_data, \
instruction_pointer) + 4)
#define SECCOMP_IP_LSB_IDX (offsetof(struct arch_seccomp_data, \
instruction_pointer) + 0)
#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
8*(nr) + 4)
#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
8*(nr) + 0)
#elif defined(__x86_64__)
#define MIN_SYSCALL 0u
#define MAX_PUBLIC_SYSCALL 1024u
#define MAX_SYSCALL MAX_PUBLIC_SYSCALL
#define SECCOMP_ARCH AUDIT_ARCH_X86_64
#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.gregs[(_reg)])
#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, REG_RAX)
#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, REG_RAX)
#define SECCOMP_IP(_ctx) SECCOMP_REG(_ctx, REG_RIP)
#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, REG_RDI)
#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, REG_RSI)
#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, REG_RDX)
#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, REG_R10)
#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, REG_R8)
#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, REG_R9)
#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr))
#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch))
#define SECCOMP_IP_MSB_IDX (offsetof(struct arch_seccomp_data, \
instruction_pointer) + 4)
#define SECCOMP_IP_LSB_IDX (offsetof(struct arch_seccomp_data, \
instruction_pointer) + 0)
#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
8*(nr) + 4)
#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
8*(nr) + 0)
#elif defined(__arm__) && (defined(__thumb__) || defined(__ARM_EABI__))
// ARM EABI includes "ARM private" system calls starting at |__ARM_NR_BASE|,
// and a "ghost syscall private to the kernel", cmpxchg,
// at |__ARM_NR_BASE+0x00fff0|.
// See </arch/arm/include/asm/unistd.h> in the Linux kernel.
#define MIN_SYSCALL ((unsigned int)__NR_SYSCALL_BASE)
#define MAX_PUBLIC_SYSCALL (MIN_SYSCALL + 1024u)
#define MIN_PRIVATE_SYSCALL ((unsigned int)__ARM_NR_BASE)
#define MAX_PRIVATE_SYSCALL (MIN_PRIVATE_SYSCALL + 16u)
#define MIN_GHOST_SYSCALL ((unsigned int)__ARM_NR_BASE + 0xfff0u)
#define MAX_SYSCALL (MIN_GHOST_SYSCALL + 4u)
#define SECCOMP_ARCH AUDIT_ARCH_ARM
// ARM sigcontext_t is different from i386/x86_64.
// See </arch/arm/include/asm/sigcontext.h> in the Linux kernel.
#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.arm_##_reg)
// ARM EABI syscall convention.
#define SECCOMP_RESULT(_ctx) SECCOMP_REG(_ctx, r0)
#define SECCOMP_SYSCALL(_ctx) SECCOMP_REG(_ctx, r7)
#define SECCOMP_IP(_ctx) SECCOMP_REG(_ctx, pc)
#define SECCOMP_PARM1(_ctx) SECCOMP_REG(_ctx, r0)
#define SECCOMP_PARM2(_ctx) SECCOMP_REG(_ctx, r1)
#define SECCOMP_PARM3(_ctx) SECCOMP_REG(_ctx, r2)
#define SECCOMP_PARM4(_ctx) SECCOMP_REG(_ctx, r3)
#define SECCOMP_PARM5(_ctx) SECCOMP_REG(_ctx, r4)
#define SECCOMP_PARM6(_ctx) SECCOMP_REG(_ctx, r5)
#define SECCOMP_NR_IDX (offsetof(struct arch_seccomp_data, nr))
#define SECCOMP_ARCH_IDX (offsetof(struct arch_seccomp_data, arch))
#define SECCOMP_IP_MSB_IDX (offsetof(struct arch_seccomp_data, \
instruction_pointer) + 4)
#define SECCOMP_IP_LSB_IDX (offsetof(struct arch_seccomp_data, \
instruction_pointer) + 0)
#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
8*(nr) + 4)
#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) + \
8*(nr) + 0)
#else
#error Unsupported target platform
#endif
#endif // SANDBOX_LINUX_SECCOMP_BPF_LINUX_SECCOMP_H__
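To make the SECCOMP_RET_ACTION/SECCOMP_RET_DATA masks concrete, here is a short illustrative sketch of how a filter's 32bit return value decomposes:

#include <errno.h>
#include <stdint.h>

#include "sandbox/linux/seccomp-bpf/linux_seccomp.h"

void ReturnValueSketch() {
  // A filter that denies a system call with EPERM returns this value:
  uint32_t ret = SECCOMP_RET_ERRNO + EPERM;
  uint32_t action = ret & SECCOMP_RET_ACTION;  // == SECCOMP_RET_ERRNO
  uint32_t data = ret & SECCOMP_RET_DATA;      // == EPERM
  (void)action;
  (void)data;
}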

(Diff not shown because of its large size.)


@ -0,0 +1,292 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_H__
#define SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_H__
#include <stddef.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <algorithm>
#include <limits>
#include <map>
#include <set>
#include <utility>
#include <vector>
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "sandbox/linux/sandbox_export.h"
#include "sandbox/linux/seccomp-bpf/die.h"
#include "sandbox/linux/seccomp-bpf/errorcode.h"
#include "sandbox/linux/seccomp-bpf/linux_seccomp.h"
namespace sandbox {
struct arch_seccomp_data {
int nr;
uint32_t arch;
uint64_t instruction_pointer;
uint64_t args[6];
};
struct arch_sigsys {
void* ip;
int nr;
unsigned int arch;
};
class CodeGen;
class SandboxBPFPolicy;
class SandboxUnittestHelper;
struct Instruction;
class SANDBOX_EXPORT SandboxBPF {
public:
enum SandboxStatus {
STATUS_UNKNOWN, // Status prior to calling SupportsSeccompSandbox()
STATUS_UNSUPPORTED, // The kernel does not appear to support sandboxing
STATUS_UNAVAILABLE, // Currently unavailable but might work again later
STATUS_AVAILABLE, // Sandboxing is available but not currently active
STATUS_ENABLED // The sandbox is now active
};
// Depending on the level of kernel support, seccomp-bpf may require the
// process to be single-threaded in order to enable it. When calling
// StartSandbox(), the program should indicate whether or not the sandbox
// should try to engage multi-thread support.
enum SandboxThreadState {
PROCESS_INVALID,
PROCESS_SINGLE_THREADED, // The program is currently single-threaded.
// Note: PROCESS_MULTI_THREADED requires experimental kernel support that
// has not been contributed to upstream Linux.
PROCESS_MULTI_THREADED, // The program may be multi-threaded.
};
// When calling SetSandboxPolicy(), the caller can provide an arbitrary
// pointer in |aux|. This pointer will then be forwarded to the sandbox
// policy each time a call is made through an EvaluateSyscall function
// pointer. One common use case would be to pass the "aux" pointer as an
// argument to Trap() functions.
typedef ErrorCode (*EvaluateSyscall)(SandboxBPF* sandbox_compiler,
int system_call_number,
void* aux);
typedef std::vector<std::pair<EvaluateSyscall, void*> > Evaluators;
// A vector of BPF instructions that need to be installed as a filter
// program in the kernel.
typedef std::vector<struct sock_filter> Program;
// Constructors and destructors.
// NOTE: Setting a policy and starting the sandbox is a one-way operation.
// The kernel does not provide any option for unloading a loaded
// sandbox. Strictly speaking, that means we should disallow calling
// the destructor, if StartSandbox() has ever been called. In practice,
// this makes it needlessly complicated to operate on "Sandbox"
// objects. So, we instead opted to allow object destruction. But it
// should be noted that during its lifetime, the object probably made
// irreversible state changes to the runtime environment. These changes
// stay in effect even after the destructor has been run.
SandboxBPF();
~SandboxBPF();
// Checks whether a particular system call number is valid on the current
// architecture. E.g. on ARM there's a non-contiguous range of private
// system calls.
static bool IsValidSyscallNumber(int sysnum);
// There are a lot of reasons why the Seccomp sandbox might not be available.
// This could be because the kernel does not support Seccomp mode, or it
// could be because another sandbox is already active.
// "proc_fd" should be a file descriptor for "/proc", or -1 if not
// provided by the caller.
static SandboxStatus SupportsSeccompSandbox(int proc_fd);
// The sandbox needs to be able to access files in "/proc/self". If this
// directory is not accessible when "StartSandbox()" gets called, the caller
// can provide an already opened file descriptor by calling "set_proc_fd()".
// The sandbox becomes the new owner of this file descriptor and will
// eventually close it when "StartSandbox()" executes.
void set_proc_fd(int proc_fd);
// The system call evaluator function is called with the system
// call number. It can decide to allow the system call unconditionally
// by returning ERR_ALLOWED; it can deny the system call unconditionally by
// returning an appropriate "errno" value; or it can request inspection
// of system call argument(s) by returning a suitable ErrorCode.
// The "aux" parameter can be used to pass optional data to the system call
// evaluator. There are different possible uses for this data, but one of the
// use cases would be for the policy to then forward this pointer to a Trap()
// handler. In this case, of course, the data that is pointed to must remain
// valid for the entire time that Trap() handlers can be called; typically,
// this would be the lifetime of the program.
// DEPRECATED: use the policy interface below.
void SetSandboxPolicyDeprecated(EvaluateSyscall syscallEvaluator, void* aux);
// Set the BPF policy as |policy|. Ownership of |policy| is transferred here
// to the sandbox object.
void SetSandboxPolicy(SandboxBPFPolicy* policy);
// We can use ErrorCode to request calling of a trap handler. This method
// performs the required wrapping of the callback function into an
// ErrorCode object.
// The "aux" field can carry a pointer to arbitrary data. See EvaluateSyscall
// for a description of how to pass data from SetSandboxPolicy() to a Trap()
// handler.
ErrorCode Trap(Trap::TrapFnc fnc, const void* aux);
// Calls a user-space trap handler and disables all sandboxing for system
// calls made from this trap handler.
// This feature is available only if explicitly enabled by the user having
// set the CHROME_SANDBOX_DEBUGGING environment variable.
// Returns an ET_INVALID ErrorCode, if called when not enabled.
// NOTE: This feature, by definition, disables all security features of
// the sandbox. It should never be used in production, but it can be
// very useful to diagnose code that is incompatible with the sandbox.
// If even a single system call returns "UnsafeTrap", the security of
// the entire sandbox should be considered compromised.
ErrorCode UnsafeTrap(Trap::TrapFnc fnc, const void* aux);
// From within an UnsafeTrap() it is often useful to be able to execute
// the system call that triggered the trap. The ForwardSyscall() method
// makes this easy. It is more efficient than calling glibc's syscall()
// function, as it avoids the extra round-trip to the signal handler. And
// it automatically does the correct thing to report kernel-style error
// conditions, rather than setting errno. See the comments for TrapFnc for
// details. In other words, the return value from ForwardSyscall() is
// directly suitable as a return value for a trap handler.
static intptr_t ForwardSyscall(const struct arch_seccomp_data& args);
// We can also use ErrorCode to request evaluation of a conditional
// statement based on inspection of system call parameters.
// This method wraps an ErrorCode object around the conditional statement.
// Argument "argno" (1..6) will be compared to "value" using comparator
// "op". If the condition is true "passed" will be returned, otherwise
// "failed".
// If "is32bit" is set, the argument must in the range of 0x0..(1u << 32 - 1)
// If it is outside this range, the sandbox treats the system call just
// the same as any other ABI violation (i.e. it aborts with an error
// message).
ErrorCode Cond(int argno,
ErrorCode::ArgType is_32bit,
ErrorCode::Operation op,
uint64_t value,
const ErrorCode& passed,
const ErrorCode& failed);
// Kill the program and print an error message.
ErrorCode Kill(const char* msg);
// This is the main public entry point. It finds all system calls that
// need rewriting, sets up the resources needed by the sandbox, and
// enters Seccomp mode.
// The calling process must specify its current SandboxThreadState, as a way
// to tell the sandbox which type of kernel support it should engage.
// It is possible to stack multiple sandboxes by creating separate "Sandbox"
// objects and calling "StartSandbox()" on each of them. Please note that
// this requires special care, though, as newly stacked sandboxes can never
// relax restrictions imposed by earlier sandboxes. Furthermore, installing
// a new policy requires making system calls that might already be
// disallowed.
// Finally, stacking does add more kernel overhead than having a single
// combined policy. So, it should only be used if there are no alternatives.
bool StartSandbox(SandboxThreadState thread_state) WARN_UNUSED_RESULT;
// Assembles a BPF filter program from the current policy. After calling this
// function, you must not call any other sandboxing function.
// Typically, AssembleFilter() is only used by unit tests and by sandbox
// internals. It should not be used by production code.
// For performance reasons, we normally only run the assembled BPF program
// through the verifier if the program was built in debug mode.
// But by setting "force_verification", the caller can request that the
// verifier is run unconditionally. This is useful for unittests.
Program* AssembleFilter(bool force_verification);
// Returns the fatal ErrorCode that is used to indicate that somebody
// attempted to pass a 64bit value in a 32bit system call argument.
// This method is primarily needed for testing purposes.
ErrorCode Unexpected64bitArgument();
private:
friend class CodeGen;
friend class SandboxUnittestHelper;
friend class ErrorCode;
struct Range {
Range(uint32_t f, uint32_t t, const ErrorCode& e)
: from(f), to(t), err(e) {}
uint32_t from, to;
ErrorCode err;
};
typedef std::vector<Range> Ranges;
typedef std::map<uint32_t, ErrorCode> ErrMap;
typedef std::set<ErrorCode, struct ErrorCode::LessThan> Conds;
// Get a file descriptor pointing to "/proc", if currently available.
int proc_fd() { return proc_fd_; }
// Creates a subprocess and runs "code_in_sandbox" inside of the specified
// policy. The caller has to make sure that "this" has not yet been
// initialized with any other policies.
bool RunFunctionInPolicy(void (*code_in_sandbox)(),
EvaluateSyscall syscall_evaluator,
void* aux);
// Performs a couple of sanity checks to verify that the kernel supports the
// features that we need for successful sandboxing.
// The caller has to make sure that "this" has not yet been initialized with
// any other policies.
bool KernelSupportSeccompBPF();
// Verify that the current policy passes some basic sanity checks.
void PolicySanityChecks(SandboxBPFPolicy* policy);
// Assembles and installs a filter based on the policy that has previously
// been configured with SetSandboxPolicy().
void InstallFilter(SandboxThreadState thread_state);
// Verify the correctness of a compiled program by comparing it against the
// current policy. This function should only ever be called by unit tests and
// by the sandbox internals. It should not be used by production code.
void VerifyProgram(const Program& program, bool has_unsafe_traps);
// Finds all the ranges of system calls that need to be handled. Ranges are
// sorted in ascending order of system call numbers. There are no gaps in the
// ranges. System calls with identical ErrorCodes are coalesced into a single
// range.
void FindRanges(Ranges* ranges);
// Returns a BPF program snippet that implements a jump table for the
// given range of system call numbers. This function runs recursively.
Instruction* AssembleJumpTable(CodeGen* gen,
Ranges::const_iterator start,
Ranges::const_iterator stop);
// Returns a BPF program snippet that makes the BPF filter program exit
// with the given ErrorCode "err". N.B. the ErrorCode may very well be a
// conditional expression; if so, this function will recursively call
// CondExpression() and possibly RetExpression() to build a complex set of
// instructions.
Instruction* RetExpression(CodeGen* gen, const ErrorCode& err);
// Returns a BPF program that evaluates the conditional expression in
// "cond" and returns the appropriate value from the BPF filter program.
// This function recursively calls RetExpression(); it should only ever be
// called from RetExpression().
Instruction* CondExpression(CodeGen* gen, const ErrorCode& cond);
static SandboxStatus status_;
bool quiet_;
int proc_fd_;
scoped_ptr<const SandboxBPFPolicy> policy_;
Conds* conds_;
bool sandbox_has_started_;
DISALLOW_COPY_AND_ASSIGN(SandboxBPF);
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_H__
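A hedged sketch of the start-up sequence these comments describe (illustrative only; the caller is hypothetical, and the policy object could be any SandboxBPFPolicy subclass, such as the one sketched after sandbox_bpf_policy.h below):

#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"

bool EnterSandbox(sandbox::SandboxBPFPolicy* policy) {
  sandbox::SandboxBPF sandbox;
  // Ownership of |policy| transfers to the sandbox object.
  sandbox.SetSandboxPolicy(policy);
  // StartSandbox() is WARN_UNUSED_RESULT; engaging seccomp mode can fail,
  // e.g. on kernels without seccomp-bpf support.
  return sandbox.StartSandbox(sandbox::SandboxBPF::PROCESS_SINGLE_THREADED);
}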


@ -0,0 +1,35 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_POLICY_H_
#define SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_POLICY_H_
#include "base/basictypes.h"
namespace sandbox {
class ErrorCode;
class SandboxBPF;
// This is the interface to implement to define a BPF sandbox policy.
class SandboxBPFPolicy {
public:
SandboxBPFPolicy() {}
virtual ~SandboxBPFPolicy() {}
// The EvaluateSyscall method is called with the system call number. It can
// decide to allow the system call unconditionally by returning ERR_ALLOWED;
// it can deny the system call unconditionally by returning an appropriate
// "errno" value; or it can request inspection of system call argument(s) by
// returning a suitable ErrorCode.
virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox_compiler,
int system_call_number) const = 0;
private:
DISALLOW_COPY_AND_ASSIGN(SandboxBPFPolicy);
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_POLICY_H_
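A minimal sketch of a policy implementing this interface, modeled on the evaluators used in the unit tests below (hypothetical, not part of the import): allow every valid system call and report ENOSYS for invalid numbers.

#include <errno.h>

#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"

class AllowAllPolicy : public sandbox::SandboxBPFPolicy {
 public:
  AllowAllPolicy() {}
  virtual ~AllowAllPolicy() {}
  virtual sandbox::ErrorCode EvaluateSyscall(
      sandbox::SandboxBPF* sandbox_compiler, int system_call_number) const {
    // Invalid numbers must not reach the rest of the policy logic.
    if (!sandbox::SandboxBPF::IsValidSyscallNumber(system_call_number)) {
      return sandbox::ErrorCode(ENOSYS);
    }
    return sandbox::ErrorCode(sandbox::ErrorCode::ERR_ALLOWED);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(AllowAllPolicy);
};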

(Diff not shown because of its large size.)


@ -0,0 +1,243 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sandbox/linux/seccomp-bpf/syscall.h"
#include <asm/unistd.h>
#include <errno.h>
#include "base/basictypes.h"
namespace sandbox {
asm( // We need to be able to tell the kernel exactly where we made a
// system call. The C++ compiler likes to sometimes clone or
// inline code, which would inadvertently end up duplicating
// the entry point.
// "gcc" can suppress code duplication with suitable function
// attributes, but "clang" doesn't have this ability.
// The "clang" developer mailing list suggested that the correct
// and portable solution is a file-scope assembly block.
// N.B. We do mark our code as a proper function so that backtraces
// work correctly. But we make absolutely no attempt to use the
// ABI's calling conventions for passing arguments. We will only
// ever be called from assembly code and thus can pick more
// suitable calling conventions.
#if defined(__i386__)
".text\n"
".align 16, 0x90\n"
".type SyscallAsm, @function\n"
"SyscallAsm:.cfi_startproc\n"
// Check if "%eax" is negative. If so, do not attempt to make a
// system call. Instead, compute the return address that is visible
// to the kernel after we execute "int $0x80". This address can be
// used as a marker that BPF code inspects.
"test %eax, %eax\n"
"jge 1f\n"
// Always make sure that our code is position-independent, or
// address space randomization might not work on i386. This means
// we can't use "lea", but instead have to rely on "call/pop".
"call 0f; .cfi_adjust_cfa_offset 4\n"
"0:pop %eax; .cfi_adjust_cfa_offset -4\n"
"addl $2f-0b, %eax\n"
"ret\n"
// Save registers that we don't want to clobber. On i386, we need to
// save relatively aggressively, as there are a couple of registers
// that are used internally (e.g. %ebx for position-independent
// code, and %ebp for the frame pointer), and as we need to keep at
// least a few registers available for the register allocator.
"1:push %esi; .cfi_adjust_cfa_offset 4\n"
"push %edi; .cfi_adjust_cfa_offset 4\n"
"push %ebx; .cfi_adjust_cfa_offset 4\n"
"push %ebp; .cfi_adjust_cfa_offset 4\n"
// Copy entries from the array holding the arguments into the
// correct CPU registers.
"movl 0(%edi), %ebx\n"
"movl 4(%edi), %ecx\n"
"movl 8(%edi), %edx\n"
"movl 12(%edi), %esi\n"
"movl 20(%edi), %ebp\n"
"movl 16(%edi), %edi\n"
// Enter the kernel.
"int $0x80\n"
// This is our "magic" return address that the BPF filter sees.
"2:"
// Restore any clobbered registers that we didn't declare to the
// compiler.
"pop %ebp; .cfi_adjust_cfa_offset -4\n"
"pop %ebx; .cfi_adjust_cfa_offset -4\n"
"pop %edi; .cfi_adjust_cfa_offset -4\n"
"pop %esi; .cfi_adjust_cfa_offset -4\n"
"ret\n"
".cfi_endproc\n"
"9:.size SyscallAsm, 9b-SyscallAsm\n"
#elif defined(__x86_64__)
".text\n"
".align 16, 0x90\n"
".type SyscallAsm, @function\n"
"SyscallAsm:.cfi_startproc\n"
// Check if "%rax" is negative. If so, do not attempt to make a
// system call. Instead, compute the return address that is visible
// to the kernel after we execute "syscall". This address can be
// used as a marker that BPF code inspects.
"test %rax, %rax\n"
"jge 1f\n"
// Always make sure that our code is position-independent, or the
// linker will throw a hissy fit on x86-64.
"call 0f; .cfi_adjust_cfa_offset 8\n"
"0:pop %rax; .cfi_adjust_cfa_offset -8\n"
"addq $2f-0b, %rax\n"
"ret\n"
// We declared all clobbered registers to the compiler. On x86-64,
// there really isn't much of a problem with register pressure. So,
// we can go ahead and directly copy the entries from the arguments
// array into the appropriate CPU registers.
"1:movq 0(%r12), %rdi\n"
"movq 8(%r12), %rsi\n"
"movq 16(%r12), %rdx\n"
"movq 24(%r12), %r10\n"
"movq 32(%r12), %r8\n"
"movq 40(%r12), %r9\n"
// Enter the kernel.
"syscall\n"
// This is our "magic" return address that the BPF filter sees.
"2:ret\n"
".cfi_endproc\n"
"9:.size SyscallAsm, 9b-SyscallAsm\n"
#elif defined(__arm__)
// Throughout this file, we use the same mode (ARM vs. thumb)
// that the C++ compiler uses. This means, when transferring control
// from C++ to assembly code, we do not need to switch modes (e.g.
// by using the "bx" instruction). It also means that our assembly
// code should not be invoked directly from code that lives in
// other compilation units, as we don't bother implementing thumb
// interworking. That's OK, as we don't make any of the assembly
// symbols public. They are all local to this file.
".text\n"
".align 2\n"
".type SyscallAsm, %function\n"
#if defined(__thumb__)
".thumb_func\n"
#else
".arm\n"
#endif
"SyscallAsm:.fnstart\n"
"@ args = 0, pretend = 0, frame = 8\n"
"@ frame_needed = 1, uses_anonymous_args = 0\n"
#if defined(__thumb__)
".cfi_startproc\n"
"push {r7, lr}\n"
".cfi_offset 14, -4\n"
".cfi_offset 7, -8\n"
"mov r7, sp\n"
".cfi_def_cfa_register 7\n"
".cfi_def_cfa_offset 8\n"
#else
"stmfd sp!, {fp, lr}\n"
"add fp, sp, #4\n"
#endif
// Check if "r0" is negative. If so, do not attempt to make a
// system call. Instead, compute the return address that is visible
// to the kernel after we execute "swi 0". This address can be
// used as a marker that BPF code inspects.
"cmp r0, #0\n"
"bge 1f\n"
"adr r0, 2f\n"
"b 2f\n"
// We declared (almost) all clobbered registers to the compiler. On
// ARM there is no particular register pressure. So, we can go
// ahead and directly copy the entries from the arguments array
// into the appropriate CPU registers.
"1:ldr r5, [r6, #20]\n"
"ldr r4, [r6, #16]\n"
"ldr r3, [r6, #12]\n"
"ldr r2, [r6, #8]\n"
"ldr r1, [r6, #4]\n"
"mov r7, r0\n"
"ldr r0, [r6, #0]\n"
// Enter the kernel
"swi 0\n"
// Restore the frame pointer. Also restore the program counter from
// the link register; this makes us return to the caller.
#if defined(__thumb__)
"2:pop {r7, pc}\n"
".cfi_endproc\n"
#else
"2:ldmfd sp!, {fp, pc}\n"
#endif
".fnend\n"
"9:.size SyscallAsm, 9b-SyscallAsm\n"
#endif
); // asm
intptr_t SandboxSyscall(int nr,
intptr_t p0, intptr_t p1, intptr_t p2,
intptr_t p3, intptr_t p4, intptr_t p5) {
// We rely on "intptr_t" to be exactly the same size as a "void *". This is
// typically true, but just in case, we add a check. The language
// specification allows platforms some leeway in cases where
// "sizeof(void *)" is not the same as "sizeof(void (*)())". We expect
// that this would only be an issue for IA64, which we are currently not
// planning on supporting. And it is even possible that this would work
// on IA64, but for lack of actual hardware, I cannot test.
COMPILE_ASSERT(sizeof(void *) == sizeof(intptr_t),
pointer_types_and_intptr_must_be_exactly_the_same_size);
const intptr_t args[6] = { p0, p1, p2, p3, p4, p5 };
// Invoke our file-scope assembly code. The constraints have been picked
// carefully to match what the rest of the assembly code expects in input,
// output, and clobbered registers.
#if defined(__i386__)
intptr_t ret = nr;
asm volatile(
"call SyscallAsm\n"
// N.B. These are not the calling conventions normally used by the ABI.
: "=a"(ret)
: "0"(ret), "D"(args)
: "cc", "esp", "memory", "ecx", "edx");
#elif defined(__x86_64__)
intptr_t ret = nr;
{
register const intptr_t *data __asm__("r12") = args;
asm volatile(
"lea -128(%%rsp), %%rsp\n" // Avoid red zone.
"call SyscallAsm\n"
"lea 128(%%rsp), %%rsp\n"
// N.B. These are not the calling conventions normally used by the ABI.
: "=a"(ret)
: "0"(ret), "r"(data)
: "cc", "rsp", "memory",
"rcx", "rdi", "rsi", "rdx", "r8", "r9", "r10", "r11");
}
#elif defined(__arm__)
intptr_t ret;
{
register intptr_t inout __asm__("r0") = nr;
register const intptr_t *data __asm__("r6") = args;
asm volatile(
"bl SyscallAsm\n"
// N.B. These are not the calling conventions normally used by the ABI.
: "=r"(inout)
: "0"(inout), "r"(data)
: "cc", "lr", "memory", "r1", "r2", "r3", "r4", "r5"
#if !defined(__thumb__)
// In thumb mode, we cannot use "r7" as a general purpose register, as
// it is our frame pointer. We have to manually manage and preserve it.
// In ARM mode, we have a dedicated frame pointer register and "r7" is
// thus available as a general purpose register. We don't preserve it,
// but instead mark it as clobbered.
, "r7"
#endif // !defined(__thumb__)
);
ret = inout;
}
#else
errno = ENOSYS;
intptr_t ret = -1;
#endif
return ret;
}
} // namespace sandbox


@ -0,0 +1,148 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_H__
#define SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_H__
#include <stdint.h>
#include "sandbox/linux/sandbox_export.h"
namespace sandbox {
// We have to make sure that we have a single "magic" return address for
// our system calls, which we can check from within a BPF filter. This
// works by writing a little bit of asm() code that a) enters the kernel, and
// that also b) can be invoked in a way that computes this return address.
// Passing "nr" as "-1" computes the "magic" return address. Passing any
// other value invokes the appropriate system call.
SANDBOX_EXPORT intptr_t SandboxSyscall(int nr,
intptr_t p0,
intptr_t p1,
intptr_t p2,
intptr_t p3,
intptr_t p4,
intptr_t p5);
// System calls can take up to six parameters. Traditionally, glibc
// implements this property by using variadic argument lists. This works, but
// confuses modern tools such as valgrind, because we are nominally passing
// uninitialized data whenever we call through this function and pass less
// than the full six arguments.
// So, instead, we use C++'s template system to achieve a very similar
// effect. C++ automatically sets the unused parameters to zero for us, and
// it also does the correct type expansion (e.g. from 32bit to 64bit) where
// necessary.
// We have to use C-style cast operators as we want to be able to accept both
// integer and pointer types.
// We explicitly mark all functions as inline. This is not necessary in
// optimized builds, where the compiler automatically figures out that it
// can inline everything. But it makes stack traces of unoptimized builds
// easier to read as it hides implementation details.
#if __cplusplus >= 201103 // C++11
template <class T0 = intptr_t,
class T1 = intptr_t,
class T2 = intptr_t,
class T3 = intptr_t,
class T4 = intptr_t,
class T5 = intptr_t>
SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr,
T0 p0 = 0,
T1 p1 = 0,
T2 p2 = 0,
T3 p3 = 0,
T4 p4 = 0,
T5 p5 = 0)
__attribute__((always_inline));
template <class T0, class T1, class T2, class T3, class T4, class T5>
SANDBOX_EXPORT inline intptr_t
SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5) {
return SandboxSyscall(nr,
(intptr_t)p0,
(intptr_t)p1,
(intptr_t)p2,
(intptr_t)p3,
(intptr_t)p4,
(intptr_t)p5);
}
#else // Pre-C++11
// TODO(markus): C++11 has a much more concise and readable solution for
// expressing what we are doing here. Delete the fall-back code for older
// compilers as soon as we have fully switched to C++11
template <class T0, class T1, class T2, class T3, class T4, class T5>
SANDBOX_EXPORT inline intptr_t
SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5)
__attribute__((always_inline));
template <class T0, class T1, class T2, class T3, class T4, class T5>
SANDBOX_EXPORT inline intptr_t
SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5) {
return SandboxSyscall(nr,
(intptr_t)p0,
(intptr_t)p1,
(intptr_t)p2,
(intptr_t)p3,
(intptr_t)p4,
(intptr_t)p5);
}
template <class T0, class T1, class T2, class T3, class T4>
SANDBOX_EXPORT inline intptr_t
SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4)
__attribute__((always_inline));
template <class T0, class T1, class T2, class T3, class T4>
SANDBOX_EXPORT inline intptr_t
SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4) {
return SandboxSyscall(nr, p0, p1, p2, p3, p4, 0);
}
template <class T0, class T1, class T2, class T3>
SANDBOX_EXPORT inline intptr_t
SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3)
__attribute__((always_inline));
template <class T0, class T1, class T2, class T3>
SANDBOX_EXPORT inline intptr_t
SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3) {
return SandboxSyscall(nr, p0, p1, p2, p3, 0, 0);
}
template <class T0, class T1, class T2>
SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2)
__attribute__((always_inline));
template <class T0, class T1, class T2>
SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2) {
return SandboxSyscall(nr, p0, p1, p2, 0, 0, 0);
}
template <class T0, class T1>
SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1)
__attribute__((always_inline));
template <class T0, class T1>
SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1) {
return SandboxSyscall(nr, p0, p1, 0, 0, 0, 0);
}
template <class T0>
SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0)
__attribute__((always_inline));
template <class T0>
SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0) {
return SandboxSyscall(nr, p0, 0, 0, 0, 0, 0);
}
SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr)
__attribute__((always_inline));
SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr) {
return SandboxSyscall(nr, 0, 0, 0, 0, 0, 0);
}
#endif // Pre-C++11
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_H__
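A short usage sketch of these wrappers, mirroring the unit tests below (illustrative only):

#include <asm/unistd.h>

#include "sandbox/linux/seccomp-bpf/syscall.h"

void SyscallSketch() {
  // Unused trailing arguments can simply be omitted; the templates fill
  // them in as zero-valued intptr_t parameters.
  intptr_t pid = sandbox::SandboxSyscall(__NR_getpid);
  // Passing -1 does not enter the kernel; it returns the "magic" address
  // right after the kernel entry point, which BPF filters can check.
  intptr_t magic = sandbox::SandboxSyscall(-1);
  (void)pid;
  (void)magic;
}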


@ -0,0 +1,92 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
#include "base/basictypes.h"
#include "sandbox/linux/seccomp-bpf/linux_seccomp.h"
namespace sandbox {
uint32_t SyscallIterator::Next() {
if (done_) {
return num_;
}
uint32_t val;
do {
// |num_| has been initialized to 0, which we assume is also MIN_SYSCALL.
// This is true for supported architectures (Intel and ARM EABI).
COMPILE_ASSERT(MIN_SYSCALL == 0u, min_syscall_should_always_be_zero);
val = num_;
// First we iterate up to MAX_PUBLIC_SYSCALL, which is equal to MAX_SYSCALL
// on Intel architectures, but leaves room for private syscalls on ARM.
if (num_ <= MAX_PUBLIC_SYSCALL) {
if (invalid_only_ && num_ < MAX_PUBLIC_SYSCALL) {
num_ = MAX_PUBLIC_SYSCALL;
} else {
++num_;
}
#if defined(__arm__)
// ARM EABI includes "ARM private" system calls starting at
// MIN_PRIVATE_SYSCALL, and a "ghost syscall private to the kernel" at
// MIN_GHOST_SYSCALL.
} else if (num_ < MIN_PRIVATE_SYSCALL - 1) {
num_ = MIN_PRIVATE_SYSCALL - 1;
} else if (num_ <= MAX_PRIVATE_SYSCALL) {
if (invalid_only_ && num_ < MAX_PRIVATE_SYSCALL) {
num_ = MAX_PRIVATE_SYSCALL;
} else {
++num_;
}
} else if (num_ < MIN_GHOST_SYSCALL - 1) {
num_ = MIN_GHOST_SYSCALL - 1;
} else if (num_ <= MAX_SYSCALL) {
if (invalid_only_ && num_ < MAX_SYSCALL) {
num_ = MAX_SYSCALL;
} else {
++num_;
}
#endif
// BPF programs only ever operate on unsigned quantities. So, that's how
// we iterate; we return values from 0..0xFFFFFFFFu. But there are places
// where the kernel might interpret system call numbers as signed
// quantities, so the boundaries between signed and unsigned values are
// potential problem cases. We want to explicitly return these values from
// our iterator.
} else if (num_ < 0x7FFFFFFFu) {
num_ = 0x7FFFFFFFu;
} else if (num_ < 0x80000000u) {
num_ = 0x80000000u;
} else if (num_ < 0xFFFFFFFFu) {
num_ = 0xFFFFFFFFu;
}
} while (invalid_only_ && IsValid(val));
done_ |= val == 0xFFFFFFFFu;
return val;
}
bool SyscallIterator::IsValid(uint32_t num) {
uint32_t min_syscall = MIN_SYSCALL;
if (num >= min_syscall && num <= MAX_PUBLIC_SYSCALL) {
return true;
}
if (IsArmPrivate(num)) {
return true;
}
return false;
}
#if defined(__arm__) && (defined(__thumb__) || defined(__ARM_EABI__))
bool SyscallIterator::IsArmPrivate(uint32_t num) {
return (num >= MIN_PRIVATE_SYSCALL && num <= MAX_PRIVATE_SYSCALL) ||
(num >= MIN_GHOST_SYSCALL && num <= MAX_SYSCALL);
}
#else
bool SyscallIterator::IsArmPrivate(uint32_t) { return false; }
#endif
} // namespace sandbox


@ -0,0 +1,56 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_ITERATOR_H__
#define SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_ITERATOR_H__
#include <stdint.h>
#include "base/basictypes.h"
#include "sandbox/linux/sandbox_export.h"
namespace sandbox {
// Iterates over the entire system call range from 0..0xFFFFFFFFu. This
// iterator is aware of what system calls look like and will skip quickly
// over ranges that can't contain system calls. It iterates more slowly
// whenever it reaches a range that is potentially problematic, returning
// the last invalid value before a valid range of system calls, and the
// first invalid value after a valid range of syscalls. It iterates over
// individual values whenever it is in the normal range for system calls
// (typically MIN_SYSCALL..MAX_SYSCALL).
// If |invalid_only| is true, this iterator will only return invalid
// syscall numbers, but will still skip quickly over invalid ranges,
// returning the first invalid value in the range and then skipping
// to the last invalid value in the range.
//
// Example usage:
// for (SyscallIterator iter(false); !iter.Done(); ) {
// uint32_t sysnum = iter.Next();
// // Do something with sysnum.
// }
//
// TODO(markus): Make this a classic C++ iterator.
class SANDBOX_EXPORT SyscallIterator {
public:
explicit SyscallIterator(bool invalid_only)
: invalid_only_(invalid_only), done_(false), num_(0) {}
bool Done() const { return done_; }
uint32_t Next();
static bool IsValid(uint32_t num);
private:
static bool IsArmPrivate(uint32_t num);
bool invalid_only_;
bool done_;
uint32_t num_;
DISALLOW_IMPLICIT_CONSTRUCTORS(SyscallIterator);
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_ITERATOR_H__


@ -0,0 +1,136 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
#include "sandbox/linux/tests/unit_tests.h"
namespace sandbox {
namespace {
SANDBOX_TEST(SyscallIterator, Monotonous) {
for (int i = 0; i < 2; ++i) {
bool invalid_only = !i; // Testing both |invalid_only| cases.
SyscallIterator iter(invalid_only);
uint32_t next = iter.Next();
if (!invalid_only) {
// The iterator should start at 0.
SANDBOX_ASSERT(next == 0);
}
for (uint32_t last = next; !iter.Done(); last = next) {
next = iter.Next();
SANDBOX_ASSERT(last < next);
}
// The iterator should always return 0xFFFFFFFFu as the last value.
SANDBOX_ASSERT(next == 0xFFFFFFFFu);
}
}
SANDBOX_TEST(SyscallIterator, PublicSyscallRange) {
SyscallIterator iter(false);
uint32_t next = iter.Next();
// The iterator should cover the public syscall range
// MIN_SYSCALL..MAX_PUBLIC_SYSCALL, without skipping syscalls.
// We're assuming MIN_SYSCALL == 0 for all architectures;
// this is currently valid for Intel and ARM EABI.
SANDBOX_ASSERT(MIN_SYSCALL == 0);
SANDBOX_ASSERT(next == MIN_SYSCALL);
for (uint32_t last = next; next < MAX_PUBLIC_SYSCALL + 1; last = next) {
SANDBOX_ASSERT((next = iter.Next()) == last + 1);
}
SANDBOX_ASSERT(next == MAX_PUBLIC_SYSCALL + 1);
}
#if defined(__arm__)
SANDBOX_TEST(SyscallIterator, ARMPrivateSyscallRange) {
SyscallIterator iter(false);
uint32_t next = iter.Next();
while (next < MIN_PRIVATE_SYSCALL - 1) {
next = iter.Next();
}
// The iterator should cover the ARM private syscall range
// without skipping syscalls.
SANDBOX_ASSERT(next == MIN_PRIVATE_SYSCALL - 1);
for (uint32_t last = next; next < MAX_PRIVATE_SYSCALL + 1; last = next) {
SANDBOX_ASSERT((next = iter.Next()) == last + 1);
}
SANDBOX_ASSERT(next == MAX_PRIVATE_SYSCALL + 1);
}
SANDBOX_TEST(SyscallIterator, ARMHiddenSyscallRange) {
SyscallIterator iter(false);
uint32_t next = iter.Next();
while (next < MIN_GHOST_SYSCALL - 1) {
next = iter.Next();
}
// The iterator should cover the ARM hidden syscall range
// without skipping syscalls.
SANDBOX_ASSERT(next == MIN_GHOST_SYSCALL - 1);
for (uint32_t last = next; next < MAX_SYSCALL + 1; last = next) {
SANDBOX_ASSERT((next = iter.Next()) == last + 1);
}
SANDBOX_ASSERT(next == MAX_SYSCALL + 1);
}
#endif
SANDBOX_TEST(SyscallIterator, Invalid) {
for (int i = 0; i < 2; ++i) {
bool invalid_only = !i; // Testing both |invalid_only| cases.
SyscallIterator iter(invalid_only);
uint32_t next = iter.Next();
while (next < MAX_SYSCALL + 1) {
next = iter.Next();
}
SANDBOX_ASSERT(next == MAX_SYSCALL + 1);
while (next < 0x7FFFFFFFu) {
next = iter.Next();
}
// The iterator should return the signed/unsigned corner cases.
SANDBOX_ASSERT(next == 0x7FFFFFFFu);
next = iter.Next();
SANDBOX_ASSERT(next == 0x80000000u);
SANDBOX_ASSERT(!iter.Done());
next = iter.Next();
SANDBOX_ASSERT(iter.Done());
SANDBOX_ASSERT(next == 0xFFFFFFFFu);
}
}
SANDBOX_TEST(SyscallIterator, InvalidOnly) {
bool invalid_only = true;
SyscallIterator iter(invalid_only);
uint32_t next = iter.Next();
// We're assuming MIN_SYSCALL == 0 for all architectures;
// this is currently valid for Intel and ARM EABI.
// First invalid syscall should then be |MAX_PUBLIC_SYSCALL + 1|.
SANDBOX_ASSERT(MIN_SYSCALL == 0);
SANDBOX_ASSERT(next == MAX_PUBLIC_SYSCALL + 1);
#if defined(__arm__)
next = iter.Next();
// The iterator should skip until the last invalid syscall in this range.
SANDBOX_ASSERT(next == MIN_PRIVATE_SYSCALL - 1);
while (next <= MAX_PRIVATE_SYSCALL) {
next = iter.Next();
}
next = iter.Next();
// The iterator should skip until the last invalid syscall in this range.
SANDBOX_ASSERT(next == MIN_GHOST_SYSCALL - 1);
while (next <= MAX_SYSCALL) {
next = iter.Next();
}
SANDBOX_ASSERT(next == MAX_SYSCALL + 1);
#endif
}
} // namespace
} // namespace sandbox


@ -0,0 +1,201 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <asm/unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <vector>
#include "base/basictypes.h"
#include "base/posix/eintr_wrapper.h"
#include "sandbox/linux/seccomp-bpf/bpf_tests.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/syscall.h"
#include "sandbox/linux/tests/unit_tests.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace sandbox {
namespace {
// Different platforms use different symbols for the six-argument version
// of the mmap() system call. Test for the correct symbol at compile time.
#ifdef __NR_mmap2
const int kMMapNr = __NR_mmap2;
#else
const int kMMapNr = __NR_mmap;
#endif
TEST(Syscall, WellKnownEntryPoint) {
// Test that SandboxSyscall(-1) is handled specially. Don't do this on ARM,
// where syscall(-1) crashes with SIGILL. Not running the test is fine, as we
// are still testing ARM code in the next set of tests.
#if !defined(__arm__)
EXPECT_NE(SandboxSyscall(-1), syscall(-1));
#endif
// If possible, test that SandboxSyscall(-1) returns the address right after
// a kernel entry point.
#if defined(__i386__)
EXPECT_EQ(0x80CDu, ((uint16_t*)SandboxSyscall(-1))[-1]); // INT 0x80
#elif defined(__x86_64__)
EXPECT_EQ(0x050Fu, ((uint16_t*)SandboxSyscall(-1))[-1]); // SYSCALL
#elif defined(__arm__)
#if defined(__thumb__)
EXPECT_EQ(0xDF00u, ((uint16_t*)SandboxSyscall(-1))[-1]); // SWI 0
#else
EXPECT_EQ(0xEF000000u, ((uint32_t*)SandboxSyscall(-1))[-1]); // SVC 0
#endif
#else
#warning Incomplete test case; need port for target platform
#endif
}
TEST(Syscall, TrivialSyscallNoArgs) {
// Test that we can do basic system calls
EXPECT_EQ(SandboxSyscall(__NR_getpid), syscall(__NR_getpid));
}
TEST(Syscall, TrivialSyscallOneArg) {
int new_fd;
// Duplicate standard error and close it.
ASSERT_GE(new_fd = SandboxSyscall(__NR_dup, 2), 0);
int close_return_value = IGNORE_EINTR(SandboxSyscall(__NR_close, new_fd));
ASSERT_EQ(close_return_value, 0);
}
// SIGSYS trap handler that will be called on __NR_uname.
intptr_t CopySyscallArgsToAux(const struct arch_seccomp_data& args, void* aux) {
// |aux| is a pointer to our BPF_AUX.
std::vector<uint64_t>* const seen_syscall_args =
static_cast<std::vector<uint64_t>*>(aux);
BPF_ASSERT(arraysize(args.args) == 6);
seen_syscall_args->assign(args.args, args.args + arraysize(args.args));
return -ENOMEM;
}
ErrorCode CopyAllArgsOnUnamePolicy(SandboxBPF* sandbox, int sysno, void* aux) {
if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
return ErrorCode(ENOSYS);
}
if (sysno == __NR_uname) {
return sandbox->Trap(CopySyscallArgsToAux, aux);
} else {
return ErrorCode(ErrorCode::ERR_ALLOWED);
}
}
// We are testing SandboxSyscall() by making use of a BPF filter that allows us
// to inspect the system call arguments that the kernel saw.
BPF_TEST(Syscall,
SyntheticSixArgs,
CopyAllArgsOnUnamePolicy,
std::vector<uint64_t> /* BPF_AUX */) {
const int kExpectedValue = 42;
// In this test we only pass integers to the kernel. We might want to make
// additional tests to try other types. What we will see depends on
// implementation details of kernel BPF filters and we will need to document
// the expected behavior very clearly.
int syscall_args[6];
for (size_t i = 0; i < arraysize(syscall_args); ++i) {
syscall_args[i] = kExpectedValue + i;
}
// We could use pretty much any system call we don't need here. uname() is
// nice because it doesn't have any dangerous side effects.
BPF_ASSERT(SandboxSyscall(__NR_uname,
syscall_args[0],
syscall_args[1],
syscall_args[2],
syscall_args[3],
syscall_args[4],
syscall_args[5]) == -ENOMEM);
// We expect the trap handler to have copied the 6 arguments.
BPF_ASSERT(BPF_AUX.size() == 6);
// Don't loop here, so that we can easily see from the failing line which
// argument caused the failure.
// uint64_t is the type passed to our SIGSYS handler.
BPF_ASSERT(BPF_AUX[0] == static_cast<uint64_t>(syscall_args[0]));
BPF_ASSERT(BPF_AUX[1] == static_cast<uint64_t>(syscall_args[1]));
BPF_ASSERT(BPF_AUX[2] == static_cast<uint64_t>(syscall_args[2]));
BPF_ASSERT(BPF_AUX[3] == static_cast<uint64_t>(syscall_args[3]));
BPF_ASSERT(BPF_AUX[4] == static_cast<uint64_t>(syscall_args[4]));
BPF_ASSERT(BPF_AUX[5] == static_cast<uint64_t>(syscall_args[5]));
}
TEST(Syscall, ComplexSyscallSixArgs) {
int fd;
ASSERT_LE(0, fd = SandboxSyscall(__NR_open, "/dev/null", O_RDWR, 0L));
// Use mmap() to allocate some read-only memory
char* addr0;
ASSERT_NE((char*)NULL,
addr0 = reinterpret_cast<char*>(
SandboxSyscall(kMMapNr,
(void*)NULL,
4096,
PROT_READ,
MAP_PRIVATE | MAP_ANONYMOUS,
fd,
0L)));
// Try to replace the existing mapping with a read-write mapping
char* addr1;
ASSERT_EQ(addr0,
addr1 = reinterpret_cast<char*>(
SandboxSyscall(kMMapNr,
addr0,
4096L,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
fd,
0L)));
++*addr1; // This should not segfault
// Clean up
EXPECT_EQ(0, SandboxSyscall(__NR_munmap, addr1, 4096L));
EXPECT_EQ(0, IGNORE_EINTR(SandboxSyscall(__NR_close, fd)));
// Check that the offset argument (i.e. the sixth argument) is processed
// correctly.
ASSERT_GE(fd = SandboxSyscall(__NR_open, "/proc/self/exe", O_RDONLY, 0L), 0);
char* addr2, *addr3;
ASSERT_NE((char*)NULL,
addr2 = reinterpret_cast<char*>(SandboxSyscall(
kMMapNr, (void*)NULL, 8192L, PROT_READ, MAP_PRIVATE, fd, 0L)));
ASSERT_NE((char*)NULL,
addr3 = reinterpret_cast<char*>(SandboxSyscall(kMMapNr,
(void*)NULL,
4096L,
PROT_READ,
MAP_PRIVATE,
fd,
#if defined(__NR_mmap2)
1L
#else
4096L
#endif
)));
EXPECT_EQ(0, memcmp(addr2 + 4096, addr3, 4096));
// Just to be absolutely on the safe side, also verify that the file
// contents matches what we are getting from a read() operation.
char buf[8192];
EXPECT_EQ(8192, SandboxSyscall(__NR_read, fd, buf, 8192L));
EXPECT_EQ(0, memcmp(addr2, buf, 8192));
// Clean up
EXPECT_EQ(0, SandboxSyscall(__NR_munmap, addr2, 8192L));
EXPECT_EQ(0, SandboxSyscall(__NR_munmap, addr3, 4096L));
EXPECT_EQ(0, IGNORE_EINTR(SandboxSyscall(__NR_close, fd)));
}
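// Illustrative sketch, not part of the imported file: mmap2() takes its file
// offset in 4096-byte units rather than in bytes, which is why the test above
// passes 1L under __NR_mmap2 where plain mmap() would take 4096L.
long MMapOffsetArgument(long byte_offset) {
#ifdef __NR_mmap2
  return byte_offset / 4096;  // mmap2: offset counted in 4096-byte units.
#else
  return byte_offset;  // mmap: offset counted in bytes.
#endif
}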
} // namespace
} // namespace sandbox

@ -0,0 +1,357 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sandbox/linux/seccomp-bpf/trap.h"
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <limits>
#include "base/logging.h"
#include "sandbox/linux/seccomp-bpf/codegen.h"
#include "sandbox/linux/seccomp-bpf/die.h"
#include "sandbox/linux/seccomp-bpf/syscall.h"
// Android's signal.h doesn't define ucontext etc.
#if defined(OS_ANDROID)
#include "sandbox/linux/services/android_ucontext.h"
#endif
namespace {
const int kCapacityIncrement = 20;
// Unsafe traps can only be turned on if the user explicitly allowed them
// by setting the CHROME_SANDBOX_DEBUGGING environment variable.
const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
// We need to tell whether we are performing a "normal" callback, or
// whether we were called recursively from within a UnsafeTrap() callback.
// This is a little tricky to do, because we need to somehow get access to
// per-thread data from within a signal context. Normal TLS storage is not
// safely accessible at this time. We could roll our own, but that involves
// a lot of complexity. Instead, we co-opt one bit in the signal mask.
// If SIGBUS is blocked, we assume that we have been called recursively.
// There is a possibility for collision with other code that needs to do
// this, but in practice the risks are low.
// If SIGBUS turns out to be a problem, we could instead co-opt one of the
// realtime signals. There are plenty of them. Unfortunately, there is no
// way to mark a signal as allocated. So, the potential for collision is
// possibly even worse.
bool GetIsInSigHandler(const ucontext_t* ctx) {
// Note: on Android, sigismember does not take a pointer to const.
return sigismember(const_cast<sigset_t*>(&ctx->uc_sigmask), SIGBUS);
}
void SetIsInSigHandler() {
sigset_t mask;
if (sigemptyset(&mask) || sigaddset(&mask, SIGBUS) ||
sigprocmask(SIG_BLOCK, &mask, NULL)) {
SANDBOX_DIE("Failed to block SIGBUS");
}
}
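// Illustrative sketch, not part of the imported file: the mask that
// GetIsInSigHandler() inspects via the ucontext is the same per-thread mask
// that sigprocmask() manipulates. Outside of a signal handler, the co-opted
// bit could be queried directly:
bool SigBusCurrentlyBlocked() {
  sigset_t mask;
  if (sigprocmask(SIG_BLOCK, NULL, &mask)) {  // A NULL "set" only queries.
    return false;
  }
  return sigismember(&mask, SIGBUS) == 1;
}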
bool IsDefaultSignalAction(const struct sigaction& sa) {
if (sa.sa_flags & SA_SIGINFO || sa.sa_handler != SIG_DFL) {
return false;
}
return true;
}
} // namespace
namespace sandbox {
Trap::Trap()
: trap_array_(NULL),
trap_array_size_(0),
trap_array_capacity_(0),
has_unsafe_traps_(false) {
// Set new SIGSYS handler
struct sigaction sa = {};
sa.sa_sigaction = SigSysAction;
sa.sa_flags = SA_SIGINFO | SA_NODEFER;
struct sigaction old_sa;
if (sigaction(SIGSYS, &sa, &old_sa) < 0) {
SANDBOX_DIE("Failed to configure SIGSYS handler");
}
if (!IsDefaultSignalAction(old_sa)) {
static const char kExistingSIGSYSMsg[] =
"Existing signal handler when trying to install SIGSYS. SIGSYS needs "
"to be reserved for seccomp-bpf.";
DLOG(FATAL) << kExistingSIGSYSMsg;
LOG(ERROR) << kExistingSIGSYSMsg;
}
// Unmask SIGSYS
sigset_t mask;
if (sigemptyset(&mask) || sigaddset(&mask, SIGSYS) ||
sigprocmask(SIG_UNBLOCK, &mask, NULL)) {
SANDBOX_DIE("Failed to configure SIGSYS handler");
}
}
Trap* Trap::GetInstance() {
// Note: This class is not thread safe. It is the caller's responsibility
// to avoid race conditions. Normally, this is a non-issue as the sandbox
// can only be initialized if there are no other threads present.
// Also, this is not a normal singleton. Once created, the global trap
// object must never be destroyed again.
if (!global_trap_) {
global_trap_ = new Trap();
if (!global_trap_) {
SANDBOX_DIE("Failed to allocate global trap handler");
}
}
return global_trap_;
}
void Trap::SigSysAction(int nr, siginfo_t* info, void* void_context) {
if (!global_trap_) {
RAW_SANDBOX_DIE(
"This can't happen. Found no global singleton instance "
"for Trap() handling.");
}
global_trap_->SigSys(nr, info, void_context);
}
void Trap::SigSys(int nr, siginfo_t* info, void* void_context) {
// Signal handlers should always preserve "errno". Otherwise, we could
// trigger really subtle bugs.
const int old_errno = errno;
// Various sanity checks to make sure we actually received a signal
// triggered by a BPF filter. If something else triggered SIGSYS
// (e.g. kill()), there is really nothing we can do with this signal.
if (nr != SIGSYS || info->si_code != SYS_SECCOMP || !void_context ||
info->si_errno <= 0 ||
static_cast<size_t>(info->si_errno) > trap_array_size_) {
// ATI drivers seem to send SIGSYS, so this cannot be FATAL.
// See crbug.com/178166.
// TODO(jln): add a DCHECK or move back to FATAL.
RAW_LOG(ERROR, "Unexpected SIGSYS received.");
errno = old_errno;
return;
}
// Obtain the signal context. This, most notably, gives us access to
// all CPU registers at the time of the signal.
ucontext_t* ctx = reinterpret_cast<ucontext_t*>(void_context);
// Obtain the siginfo information that is specific to SIGSYS. Unfortunately,
// most versions of glibc don't include this information in siginfo_t. So,
// we need to explicitly copy it into an arch_sigsys structure.
struct arch_sigsys sigsys;
memcpy(&sigsys, &info->_sifields, sizeof(sigsys));
// Some more sanity checks.
if (sigsys.ip != reinterpret_cast<void*>(SECCOMP_IP(ctx)) ||
sigsys.nr != static_cast<int>(SECCOMP_SYSCALL(ctx)) ||
sigsys.arch != SECCOMP_ARCH) {
// TODO(markus):
// SANDBOX_DIE() can call LOG(FATAL). This is not normally async-signal
// safe and can lead to bugs. We should eventually implement a different
// logging and reporting mechanism that is safe to be called from
// the SigSys() handler.
RAW_SANDBOX_DIE("Sanity checks are failing after receiving SIGSYS.");
}
intptr_t rc;
if (has_unsafe_traps_ && GetIsInSigHandler(ctx)) {
errno = old_errno;
if (sigsys.nr == __NR_clone) {
RAW_SANDBOX_DIE("Cannot call clone() from an UnsafeTrap() handler.");
}
rc = SandboxSyscall(sigsys.nr,
SECCOMP_PARM1(ctx),
SECCOMP_PARM2(ctx),
SECCOMP_PARM3(ctx),
SECCOMP_PARM4(ctx),
SECCOMP_PARM5(ctx),
SECCOMP_PARM6(ctx));
} else {
const ErrorCode& err = trap_array_[info->si_errno - 1];
if (!err.safe_) {
SetIsInSigHandler();
}
// Copy the seccomp-specific data into an arch_seccomp_data structure. This
// is what we are showing to TrapFnc callbacks that the system call
// evaluator registered with the sandbox.
struct arch_seccomp_data data = {
sigsys.nr, SECCOMP_ARCH, reinterpret_cast<uint64_t>(sigsys.ip),
{static_cast<uint64_t>(SECCOMP_PARM1(ctx)),
static_cast<uint64_t>(SECCOMP_PARM2(ctx)),
static_cast<uint64_t>(SECCOMP_PARM3(ctx)),
static_cast<uint64_t>(SECCOMP_PARM4(ctx)),
static_cast<uint64_t>(SECCOMP_PARM5(ctx)),
static_cast<uint64_t>(SECCOMP_PARM6(ctx))}};
// Now call the TrapFnc callback associated with this particular instance
// of SECCOMP_RET_TRAP.
rc = err.fnc_(data, err.aux_);
}
// Update the CPU register that stores the return code of the system call
// that we just handled, and restore "errno" to the value that it had
// before entering the signal handler.
SECCOMP_RESULT(ctx) = static_cast<greg_t>(rc);
errno = old_errno;
return;
}
bool Trap::TrapKey::operator<(const TrapKey& o) const {
if (fnc != o.fnc) {
return fnc < o.fnc;
} else if (aux != o.aux) {
return aux < o.aux;
} else {
return safe < o.safe;
}
}
ErrorCode Trap::MakeTrap(TrapFnc fnc, const void* aux, bool safe) {
return GetInstance()->MakeTrapImpl(fnc, aux, safe);
}
ErrorCode Trap::MakeTrapImpl(TrapFnc fnc, const void* aux, bool safe) {
if (!safe && !SandboxDebuggingAllowedByUser()) {
// Unless the user set the CHROME_SANDBOX_DEBUGGING environment variable,
// we never return an ErrorCode that is marked as "unsafe". This also
// means the BPF compiler will never emit code that allows unsafe system
// calls to bypass the filter (because they use the magic return address
// from SandboxSyscall(-1)).
// This SANDBOX_DIE() can optionally be removed. It won't break security,
// but it might make error messages from the BPF compiler a little harder
// to understand. Removing the SANDBOX_DIE() allows callers to easily check
// whether unsafe traps are supported (by checking whether the returned
// ErrorCode is ET_INVALID).
SANDBOX_DIE(
"Cannot use unsafe traps unless CHROME_SANDBOX_DEBUGGING "
"is enabled");
return ErrorCode();
}
// Each unique pair of TrapFnc and auxiliary data makes up a distinct instance
// of a SECCOMP_RET_TRAP.
TrapKey key(fnc, aux, safe);
TrapIds::const_iterator iter = trap_ids_.find(key);
// We return unique identifiers together with SECCOMP_RET_TRAP. This allows
// us to associate each trap with the appropriate handler. The kernel lets us
// use identifiers in the range from 0 to SECCOMP_RET_DATA (0xFFFF). We want
// to avoid 0, as it could be confused with a trap without any specific id.
// The nice thing about sequentially numbered identifiers is that we can also
// trivially look them up from our signal handler without making any system
// calls that might be async-signal-unsafe.
// In order to do so, we store all of our traps in a C-style trap_array_.
uint16_t id;
if (iter != trap_ids_.end()) {
// We have seen this pair before. Return the same id that we assigned
// earlier.
id = iter->second;
} else {
// This is a new pair. Remember it and assign a new id.
if (trap_array_size_ >= SECCOMP_RET_DATA /* 0xFFFF */ ||
trap_array_size_ >= std::numeric_limits<typeof(id)>::max()) {
// In practice, this is pretty much impossible to trigger, as there
// are other kernel limitations that restrict overall BPF program sizes.
SANDBOX_DIE("Too many SECCOMP_RET_TRAP callback instances");
}
id = trap_array_size_ + 1;
// Our callers ensure that there are no other threads accessing trap_array_
// concurrently (typically this is done by ensuring that we are single-
// threaded while the sandbox is being set up). But we nonetheless are
// modifying a live data structure that could be accessed any time a
// system call is made, as any system call could trigger SIGSYS.
// So, we have to be extra careful that we update trap_array_ atomically.
// In particular, this means we shouldn't be using realloc() to resize it.
// Instead, we allocate a new array, copy the values, and then switch the
// pointer. We only really care about the pointer being updated atomically
// and the data that is pointed to being valid, as these are the only
// values accessed from the signal handler. It is OK if trap_array_size_
// is inconsistent with the pointer, as it is monotonically increasing.
// Also, we only care about compiler barriers, as the signal handler is
// triggered synchronously from a system call. We don't have to protect
// against issues with the memory model or with completely asynchronous
// events.
if (trap_array_size_ >= trap_array_capacity_) {
trap_array_capacity_ += kCapacityIncrement;
ErrorCode* old_trap_array = trap_array_;
ErrorCode* new_trap_array = new ErrorCode[trap_array_capacity_];
// Language specs are unclear on whether the compiler is allowed to move
// the "delete[]" above our preceding assignments and/or memory moves,
// iff the compiler believes that "delete[]" doesn't have any other
// global side-effects.
// We insert optimization barriers to prevent this from happening.
// The first barrier is probably not needed, but better be explicit in
// what we want to tell the compiler.
// The clang developer mailing list couldn't answer whether this is a
// legitimate worry; but they at least thought that the barrier is
// sufficient to prevent the (so far hypothetical) problem of re-ordering
// of instructions by the compiler.
memcpy(new_trap_array, trap_array_, trap_array_size_ * sizeof(ErrorCode));
asm volatile("" : "=r"(new_trap_array) : "0"(new_trap_array) : "memory");
trap_array_ = new_trap_array;
asm volatile("" : "=r"(trap_array_) : "0"(trap_array_) : "memory");
delete[] old_trap_array;
}
trap_ids_[key] = id;
trap_array_[trap_array_size_] = ErrorCode(fnc, aux, safe, id);
return trap_array_[trap_array_size_++];
}
return ErrorCode(fnc, aux, safe, id);
}
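// Illustrative sketch, not part of the imported file: a hypothetical
// "deny everything" handler and its registration through MakeTrap(),
// following the -errno calling convention that trap.h documents for TrapFnc.
intptr_t DenyWithEperm(const struct arch_seccomp_data& args, void* aux) {
  return -EPERM;  // Async-signal-safe: no errno, no allocation, no locks.
}
ErrorCode MakeDenyTrap() {
  // "safe" is true, so this works without CHROME_SANDBOX_DEBUGGING.
  return Trap::MakeTrap(DenyWithEperm, NULL, true /* safe */);
}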
bool Trap::SandboxDebuggingAllowedByUser() const {
const char* debug_flag = getenv(kSandboxDebuggingEnv);
return debug_flag && *debug_flag;
}
bool Trap::EnableUnsafeTrapsInSigSysHandler() {
Trap* trap = GetInstance();
if (!trap->has_unsafe_traps_) {
// Unsafe traps are a one-way fuse. Once enabled, they can never be turned
// off again.
// We only allow enabling unsafe traps if the user explicitly set an
// appropriate environment variable. This prevents bugs that accidentally
// disable all sandboxing for all users.
if (trap->SandboxDebuggingAllowedByUser()) {
// We only ever print this message once, when we enable unsafe traps the
// first time.
SANDBOX_INFO("WARNING! Disabling sandbox for debugging purposes");
trap->has_unsafe_traps_ = true;
} else {
SANDBOX_INFO(
"Cannot disable sandbox and use unsafe traps unless "
"CHROME_SANDBOX_DEBUGGING is turned on first");
}
}
// Returns the (possibly updated) value of has_unsafe_traps_.
return trap->has_unsafe_traps_;
}
ErrorCode Trap::ErrorCodeFromTrapId(uint16_t id) {
if (global_trap_ && id > 0 && id <= global_trap_->trap_array_size_) {
return global_trap_->trap_array_[id - 1];
} else {
return ErrorCode();
}
}
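// Illustrative sketch, not part of the imported file: how a trap id
// round-trips. The BPF program returns SECCOMP_RET_TRAP with the id in its
// SECCOMP_RET_DATA bits; the kernel delivers that data in info->si_errno,
// which SigSys() uses to index trap_array_ as above.
uint32_t TrapReturnValueForId(uint16_t id) {
  return SECCOMP_RET_TRAP | id;  // The id occupies the low 16 (data) bits.
}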
Trap* Trap::global_trap_;
} // namespace sandbox

@ -0,0 +1,117 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_TRAP_H__
#define SANDBOX_LINUX_SECCOMP_BPF_TRAP_H__
#include <signal.h>
#include <stdint.h>
#include <map>
#include <vector>
#include "base/basictypes.h"
#include "sandbox/linux/sandbox_export.h"
namespace sandbox {
class ErrorCode;
// The Trap class allows a BPF filter program to branch out to user space by
// raising a SIGSYS signal.
// N.B.: This class does not perform any synchronization operations. If
// modifications are made to any of the traps, it is the caller's
// responsibility to ensure that this happens in a thread-safe fashion.
// Preferably, that means that no other threads should be running at that
// time. For the purposes of our sandbox, this assertion should always be
// true. Threads are incompatible with the seccomp sandbox anyway.
class SANDBOX_EXPORT Trap {
public:
// TrapFnc is a pointer to a function that handles Seccomp traps in
// user-space. The seccomp policy can request that a trap handler gets
// installed; it does so by returning a suitable ErrorCode() from the
// syscallEvaluator. See the ErrorCode() constructor for how to pass in
// the function pointer.
// Please note that TrapFnc is executed from signal context and must be
// async-signal safe:
// http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html
// Also note that it follows the calling convention of native system calls.
// In other words, it reports an error by returning an exit code in the
// range -1..-4096. It should not set errno when reporting errors; on the
// other hand, accidentally modifying errno is harmless and the changes will
// be undone afterwards.
typedef intptr_t (*TrapFnc)(const struct arch_seccomp_data& args, void* aux);
// Registers a new trap handler and sets up the appropriate SIGSYS handler
// as needed.
// N.B.: This makes a permanent state change. Traps cannot be unregistered,
// as that would break existing BPF filters that are still active.
static ErrorCode MakeTrap(TrapFnc fnc, const void* aux, bool safe);
// Enables support for unsafe traps in the SIGSYS signal handler. This is a
// one-way fuse. It works in conjunction with the BPF compiler emitting code
// that unconditionally allows system calls if they have a magic return
// address (i.e. SandboxSyscall(-1)).
// Once unsafe traps are enabled, the sandbox is essentially compromised.
// But this is still a very useful feature for debugging purposes. Use with
// care. This feature is available only if enabled by the user (see above).
// Returns "true", if unsafe traps were turned on.
static bool EnableUnsafeTrapsInSigSysHandler();
// Returns the ErrorCode associated with a particular trap id.
static ErrorCode ErrorCodeFromTrapId(uint16_t id);
private:
// The destructor is unimplemented. Don't ever attempt to destruct this
// object. It'll break subsequent system calls that trigger a SIGSYS.
~Trap();
struct TrapKey {
TrapKey(TrapFnc f, const void* a, bool s) : fnc(f), aux(a), safe(s) {}
TrapFnc fnc;
const void* aux;
bool safe;
bool operator<(const TrapKey&) const;
};
typedef std::map<TrapKey, uint16_t> TrapIds;
// We only have a very small number of methods. We opt to make them static
// and have them internally call GetInstance(). This is a little more
// convenient than having each caller obtain a short-lived reference to the
// singleton.
// It also gracefully deals with methods that should check for the singleton,
// but avoid instantiating it if it doesn't exist yet
// (e.g. ErrorCodeFromTrapId()).
static Trap* GetInstance();
static void SigSysAction(int nr, siginfo_t* info, void* void_context);
// Make sure that SigSys is not inlined in order to get slightly better crash
// dumps.
void SigSys(int nr, siginfo_t* info, void* void_context)
__attribute__((noinline));
ErrorCode MakeTrapImpl(TrapFnc fnc, const void* aux, bool safe);
bool SandboxDebuggingAllowedByUser() const;
// We have a global singleton that handles all of our SIGSYS traps. This
// variable must never be deallocated after it has been set up initially, as
// there is no way to reset in-kernel BPF filters that generate SIGSYS
// events.
static Trap* global_trap_;
TrapIds trap_ids_; // Maps from TrapKeys to numeric ids
ErrorCode* trap_array_; // Array of ErrorCodes indexed by ids
size_t trap_array_size_; // Currently used size of array
size_t trap_array_capacity_; // Currently allocated capacity of array
bool has_unsafe_traps_; // Whether unsafe traps have been enabled
// Our constructor is private. A shared global instance is created
// automatically as needed.
// Copying and assigning is unimplemented. It doesn't make sense for a
// singleton.
DISALLOW_IMPLICIT_CONSTRUCTORS(Trap);
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_TRAP_H__

@ -0,0 +1,446 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <string.h>
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"
#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
#include "sandbox/linux/seccomp-bpf/verifier.h"
namespace sandbox {
namespace {
struct State {
State(const std::vector<struct sock_filter>& p,
const struct arch_seccomp_data& d)
: program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
const std::vector<struct sock_filter>& program;
const struct arch_seccomp_data& data;
unsigned int ip;
uint32_t accumulator;
bool acc_is_valid;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(State);
};
uint32_t EvaluateErrorCode(SandboxBPF* sandbox,
const ErrorCode& code,
const struct arch_seccomp_data& data) {
if (code.error_type() == ErrorCode::ET_SIMPLE ||
code.error_type() == ErrorCode::ET_TRAP) {
return code.err();
} else if (code.error_type() == ErrorCode::ET_COND) {
if (code.width() == ErrorCode::TP_32BIT &&
(data.args[code.argno()] >> 32) &&
(data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
0xFFFFFFFF80000000ull) {
return sandbox->Unexpected64bitArgument().err();
}
switch (code.op()) {
case ErrorCode::OP_EQUAL:
return EvaluateErrorCode(sandbox,
(code.width() == ErrorCode::TP_32BIT
? uint32_t(data.args[code.argno()])
: data.args[code.argno()]) == code.value()
? *code.passed()
: *code.failed(),
data);
case ErrorCode::OP_HAS_ALL_BITS:
return EvaluateErrorCode(sandbox,
((code.width() == ErrorCode::TP_32BIT
? uint32_t(data.args[code.argno()])
: data.args[code.argno()]) &
code.value()) == code.value()
? *code.passed()
: *code.failed(),
data);
case ErrorCode::OP_HAS_ANY_BITS:
return EvaluateErrorCode(sandbox,
(code.width() == ErrorCode::TP_32BIT
? uint32_t(data.args[code.argno()])
: data.args[code.argno()]) &
code.value()
? *code.passed()
: *code.failed(),
data);
default:
return SECCOMP_RET_INVALID;
}
} else {
return SECCOMP_RET_INVALID;
}
}
bool VerifyErrorCode(SandboxBPF* sandbox,
const std::vector<struct sock_filter>& program,
struct arch_seccomp_data* data,
const ErrorCode& root_code,
const ErrorCode& code,
const char** err) {
if (code.error_type() == ErrorCode::ET_SIMPLE ||
code.error_type() == ErrorCode::ET_TRAP) {
uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
if (*err) {
return false;
} else if (computed_ret != EvaluateErrorCode(sandbox, root_code, *data)) {
// For efficiency's sake, we'd much rather compare "computed_ret"
// against "code.err()". This works most of the time, but it doesn't
// always work for nested conditional expressions. The test values
// that we generate on the fly to probe expressions can trigger
// code flow decisions in multiple nodes of the decision tree, and the
// only way to compute the correct error code in that situation is by
// calling EvaluateErrorCode().
*err = "Exit code from BPF program doesn't match";
return false;
}
} else if (code.error_type() == ErrorCode::ET_COND) {
if (code.argno() < 0 || code.argno() >= 6) {
*err = "Invalid argument number in error code";
return false;
}
switch (code.op()) {
case ErrorCode::OP_EQUAL:
// Verify that we can check a 32bit value (or the LSB of a 64bit value)
// for equality.
data->args[code.argno()] = code.value();
if (!VerifyErrorCode(
sandbox, program, data, root_code, *code.passed(), err)) {
return false;
}
// Change the value to no longer match and verify that this is detected
// as an inequality.
data->args[code.argno()] = code.value() ^ 0x55AA55AA;
if (!VerifyErrorCode(
sandbox, program, data, root_code, *code.failed(), err)) {
return false;
}
// BPF programs can only ever operate on 32bit values. So, we have
// generated additional BPF instructions that inspect the MSB. Verify
// that they behave as intended.
if (code.width() == ErrorCode::TP_32BIT) {
if (code.value() >> 32) {
SANDBOX_DIE(
"Invalid comparison of a 32bit system call argument "
"against a 64bit constant; this test is always false.");
}
// If the system call argument was intended to be a 32bit parameter,
// verify that it is a fatal error if a 64bit value is ever passed
// here.
data->args[code.argno()] = 0x100000000ull;
if (!VerifyErrorCode(sandbox,
program,
data,
root_code,
sandbox->Unexpected64bitArgument(),
err)) {
return false;
}
} else {
// If the system call argument was intended to be a 64bit parameter,
// verify that we can handle (in-)equality for the MSB. This is
// essentially the same test that we did earlier for the LSB.
// We only need to verify the behavior of the inequality test. We
// know that the equality test already passed, as unlike the kernel
// the Verifier does operate on 64bit quantities.
data->args[code.argno()] = code.value() ^ 0x55AA55AA00000000ull;
if (!VerifyErrorCode(
sandbox, program, data, root_code, *code.failed(), err)) {
return false;
}
}
break;
case ErrorCode::OP_HAS_ALL_BITS:
case ErrorCode::OP_HAS_ANY_BITS:
// A comprehensive test of bit values is difficult and potentially rather
// time-expensive. We avoid doing so at run-time and instead rely on the
// unittest for full testing. The test that we have here covers just the
// common cases. We test against the bitmask itself, all zeros, and all
// ones.
{
// Testing "any" bits against a zero mask is always false. So, there
// are some cases where we expect tests to take the "failed()" branch
// even though this is a test that normally should take "passed()".
const ErrorCode& passed =
(!code.value() && code.op() == ErrorCode::OP_HAS_ANY_BITS) ||
// On a 32bit system, it is impossible to pass a 64bit value as a
// system call argument. So, some additional tests always evaluate
// as false.
((code.value() & ~uint64_t(uintptr_t(-1))) &&
code.op() == ErrorCode::OP_HAS_ALL_BITS) ||
(code.value() && !(code.value() & uintptr_t(-1)) &&
code.op() == ErrorCode::OP_HAS_ANY_BITS)
? *code.failed()
: *code.passed();
// Similarly, testing for "all" bits in a zero mask is always true. So,
// some cases pass despite them normally failing.
const ErrorCode& failed =
!code.value() && code.op() == ErrorCode::OP_HAS_ALL_BITS
? *code.passed()
: *code.failed();
data->args[code.argno()] = code.value() & uintptr_t(-1);
if (!VerifyErrorCode(
sandbox, program, data, root_code, passed, err)) {
return false;
}
data->args[code.argno()] = uintptr_t(-1);
if (!VerifyErrorCode(
sandbox, program, data, root_code, passed, err)) {
return false;
}
data->args[code.argno()] = 0;
if (!VerifyErrorCode(
sandbox, program, data, root_code, failed, err)) {
return false;
}
}
break;
default: // TODO(markus): Need to add support for OP_GREATER
*err = "Unsupported operation in conditional error code";
return false;
}
} else {
*err = "Attempting to return invalid error code from BPF program";
return false;
}
return true;
}
void Ld(State* state, const struct sock_filter& insn, const char** err) {
if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS) {
*err = "Invalid BPF_LD instruction";
return;
}
if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
// We only allow loading of properly aligned 32bit quantities.
memcpy(&state->accumulator,
reinterpret_cast<const char*>(&state->data) + insn.k,
4);
} else {
*err = "Invalid operand in BPF_LD instruction";
return;
}
state->acc_is_valid = true;
return;
}
void Jmp(State* state, const struct sock_filter& insn, const char** err) {
if (BPF_OP(insn.code) == BPF_JA) {
if (state->ip + insn.k + 1 >= state->program.size() ||
state->ip + insn.k + 1 <= state->ip) {
compilation_failure:
*err = "Invalid BPF_JMP instruction";
return;
}
state->ip += insn.k;
} else {
if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
state->ip + insn.jt + 1 >= state->program.size() ||
state->ip + insn.jf + 1 >= state->program.size()) {
goto compilation_failure;
}
switch (BPF_OP(insn.code)) {
case BPF_JEQ:
if (state->accumulator == insn.k) {
state->ip += insn.jt;
} else {
state->ip += insn.jf;
}
break;
case BPF_JGT:
if (state->accumulator > insn.k) {
state->ip += insn.jt;
} else {
state->ip += insn.jf;
}
break;
case BPF_JGE:
if (state->accumulator >= insn.k) {
state->ip += insn.jt;
} else {
state->ip += insn.jf;
}
break;
case BPF_JSET:
if (state->accumulator & insn.k) {
state->ip += insn.jt;
} else {
state->ip += insn.jf;
}
break;
default:
goto compilation_failure;
}
}
}
uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
if (BPF_SRC(insn.code) != BPF_K) {
*err = "Invalid BPF_RET instruction";
return 0;
}
return insn.k;
}
void Alu(State* state, const struct sock_filter& insn, const char** err) {
if (BPF_OP(insn.code) == BPF_NEG) {
state->accumulator = -state->accumulator;
return;
} else {
if (BPF_SRC(insn.code) != BPF_K) {
*err = "Unexpected source operand in arithmetic operation";
return;
}
switch (BPF_OP(insn.code)) {
case BPF_ADD:
state->accumulator += insn.k;
break;
case BPF_SUB:
state->accumulator -= insn.k;
break;
case BPF_MUL:
state->accumulator *= insn.k;
break;
case BPF_DIV:
if (!insn.k) {
*err = "Illegal division by zero";
break;
}
state->accumulator /= insn.k;
break;
case BPF_MOD:
if (!insn.k) {
*err = "Illegal division by zero";
break;
}
state->accumulator %= insn.k;
break;
case BPF_OR:
state->accumulator |= insn.k;
break;
case BPF_XOR:
state->accumulator ^= insn.k;
break;
case BPF_AND:
state->accumulator &= insn.k;
break;
case BPF_LSH:
if (insn.k > 32) {
*err = "Illegal shift operation";
break;
}
state->accumulator <<= insn.k;
break;
case BPF_RSH:
if (insn.k > 32) {
*err = "Illegal shift operation";
break;
}
state->accumulator >>= insn.k;
break;
default:
*err = "Invalid operator in arithmetic operation";
break;
}
}
}
} // namespace
bool Verifier::VerifyBPF(SandboxBPF* sandbox,
const std::vector<struct sock_filter>& program,
const SandboxBPFPolicy& policy,
const char** err) {
*err = NULL;
for (SyscallIterator iter(false); !iter.Done();) {
uint32_t sysnum = iter.Next();
// We ideally want to iterate over the full system call range and values
// just above and just below this range. This gives us the full result set
// of the "evaluators".
// On Intel systems, this can fail in a surprising way, as a cleared bit 30
// indicates either i386 or x86-64, and a set bit 30 indicates x32. And
// unless we pay attention to setting this bit correctly, an early check in
// our BPF program will make us fail with a misleading error code.
struct arch_seccomp_data data = {static_cast<int>(sysnum),
static_cast<uint32_t>(SECCOMP_ARCH)};
#if defined(__i386__) || defined(__x86_64__)
#if defined(__x86_64__) && defined(__ILP32__)
if (!(sysnum & 0x40000000u)) {
continue;
}
#else
if (sysnum & 0x40000000u) {
continue;
}
#endif
#endif
ErrorCode code = policy.EvaluateSyscall(sandbox, sysnum);
if (!VerifyErrorCode(sandbox, program, &data, code, code, err)) {
return false;
}
}
return true;
}
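// Illustrative sketch, not part of the imported file: the bit-30 test above
// in isolation. On Intel, a set bit 30 marks an x32 ABI system call number,
// while a cleared bit 30 means i386 or x86-64.
bool IsX32SyscallNumber(uint32_t sysnum) {
  return (sysnum & 0x40000000u) != 0;
}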
uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
const struct arch_seccomp_data& data,
const char** err) {
*err = NULL;
if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
*err = "Invalid program length";
return 0;
}
for (State state(program, data); !*err; ++state.ip) {
if (state.ip >= program.size()) {
*err = "Invalid instruction pointer in BPF program";
break;
}
const struct sock_filter& insn = program[state.ip];
switch (BPF_CLASS(insn.code)) {
case BPF_LD:
Ld(&state, insn, err);
break;
case BPF_JMP:
Jmp(&state, insn, err);
break;
case BPF_RET: {
uint32_t r = Ret(&state, insn, err);
switch (r & SECCOMP_RET_ACTION) {
case SECCOMP_RET_TRAP:
case SECCOMP_RET_ERRNO:
case SECCOMP_RET_ALLOW:
break;
case SECCOMP_RET_KILL: // We don't ever generate this
case SECCOMP_RET_TRACE: // We don't ever generate this
case SECCOMP_RET_INVALID: // Should never show up in BPF program
default:
*err = "Unexpected return code found in BPF program";
return 0;
}
return r;
}
case BPF_ALU:
Alu(&state, insn, err);
break;
default:
*err = "Unexpected instruction in BPF program";
break;
}
}
return 0;
}
} // namespace sandbox

@ -0,0 +1,49 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SANDBOX_LINUX_SECCOMP_BPF_VERIFIER_H__
#define SANDBOX_LINUX_SECCOMP_BPF_VERIFIER_H__
#include <linux/filter.h>
#include <stdint.h>
#include <utility>
#include <vector>
#include "base/basictypes.h"
namespace sandbox {
class SandboxBPF;
class SandboxBPFPolicy;
class Verifier {
public:
// Evaluate the BPF program for all possible inputs and verify that it
// computes the correct result. We use the "evaluators" to determine
// the full set of possible inputs that we have to iterate over.
// Returns success if the BPF filter accurately reflects the rules
// set by the "evaluators".
// Upon success, "err" is set to NULL. Upon failure, it contains a static
// error message that does not need to be free()'d.
static bool VerifyBPF(SandboxBPF* sandbox,
const std::vector<struct sock_filter>& program,
const SandboxBPFPolicy& policy,
const char** err);
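// Typical use (illustrative sketch, assuming |sandbox|, |program| and
// |policy| come from the BPF compiler):
//   const char* err = NULL;
//   if (!Verifier::VerifyBPF(sandbox, program, policy, &err)) {
//     SANDBOX_DIE(err);  // "err" points to a static message.
//   }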
// Evaluate a given BPF program for a particular set of system call
// parameters. If evaluation failed for any reason, "err" will be set to
// a non-NULL error string. Otherwise, the BPF program's result will be
// returned by the function and "err" is NULL.
// We do not actually implement the full BPF state machine, but only the
// parts that can actually be generated by our BPF compiler. If this code
// is used for purposes other than verifying the output of the sandbox's
// BPF compiler, we might have to extend this BPF interpreter.
static uint32_t EvaluateBPF(const std::vector<struct sock_filter>& program,
const struct arch_seccomp_data& data,
const char** err);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Verifier);
};
} // namespace sandbox
#endif // SANDBOX_LINUX_SECCOMP_BPF_VERIFIER_H__