servo: Merge #5098 - Remove uint/int inside `components/util` (partial #4745) (from servo:int-util); r=Ms2ger

This leaves range.rs alone.
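
Context: in the run-up to Rust 1.0, the pointer-sized integer types `uint` and `int` were renamed to `usize` and `isize`, and `AtomicUint`/`AtomicInt` became `AtomicUsize`/`AtomicIsize`; this commit applies that rename across `components/util`. A minimal before/after sketch of the mechanical change (illustrative only, not code from this commit):

```rust
// Before (old Rust): fn to_bucket(h: uint, len: uint) -> uint { h % len }
// After: the same code with the renamed types.
fn to_bucket(h: usize, len: usize) -> usize {
    // usize is the pointer-sized unsigned integer, the right type
    // for indices and lengths.
    h % len
}

fn main() {
    assert_eq!(to_bucket(0xdead_beef, 8), 0xdead_beef % 8);
}
```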

Source-Repo: https://github.com/servo/servo
Source-Revision: 07b918f731c0ff7c101f69195a4ff94b2f13acdc
This commit is contained in:
Alexandru Cojocaru 2015-02-28 07:21:53 -07:00
Parent 50d5ff0835
Commit 68cf9c0af2
11 changed files: 69 additions and 69 deletions

View file

@@ -59,7 +59,7 @@ impl<K, V> HashCache<K,V>
#[test]
fn test_hashcache() {
-let mut cache: HashCache<uint, Cell<&str>> = HashCache::new();
+let mut cache: HashCache<usize, Cell<&str>> = HashCache::new();
cache.insert(1, Cell::new("one"));
assert!(cache.find(&1).is_some());
@@ -72,11 +72,11 @@ fn test_hashcache() {
pub struct LRUCache<K, V> {
entries: Vec<(K, V)>,
-cache_size: uint,
+cache_size: usize,
}
impl<K: Clone + PartialEq, V: Clone> LRUCache<K,V> {
-pub fn new(size: uint) -> LRUCache<K, V> {
+pub fn new(size: usize) -> LRUCache<K, V> {
LRUCache {
entries: vec!(),
cache_size: size,
@@ -84,7 +84,7 @@ impl<K: Clone + PartialEq, V: Clone> LRUCache<K,V> {
}
#[inline]
-pub fn touch(&mut self, pos: uint) -> V {
+pub fn touch(&mut self, pos: usize) -> V {
let last_index = self.entries.len() - 1;
if pos != last_index {
let entry = self.entries.remove(pos);
@@ -134,7 +134,7 @@ pub struct SimpleHashCache<K,V> {
}
impl<K:Clone+Eq+Hash<SipHasher>,V:Clone> SimpleHashCache<K,V> {
-pub fn new(cache_size: uint) -> SimpleHashCache<K,V> {
+pub fn new(cache_size: usize) -> SimpleHashCache<K,V> {
let mut r = rand::thread_rng();
SimpleHashCache {
entries: repeat(None).take(cache_size).collect(),
@@ -144,15 +144,15 @@ impl<K:Clone+Eq+Hash<SipHasher>,V:Clone> SimpleHashCache<K,V> {
}
#[inline]
-fn to_bucket(&self, h: uint) -> uint {
+fn to_bucket(&self, h: usize) -> usize {
h % self.entries.len()
}
#[inline]
-fn bucket_for_key<Q:Hash<SipHasher>>(&self, key: &Q) -> uint {
+fn bucket_for_key<Q:Hash<SipHasher>>(&self, key: &Q) -> usize {
let mut hasher = SipHasher::new_with_keys(self.k0, self.k1);
key.hash(&mut hasher);
-self.to_bucket(hasher.finish() as uint)
+self.to_bucket(hasher.finish() as usize)
}
pub fn insert(&mut self, key: K, value: V) {

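The `touch` method above is the LRU move-to-back step: the most recently used entry always sits at the end of `entries`. A standalone sketch of that logic on a hypothetical simplified type (Servo's real `LRUCache` also evicts from the front when `cache_size` is exceeded):

```rust
struct MiniLru<V> {
    entries: Vec<V>,
}

impl<V: Clone> MiniLru<V> {
    // Move the entry at `pos` to the back and return a clone of it,
    // mirroring LRUCache::touch above.
    fn touch(&mut self, pos: usize) -> V {
        let last_index = self.entries.len() - 1;
        if pos != last_index {
            let entry = self.entries.remove(pos);
            self.entries.push(entry);
        }
        self.entries[last_index].clone()
    }
}

fn main() {
    let mut lru = MiniLru { entries: vec![1, 2, 3] };
    assert_eq!(lru.touch(0), 1);          // 1 becomes most recently used
    assert_eq!(lru.entries, vec![2, 3, 1]);
}
```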
View file

@@ -12,7 +12,7 @@ fn hexdump_slice(buf: &[u8]) {
let mut stderr = io::stderr();
stderr.write_all(b" ").unwrap();
for (i, &v) in buf.iter().enumerate() {
-let output = format!("{:02X} ", v as uint);
+let output = format!("{:02X} ", v);
stderr.write_all(output.as_bytes()).unwrap();
match i % 16 {
15 => { stderr.write_all(b"\n ").unwrap(); },

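The dropped `as uint` cast above was never needed: the standard library implements hex formatting for `u8` itself. A quick self-contained check:

```rust
fn main() {
    let buf: &[u8] = &[0x0f, 0xa0, 0xff];
    for (i, &v) in buf.iter().enumerate() {
        // `{:02X}` works on u8 directly; no integer cast required.
        print!("{:02X} ", v);
        if i % 16 == 15 {
            println!();
        }
    }
    println!();
}
```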
View file

@@ -57,23 +57,23 @@ use std::mem::{forget, min_align_of, size_of, transmute};
use std::ptr;
use std::sync::Mutex;
-use std::sync::atomic::{AtomicInt, AtomicPtr};
+use std::sync::atomic::{AtomicIsize, AtomicPtr};
use std::sync::atomic::Ordering::SeqCst;
// Once the queue is less than 1/K full, then it will be downsized. Note that
// the deque requires that this number be less than 2.
-static K: int = 4;
+static K: isize = 4;
// Minimum number of bits that a buffer size should be. No buffer will resize to
// under this value, and all deques will initially contain a buffer of this
// size.
//
// The size in question is 1 << MIN_BITS
-static MIN_BITS: uint = 7;
+static MIN_BITS: usize = 7;
struct Deque<T> {
-bottom: AtomicInt,
-top: AtomicInt,
+bottom: AtomicIsize,
+top: AtomicIsize,
array: AtomicPtr<Buffer<T>>,
pool: BufferPool<T>,
}
@@ -139,7 +139,7 @@ pub struct BufferPool<T> {
/// LLVM is probably pretty good at doing this already.
struct Buffer<T> {
storage: *const T,
-log_size: uint,
+log_size: usize,
}
unsafe impl<T: 'static> Send for Buffer<T> { }
@@ -159,7 +159,7 @@ impl<T: Send> BufferPool<T> {
(Worker { deque: a }, Stealer { deque: b })
}
-fn alloc(&mut self, bits: uint) -> Box<Buffer<T>> {
+fn alloc(&mut self, bits: usize) -> Box<Buffer<T>> {
unsafe {
let mut pool = self.pool.lock().unwrap();
match pool.iter().position(|x| x.size() >= (1 << bits)) {
@@ -228,8 +228,8 @@ impl<T: Send> Deque<T> {
fn new(mut pool: BufferPool<T>) -> Deque<T> {
let buf = pool.alloc(MIN_BITS);
Deque {
-bottom: AtomicInt::new(0),
-top: AtomicInt::new(0),
+bottom: AtomicIsize::new(0),
+top: AtomicIsize::new(0),
array: AtomicPtr::new(unsafe { transmute(buf) }),
pool: pool,
}
@@ -299,7 +299,7 @@ impl<T: Send> Deque<T> {
}
}
-unsafe fn maybe_shrink(&self, b: int, t: int) {
+unsafe fn maybe_shrink(&self, b: isize, t: isize) {
let a = self.array.load(SeqCst);
if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) {
self.swap_buffer(b, a, (*a).resize(b, t, -1));
@@ -313,7 +313,7 @@ impl<T: Send> Deque<T> {
// after this method has called 'free' on it. The continued usage is simply
// a read followed by a forget, but we must make sure that the memory can
// continue to be read after we flag this buffer for reclamation.
-unsafe fn swap_buffer(&self, b: int, old: *mut Buffer<T>,
+unsafe fn swap_buffer(&self, b: isize, old: *mut Buffer<T>,
buf: Buffer<T>) -> *mut Buffer<T> {
let newbuf: *mut Buffer<T> = transmute(box buf);
self.array.store(newbuf, SeqCst);
@@ -345,12 +345,12 @@ impl<T: Send> Drop for Deque<T> {
}
#[inline]
-fn buffer_alloc_size<T>(log_size: uint) -> uint {
+fn buffer_alloc_size<T>(log_size: usize) -> usize {
(1 << log_size) * size_of::<T>()
}
impl<T: Send> Buffer<T> {
-unsafe fn new(log_size: uint) -> Buffer<T> {
+unsafe fn new(log_size: usize) -> Buffer<T> {
let size = buffer_alloc_size::<T>(log_size);
let buffer = allocate(size, min_align_of::<T>());
if buffer.is_null() { ::alloc::oom() }
@@ -360,12 +360,12 @@ impl<T: Send> Buffer<T> {
}
}
-fn size(&self) -> int { 1 << self.log_size }
+fn size(&self) -> isize { 1 << self.log_size }
// Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly
-fn mask(&self) -> int { (1 << self.log_size) - 1 }
+fn mask(&self) -> isize { (1 << self.log_size) - 1 }
-unsafe fn elem(&self, i: int) -> *const T {
+unsafe fn elem(&self, i: isize) -> *const T {
self.storage.offset(i & self.mask())
}
@@ -373,23 +373,23 @@ impl<T: Send> Buffer<T> {
// nor does this clear out the contents contained within. Hence, this is a
// very unsafe method which the caller needs to treat specially in case a
// race is lost.
-unsafe fn get(&self, i: int) -> T {
+unsafe fn get(&self, i: isize) -> T {
ptr::read(self.elem(i))
}
// Unsafe because this unsafely overwrites possibly uninitialized or
// initialized data.
-unsafe fn put(&self, i: int, t: T) {
+unsafe fn put(&self, i: isize, t: T) {
ptr::write(self.elem(i) as *mut T, t);
}
// Again, unsafe because this has incredibly dubious ownership violations.
// It is assumed that this buffer is immediately dropped.
-unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer<T> {
+unsafe fn resize(&self, b: isize, t: isize, delta: isize) -> Buffer<T> {
// NB: not entirely obvious, but thanks to 2's complement,
-// casting delta to uint and then adding gives the desired
+// casting delta to usize and then adding gives the desired
// effect.
-let buf = Buffer::new(self.log_size + delta as uint);
+let buf = Buffer::new(self.log_size + delta as usize);
for i in range(t, b) {
buf.put(i, self.get(i));
}

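The two's-complement note in `resize` above is worth spelling out: shrinking calls `resize(b, t, -1)`, and `-1 as usize` is `usize::MAX`, so adding it wraps around to `log_size - 1`. A standalone illustration (modern Rust needs `wrapping_add` to express this; the code above relied on plain `+` wrapping):

```rust
fn main() {
    let log_size: usize = 7;   // MIN_BITS in the deque above
    let delta: isize = -1;     // shrink the buffer by one bit

    // `-1 as usize` is usize::MAX; wrapping addition lands on log_size - 1.
    let shrunk = log_size.wrapping_add(delta as usize);
    assert_eq!(shrunk, 6);

    // Growing works the same way with a positive delta.
    let grown = log_size.wrapping_add(1isize as usize);
    assert_eq!(grown, 8);
}
```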
View file

@@ -236,7 +236,7 @@ impl Au {
}
#[inline]
-pub fn from_px(px: int) -> Au {
+pub fn from_px(px: isize) -> Au {
NumCast::from(px * 60).unwrap()
}
@@ -246,9 +246,9 @@ impl Au {
}
#[inline]
-pub fn to_nearest_px(&self) -> int {
+pub fn to_nearest_px(&self) -> isize {
let Au(s) = *self;
-((s as f64) / 60f64).round() as int
+((s as f64) / 60f64).round() as isize
}
#[inline]
@@ -309,13 +309,13 @@ pub fn from_frac_px(px: f64) -> Au {
Au((px * 60f64) as i32)
}
-pub fn from_px(px: int) -> Au {
+pub fn from_px(px: isize) -> Au {
NumCast::from(px * 60).unwrap()
}
-pub fn to_px(au: Au) -> int {
+pub fn to_px(au: Au) -> isize {
let Au(a) = au;
-(a / 60) as int
+(a / 60) as isize
}
pub fn to_frac_px(au: Au) -> f64 {
@@ -325,7 +325,7 @@ pub fn to_frac_px(au: Au) -> f64 {
// assumes 72 points per inch, and 96 px per inch
pub fn from_pt(pt: f64) -> Au {
-from_px((pt / 72f64 * 96f64) as int)
+from_px((pt / 72f64 * 96f64) as isize)
}
// assumes 72 points per inch, and 96 px per inch

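`Au` is Servo's "app unit", a fixed-point length at 60 units per CSS px, which is why the conversions above multiply and divide by 60. A hedged sketch of the same arithmetic on a plain newtype (Servo's real `Au` goes through `NumCast`):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Au(i32); // app units: 60 per CSS px

impl Au {
    fn from_px(px: isize) -> Au {
        Au((px * 60) as i32)
    }

    fn to_nearest_px(self) -> isize {
        ((self.0 as f64) / 60f64).round() as isize
    }
}

// 72 points per inch and 96 px per inch, as the comments above assume.
fn from_pt(pt: f64) -> Au {
    Au::from_px((pt / 72f64 * 96f64) as isize)
}

fn main() {
    assert_eq!(Au::from_px(10), Au(600));
    assert_eq!(Au(601).to_nearest_px(), 10);
    assert_eq!(from_pt(12.0), Au::from_px(16)); // 12pt == 16px
}
```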
View file

@@ -241,7 +241,7 @@ macro_rules! option_try(
);
#[cfg(target_os="linux")]
-fn get_proc_self_statm_field(field: uint) -> Option<u64> {
+fn get_proc_self_statm_field(field: usize) -> Option<u64> {
let mut f = File::open(&Path::new("/proc/self/statm"));
match f.read_to_string() {
Ok(contents) => {

View file

@@ -28,14 +28,14 @@ pub struct Opts {
/// How many threads to use for CPU painting (`-t`).
///
/// Note that painting is sequentialized when using GPU painting.
-pub paint_threads: uint,
+pub paint_threads: usize,
/// True to use GPU painting via Skia-GL, false to use CPU painting via Skia (`-g`). Note that
/// compositing is always done on the GPU.
pub gpu_painting: bool,
/// The maximum size of each tile in pixels (`-s`).
-pub tile_size: uint,
+pub tile_size: usize,
/// The ratio of device pixels per px at the default scale. If unspecified, will use the
/// platform default setting.
@@ -54,7 +54,7 @@ pub struct Opts {
/// The number of threads to use for layout (`-y`). Defaults to 1, which results in a recursive
/// sequential algorithm.
-pub layout_threads: uint,
+pub layout_threads: usize,
pub nonincremental_layout: bool,
@@ -102,7 +102,7 @@ pub struct Opts {
pub devtools_port: Option<u16>,
/// The initial requested size of the window.
-pub initial_window_size: TypedSize2D<ScreenPx, uint>,
+pub initial_window_size: TypedSize2D<ScreenPx, u32>,
/// An optional string allowing the user agent to be set for testing.
pub user_agent: Option<String>,
@@ -256,7 +256,7 @@ pub fn from_cmdline_args(args: &[String]) -> bool {
opt_match.free.clone()
};
-let tile_size: uint = match opt_match.opt_str("s") {
+let tile_size: usize = match opt_match.opt_str("s") {
Some(tile_size_str) => tile_size_str.parse().unwrap(),
None => 512,
};
@@ -265,7 +265,7 @@ pub fn from_cmdline_args(args: &[String]) -> bool {
ScaleFactor(dppx_str.parse().unwrap())
);
-let mut paint_threads: uint = match opt_match.opt_str("t") {
+let mut paint_threads: usize = match opt_match.opt_str("t") {
Some(paint_threads_str) => paint_threads_str.parse().unwrap(),
None => cmp::max(rt::default_sched_threads() * 3 / 4, 1),
};
@@ -280,7 +280,7 @@ pub fn from_cmdline_args(args: &[String]) -> bool {
let gpu_painting = !FORCE_CPU_PAINTING && opt_match.opt_present("g");
-let mut layout_threads: uint = match opt_match.opt_str("y") {
+let mut layout_threads: usize = match opt_match.opt_str("y") {
Some(layout_threads_str) => layout_threads_str.parse().unwrap(),
None => cmp::max(rt::default_sched_threads() * 3 / 4, 1),
};
@@ -301,7 +301,7 @@ pub fn from_cmdline_args(args: &[String]) -> bool {
let initial_window_size = match opt_match.opt_str("resolution") {
Some(res_string) => {
-let res: Vec<uint> = res_string.split('x').map(|r| r.parse().unwrap()).collect();
+let res: Vec<u32> = res_string.split('x').map(|r| r.parse().unwrap()).collect();
TypedSize2D(res[0], res[1])
}
None => {

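The `--resolution` handling above splits `"WIDTHxHEIGHT"` on `'x'` and lets the `Vec<u32>` annotation drive `parse`. A standalone sketch of that parsing step (hypothetical helper name, same unwrap-on-malformed-input behavior as the original):

```rust
fn parse_resolution(res_string: &str) -> (u32, u32) {
    // Split "WIDTHxHEIGHT" and parse each component as u32, mirroring
    // the opts.rs change above; panics on malformed input, like the
    // unwraps in the original.
    let res: Vec<u32> = res_string
        .split('x')
        .map(|r| r.parse().unwrap())
        .collect();
    (res[0], res[1])
}

fn main() {
    assert_eq!(parse_resolution("800x600"), (800, 600));
}
```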
View file

@@ -9,7 +9,7 @@ use std::sync::Arc;
pub struct PersistentList<T> {
head: PersistentListLink<T>,
-length: uint,
+length: usize,
}
struct PersistentListEntry<T> {
@@ -29,7 +29,7 @@ impl<T> PersistentList<T> where T: Send + Sync {
}
#[inline]
-pub fn len(&self) -> uint {
+pub fn len(&self) -> usize {
self.length
}

View file

@@ -25,7 +25,7 @@ pub struct TaskPool {
}
impl TaskPool {
-pub fn new(tasks: uint) -> TaskPool {
+pub fn new(tasks: u32) -> TaskPool {
assert!(tasks > 0);
let (tx, rx) = channel();

View file

@@ -8,10 +8,10 @@ use std::cell::RefCell;
static mut next_tid: AtomicUsize = ATOMIC_USIZE_INIT;
-thread_local!(static TASK_LOCAL_TID: Rc<RefCell<Option<uint>>> = Rc::new(RefCell::new(None)));
+thread_local!(static TASK_LOCAL_TID: Rc<RefCell<Option<usize>>> = Rc::new(RefCell::new(None)));
/// Every task gets one, that's unique.
-pub fn tid() -> uint {
+pub fn tid() -> usize {
TASK_LOCAL_TID.with(|ref k| {
let ret =
match *k.borrow() {

View file

@@ -16,11 +16,11 @@ pub trait Comparator<K,T> {
pub trait BinarySearchMethods<'a, T: Ord + PartialOrd + PartialEq> {
fn binary_search_(&self, key: &T) -> Option<&'a T>;
-fn binary_search_index(&self, key: &T) -> Option<uint>;
+fn binary_search_index(&self, key: &T) -> Option<usize>;
}
pub trait FullBinarySearchMethods<T> {
-fn binary_search_index_by<K,C:Comparator<K,T>>(&self, key: &K, cmp: C) -> Option<uint>;
+fn binary_search_index_by<K,C:Comparator<K,T>>(&self, key: &K, cmp: C) -> Option<usize>;
}
impl<'a, T: Ord + PartialOrd + PartialEq> BinarySearchMethods<'a, T> for &'a [T] {
@@ -28,28 +28,28 @@ impl<'a, T: Ord + PartialOrd + PartialEq> BinarySearchMethods<'a, T> for &'a [T]
self.binary_search_index(key).map(|i| &self[i])
}
-fn binary_search_index(&self, key: &T) -> Option<uint> {
+fn binary_search_index(&self, key: &T) -> Option<usize> {
self.binary_search_index_by(key, DefaultComparator)
}
}
impl<'a, T> FullBinarySearchMethods<T> for &'a [T] {
-fn binary_search_index_by<K,C:Comparator<K,T>>(&self, key: &K, cmp: C) -> Option<uint> {
+fn binary_search_index_by<K,C:Comparator<K,T>>(&self, key: &K, cmp: C) -> Option<usize> {
if self.len() == 0 {
return None;
}
-let mut low : int = 0;
-let mut high : int = (self.len() as int) - 1;
+let mut low : isize = 0;
+let mut high : isize = (self.len() as isize) - 1;
while low <= high {
// http://googleresearch.blogspot.com/2006/06/extra-extra-read-all-about-it-nearly.html
-let mid = ((low as uint) + (high as uint)) >> 1;
+let mid = ((low as usize) + (high as usize)) >> 1;
let midv = &self[mid];
match cmp.compare(key, midv) {
-Ordering::Greater => low = (mid as int) + 1,
-Ordering::Less => high = (mid as int) - 1,
+Ordering::Greater => low = (mid as isize) + 1,
+Ordering::Less => high = (mid as isize) - 1,
Ordering::Equal => return Some(mid),
}
}

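The linked "nearly all binary searches are broken" post describes the signed overflow in `(low + high) / 2`; the cast to `usize` in the midpoint above sidesteps it. A self-contained sketch of the same search, specialized to `Ord` keys (Servo's version is generic over a `Comparator`):

```rust
fn binary_search_index<T: Ord>(slice: &[T], key: &T) -> Option<usize> {
    if slice.is_empty() {
        return None;
    }
    let mut low: isize = 0;
    let mut high: isize = (slice.len() as isize) - 1;
    while low <= high {
        // Casting to usize before adding avoids the classic signed
        // overflow in (low + high) / 2.
        let mid = ((low as usize) + (high as usize)) >> 1;
        match key.cmp(&slice[mid]) {
            std::cmp::Ordering::Greater => low = (mid as isize) + 1,
            std::cmp::Ordering::Less => high = (mid as isize) - 1,
            std::cmp::Ordering::Equal => return Some(mid),
        }
    }
    None
}

fn main() {
    let xs = [1, 3, 5, 7, 9];
    assert_eq!(binary_search_index(&xs, &7), Some(3));
    assert_eq!(binary_search_index(&xs, &4), None);
}
```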
View file

@@ -13,7 +13,7 @@ use task_state;
use libc::funcs::posix88::unistd::usleep;
use std::mem;
use rand::{Rng, weak_rng, XorShiftRng};
-use std::sync::atomic::{AtomicUint, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Sender, Receiver};
use deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
@@ -33,7 +33,7 @@ pub struct WorkUnit<QueueData, WorkData> {
/// Messages from the supervisor to the worker.
enum WorkerMsg<QueueData: 'static, WorkData: 'static> {
/// Tells the worker to start work.
-Start(Worker<WorkUnit<QueueData, WorkData>>, *mut AtomicUint, *const QueueData),
+Start(Worker<WorkUnit<QueueData, WorkData>>, *mut AtomicUsize, *const QueueData),
/// Tells the worker to stop. It can be restarted again with a `WorkerMsg::Start`.
Stop,
/// Tells the worker thread to terminate.
@@ -45,7 +45,7 @@ unsafe impl<QueueData: 'static, WorkData: 'static> Send for WorkerMsg<QueueData,
/// Messages to the supervisor.
enum SupervisorMsg<QueueData: 'static, WorkData: 'static> {
Finished,
-ReturnDeque(uint, Worker<WorkUnit<QueueData, WorkData>>),
+ReturnDeque(usize, Worker<WorkUnit<QueueData, WorkData>>),
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for SupervisorMsg<QueueData, WorkData> {}
@@ -63,7 +63,7 @@ struct WorkerInfo<QueueData: 'static, WorkData: 'static> {
/// Information specific to each worker thread that the thread keeps.
struct WorkerThread<QueueData: 'static, WorkData: 'static> {
/// The index of this worker.
-index: uint,
+index: usize,
/// The communication port from the supervisor.
port: Receiver<WorkerMsg<QueueData, WorkData>>,
/// The communication channel on which messages are sent to the supervisor.
@@ -110,7 +110,7 @@ impl<QueueData: Send, WorkData: Send> WorkerThread<QueueData, WorkData> {
let mut i = 0;
let mut should_continue = true;
loop {
-let victim = (self.rng.next_u32() as uint) % self.other_deques.len();
+let victim = (self.rng.next_u32() as usize) % self.other_deques.len();
match self.other_deques[victim].steal() {
Empty | Abort => {
// Continue.
@@ -179,7 +179,7 @@ impl<QueueData: Send, WorkData: Send> WorkerThread<QueueData, WorkData> {
/// A handle to the work queue that individual work units have.
pub struct WorkerProxy<'a, QueueData: 'a, WorkData: 'a> {
worker: &'a mut Worker<WorkUnit<QueueData, WorkData>>,
-ref_count: *mut AtomicUint,
+ref_count: *mut AtomicUsize,
queue_data: *const QueueData,
worker_index: u8,
}
@@ -216,7 +216,7 @@ pub struct WorkQueue<QueueData: 'static, WorkData: 'static> {
/// A port on which deques can be received from the workers.
port: Receiver<SupervisorMsg<QueueData, WorkData>>,
/// The amount of work that has been enqueued.
-work_count: uint,
+work_count: usize,
/// Arbitrary user data.
pub data: QueueData,
}
@@ -226,7 +226,7 @@ impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
/// it.
pub fn new(task_name: &'static str,
state: task_state::TaskState,
-thread_count: uint,
+thread_count: usize,
user_data: QueueData) -> WorkQueue<QueueData, WorkData> {
// Set up data structures.
let (supervisor_chan, supervisor_port) = channel();
@@ -295,7 +295,7 @@ impl<QueueData: Send, WorkData: Send> WorkQueue<QueueData, WorkData> {
/// Synchronously runs all the enqueued tasks and waits for them to complete.
pub fn run(&mut self) {
// Tell the workers to start.
-let mut work_count = AtomicUint::new(self.work_count);
+let mut work_count = AtomicUsize::new(self.work_count);
for worker in self.workers.iter_mut() {
worker.chan.send(WorkerMsg::Start(worker.deque.take().unwrap(), &mut work_count, &self.data)).unwrap()
}
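
The `work_count` atomic above is handed to every worker and counted down as units complete; `AtomicUsize` is the natural type for a count that can never go negative. A hedged, self-contained sketch of that counting pattern, using modern scoped threads rather than the raw-pointer sharing in the code above:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    // Shared count of outstanding work units, as in run() above.
    let work_count = AtomicUsize::new(8);

    std::thread::scope(|s| {
        for _ in 0..4 {
            s.spawn(|| {
                // Each worker retires two units; fetch_sub returns the
                // previous value, so seeing 1 means we finished the last one.
                for _ in 0..2 {
                    if work_count.fetch_sub(1, Ordering::SeqCst) == 1 {
                        println!("all work done");
                    }
                }
            });
        }
    });

    assert_eq!(work_count.load(Ordering::SeqCst), 0);
}
```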