//! Unsafe operations

export reinterpret_cast, forget, bump_box_refcount, transmute;
export transmute_mut, transmute_immut, transmute_region, transmute_mut_region;
export SharedMutableState, shared_mutable_state, clone_shared_mutable_state;
export get_shared_mutable_state, get_shared_immutable_state;
export Exclusive, exclusive;

import task::atomically;

#[abi = "rust-intrinsic"]
extern mod rusti {
    fn forget<T>(-x: T);
    fn reinterpret_cast<T, U>(e: T) -> U;
}

/// Casts the value `src` to type `U`. The two types must have the same size.
#[inline(always)]
unsafe fn reinterpret_cast<T, U>(src: T) -> U {
    rusti::reinterpret_cast(src)
}
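
// Illustrative sketch (not from the original source): reinterpreting a value
// as another type of the same size, mirroring test_reinterpret_cast below.
//
//     let n: uint = unsafe { reinterpret_cast(1) };
//     assert n == 1u;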

/**
 * Move a thing into the void
 *
 * The forget function will take ownership of the provided value but neglect
 * to run any required cleanup or memory-management operations on it. This
 * can be used for various acts of magick, particularly when using
 * reinterpret_cast on managed pointer types.
 */
#[inline(always)]
unsafe fn forget<T>(-thing: T) { rusti::forget(thing); }
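
// A sketch of one typical use (an assumption, not from the original source):
// after duplicating a managed pointer with reinterpret_cast, forget one copy
// so the box's destructor and refcount bookkeeping run only once.
//
//     let x = @1;
//     let y: @int = unsafe { reinterpret_cast(x) }; // two handles, refcount 1
//     unsafe { forget(y); }                         // only x's cleanup runs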

/**
 * Force-increment the reference count on a shared box. If used
 * carelessly, this can leak the box. Use this in conjunction with transmute
 * and/or reinterpret_cast when such calls would otherwise scramble a box's
 * reference count.
 */
unsafe fn bump_box_refcount<T>(+t: @T) { forget(t); }
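
// Sketch of the intended pattern (see test_bump_box_refcount below): bump
// once for each extra handle that a later cast will conjure up.
//
//     let box = @~"hi";                                     // refcount 1
//     unsafe { bump_box_refcount(box); }                    // refcount 2
//     let extra: @~str = unsafe { reinterpret_cast(box) };  // 2 handles, 2 refs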

/**
 * Transform a value of one type into a value of another type.
 * Both types must have the same size and alignment.
 *
 * # Example
 *
 *     assert transmute(~"L") == ~[76u8, 0u8];
 */
unsafe fn transmute<L, G>(-thing: L) -> G {
    let newthing = reinterpret_cast(thing);
    forget(thing);
    return newthing;
}

/// Coerce an immutable reference to be mutable.
unsafe fn transmute_mut<T>(+ptr: &T) -> &mut T { transmute(ptr) }

/// Coerce a mutable reference to be immutable.
unsafe fn transmute_immut<T>(+ptr: &mut T) -> &T { transmute(ptr) }

/// Coerce a borrowed pointer to have an arbitrary associated region.
unsafe fn transmute_region<T>(+ptr: &a/T) -> &b/T { transmute(ptr) }

/// Coerce a borrowed mutable pointer to have an arbitrary associated region.
unsafe fn transmute_mut_region<T>(+ptr: &a/mut T) -> &b/mut T {
    transmute(ptr)
}
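
// Illustrative sketch (an assumption, not part of the original source): these
// coercions strip the static guarantees, so the caller inherits the proof
// burden.
//
//     let x = 5;
//     let p = unsafe { transmute_mut(&x) };
//     *p = 6; // mutates through an immutable borrow; sound only if nothing
//             // else relies on `x` staying constant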

/****************************************************************************
 * Shared state & exclusive ARC
 ****************************************************************************/

type ArcData<T> = {
    mut count: libc::intptr_t,
    data: T
};

class ArcDestruct<T> {
    let data: *libc::c_void;
    new(data: *libc::c_void) { self.data = data; }
    drop unsafe {
        let data: ~ArcData<T> = unsafe::reinterpret_cast(self.data);
        let new_count = rustrt::rust_atomic_decrement(&mut data.count);
        assert new_count >= 0;
        if new_count == 0 {
            // drop glue takes over.
        } else {
            unsafe::forget(data);
        }
    }
}

/**
 * COMPLETELY UNSAFE. Used as a primitive for the safe versions in std::arc.
 *
 * Data races between tasks can result in crashes and, with sufficient
 * cleverness, arbitrary type coercion.
 */
type SharedMutableState<T: send> = ArcDestruct<T>;

unsafe fn shared_mutable_state<T: send>(+data: T) -> SharedMutableState<T> {
    let data = ~{mut count: 1, data: data};
    unsafe {
        let ptr = unsafe::transmute(data);
        ArcDestruct(ptr)
    }
}

#[inline(always)]
unsafe fn get_shared_mutable_state<T: send>(rc: &SharedMutableState<T>)
        -> &mut T {
    unsafe {
        let ptr: ~ArcData<T> = unsafe::reinterpret_cast((*rc).data);
        assert ptr.count > 0;
        // Cast us back into the correct region
        let r = unsafe::reinterpret_cast(&ptr.data);
        unsafe::forget(ptr);
        return r;
    }
}

#[inline(always)]
unsafe fn get_shared_immutable_state<T: send>(rc: &SharedMutableState<T>)
        -> &T {
    unsafe {
        let ptr: ~ArcData<T> = unsafe::reinterpret_cast((*rc).data);
        assert ptr.count > 0;
        // Cast us back into the correct region
        let r = unsafe::reinterpret_cast(&ptr.data);
        unsafe::forget(ptr);
        return r;
    }
}

unsafe fn clone_shared_mutable_state<T: send>(rc: &SharedMutableState<T>)
        -> SharedMutableState<T> {
    unsafe {
        let ptr: ~ArcData<T> = unsafe::reinterpret_cast((*rc).data);
        let new_count = rustrt::rust_atomic_increment(&mut ptr.count);
        assert new_count >= 2;
        unsafe::forget(ptr);
    }
    ArcDestruct((*rc).data)
}
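
// A sketch of how these primitives compose (assumed usage, not from the
// original source; std::arc wraps this pattern in a safe interface):
//
//     let rc1 = unsafe { shared_mutable_state(~"hi") };       // count == 1
//     let rc2 = unsafe { clone_shared_mutable_state(&rc1) };  // count == 2
//     unsafe { assert *get_shared_immutable_state(&rc2) == ~"hi"; }
//     // Dropping rc1 and rc2 decrements the count; the last drop frees.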

/****************************************************************************/

#[allow(non_camel_case_types)] // runtime type
type rust_little_lock = *libc::c_void;

#[abi = "cdecl"]
extern mod rustrt {
    #[rust_stack]
    fn rust_atomic_increment(p: &mut libc::intptr_t)
        -> libc::intptr_t;

    #[rust_stack]
    fn rust_atomic_decrement(p: &mut libc::intptr_t)
        -> libc::intptr_t;

    fn rust_create_little_lock() -> rust_little_lock;
    fn rust_destroy_little_lock(lock: rust_little_lock);
    fn rust_lock_little_lock(lock: rust_little_lock);
    fn rust_unlock_little_lock(lock: rust_little_lock);
}

class LittleLock {
    let l: rust_little_lock;
    new() {
        self.l = rustrt::rust_create_little_lock();
    }
    drop { rustrt::rust_destroy_little_lock(self.l); }
}

impl LittleLock {
    #[inline(always)]
    unsafe fn lock<T>(f: fn() -> T) -> T {
        class Unlock {
            let l: rust_little_lock;
            new(l: rust_little_lock) { self.l = l; }
            drop { rustrt::rust_unlock_little_lock(self.l); }
        }

        do atomically {
            rustrt::rust_lock_little_lock(self.l);
            let _r = Unlock(self.l);
            f()
        }
    }
}
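
// Hypothetical usage sketch (not in the original source): the lock is held
// for exactly the extent of the closure, because Unlock's destructor releases
// it even if f fails.
//
//     let l = LittleLock();
//     let n = unsafe { l.lock(|| 5) };
//     assert n == 5;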

struct ExData<T: send> { lock: LittleLock; mut failed: bool; mut data: T; }
/**
 * An arc over mutable data that is protected by a lock. For library use only.
 */
struct Exclusive<T: send> { x: SharedMutableState<ExData<T>>; }

fn exclusive<T: send>(+user_data: T) -> Exclusive<T> {
    let data = ExData {
        lock: LittleLock(), mut failed: false, mut data: user_data
    };
    Exclusive { x: unsafe { shared_mutable_state(data) } }
}

impl<T: send> Exclusive<T> {
    // Duplicate an exclusive ARC, like std::arc::clone.
    fn clone() -> Exclusive<T> {
        Exclusive { x: unsafe { clone_shared_mutable_state(&self.x) } }
    }

    // Exactly like std::arc::mutex_arc::access(), but with the little_lock
    // instead of a proper mutex. Same reason for being unsafe.
    //
    // Currently, scheduling operations (i.e., yielding, receiving on a pipe,
    // accessing the provided condition variable) are prohibited while inside
    // the exclusive. Supporting that is a work in progress.
    #[inline(always)]
    unsafe fn with<U>(f: fn(x: &mut T) -> U) -> U {
        let rec = unsafe { get_shared_mutable_state(&self.x) };
        do rec.lock.lock {
            if rec.failed {
                fail ~"Poisoned exclusive - another task failed inside!";
            }
            rec.failed = true;
            let result = f(&mut rec.data);
            rec.failed = false;
            result
        }
    }
}
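
// Minimal usage sketch (assumed, mirroring the tests below): clone() shares
// the state across tasks, and with() serializes access under the lock.
//
//     let ex = exclusive(0u);
//     unsafe {
//         do ex.with |n| { *n += 1u; }
//     }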

/****************************************************************************
 * Tests
 ****************************************************************************/

#[cfg(test)]
mod tests {
    #[test]
    fn test_reinterpret_cast() {
        assert unsafe { reinterpret_cast(1) } == 1u;
    }

    #[test]
    fn test_bump_box_refcount() {
        unsafe {
            let box = @~"box box box"; // refcount 1
            bump_box_refcount(box); // refcount 2
            let ptr: *int = transmute(box); // refcount 2
            let _box1: @~str = reinterpret_cast(ptr);
            let _box2: @~str = reinterpret_cast(ptr);
            assert *_box1 == ~"box box box";
            assert *_box2 == ~"box box box";
            // Will destroy _box1 and _box2. Without the bump, this would
            // use-after-free. With too many bumps, it would leak.
        }
    }

    #[test]
    fn test_transmute() {
        unsafe {
            let x = @1;
            let x: *int = transmute(x);
            assert *x == 1;
            let _x: @int = transmute(x);
        }
    }

    #[test]
    fn test_transmute2() {
        unsafe {
            assert transmute(~"L") == ~[76u8, 0u8];
        }
    }

    #[test]
    fn exclusive_arc() {
        let mut futures = ~[];

        let num_tasks = 10u;
        let count = 10u;

        let total = exclusive(~mut 0u);

        for uint::range(0u, num_tasks) |_i| {
            let total = total.clone();
            futures += ~[future::spawn(|| {
                for uint::range(0u, count) |_i| {
                    do total.with |count| {
                        **count += 1u;
                    }
                }
            })];
        };

        for futures.each |f| { f.get() }

        do total.with |total| {
            assert **total == num_tasks * count
        };
    }

    #[test] #[should_fail] #[ignore(cfg(windows))]
    fn exclusive_poison() {
        // Tests that if one task fails inside of an exclusive, subsequent
        // accesses will also fail.
        let x = exclusive(1);
        let x2 = x.clone();
        do task::try {
            do x2.with |one| {
                assert *one == 2;
            }
        };
        do x.with |one| {
            assert *one == 1;
        }
    }
}