auto merge of #7265 : brson/rust/io-upstream, r=brson

r? @graydon, @nikomatsakis, @pcwalton, or @catamorphism

Sorry this is so huge, but it's been accumulating for about a month. There's lots of stuff here, mostly oriented toward enabling multithreaded scheduling and improving compatibility between the old and new runtimes. Adds task pinning so that we can create the 'platform thread' in servo.

[Here](e1555f9b56/src/libstd/rt/mod.rs#L201) is the current runtime setup code.

About half of this has already been reviewed.
This commit is contained in:
bors
2013-07-09 18:28:46 -07:00
52 changed files with 5652 additions and 2095 deletions

View File

@@ -36,14 +36,6 @@ use std::u64;
use std::uint;
use std::vec;
pub mod rustrt {
use std::libc::size_t;
#[abi = "cdecl"]
pub extern {
pub unsafe fn rust_sched_threads() -> size_t;
}
}
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
@@ -493,11 +485,10 @@ static SCHED_OVERCOMMIT : uint = 1;
static SCHED_OVERCOMMIT : uint = 4u;
fn get_concurrency() -> uint {
unsafe {
let threads = rustrt::rust_sched_threads() as uint;
use std::rt;
let threads = rt::util::default_sched_threads();
if threads == 1 { 1 }
else { threads * SCHED_OVERCOMMIT }
}
}
#[allow(non_implicitly_copyable_typarams)]

View File

@@ -22,23 +22,6 @@ use vec::{ImmutableVector, OwnedVector};
/// Code for dealing with @-vectors. This is pretty incomplete, and
/// contains a bunch of duplication from the code for ~-vectors.
pub mod rustrt {
use libc;
use vec;
#[cfg(stage0)]
use intrinsic::{TyDesc};
#[cfg(not(stage0))]
use unstable::intrinsics::{TyDesc};
#[abi = "cdecl"]
#[link_name = "rustrt"]
pub extern {
pub unsafe fn vec_reserve_shared_actual(t: *TyDesc,
v: **vec::raw::VecRepr,
n: libc::size_t);
}
}
/// Returns the number of elements the vector can hold without reallocating
#[inline]
pub fn capacity<T>(v: @[T]) -> uint {
@@ -192,18 +175,17 @@ pub mod traits {
pub mod traits {}
pub mod raw {
use at_vec::{capacity, rustrt};
use at_vec::capacity;
use cast;
use cast::{transmute, transmute_copy};
use libc;
use ptr;
use sys;
use uint;
use unstable::intrinsics::{move_val_init};
use unstable::intrinsics;
use unstable::intrinsics::{move_val_init, TyDesc};
use vec;
#[cfg(stage0)]
use intrinsic::{get_tydesc};
#[cfg(not(stage0))]
use unstable::intrinsics::{get_tydesc};
use vec::UnboxedVecRepr;
pub type VecRepr = vec::raw::VecRepr;
pub type SliceRepr = vec::raw::SliceRepr;
@@ -264,9 +246,49 @@ pub mod raw {
pub unsafe fn reserve<T>(v: &mut @[T], n: uint) {
// Only make the (slow) call into the runtime if we have to
if capacity(*v) < n {
let ptr: **VecRepr = transmute(v);
rustrt::vec_reserve_shared_actual(get_tydesc::<T>(),
ptr, n as libc::size_t);
let ptr: *mut *mut VecRepr = transmute(v);
let ty = intrinsics::get_tydesc::<T>();
// XXX transmute shouldn't be necessary
let ty = cast::transmute(ty);
return reserve_raw(ty, ptr, n);
}
}
// Implementation detail. Shouldn't be public
#[allow(missing_doc)]
pub fn reserve_raw(ty: *TyDesc, ptr: *mut *mut VecRepr, n: uint) {
unsafe {
let size_in_bytes = n * (*ty).size;
if size_in_bytes > (**ptr).unboxed.alloc {
let total_size = size_in_bytes + sys::size_of::<UnboxedVecRepr>();
// XXX: UnboxedVecRepr has an extra u8 at the end
let total_size = total_size - sys::size_of::<u8>();
(*ptr) = local_realloc(*ptr as *(), total_size) as *mut VecRepr;
(**ptr).unboxed.alloc = size_in_bytes;
}
}
fn local_realloc(ptr: *(), size: uint) -> *() {
use rt;
use rt::OldTaskContext;
use rt::local::Local;
use rt::task::Task;
if rt::context() == OldTaskContext {
unsafe {
return rust_local_realloc(ptr, size as libc::size_t);
}
extern {
#[fast_ffi]
fn rust_local_realloc(ptr: *(), size: libc::size_t) -> *();
}
} else {
do Local::borrow::<Task, *()> |task| {
task.heap.realloc(ptr as *libc::c_void, size) as *()
}
}
}
}

View File

@@ -10,105 +10,13 @@
#[doc(hidden)];
use libc::{c_char, intptr_t, uintptr_t};
use libc::c_void;
use ptr::{mut_null};
use repr::BoxRepr;
use cast::transmute;
use unstable::intrinsics::TyDesc;
#[cfg(not(test))] use unstable::lang::clear_task_borrow_list;
/**
* Runtime structures
*
* NB: These must match the representation in the C++ runtime.
*/
type TaskID = uintptr_t;
struct StackSegment { priv opaque: () }
struct Scheduler { priv opaque: () }
struct SchedulerLoop { priv opaque: () }
struct Kernel { priv opaque: () }
struct Env { priv opaque: () }
struct AllocHeader { priv opaque: () }
struct MemoryRegion { priv opaque: () }
#[cfg(target_arch="x86")]
struct Registers {
data: [u32, ..16]
}
#[cfg(target_arch="arm")]
#[cfg(target_arch="mips")]
struct Registers {
data: [u32, ..32]
}
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
#[cfg(target_arch="mips")]
struct Context {
regs: Registers,
next: *Context,
pad: [u32, ..3]
}
#[cfg(target_arch="x86_64")]
struct Registers {
data: [u64, ..22]
}
#[cfg(target_arch="x86_64")]
struct Context {
regs: Registers,
next: *Context,
pad: uintptr_t
}
struct BoxedRegion {
env: *Env,
backing_region: *MemoryRegion,
live_allocs: *BoxRepr
}
#[cfg(target_arch="x86")]
#[cfg(target_arch="arm")]
#[cfg(target_arch="mips")]
struct Task {
// Public fields
refcount: intptr_t, // 0
id: TaskID, // 4
pad: [u32, ..2], // 8
ctx: Context, // 16
stack_segment: *StackSegment, // 96
runtime_sp: uintptr_t, // 100
scheduler: *Scheduler, // 104
scheduler_loop: *SchedulerLoop, // 108
// Fields known only to the runtime
kernel: *Kernel, // 112
name: *c_char, // 116
list_index: i32, // 120
boxed_region: BoxedRegion // 128
}
#[cfg(target_arch="x86_64")]
struct Task {
// Public fields
refcount: intptr_t,
id: TaskID,
ctx: Context,
stack_segment: *StackSegment,
runtime_sp: uintptr_t,
scheduler: *Scheduler,
scheduler_loop: *SchedulerLoop,
// Fields known only to the runtime
kernel: *Kernel,
name: *c_char,
list_index: i32,
boxed_region: BoxedRegion
}
type DropGlue<'self> = &'self fn(**TyDesc, *c_void);
/*
* Box annihilation
@@ -127,9 +35,9 @@ unsafe fn each_live_alloc(read_next_before: bool,
//! Walks the internal list of allocations
use managed;
use rt::local_heap;
let task: *Task = transmute(rustrt::rust_get_task());
let box = (*task).boxed_region.live_allocs;
let box = local_heap::live_allocs();
let mut box: *mut BoxRepr = transmute(copy box);
while box != mut_null() {
let next_before = transmute(copy (*box).header.next);
@@ -151,7 +59,13 @@ unsafe fn each_live_alloc(read_next_before: bool,
#[cfg(unix)]
fn debug_mem() -> bool {
::rt::env::get().debug_mem
use rt;
use rt::OldTaskContext;
// XXX: Need to port the environment struct to newsched
match rt::context() {
OldTaskContext => ::rt::env::get().debug_mem,
_ => false
}
}
#[cfg(windows)]
@@ -173,13 +87,12 @@ unsafe fn call_drop_glue(tydesc: *TyDesc, data: *i8) {
}
/// Destroys all managed memory (i.e. @ boxes) held by the current task.
#[cfg(not(test))]
#[lang="annihilate"]
pub unsafe fn annihilate() {
use unstable::lang::local_free;
use rt::local_heap::local_free;
use io::WriterUtil;
use io;
use libc;
use rt::borrowck;
use sys;
use managed;
@@ -191,7 +104,7 @@ pub unsafe fn annihilate() {
// Quick hack: we need to free this list upon task exit, and this
// is a convenient place to do it.
clear_task_borrow_list();
borrowck::clear_task_borrow_list();
// Pass 1: Make all boxes immortal.
//
@@ -213,7 +126,7 @@ pub unsafe fn annihilate() {
// callback, as the original value may have been freed.
for each_live_alloc(false) |box, uniq| {
if !uniq {
let tydesc = (*box).header.type_desc;
let tydesc: *TyDesc = transmute(copy (*box).header.type_desc);
let data = transmute(&(*box).data);
call_drop_glue(tydesc, data);
}

View File

@@ -220,7 +220,7 @@ impl<T: Send> Peekable<T> for PortSet<T> {
/// A channel that can be shared between many senders.
pub struct SharedChan<T> {
ch: Exclusive<pipesy::Chan<T>>
inner: Either<Exclusive<pipesy::Chan<T>>, rtcomm::SharedChan<T>>
}
impl<T: Send> SharedChan<T> {
@@ -228,40 +228,50 @@ impl<T: Send> SharedChan<T> {
pub fn new(c: Chan<T>) -> SharedChan<T> {
let Chan { inner } = c;
let c = match inner {
Left(c) => c,
Right(_) => fail!("SharedChan not implemented")
Left(c) => Left(exclusive(c)),
Right(c) => Right(rtcomm::SharedChan::new(c))
};
SharedChan { ch: exclusive(c) }
SharedChan { inner: c }
}
}
impl<T: Send> GenericChan<T> for SharedChan<T> {
fn send(&self, x: T) {
match self.inner {
Left(ref chan) => {
unsafe {
let mut xx = Some(x);
do self.ch.with_imm |chan| {
do chan.with_imm |chan| {
let x = replace(&mut xx, None);
chan.send(x.unwrap())
}
}
}
Right(ref chan) => chan.send(x)
}
}
}
impl<T: Send> GenericSmartChan<T> for SharedChan<T> {
fn try_send(&self, x: T) -> bool {
match self.inner {
Left(ref chan) => {
unsafe {
let mut xx = Some(x);
do self.ch.with_imm |chan| {
do chan.with_imm |chan| {
let x = replace(&mut xx, None);
chan.try_send(x.unwrap())
}
}
}
Right(ref chan) => chan.try_send(x)
}
}
}
impl<T: Send> ::clone::Clone for SharedChan<T> {
fn clone(&self) -> SharedChan<T> {
SharedChan { ch: self.ch.clone() }
SharedChan { inner: self.inner.clone() }
}
}

View File

@@ -11,14 +11,21 @@
//! Logging
use option::*;
use os;
use either::*;
use rt;
use rt::OldTaskContext;
use rt::logging::{Logger, StdErrLogger};
/// Turns on logging to stdout globally
pub fn console_on() {
if rt::context() == OldTaskContext {
unsafe {
rustrt::rust_log_console_on();
}
} else {
rt::logging::console_on();
}
}
/**
@@ -29,9 +36,18 @@ pub fn console_on() {
* the RUST_LOG environment variable
*/
pub fn console_off() {
// If RUST_LOG is set then the console can't be turned off
if os::getenv("RUST_LOG").is_some() {
return;
}
if rt::context() == OldTaskContext {
unsafe {
rustrt::rust_log_console_off();
}
} else {
rt::logging::console_off();
}
}
#[cfg(not(test))]

View File

@@ -10,18 +10,16 @@
#[macro_escape];
macro_rules! rterrln (
($( $arg:expr),+) => ( {
::rt::util::dumb_println(fmt!( $($arg),+ ));
} )
)
// Some basic logging
macro_rules! rtdebug_ (
($( $arg:expr),+) => ( {
dumb_println(fmt!( $($arg),+ ));
fn dumb_println(s: &str) {
use io::WriterUtil;
let dbg = ::libc::STDERR_FILENO as ::io::fd_t;
dbg.write_str(s);
dbg.write_str("\n");
}
rterrln!( $($arg),+ )
} )
)
@@ -33,21 +31,15 @@ macro_rules! rtdebug (
macro_rules! rtassert (
( $arg:expr ) => ( {
if !$arg {
abort!("assertion failed: %s", stringify!($arg));
rtabort!("assertion failed: %s", stringify!($arg));
}
} )
)
macro_rules! abort(
macro_rules! rtabort(
($( $msg:expr),+) => ( {
rtdebug!($($msg),+);
do_abort();
// NB: This is in a fn to avoid putting the `unsafe` block in a macro,
// which causes spurious 'unnecessary unsafe block' warnings.
fn do_abort() -> ! {
unsafe { ::libc::abort(); }
}
::rt::util::abort(fmt!($($msg),+));
} )
)

View File

@@ -741,6 +741,7 @@ pub fn list_dir(p: &Path) -> ~[~str] {
as_utf16_p
};
use rt::global_heap::malloc_raw;
#[nolink]
extern {
unsafe fn rust_list_dir_wfd_size() -> libc::size_t;
@@ -1134,9 +1135,16 @@ pub fn last_os_error() -> ~str {
* ignored and the process exits with the default failure status
*/
pub fn set_exit_status(code: int) {
use rt;
use rt::OldTaskContext;
if rt::context() == OldTaskContext {
unsafe {
rustrt::rust_set_exit_status(code as libc::intptr_t);
}
} else {
rt::util::set_exit_status(code);
}
}
unsafe fn load_argc_and_argv(argc: c_int, argv: **c_char) -> ~[~str] {
@@ -1165,11 +1173,21 @@ pub fn real_args() -> ~[~str] {
#[cfg(target_os = "android")]
#[cfg(target_os = "freebsd")]
pub fn real_args() -> ~[~str] {
use rt;
use rt::TaskContext;
if rt::context() == TaskContext {
match rt::args::clone() {
Some(args) => args,
None => fail!("process arguments not initialized")
}
} else {
unsafe {
let argc = rustrt::rust_get_argc();
let argv = rustrt::rust_get_argv();
load_argc_and_argv(argc, argv)
}
}
}
#[cfg(windows)]

125
src/libstd/rt/args.rs Normal file
View File

@@ -0,0 +1,125 @@
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Global storage for command line arguments
//!
//! The current incarnation of the Rust runtime expects for
//! the processes `argc` and `argv` arguments to be stored
//! in a globally-accessible location for use by the `os` module.
//!
//! XXX: Would be nice for this to not exist.
//! XXX: This has a lot of C glue for lack of globals.
use libc;
use option::{Option, Some, None};
use str;
use uint;
use unstable::finally::Finally;
use util;
/// One-time global initialization.
// Converts the raw C argc/argv into owned strings and stores them in the
// global cell; `put` fails (via rtassert!) if called twice without `take`.
pub unsafe fn init(argc: int, argv: **u8) {
    let args = load_argc_and_argv(argc, argv);
    put(args);
}
/// One-time global cleanup.
// Asserts that the args were actually present, i.e. `init` ran and nothing
// already drained the storage.
pub fn cleanup() {
    rtassert!(take().is_some());
}
/// Take the global arguments from global storage.
// Removes the stored value, leaving `None` behind; returns a deep copy of
// the argument vector (the `~~[~str]` box is dereferenced and cloned).
pub fn take() -> Option<~[~str]> {
    with_lock(|| unsafe {
        let ptr = get_global_ptr();
        let val = util::replace(&mut *ptr, None);
        val.map(|s: &~~[~str]| (**s).clone())
    })
}
/// Give the global arguments to global storage.
///
/// It is an error if the arguments already exist.
// Enforced with rtassert! under the lock, so concurrent double-put is also
// caught.
pub fn put(args: ~[~str]) {
    with_lock(|| unsafe {
        let ptr = get_global_ptr();
        rtassert!((*ptr).is_none());
        (*ptr) = Some(~args.clone());
    })
}
/// Make a clone of the global arguments.
// Non-destructive counterpart of `take`: the stored value stays in place.
pub fn clone() -> Option<~[~str]> {
    with_lock(|| unsafe {
        let ptr = get_global_ptr();
        (*ptr).map(|s: &~~[~str]| (**s).clone())
    })
}
// Run `f` while holding the C-side global-args mutex. The `finally` block
// guarantees the lock is dropped even if `f` fails/unwinds.
fn with_lock<T>(f: &fn() -> T) -> T {
    do (|| {
        unsafe {
            rust_take_global_args_lock();
            f()
        }
    }).finally {
        unsafe {
            rust_drop_global_args_lock();
        }
    }
}
// Fetch the C-owned storage cell holding the (optional, boxed) args vector.
fn get_global_ptr() -> *mut Option<~~[~str]> {
    unsafe { rust_get_global_args_ptr() }
}
// Copied from `os`.
// NOTE(review): assumes each argv entry is a NUL-terminated C string (the
// usual argv contract) and that `argc` entries are valid — not checked here.
unsafe fn load_argc_and_argv(argc: int, argv: **u8) -> ~[~str] {
    let mut args = ~[];
    for uint::range(0, argc as uint) |i| {
        args.push(str::raw::from_c_str(*(argv as **libc::c_char).offset(i)));
    }
    return args;
}
// C runtime glue: the lock and the storage pointer live on the C side
// because (per the module doc above) this predates usable Rust globals.
extern {
    fn rust_take_global_args_lock();
    fn rust_drop_global_args_lock();
    fn rust_get_global_args_ptr() -> *mut Option<~~[~str]>;
}
#[cfg(test)]
mod tests {
    use option::{Some, None};
    use super::*;
    use unstable::finally::Finally;
    #[test]
    fn smoke_test() {
        // Preserve the actual global state.
        let saved_value = take();
        let expected = ~[~"happy", ~"today?"];
        put(expected.clone());
        assert!(clone() == Some(expected.clone()));
        assert!(take() == Some(expected.clone()));
        assert!(take() == None);
        do (|| {
        }).finally {
            // Restore the actual global state.
            match saved_value {
                Some(ref args) => put(args.clone()),
                None => ()
            }
        }
    }
}

283
src/libstd/rt/borrowck.rs Normal file
View File

@@ -0,0 +1,283 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Runtime-side dynamic borrow checking for managed boxes.
//!
//! The top two bits of a box header's ref count double as borrow flags
//! (`FROZEN_BIT`, `MUT_BIT`). The `borrow_as_*` / `return_to_mut` entry
//! points set and restore those flags and unwind via `fail_borrowed` on a
//! conflict. Optionally, a per-task list of `BorrowRecord`s (stored on the
//! C task structure) is kept so failure messages can report where the
//! conflicting borrows happened.
use cast::transmute;
use libc::{c_char, c_void, size_t, STDERR_FILENO};
use io;
use io::{Writer, WriterUtil};
use managed::raw::BoxRepr;
use option::{Option, None, Some};
use uint;
use str;
use str::OwnedStr;
use sys;
use vec::ImmutableVector;
#[allow(non_camel_case_types)]
type rust_task = c_void;
// Borrow flags stored in the two highest bits of the ref-count word.
pub static FROZEN_BIT: uint = 1 << (uint::bits - 1);
pub static MUT_BIT: uint = 1 << (uint::bits - 2);
static ALL_BITS: uint = FROZEN_BIT | MUT_BIT;
// One recorded borrow: which box, and the source location that borrowed it.
#[deriving(Eq)]
struct BorrowRecord {
    box: *mut BoxRepr,
    file: *c_char,
    line: size_t
}
// Detach and return the current task's borrow-record list, if there is a
// current task and it has one; `None` otherwise (e.g. no task, or borrow
// recording not in use).
fn try_take_task_borrow_list() -> Option<~[BorrowRecord]> {
    unsafe {
        let cur_task: *rust_task = rust_try_get_task();
        if cur_task.is_not_null() {
            let ptr = rust_take_task_borrow_list(cur_task);
            if ptr.is_null() {
                None
            } else {
                let v: ~[BorrowRecord] = transmute(ptr);
                Some(v)
            }
        } else {
            None
        }
    }
}
// Apply `f` to the task's borrow list (empty list if none yet) and store the
// result back on the task. No-op when there is no current task.
fn swap_task_borrow_list(f: &fn(~[BorrowRecord]) -> ~[BorrowRecord]) {
    unsafe {
        let cur_task: *rust_task = rust_try_get_task();
        if cur_task.is_not_null() {
            let mut borrow_list: ~[BorrowRecord] = {
                let ptr = rust_take_task_borrow_list(cur_task);
                if ptr.is_null() { ~[] } else { transmute(ptr) }
            };
            borrow_list = f(borrow_list);
            rust_set_task_borrow_list(cur_task, transmute(borrow_list));
        }
    }
}
// Drop any recorded borrows for the current task (frees the ~[] by taking
// ownership and letting it go out of scope).
pub unsafe fn clear_task_borrow_list() {
    // pub because it is used by the box annihilator.
    let _ = try_take_task_borrow_list();
}
// Unwind the task with a "borrowed" failure. If borrow records were kept,
// append the file:line of every recorded borrow of this box to the message.
unsafe fn fail_borrowed(box: *mut BoxRepr, file: *c_char, line: size_t) {
    debug_borrow("fail_borrowed: ", box, 0, 0, file, line);
    match try_take_task_borrow_list() {
        None => { // not recording borrows
            let msg = "borrowed";
            do str::as_buf(msg) |msg_p, _| {
                sys::begin_unwind_(msg_p as *c_char, file, line);
            }
        }
        Some(borrow_list) => { // recording borrows
            let mut msg = ~"borrowed";
            let mut sep = " at ";
            for borrow_list.rev_iter().advance |entry| {
                if entry.box == box {
                    msg.push_str(sep);
                    let filename = str::raw::from_c_str(entry.file);
                    msg.push_str(filename);
                    msg.push_str(fmt!(":%u", entry.line as uint));
                    sep = " and at ";
                }
            }
            do str::as_buf(msg) |msg_p, _| {
                sys::begin_unwind_(msg_p as *c_char, file, line)
            }
        }
    }
}
/// Because this code is so perf. sensitive, use a static constant so that
/// debug printouts are compiled out most of the time.
static ENABLE_DEBUG: bool = false;
#[inline]
unsafe fn debug_borrow<T>(tag: &'static str,
                          p: *const T,
                          old_bits: uint,
                          new_bits: uint,
                          filename: *c_char,
                          line: size_t) {
    //! A useful debugging function that prints a pointer + tag + newline
    //! without allocating memory.
    // Gated on both the compile-time flag and the runtime env setting, so
    // the slow path is normally dead code.
    if ENABLE_DEBUG && ::rt::env::get().debug_borrow {
        debug_borrow_slow(tag, p, old_bits, new_bits, filename, line);
    }
    unsafe fn debug_borrow_slow<T>(tag: &'static str,
                                   p: *const T,
                                   old_bits: uint,
                                   new_bits: uint,
                                   filename: *c_char,
                                   line: size_t) {
        // Write straight to stderr's fd with the allocation-free helpers
        // below (DebugPrints).
        let dbg = STDERR_FILENO as io::fd_t;
        dbg.write_str(tag);
        dbg.write_hex(p as uint);
        dbg.write_str(" ");
        dbg.write_hex(old_bits);
        dbg.write_str(" ");
        dbg.write_hex(new_bits);
        dbg.write_str(" ");
        dbg.write_cstr(filename);
        dbg.write_str(":");
        dbg.write_hex(line as uint);
        dbg.write_str("\n");
    }
}
// Allocation-free hex and C-string writers used only by debug_borrow_slow.
trait DebugPrints {
    fn write_hex(&self, val: uint);
    unsafe fn write_cstr(&self, str: *c_char);
}
impl DebugPrints for io::fd_t {
    // Writes `i` as fixed-width hex (one nibble per character) into a stack
    // buffer, then emits it in a single write — no heap allocation.
    fn write_hex(&self, mut i: uint) {
        let letters = ['0', '1', '2', '3', '4', '5', '6', '7', '8',
                       '9', 'a', 'b', 'c', 'd', 'e', 'f'];
        static UINT_NIBBLES: uint = ::uint::bytes << 1;
        let mut buffer = [0_u8, ..UINT_NIBBLES+1];
        let mut c = UINT_NIBBLES;
        while c > 0 {
            c -= 1;
            buffer[c] = letters[i & 0xF] as u8;
            i >>= 4;
        }
        self.write(buffer.slice(0, UINT_NIBBLES));
    }
    // Writes a NUL-terminated C string by viewing it as a byte slice.
    unsafe fn write_cstr(&self, p: *c_char) {
        use libc::strlen;
        use vec;
        let len = strlen(p);
        let p: *u8 = transmute(p);
        do vec::raw::buf_as_slice(p, len as uint) |s| {
            self.write(s);
        }
    }
}
// Freeze the box for an immutable borrow. Fails (unwinds) if it is already
// mutably borrowed. Returns the previous ref-count word so the caller can
// later restore the flags via `return_to_mut`.
#[inline]
pub unsafe fn borrow_as_imm(a: *u8, file: *c_char, line: size_t) -> uint {
    let a: *mut BoxRepr = transmute(a);
    let old_ref_count = (*a).header.ref_count;
    let new_ref_count = old_ref_count | FROZEN_BIT;
    debug_borrow("borrow_as_imm:", a, old_ref_count, new_ref_count, file, line);
    if (old_ref_count & MUT_BIT) != 0 {
        fail_borrowed(a, file, line);
    }
    (*a).header.ref_count = new_ref_count;
    old_ref_count
}
// Claim the box for a mutable borrow. Fails if any borrow (mutable or
// frozen) is outstanding. Returns the previous ref-count word, as above.
#[inline]
pub unsafe fn borrow_as_mut(a: *u8, file: *c_char, line: size_t) -> uint {
    let a: *mut BoxRepr = transmute(a);
    let old_ref_count = (*a).header.ref_count;
    let new_ref_count = old_ref_count | MUT_BIT | FROZEN_BIT;
    debug_borrow("borrow_as_mut:", a, old_ref_count, new_ref_count, file, line);
    if (old_ref_count & (MUT_BIT|FROZEN_BIT)) != 0 {
        fail_borrowed(a, file, line);
    }
    (*a).header.ref_count = new_ref_count;
    old_ref_count
}
// Append a BorrowRecord for this borrow, but only when this is the outermost
// borrow of the box (no flags were set beforehand).
pub unsafe fn record_borrow(a: *u8, old_ref_count: uint,
                            file: *c_char, line: size_t) {
    if (old_ref_count & ALL_BITS) == 0 {
        // was not borrowed before
        let a: *mut BoxRepr = transmute(a);
        debug_borrow("record_borrow:", a, old_ref_count, 0, file, line);
        do swap_task_borrow_list |borrow_list| {
            let mut borrow_list = borrow_list;
            borrow_list.push(BorrowRecord {box: a, file: file, line: line});
            borrow_list
        }
    }
}
// Pop the matching BorrowRecord for an outermost borrow. Borrows nest like a
// stack, so the record must be the last entry; anything else is unwound as a
// "wrong borrow found" failure.
pub unsafe fn unrecord_borrow(a: *u8, old_ref_count: uint,
                              file: *c_char, line: size_t) {
    if (old_ref_count & ALL_BITS) == 0 {
        // was not borrowed before, so we should find the record at
        // the end of the list
        let a: *mut BoxRepr = transmute(a);
        debug_borrow("unrecord_borrow:", a, old_ref_count, 0, file, line);
        do swap_task_borrow_list |borrow_list| {
            let mut borrow_list = borrow_list;
            assert!(!borrow_list.is_empty());
            let br = borrow_list.pop();
            if br.box != a || br.file != file || br.line != line {
                let err = fmt!("wrong borrow found, br=%?", br);
                do str::as_buf(err) |msg_p, _| {
                    sys::begin_unwind_(msg_p as *c_char, file, line)
                }
            }
            borrow_list
        }
    }
}
// End of a borrow: restore the borrow-flag bits saved by borrow_as_imm /
// borrow_as_mut, leaving the ref-count portion of the word untouched.
#[inline]
pub unsafe fn return_to_mut(a: *u8, orig_ref_count: uint,
                            file: *c_char, line: size_t) {
    // Sometimes the box is null, if it is conditionally frozen.
    // See e.g. #4904.
    if !a.is_null() {
        let a: *mut BoxRepr = transmute(a);
        let old_ref_count = (*a).header.ref_count;
        let new_ref_count =
            (old_ref_count & !ALL_BITS) | (orig_ref_count & ALL_BITS);
        debug_borrow("return_to_mut:",
                     a, old_ref_count, new_ref_count, file, line);
        (*a).header.ref_count = new_ref_count;
    }
}
// Fail if the box is currently frozen (an immutable borrow is outstanding).
#[inline]
pub unsafe fn check_not_borrowed(a: *u8,
                                 file: *c_char,
                                 line: size_t) {
    let a: *mut BoxRepr = transmute(a);
    let ref_count = (*a).header.ref_count;
    debug_borrow("check_not_borrowed:", a, ref_count, 0, file, line);
    if (ref_count & FROZEN_BIT) != 0 {
        fail_borrowed(a, file, line);
    }
}
// C runtime accessors for the per-task borrow list.
// NOTE(review): #[rust_stack] marks these as callable on the Rust stack
// without a stack switch (old-runtime FFI convention) — confirm against the
// C side before relying on it.
extern {
    #[rust_stack]
    pub fn rust_take_task_borrow_list(task: *rust_task) -> *c_void;
    #[rust_stack]
    pub fn rust_set_task_borrow_list(task: *rust_task, map: *c_void);
    #[rust_stack]
    pub fn rust_try_get_task() -> *rust_task;
}

View File

@@ -19,13 +19,16 @@ use option::*;
use cast;
use util;
use ops::Drop;
use rt::task::Task;
use kinds::Send;
use rt::sched::{Scheduler, Coroutine};
use rt::sched::Scheduler;
use rt::local::Local;
use unstable::intrinsics::{atomic_xchg, atomic_load};
use unstable::atomics::{AtomicUint, AtomicOption, SeqCst};
use unstable::sync::UnsafeAtomicRcBox;
use util::Void;
use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable};
use cell::Cell;
use clone::Clone;
/// A combined refcount / ~Task pointer.
///
@@ -34,14 +37,14 @@ use cell::Cell;
/// * 2 - both endpoints are alive
/// * 1 - either the sender or the receiver is dead, determined by context
/// * <ptr> - A pointer to a blocked Task that can be transmuted to ~Task
type State = int;
type State = uint;
static STATE_BOTH: State = 2;
static STATE_ONE: State = 1;
/// The heap-allocated structure shared between two endpoints.
struct Packet<T> {
state: State,
state: AtomicUint,
payload: Option<T>,
}
@@ -70,7 +73,7 @@ pub struct PortOneHack<T> {
pub fn oneshot<T: Send>() -> (PortOne<T>, ChanOne<T>) {
let packet: ~Packet<T> = ~Packet {
state: STATE_BOTH,
state: AtomicUint::new(STATE_BOTH),
payload: None
};
@@ -114,20 +117,30 @@ impl<T> ChanOne<T> {
// reordering of the payload write. This also issues an
// acquire barrier that keeps the subsequent access of the
// ~Task pointer from being reordered.
let oldstate = atomic_xchg(&mut (*packet).state, STATE_ONE);
let oldstate = (*packet).state.swap(STATE_ONE, SeqCst);
match oldstate {
STATE_BOTH => {
// Port is not waiting yet. Nothing to do
do Local::borrow::<Scheduler, ()> |sched| {
rtdebug!("non-rendezvous send");
sched.metrics.non_rendezvous_sends += 1;
}
}
STATE_ONE => {
do Local::borrow::<Scheduler, ()> |sched| {
rtdebug!("rendezvous send");
sched.metrics.rendezvous_sends += 1;
}
// Port has closed. Need to clean up.
let _packet: ~Packet<T> = cast::transmute(this.inner.void_packet);
recvr_active = false;
}
task_as_state => {
// Port is blocked. Wake it up.
let recvr: ~Coroutine = cast::transmute(task_as_state);
let sched = Local::take::<Scheduler>();
let recvr: ~Task = cast::transmute(task_as_state);
let mut sched = Local::take::<Scheduler>();
rtdebug!("rendezvous send");
sched.metrics.rendezvous_sends += 1;
sched.schedule_task(recvr);
}
}
@@ -158,23 +171,30 @@ impl<T> PortOne<T> {
// Switch to the scheduler to put the ~Task into the Packet state.
let sched = Local::take::<Scheduler>();
do sched.deschedule_running_task_and_then |task| {
do sched.deschedule_running_task_and_then |sched, task| {
unsafe {
// Atomically swap the task pointer into the Packet state, issuing
// an acquire barrier to prevent reordering of the subsequent read
// of the payload. Also issues a release barrier to prevent reordering
// of any previous writes to the task structure.
let task_as_state: State = cast::transmute(task);
let oldstate = atomic_xchg(&mut (*packet).state, task_as_state);
let oldstate = (*packet).state.swap(task_as_state, SeqCst);
match oldstate {
STATE_BOTH => {
// Data has not been sent. Now we're blocked.
rtdebug!("non-rendezvous recv");
sched.metrics.non_rendezvous_recvs += 1;
}
STATE_ONE => {
rtdebug!("rendezvous recv");
sched.metrics.rendezvous_recvs += 1;
// Channel is closed. Switch back and check the data.
let task: ~Coroutine = cast::transmute(task_as_state);
let sched = Local::take::<Scheduler>();
sched.resume_task_immediately(task);
// NB: We have to drop back into the scheduler event loop here
// instead of switching immediately back or we could end up
// triggering infinite recursion on the scheduler's stack.
let task: ~Task = cast::transmute(task_as_state);
sched.enqueue_task(task);
}
_ => util::unreachable()
}
@@ -210,7 +230,7 @@ impl<T> Peekable<T> for PortOne<T> {
fn peek(&self) -> bool {
unsafe {
let packet: *mut Packet<T> = self.inner.packet();
let oldstate = atomic_load(&mut (*packet).state);
let oldstate = (*packet).state.load(SeqCst);
match oldstate {
STATE_BOTH => false,
STATE_ONE => (*packet).payload.is_some(),
@@ -227,7 +247,7 @@ impl<T> Drop for ChanOneHack<T> {
unsafe {
let this = cast::transmute_mut(self);
let oldstate = atomic_xchg(&mut (*this.packet()).state, STATE_ONE);
let oldstate = (*this.packet()).state.swap(STATE_ONE, SeqCst);
match oldstate {
STATE_BOTH => {
// Port still active. It will destroy the Packet.
@@ -238,7 +258,7 @@ impl<T> Drop for ChanOneHack<T> {
task_as_state => {
// The port is blocked waiting for a message we will never send. Wake it.
assert!((*this.packet()).payload.is_none());
let recvr: ~Coroutine = cast::transmute(task_as_state);
let recvr: ~Task = cast::transmute(task_as_state);
let sched = Local::take::<Scheduler>();
sched.schedule_task(recvr);
}
@@ -254,7 +274,7 @@ impl<T> Drop for PortOneHack<T> {
unsafe {
let this = cast::transmute_mut(self);
let oldstate = atomic_xchg(&mut (*this.packet()).state, STATE_ONE);
let oldstate = (*this.packet()).state.swap(STATE_ONE, SeqCst);
match oldstate {
STATE_BOTH => {
// Chan still active. It will destroy the packet.
@@ -295,16 +315,19 @@ struct StreamPayload<T> {
next: PortOne<StreamPayload<T>>
}
type StreamChanOne<T> = ChanOne<StreamPayload<T>>;
type StreamPortOne<T> = PortOne<StreamPayload<T>>;
/// A channel with unbounded size.
pub struct Chan<T> {
// FIXME #5372. Using Cell because we don't take &mut self
next: Cell<ChanOne<StreamPayload<T>>>
next: Cell<StreamChanOne<T>>
}
/// An port with unbounded size.
pub struct Port<T> {
// FIXME #5372. Using Cell because we don't take &mut self
next: Cell<PortOne<StreamPayload<T>>>
next: Cell<StreamPortOne<T>>
}
pub fn stream<T: Send>() -> (Port<T>, Chan<T>) {
@@ -357,6 +380,136 @@ impl<T> Peekable<T> for Port<T> {
}
}
pub struct SharedChan<T> {
// Just like Chan, but a shared AtomicOption instead of Cell
priv next: UnsafeAtomicRcBox<AtomicOption<StreamChanOne<T>>>
}
impl<T> SharedChan<T> {
pub fn new(chan: Chan<T>) -> SharedChan<T> {
let next = chan.next.take();
let next = AtomicOption::new(~next);
SharedChan { next: UnsafeAtomicRcBox::new(next) }
}
}
impl<T: Send> GenericChan<T> for SharedChan<T> {
fn send(&self, val: T) {
self.try_send(val);
}
}
impl<T: Send> GenericSmartChan<T> for SharedChan<T> {
fn try_send(&self, val: T) -> bool {
unsafe {
let (next_pone, next_cone) = oneshot();
let cone = (*self.next.get()).swap(~next_cone, SeqCst);
cone.unwrap().try_send(StreamPayload { val: val, next: next_pone })
}
}
}
impl<T> Clone for SharedChan<T> {
fn clone(&self) -> SharedChan<T> {
SharedChan {
next: self.next.clone()
}
}
}
pub struct SharedPort<T> {
// The next port on which we will receive the next port on which we will receive T
priv next_link: UnsafeAtomicRcBox<AtomicOption<PortOne<StreamPortOne<T>>>>
}
impl<T> SharedPort<T> {
pub fn new(port: Port<T>) -> SharedPort<T> {
// Put the data port into a new link pipe
let next_data_port = port.next.take();
let (next_link_port, next_link_chan) = oneshot();
next_link_chan.send(next_data_port);
let next_link = AtomicOption::new(~next_link_port);
SharedPort { next_link: UnsafeAtomicRcBox::new(next_link) }
}
}
impl<T: Send> GenericPort<T> for SharedPort<T> {
fn recv(&self) -> T {
match self.try_recv() {
Some(val) => val,
None => {
fail!("receiving on a closed channel");
}
}
}
fn try_recv(&self) -> Option<T> {
unsafe {
let (next_link_port, next_link_chan) = oneshot();
let link_port = (*self.next_link.get()).swap(~next_link_port, SeqCst);
let link_port = link_port.unwrap();
let data_port = link_port.recv();
let (next_data_port, res) = match data_port.try_recv() {
Some(StreamPayload { val, next }) => {
(next, Some(val))
}
None => {
let (next_data_port, _) = oneshot();
(next_data_port, None)
}
};
next_link_chan.send(next_data_port);
return res;
}
}
}
impl<T> Clone for SharedPort<T> {
fn clone(&self) -> SharedPort<T> {
SharedPort {
next_link: self.next_link.clone()
}
}
}
// XXX: Need better name
type MegaPipe<T> = (SharedPort<T>, SharedChan<T>);
pub fn megapipe<T: Send>() -> MegaPipe<T> {
let (port, chan) = stream();
(SharedPort::new(port), SharedChan::new(chan))
}
impl<T: Send> GenericChan<T> for MegaPipe<T> {
fn send(&self, val: T) {
match *self {
(_, ref c) => c.send(val)
}
}
}
impl<T: Send> GenericSmartChan<T> for MegaPipe<T> {
fn try_send(&self, val: T) -> bool {
match *self {
(_, ref c) => c.try_send(val)
}
}
}
impl<T: Send> GenericPort<T> for MegaPipe<T> {
fn recv(&self) -> T {
match *self {
(ref p, _) => p.recv()
}
}
fn try_recv(&self) -> Option<T> {
match *self {
(ref p, _) => p.try_recv()
}
}
}
#[cfg(test)]
mod test {
use super::*;
@@ -402,6 +555,8 @@ mod test {
{ let _c = chan; }
port.recv();
};
// What is our res?
rtdebug!("res is: %?", res.is_err());
assert!(res.is_err());
}
}
@@ -584,7 +739,7 @@ mod test {
#[test]
fn stream_send_recv_stress() {
for stress_factor().times {
do run_in_newsched_task {
do run_in_mt_newsched_task {
let (port, chan) = stream::<~int>();
send(chan, 0);
@@ -594,18 +749,18 @@ mod test {
if i == 10 { return }
let chan_cell = Cell::new(chan);
let _thread = do spawntask_thread {
do spawntask_random {
let chan = chan_cell.take();
chan.send(~i);
send(chan, i + 1);
};
}
}
fn recv(port: Port<~int>, i: int) {
if i == 10 { return }
let port_cell = Cell::new(port);
let _thread = do spawntask_thread {
do spawntask_random {
let port = port_cell.take();
assert!(port.recv() == ~i);
recv(port, i + 1);
@@ -614,4 +769,144 @@ mod test {
}
}
}
#[test]
fn recv_a_lot() {
// Regression test that we don't run out of stack in scheduler context
do run_in_newsched_task {
let (port, chan) = stream();
for 10000.times { chan.send(()) }
for 10000.times { port.recv() }
}
}
#[test]
fn shared_chan_stress() {
do run_in_mt_newsched_task {
let (port, chan) = stream();
let chan = SharedChan::new(chan);
let total = stress_factor() + 100;
for total.times {
let chan_clone = chan.clone();
do spawntask_random {
chan_clone.send(());
}
}
for total.times {
port.recv();
}
}
}
#[test]
fn shared_port_stress() {
do run_in_mt_newsched_task {
// XXX: Removing these type annotations causes an ICE
let (end_port, end_chan) = stream::<()>();
let (port, chan) = stream::<()>();
let end_chan = SharedChan::new(end_chan);
let port = SharedPort::new(port);
let total = stress_factor() + 100;
for total.times {
let end_chan_clone = end_chan.clone();
let port_clone = port.clone();
do spawntask_random {
port_clone.recv();
end_chan_clone.send(());
}
}
for total.times {
chan.send(());
}
for total.times {
end_port.recv();
}
}
}
#[test]
fn shared_port_close_simple() {
do run_in_mt_newsched_task {
let (port, chan) = stream::<()>();
let port = SharedPort::new(port);
{ let _chan = chan; }
assert!(port.try_recv().is_none());
}
}
#[test]
fn shared_port_close() {
do run_in_mt_newsched_task {
let (end_port, end_chan) = stream::<bool>();
let (port, chan) = stream::<()>();
let end_chan = SharedChan::new(end_chan);
let port = SharedPort::new(port);
let chan = SharedChan::new(chan);
let send_total = 10;
let recv_total = 20;
do spawntask_random {
for send_total.times {
let chan_clone = chan.clone();
do spawntask_random {
chan_clone.send(());
}
}
}
let end_chan_clone = end_chan.clone();
do spawntask_random {
for recv_total.times {
let port_clone = port.clone();
let end_chan_clone = end_chan_clone.clone();
do spawntask_random {
let recvd = port_clone.try_recv().is_some();
end_chan_clone.send(recvd);
}
}
}
let mut recvd = 0;
for recv_total.times {
recvd += if end_port.recv() { 1 } else { 0 };
}
assert!(recvd == send_total);
}
}
#[test]
fn megapipe_stress() {
use rand;
use rand::RngUtil;
do run_in_mt_newsched_task {
let (end_port, end_chan) = stream::<()>();
let end_chan = SharedChan::new(end_chan);
let pipe = megapipe();
let total = stress_factor() + 10;
let mut rng = rand::rng();
for total.times {
let msgs = rng.gen_uint_range(0, 10);
let pipe_clone = pipe.clone();
let end_chan_clone = end_chan.clone();
do spawntask_random {
for msgs.times {
pipe_clone.send(());
}
for msgs.times {
pipe_clone.recv();
}
}
end_chan_clone.send(());
}
for total.times {
end_port.recv();
}
}
}
}

View File

@@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::{c_char, c_void, size_t, uintptr_t, free, malloc, realloc};
use libc::{c_void, c_char, size_t, uintptr_t, free, malloc, realloc};
use managed::raw::{BoxHeaderRepr, BoxRepr};
use unstable::intrinsics::TyDesc;
use sys::size_of;
@@ -95,6 +95,11 @@ pub unsafe fn vector_exchange_malloc(align: u32, size: uintptr_t) -> *c_char {
// FIXME: #7496
#[cfg(not(test))]
#[lang="closure_exchange_malloc"]
#[inline]
pub unsafe fn closure_exchange_malloc_(td: *c_char, size: uintptr_t) -> *c_char {
closure_exchange_malloc(td, size)
}
#[inline]
pub unsafe fn closure_exchange_malloc(td: *c_char, size: uintptr_t) -> *c_char {
let td = td as *TyDesc;
@@ -115,6 +120,11 @@ pub unsafe fn closure_exchange_malloc(td: *c_char, size: uintptr_t) -> *c_char {
// inside a landing pad may corrupt the state of the exception handler.
#[cfg(not(test))]
#[lang="exchange_free"]
#[inline]
pub unsafe fn exchange_free_(ptr: *c_char) {
exchange_free(ptr)
}
#[inline]
pub unsafe fn exchange_free(ptr: *c_char) {
free(ptr as *c_void);

View File

@@ -8,7 +8,10 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
type Port = u16;
#[deriving(Eq, TotalEq)]
pub enum IpAddr {
Ipv4(u8, u8, u8, u8, u16),
Ipv6
Ipv4(u8, u8, u8, u8, Port),
Ipv6(u16, u16, u16, u16, u16, u16, u16, u16, Port)
}

View File

@@ -18,15 +18,11 @@ use rt::rtio::{IoFactory, IoFactoryObject,
RtioTcpStream, RtioTcpStreamObject};
use rt::local::Local;
pub struct TcpStream {
rtstream: ~RtioTcpStreamObject
}
pub struct TcpStream(~RtioTcpStreamObject);
impl TcpStream {
fn new(s: ~RtioTcpStreamObject) -> TcpStream {
TcpStream {
rtstream: s
}
TcpStream(s)
}
pub fn connect(addr: IpAddr) -> Option<TcpStream> {
@@ -38,13 +34,11 @@ impl TcpStream {
};
match stream {
Ok(s) => {
Some(TcpStream::new(s))
}
Ok(s) => Some(TcpStream::new(s)),
Err(ioerr) => {
rtdebug!("failed to connect: %?", ioerr);
io_error::cond.raise(ioerr);
return None;
None
}
}
}
@@ -52,8 +46,7 @@ impl TcpStream {
impl Reader for TcpStream {
fn read(&mut self, buf: &mut [u8]) -> Option<uint> {
let bytes_read = self.rtstream.read(buf);
match bytes_read {
match (**self).read(buf) {
Ok(read) => Some(read),
Err(ioerr) => {
// EOF is indicated by returning None
@@ -70,8 +63,7 @@ impl Reader for TcpStream {
impl Writer for TcpStream {
fn write(&mut self, buf: &[u8]) {
let res = self.rtstream.write(buf);
match res {
match (**self).write(buf) {
Ok(_) => (),
Err(ioerr) => {
io_error::cond.raise(ioerr);
@@ -82,9 +74,7 @@ impl Writer for TcpStream {
fn flush(&mut self) { fail!() }
}
pub struct TcpListener {
rtlistener: ~RtioTcpListenerObject,
}
pub struct TcpListener(~RtioTcpListenerObject);
impl TcpListener {
pub fn bind(addr: IpAddr) -> Option<TcpListener> {
@@ -93,11 +83,7 @@ impl TcpListener {
(*io).tcp_bind(addr)
};
match listener {
Ok(l) => {
Some(TcpListener {
rtlistener: l
})
}
Ok(l) => Some(TcpListener(l)),
Err(ioerr) => {
io_error::cond.raise(ioerr);
return None;
@@ -108,8 +94,7 @@ impl TcpListener {
impl Listener<TcpStream> for TcpListener {
fn accept(&mut self) -> Option<TcpStream> {
let rtstream = self.rtlistener.accept();
match rtstream {
match (**self).accept() {
Ok(s) => {
Some(TcpStream::new(s))
}
@@ -163,7 +148,7 @@ mod test {
}
#[test]
fn smoke_test() {
fn smoke_test_ip4() {
do run_in_newsched_task {
let addr = next_test_ip4();
@@ -183,7 +168,27 @@ mod test {
}
#[test]
fn read_eof() {
fn smoke_test_ip6() {
do run_in_newsched_task {
let addr = next_test_ip6();
do spawntask_immediately {
let mut listener = TcpListener::bind(addr);
let mut stream = listener.accept();
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
}
do spawntask_immediately {
let mut stream = TcpStream::connect(addr);
stream.write([99]);
}
}
}
#[test]
fn read_eof_ip4() {
do run_in_newsched_task {
let addr = next_test_ip4();
@@ -203,7 +208,27 @@ mod test {
}
#[test]
fn read_eof_twice() {
fn read_eof_ip6() {
do run_in_newsched_task {
let addr = next_test_ip6();
do spawntask_immediately {
let mut listener = TcpListener::bind(addr);
let mut stream = listener.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
}
do spawntask_immediately {
let _stream = TcpStream::connect(addr);
// Close
}
}
}
#[test]
fn read_eof_twice_ip4() {
do run_in_newsched_task {
let addr = next_test_ip4();
@@ -225,7 +250,29 @@ mod test {
}
#[test]
fn write_close() {
fn read_eof_twice_ip6() {
do run_in_newsched_task {
let addr = next_test_ip6();
do spawntask_immediately {
let mut listener = TcpListener::bind(addr);
let mut stream = listener.accept();
let mut buf = [0];
let nread = stream.read(buf);
assert!(nread.is_none());
let nread = stream.read(buf);
assert!(nread.is_none());
}
do spawntask_immediately {
let _stream = TcpStream::connect(addr);
// Close
}
}
}
#[test]
fn write_close_ip4() {
do run_in_newsched_task {
let addr = next_test_ip4();
@@ -254,7 +301,36 @@ mod test {
}
#[test]
fn multiple_connect_serial() {
fn write_close_ip6() {
do run_in_newsched_task {
let addr = next_test_ip6();
do spawntask_immediately {
let mut listener = TcpListener::bind(addr);
let mut stream = listener.accept();
let buf = [0];
loop {
let mut stop = false;
do io_error::cond.trap(|e| {
// NB: ECONNRESET on linux, EPIPE on mac
assert!(e.kind == ConnectionReset || e.kind == BrokenPipe);
stop = true;
}).in {
stream.write(buf);
}
if stop { break }
}
}
do spawntask_immediately {
let _stream = TcpStream::connect(addr);
// Close
}
}
}
#[test]
fn multiple_connect_serial_ip4() {
do run_in_newsched_task {
let addr = next_test_ip4();
let max = 10;
@@ -279,7 +355,32 @@ mod test {
}
#[test]
fn multiple_connect_interleaved_greedy_schedule() {
fn multiple_connect_serial_ip6() {
do run_in_newsched_task {
let addr = next_test_ip6();
let max = 10;
do spawntask_immediately {
let mut listener = TcpListener::bind(addr);
for max.times {
let mut stream = listener.accept();
let mut buf = [0];
stream.read(buf);
assert_eq!(buf[0], 99);
}
}
do spawntask_immediately {
for max.times {
let mut stream = TcpStream::connect(addr);
stream.write([99]);
}
}
}
}
#[test]
fn multiple_connect_interleaved_greedy_schedule_ip4() {
do run_in_newsched_task {
let addr = next_test_ip4();
static MAX: int = 10;
@@ -318,7 +419,46 @@ mod test {
}
#[test]
fn multiple_connect_interleaved_lazy_schedule() {
fn multiple_connect_interleaved_greedy_schedule_ip6() {
do run_in_newsched_task {
let addr = next_test_ip6();
static MAX: int = 10;
do spawntask_immediately {
let mut listener = TcpListener::bind(addr);
for int::range(0, MAX) |i| {
let stream = Cell::new(listener.accept());
rtdebug!("accepted");
// Start another task to handle the connection
do spawntask_immediately {
let mut stream = stream.take();
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == i as u8);
rtdebug!("read");
}
}
}
connect(0, addr);
fn connect(i: int, addr: IpAddr) {
if i == MAX { return }
do spawntask_immediately {
rtdebug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
rtdebug!("writing");
stream.write([i as u8]);
}
}
}
}
#[test]
fn multiple_connect_interleaved_lazy_schedule_ip4() {
do run_in_newsched_task {
let addr = next_test_ip4();
static MAX: int = 10;
@@ -355,5 +495,43 @@ mod test {
}
}
}
#[test]
fn multiple_connect_interleaved_lazy_schedule_ip6() {
do run_in_newsched_task {
let addr = next_test_ip6();
static MAX: int = 10;
do spawntask_immediately {
let mut listener = TcpListener::bind(addr);
for int::range(0, MAX) |_| {
let stream = Cell::new(listener.accept());
rtdebug!("accepted");
// Start another task to handle the connection
do spawntask_later {
let mut stream = stream.take();
let mut buf = [0];
stream.read(buf);
assert!(buf[0] == 99);
rtdebug!("read");
}
}
}
connect(0, addr);
fn connect(i: int, addr: IpAddr) {
if i == MAX { return }
do spawntask_later {
rtdebug!("connecting");
let mut stream = TcpStream::connect(addr);
// Connect again before writing
connect(i + 1, addr);
rtdebug!("writing");
stream.write([99]);
}
}
}
}
}

View File

@@ -8,38 +8,247 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::*;
use super::super::*;
use super::ip::IpAddr;
use option::{Option, Some, None};
use result::{Ok, Err};
use rt::io::net::ip::IpAddr;
use rt::io::{Reader, Writer};
use rt::io::{io_error, read_error, EndOfFile};
use rt::rtio::{RtioUdpSocketObject, RtioUdpSocket, IoFactory, IoFactoryObject};
use rt::local::Local;
pub struct UdpStream;
pub struct UdpSocket(~RtioUdpSocketObject);
impl UdpStream {
pub fn connect(_addr: IpAddr) -> Option<UdpStream> {
fail!()
impl UdpSocket {
pub fn bind(addr: IpAddr) -> Option<UdpSocket> {
let socket = unsafe { (*Local::unsafe_borrow::<IoFactoryObject>()).udp_bind(addr) };
match socket {
Ok(s) => Some(UdpSocket(s)),
Err(ioerr) => {
io_error::cond.raise(ioerr);
None
}
}
}
pub fn recvfrom(&self, buf: &mut [u8]) -> Option<(uint, IpAddr)> {
match (**self).recvfrom(buf) {
Ok((nread, src)) => Some((nread, src)),
Err(ioerr) => {
// EOF is indicated by returning None
if ioerr.kind != EndOfFile {
read_error::cond.raise(ioerr);
}
None
}
}
}
pub fn sendto(&self, buf: &[u8], dst: IpAddr) {
match (**self).sendto(buf, dst) {
Ok(_) => (),
Err(ioerr) => io_error::cond.raise(ioerr),
}
}
pub fn connect(self, other: IpAddr) -> UdpStream {
UdpStream { socket: self, connectedTo: other }
}
}
pub struct UdpStream {
socket: UdpSocket,
connectedTo: IpAddr
}
impl UdpStream {
pub fn as_socket<T>(&self, f: &fn(&UdpSocket) -> T) -> T { f(&self.socket) }
pub fn disconnect(self) -> UdpSocket { self.socket }
}
impl Reader for UdpStream {
fn read(&mut self, _buf: &mut [u8]) -> Option<uint> { fail!() }
fn read(&mut self, buf: &mut [u8]) -> Option<uint> {
do self.as_socket |sock| {
match sock.recvfrom(buf) {
Some((_nread, src)) if src != self.connectedTo => Some(0),
Some((nread, _src)) => Some(nread),
None => None,
}
}
}
fn eof(&mut self) -> bool { fail!() }
}
impl Writer for UdpStream {
fn write(&mut self, _buf: &[u8]) { fail!() }
fn write(&mut self, buf: &[u8]) {
do self.as_socket |sock| {
sock.sendto(buf, self.connectedTo);
}
}
fn flush(&mut self) { fail!() }
}
pub struct UdpListener;
#[cfg(test)]
mod test {
use super::*;
use rt::test::*;
use rt::io::net::ip::Ipv4;
use rt::io::*;
use option::{Some, None};
impl UdpListener {
pub fn bind(_addr: IpAddr) -> Option<UdpListener> {
fail!()
#[test] #[ignore]
fn bind_error() {
do run_in_newsched_task {
let mut called = false;
do io_error::cond.trap(|e| {
assert!(e.kind == PermissionDenied);
called = true;
}).in {
let addr = Ipv4(0, 0, 0, 0, 1);
let socket = UdpSocket::bind(addr);
assert!(socket.is_none());
}
assert!(called);
}
}
#[test]
fn socket_smoke_test_ip4() {
do run_in_newsched_task {
let server_ip = next_test_ip4();
let client_ip = next_test_ip4();
do spawntask_immediately {
match UdpSocket::bind(server_ip) {
Some(server) => {
let mut buf = [0];
match server.recvfrom(buf) {
Some((nread, src)) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
assert_eq!(src, client_ip);
}
None => fail!()
}
}
None => fail!()
}
}
do spawntask_immediately {
match UdpSocket::bind(client_ip) {
Some(client) => client.sendto([99], server_ip),
None => fail!()
}
}
}
}
#[test]
fn socket_smoke_test_ip6() {
do run_in_newsched_task {
let server_ip = next_test_ip6();
let client_ip = next_test_ip6();
do spawntask_immediately {
match UdpSocket::bind(server_ip) {
Some(server) => {
let mut buf = [0];
match server.recvfrom(buf) {
Some((nread, src)) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
assert_eq!(src, client_ip);
}
None => fail!()
}
}
None => fail!()
}
}
do spawntask_immediately {
match UdpSocket::bind(client_ip) {
Some(client) => client.sendto([99], server_ip),
None => fail!()
}
}
}
}
#[test]
fn stream_smoke_test_ip4() {
do run_in_newsched_task {
let server_ip = next_test_ip4();
let client_ip = next_test_ip4();
do spawntask_immediately {
match UdpSocket::bind(server_ip) {
Some(server) => {
let server = ~server;
let mut stream = server.connect(client_ip);
let mut buf = [0];
match stream.read(buf) {
Some(nread) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
}
None => fail!()
}
}
None => fail!()
}
}
do spawntask_immediately {
match UdpSocket::bind(client_ip) {
Some(client) => {
let client = ~client;
let mut stream = client.connect(server_ip);
stream.write([99]);
}
None => fail!()
}
}
}
}
#[test]
fn stream_smoke_test_ip6() {
do run_in_newsched_task {
let server_ip = next_test_ip6();
let client_ip = next_test_ip6();
do spawntask_immediately {
match UdpSocket::bind(server_ip) {
Some(server) => {
let server = ~server;
let mut stream = server.connect(client_ip);
let mut buf = [0];
match stream.read(buf) {
Some(nread) => {
assert_eq!(nread, 1);
assert_eq!(buf[0], 99);
}
None => fail!()
}
}
None => fail!()
}
}
do spawntask_immediately {
match UdpSocket::bind(client_ip) {
Some(client) => {
let client = ~client;
let mut stream = client.connect(server_ip);
stream.write([99]);
}
None => fail!()
}
}
}
}
}
impl Listener<UdpStream> for UdpListener {
fn accept(&mut self) -> Option<UdpStream> { fail!() }
}

645
src/libstd/rt/join_latch.rs Normal file
View File

@@ -0,0 +1,645 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The JoinLatch is a concurrent type that establishes the task
//! tree and propagates failure.
//!
//! Each task gets a JoinLatch that is derived from the JoinLatch
//! of its parent task. Every latch must be released by either calling
//! the non-blocking `release` method or the task-blocking `wait` method.
//! Releasing a latch does not complete until all of its child latches
//! complete.
//!
//! Latches carry a `success` flag that is set to `false` during task
//! failure and is propagated both from children to parents and parents
//! to children. The status af this flag may be queried for the purposes
//! of linked failure.
//!
//! In addition to failure propagation the task tree serves to keep the
//! default task schedulers alive. The runtime only sends the shutdown
//! message to schedulers once the root task exits.
//!
//! Under this scheme tasks that terminate before their children become
//! 'zombies' since they may not exit until their children do. Zombie
//! tasks are 'tombstoned' as `Tombstone(~JoinLatch)` and the tasks
//! themselves allowed to terminate.
//!
//! XXX: Propagate flag from parents to children.
//! XXX: Tombstoning actually doesn't work.
//! XXX: This could probably be done in a way that doesn't leak tombstones
//! longer than the life of the child tasks.
use comm::{GenericPort, Peekable, GenericSmartChan};
use clone::Clone;
use container::Container;
use option::{Option, Some, None};
use ops::Drop;
use rt::comm::{SharedChan, Port, stream};
use rt::local::Local;
use rt::sched::Scheduler;
use unstable::atomics::{AtomicUint, SeqCst};
use util;
use vec::OwnedVector;
// FIXME #7026: Would prefer this to be an enum
pub struct JoinLatch {
priv parent: Option<ParentLink>,
priv child: Option<ChildLink>,
closed: bool,
}
// Shared between parents and all their children.
struct SharedState {
/// Reference count, held by a parent and all children.
count: AtomicUint,
success: bool
}
struct ParentLink {
shared: *mut SharedState,
// For communicating with the parent.
chan: SharedChan<Message>
}
struct ChildLink {
shared: ~SharedState,
// For receiving from children.
port: Port<Message>,
chan: SharedChan<Message>,
// Prevents dropping the child SharedState reference counts multiple times.
dropped_child: bool
}
// Messages from child latches to parent.
enum Message {
Tombstone(~JoinLatch),
ChildrenTerminated
}
impl JoinLatch {
pub fn new_root() -> ~JoinLatch {
let this = ~JoinLatch {
parent: None,
child: None,
closed: false
};
rtdebug!("new root latch %x", this.id());
return this;
}
fn id(&self) -> uint {
unsafe { ::cast::transmute(&*self) }
}
pub fn new_child(&mut self) -> ~JoinLatch {
rtassert!(!self.closed);
if self.child.is_none() {
// This is the first time spawning a child
let shared = ~SharedState {
count: AtomicUint::new(1),
success: true
};
let (port, chan) = stream();
let chan = SharedChan::new(chan);
let child = ChildLink {
shared: shared,
port: port,
chan: chan,
dropped_child: false
};
self.child = Some(child);
}
let child_link: &mut ChildLink = self.child.get_mut_ref();
let shared_state: *mut SharedState = &mut *child_link.shared;
child_link.shared.count.fetch_add(1, SeqCst);
let child = ~JoinLatch {
parent: Some(ParentLink {
shared: shared_state,
chan: child_link.chan.clone()
}),
child: None,
closed: false
};
rtdebug!("NEW child latch %x", child.id());
return child;
}
pub fn release(~self, local_success: bool) {
// XXX: This should not block, but there's a bug in the below
// code that I can't figure out.
self.wait(local_success);
}
// XXX: Should not require ~self
fn release_broken(~self, local_success: bool) {
rtassert!(!self.closed);
rtdebug!("releasing %x", self.id());
let id = self.id();
let _ = id; // XXX: `id` is only used in debug statements so appears unused
let mut this = self;
let mut child_success = true;
let mut children_done = false;
if this.child.is_some() {
rtdebug!("releasing children");
let child_link: &mut ChildLink = this.child.get_mut_ref();
let shared: &mut SharedState = &mut *child_link.shared;
if !child_link.dropped_child {
let last_count = shared.count.fetch_sub(1, SeqCst);
rtdebug!("child count before sub %u %x", last_count, id);
if last_count == 1 {
assert!(child_link.chan.try_send(ChildrenTerminated));
}
child_link.dropped_child = true;
}
// Wait for messages from children
let mut tombstones = ~[];
loop {
if child_link.port.peek() {
match child_link.port.recv() {
Tombstone(t) => {
tombstones.push(t);
},
ChildrenTerminated => {
children_done = true;
break;
}
}
} else {
break
}
}
rtdebug!("releasing %u tombstones %x", tombstones.len(), id);
// Try to release the tombstones. Those that still have
// outstanding will be re-enqueued. When this task's
// parents release their latch we'll end up back here
// trying them again.
while !tombstones.is_empty() {
tombstones.pop().release(true);
}
if children_done {
let count = shared.count.load(SeqCst);
assert!(count == 0);
// self_count is the acquire-read barrier
child_success = shared.success;
}
} else {
children_done = true;
}
let total_success = local_success && child_success;
rtassert!(this.parent.is_some());
unsafe {
{
let parent_link: &mut ParentLink = this.parent.get_mut_ref();
let shared: *mut SharedState = parent_link.shared;
if !total_success {
// parent_count is the write-wait barrier
(*shared).success = false;
}
}
if children_done {
rtdebug!("children done");
do Local::borrow::<Scheduler, ()> |sched| {
sched.metrics.release_tombstone += 1;
}
{
rtdebug!("RELEASING parent %x", id);
let parent_link: &mut ParentLink = this.parent.get_mut_ref();
let shared: *mut SharedState = parent_link.shared;
let last_count = (*shared).count.fetch_sub(1, SeqCst);
rtdebug!("count before parent sub %u %x", last_count, id);
if last_count == 1 {
assert!(parent_link.chan.try_send(ChildrenTerminated));
}
}
this.closed = true;
util::ignore(this);
} else {
rtdebug!("children not done");
rtdebug!("TOMBSTONING %x", id);
do Local::borrow::<Scheduler, ()> |sched| {
sched.metrics.release_no_tombstone += 1;
}
let chan = {
let parent_link: &mut ParentLink = this.parent.get_mut_ref();
parent_link.chan.clone()
};
assert!(chan.try_send(Tombstone(this)));
}
}
}
// XXX: Should not require ~self
pub fn wait(~self, local_success: bool) -> bool {
rtassert!(!self.closed);
rtdebug!("WAITING %x", self.id());
let mut this = self;
let mut child_success = true;
if this.child.is_some() {
rtdebug!("waiting for children");
let child_link: &mut ChildLink = this.child.get_mut_ref();
let shared: &mut SharedState = &mut *child_link.shared;
if !child_link.dropped_child {
let last_count = shared.count.fetch_sub(1, SeqCst);
rtdebug!("child count before sub %u", last_count);
if last_count == 1 {
assert!(child_link.chan.try_send(ChildrenTerminated));
}
child_link.dropped_child = true;
}
// Wait for messages from children
loop {
match child_link.port.recv() {
Tombstone(t) => {
t.wait(true);
}
ChildrenTerminated => break
}
}
let count = shared.count.load(SeqCst);
if count != 0 { ::io::println(fmt!("%u", count)); }
assert!(count == 0);
// self_count is the acquire-read barrier
child_success = shared.success;
}
let total_success = local_success && child_success;
if this.parent.is_some() {
rtdebug!("releasing parent");
unsafe {
let parent_link: &mut ParentLink = this.parent.get_mut_ref();
let shared: *mut SharedState = parent_link.shared;
if !total_success {
// parent_count is the write-wait barrier
(*shared).success = false;
}
let last_count = (*shared).count.fetch_sub(1, SeqCst);
rtdebug!("count before parent sub %u", last_count);
if last_count == 1 {
assert!(parent_link.chan.try_send(ChildrenTerminated));
}
}
}
this.closed = true;
util::ignore(this);
return total_success;
}
}
impl Drop for JoinLatch {
fn drop(&self) {
rtdebug!("DESTROYING %x", self.id());
rtassert!(self.closed);
}
}
#[cfg(test)]
mod test {
use super::*;
use cell::Cell;
use container::Container;
use iter::Times;
use rt::test::*;
use rand;
use rand::RngUtil;
use vec::{CopyableVector, ImmutableVector};
#[test]
fn success_immediately() {
do run_in_newsched_task {
let mut latch = JoinLatch::new_root();
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_immediately {
let child_latch = child_latch.take();
assert!(child_latch.wait(true));
}
assert!(latch.wait(true));
}
}
#[test]
fn success_later() {
do run_in_newsched_task {
let mut latch = JoinLatch::new_root();
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_later {
let child_latch = child_latch.take();
assert!(child_latch.wait(true));
}
assert!(latch.wait(true));
}
}
#[test]
fn mt_success() {
do run_in_mt_newsched_task {
let mut latch = JoinLatch::new_root();
for 10.times {
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_random {
let child_latch = child_latch.take();
assert!(child_latch.wait(true));
}
}
assert!(latch.wait(true));
}
}
#[test]
fn mt_failure() {
do run_in_mt_newsched_task {
let mut latch = JoinLatch::new_root();
let spawn = |status| {
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_random {
let child_latch = child_latch.take();
child_latch.wait(status);
}
};
for 10.times { spawn(true) }
spawn(false);
for 10.times { spawn(true) }
assert!(!latch.wait(true));
}
}
#[test]
fn mt_multi_level_success() {
do run_in_mt_newsched_task {
let mut latch = JoinLatch::new_root();
fn child(latch: &mut JoinLatch, i: int) {
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_random {
let mut child_latch = child_latch.take();
if i != 0 {
child(&mut *child_latch, i - 1);
child_latch.wait(true);
} else {
child_latch.wait(true);
}
}
}
child(&mut *latch, 10);
assert!(latch.wait(true));
}
}
#[test]
fn mt_multi_level_failure() {
do run_in_mt_newsched_task {
let mut latch = JoinLatch::new_root();
fn child(latch: &mut JoinLatch, i: int) {
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_random {
let mut child_latch = child_latch.take();
if i != 0 {
child(&mut *child_latch, i - 1);
child_latch.wait(false);
} else {
child_latch.wait(true);
}
}
}
child(&mut *latch, 10);
assert!(!latch.wait(true));
}
}
#[test]
fn release_child() {
do run_in_newsched_task {
let mut latch = JoinLatch::new_root();
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_immediately {
let latch = child_latch.take();
latch.release(false);
}
assert!(!latch.wait(true));
}
}
#[test]
fn release_child_tombstone() {
do run_in_newsched_task {
let mut latch = JoinLatch::new_root();
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_immediately {
let mut latch = child_latch.take();
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_later {
let latch = child_latch.take();
latch.release(false);
}
latch.release(true);
}
assert!(!latch.wait(true));
}
}
#[test]
fn release_child_no_tombstone() {
do run_in_newsched_task {
let mut latch = JoinLatch::new_root();
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_later {
let mut latch = child_latch.take();
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
do spawntask_immediately {
let latch = child_latch.take();
latch.release(false);
}
latch.release(true);
}
assert!(!latch.wait(true));
}
}
#[test]
fn release_child_tombstone_stress() {
fn rand_orders() -> ~[bool] {
let mut v = ~[false,.. 5];
v[0] = true;
let mut rng = rand::rng();
return rng.shuffle(v);
}
fn split_orders(orders: &[bool]) -> (~[bool], ~[bool]) {
if orders.is_empty() {
return (~[], ~[]);
} else if orders.len() <= 2 {
return (orders.to_owned(), ~[]);
}
let mut rng = rand::rng();
let n = rng.gen_uint_range(1, orders.len());
let first = orders.slice(0, n).to_owned();
let last = orders.slice(n, orders.len()).to_owned();
assert!(first.len() + last.len() == orders.len());
return (first, last);
}
for stress_factor().times {
do run_in_newsched_task {
fn doit(latch: &mut JoinLatch, orders: ~[bool], depth: uint) {
let (my_orders, remaining_orders) = split_orders(orders);
rtdebug!("(my_orders, remaining): %?", (&my_orders, &remaining_orders));
rtdebug!("depth: %u", depth);
let mut remaining_orders = remaining_orders;
let mut num = 0;
for my_orders.iter().advance |&order| {
let child_latch = latch.new_child();
let child_latch = Cell::new(child_latch);
let (child_orders, remaining) = split_orders(remaining_orders);
rtdebug!("(child_orders, remaining): %?", (&child_orders, &remaining));
remaining_orders = remaining;
let child_orders = Cell::new(child_orders);
let child_num = num;
let _ = child_num; // XXX unused except in rtdebug!
do spawntask_random {
rtdebug!("depth %u num %u", depth, child_num);
let mut child_latch = child_latch.take();
let child_orders = child_orders.take();
doit(&mut *child_latch, child_orders, depth + 1);
child_latch.release(order);
}
num += 1;
}
}
let mut latch = JoinLatch::new_root();
let orders = rand_orders();
rtdebug!("orders: %?", orders);
doit(&mut *latch, orders, 0);
assert!(!latch.wait(true));
}
}
}
#[test]
fn whateverman() {
struct Order {
immediate: bool,
succeed: bool,
orders: ~[Order]
}
fn next(latch: &mut JoinLatch, orders: ~[Order]) {
for orders.iter().advance |order| {
let suborders = copy order.orders;
let child_latch = Cell::new(latch.new_child());
let succeed = order.succeed;
if order.immediate {
do spawntask_immediately {
let mut child_latch = child_latch.take();
next(&mut *child_latch, copy suborders);
rtdebug!("immediate releasing");
child_latch.release(succeed);
}
} else {
do spawntask_later {
let mut child_latch = child_latch.take();
next(&mut *child_latch, copy suborders);
rtdebug!("later releasing");
child_latch.release(succeed);
}
}
}
}
do run_in_newsched_task {
let mut latch = JoinLatch::new_root();
let orders = ~[ Order { // 0 0
immediate: true,
succeed: true,
orders: ~[ Order { // 1 0
immediate: true,
succeed: false,
orders: ~[ Order { // 2 0
immediate: false,
succeed: false,
orders: ~[ Order { // 3 0
immediate: true,
succeed: false,
orders: ~[]
}, Order { // 3 1
immediate: false,
succeed: false,
orders: ~[]
}]
}]
}]
}];
next(&mut *latch, orders);
assert!(!latch.wait(true));
}
}
}

View File

@@ -13,12 +13,13 @@ use rt::sched::Scheduler;
use rt::task::Task;
use rt::local_ptr;
use rt::rtio::{EventLoop, IoFactoryObject};
//use borrow::to_uint;
pub trait Local {
fn put(value: ~Self);
fn take() -> ~Self;
fn exists() -> bool;
fn borrow(f: &fn(&mut Self));
fn borrow<T>(f: &fn(&mut Self) -> T) -> T;
unsafe fn unsafe_borrow() -> *mut Self;
unsafe fn try_unsafe_borrow() -> Option<*mut Self>;
}
@@ -27,23 +28,40 @@ impl Local for Scheduler {
fn put(value: ~Scheduler) { unsafe { local_ptr::put(value) }}
fn take() -> ~Scheduler { unsafe { local_ptr::take() } }
fn exists() -> bool { local_ptr::exists() }
fn borrow(f: &fn(&mut Scheduler)) { unsafe { local_ptr::borrow(f) } }
fn borrow<T>(f: &fn(&mut Scheduler) -> T) -> T {
let mut res: Option<T> = None;
let res_ptr: *mut Option<T> = &mut res;
unsafe {
do local_ptr::borrow |sched| {
// rtdebug!("successfully unsafe borrowed sched pointer");
let result = f(sched);
*res_ptr = Some(result);
}
}
match res {
Some(r) => { r }
None => rtabort!("function failed!")
}
}
unsafe fn unsafe_borrow() -> *mut Scheduler { local_ptr::unsafe_borrow() }
unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> { abort!("unimpl") }
unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> { rtabort!("unimpl") }
}
impl Local for Task {
fn put(_value: ~Task) { abort!("unimpl") }
fn take() -> ~Task { abort!("unimpl") }
fn exists() -> bool { abort!("unimpl") }
fn borrow(f: &fn(&mut Task)) {
do Local::borrow::<Scheduler> |sched| {
fn put(_value: ~Task) { rtabort!("unimpl") }
fn take() -> ~Task { rtabort!("unimpl") }
fn exists() -> bool { rtabort!("unimpl") }
fn borrow<T>(f: &fn(&mut Task) -> T) -> T {
do Local::borrow::<Scheduler, T> |sched| {
// rtdebug!("sched about to grab current_task");
match sched.current_task {
Some(~ref mut task) => {
f(&mut *task.task)
// rtdebug!("current task pointer: %x", to_uint(task));
// rtdebug!("current task heap pointer: %x", to_uint(&task.heap));
f(task)
}
None => {
abort!("no scheduler")
rtabort!("no scheduler")
}
}
}
@@ -51,12 +69,12 @@ impl Local for Task {
unsafe fn unsafe_borrow() -> *mut Task {
match (*Local::unsafe_borrow::<Scheduler>()).current_task {
Some(~ref mut task) => {
let s: *mut Task = &mut *task.task;
let s: *mut Task = &mut *task;
return s;
}
None => {
// Don't fail. Infinite recursion
abort!("no scheduler")
rtabort!("no scheduler")
}
}
}
@@ -71,48 +89,69 @@ impl Local for Task {
// XXX: This formulation won't work once ~IoFactoryObject is a real trait pointer
impl Local for IoFactoryObject {
fn put(_value: ~IoFactoryObject) { abort!("unimpl") }
fn take() -> ~IoFactoryObject { abort!("unimpl") }
fn exists() -> bool { abort!("unimpl") }
fn borrow(_f: &fn(&mut IoFactoryObject)) { abort!("unimpl") }
fn put(_value: ~IoFactoryObject) { rtabort!("unimpl") }
fn take() -> ~IoFactoryObject { rtabort!("unimpl") }
fn exists() -> bool { rtabort!("unimpl") }
fn borrow<T>(_f: &fn(&mut IoFactoryObject) -> T) -> T { rtabort!("unimpl") }
unsafe fn unsafe_borrow() -> *mut IoFactoryObject {
let sched = Local::unsafe_borrow::<Scheduler>();
let io: *mut IoFactoryObject = (*sched).event_loop.io().unwrap();
return io;
}
unsafe fn try_unsafe_borrow() -> Option<*mut IoFactoryObject> { abort!("unimpl") }
unsafe fn try_unsafe_borrow() -> Option<*mut IoFactoryObject> { rtabort!("unimpl") }
}
#[cfg(test)]
mod test {
use unstable::run_in_bare_thread;
use rt::test::*;
use rt::sched::Scheduler;
use rt::uv::uvio::UvEventLoop;
use super::*;
#[test]
fn thread_local_scheduler_smoke_test() {
let scheduler = ~UvEventLoop::new_scheduler();
do run_in_bare_thread {
let scheduler = ~new_test_uv_sched();
Local::put(scheduler);
let _scheduler: ~Scheduler = Local::take();
}
}
#[test]
fn thread_local_scheduler_two_instances() {
let scheduler = ~UvEventLoop::new_scheduler();
do run_in_bare_thread {
let scheduler = ~new_test_uv_sched();
Local::put(scheduler);
let _scheduler: ~Scheduler = Local::take();
let scheduler = ~UvEventLoop::new_scheduler();
let scheduler = ~new_test_uv_sched();
Local::put(scheduler);
let _scheduler: ~Scheduler = Local::take();
}
}
#[test]
fn borrow_smoke_test() {
let scheduler = ~UvEventLoop::new_scheduler();
do run_in_bare_thread {
let scheduler = ~new_test_uv_sched();
Local::put(scheduler);
unsafe {
let _scheduler: *mut Scheduler = Local::unsafe_borrow();
}
let _scheduler: ~Scheduler = Local::take();
}
}
#[test]
fn borrow_with_return() {
do run_in_bare_thread {
let scheduler = ~new_test_uv_sched();
Local::put(scheduler);
let res = do Local::borrow::<Scheduler,bool> |_sched| {
true
};
assert!(res);
let _scheduler: ~Scheduler = Local::take();
}
}
}

View File

@@ -10,11 +10,24 @@
//! The local, garbage collected heap
use libc;
use libc::{c_void, uintptr_t, size_t};
use ops::Drop;
use repr::BoxRepr;
use rt;
use rt::OldTaskContext;
use rt::local::Local;
use rt::task::Task;
type MemoryRegion = c_void;
type BoxedRegion = c_void;
struct Env { priv opaque: () }
struct BoxedRegion {
env: *Env,
backing_region: *MemoryRegion,
live_allocs: *BoxRepr
}
pub type OpaqueBox = c_void;
pub type TypeDesc = c_void;
@@ -49,6 +62,12 @@ impl LocalHeap {
}
}
pub fn realloc(&mut self, ptr: *OpaqueBox, size: uint) -> *OpaqueBox {
unsafe {
return rust_boxed_region_realloc(self.boxed_region, ptr, size as size_t);
}
}
pub fn free(&mut self, box: *OpaqueBox) {
unsafe {
return rust_boxed_region_free(self.boxed_region, box);
@@ -65,6 +84,40 @@ impl Drop for LocalHeap {
}
}
// A little compatibility function
pub unsafe fn local_free(ptr: *libc::c_char) {
match rt::context() {
OldTaskContext => {
rust_upcall_free_noswitch(ptr);
extern {
#[fast_ffi]
unsafe fn rust_upcall_free_noswitch(ptr: *libc::c_char);
}
}
_ => {
do Local::borrow::<Task,()> |task| {
task.heap.free(ptr as *libc::c_void);
}
}
}
}
pub fn live_allocs() -> *BoxRepr {
let region = match rt::context() {
OldTaskContext => {
unsafe { rust_current_boxed_region() }
}
_ => {
do Local::borrow::<Task, *BoxedRegion> |task| {
task.heap.boxed_region
}
}
};
return unsafe { (*region).live_allocs };
}
extern {
fn rust_new_memory_region(synchronized: uintptr_t,
detailed_leaks: uintptr_t,
@@ -76,5 +129,9 @@ extern {
fn rust_boxed_region_malloc(region: *BoxedRegion,
td: *TypeDesc,
size: size_t) -> *OpaqueBox;
fn rust_boxed_region_realloc(region: *BoxedRegion,
ptr: *OpaqueBox,
size: size_t) -> *OpaqueBox;
fn rust_boxed_region_free(region: *BoxedRegion, box: *OpaqueBox);
fn rust_current_boxed_region() -> *BoxedRegion;
}

View File

@@ -109,7 +109,7 @@ pub unsafe fn unsafe_borrow<T>() -> *mut T {
fn tls_key() -> tls::Key {
match maybe_tls_key() {
Some(key) => key,
None => abort!("runtime tls key not initialized")
None => rtabort!("runtime tls key not initialized")
}
}

View File

@@ -9,6 +9,7 @@
// except according to those terms.
use either::*;
use libc;
pub trait Logger {
fn log(&mut self, msg: Either<~str, &'static str>);
@@ -20,6 +21,10 @@ impl Logger for StdErrLogger {
fn log(&mut self, msg: Either<~str, &'static str>) {
use io::{Writer, WriterUtil};
if !should_log_console() {
return;
}
let s: &str = match msg {
Left(ref s) => {
let s: &str = *s;
@@ -44,7 +49,6 @@ pub fn init(crate_map: *u8) {
use str;
use ptr;
use option::{Some, None};
use libc::c_char;
let log_spec = os::getenv("RUST_LOG");
match log_spec {
@@ -61,8 +65,16 @@ pub fn init(crate_map: *u8) {
}
}
}
extern {
fn rust_update_log_settings(crate_map: *u8, settings: *c_char);
}
}
pub fn console_on() { unsafe { rust_log_console_on() } }
pub fn console_off() { unsafe { rust_log_console_off() } }
fn should_log_console() -> bool { unsafe { rust_should_log_console() != 0 } }
extern {
fn rust_update_log_settings(crate_map: *u8, settings: *libc::c_char);
fn rust_log_console_on();
fn rust_log_console_off();
fn rust_should_log_console() -> libc::uintptr_t;
}

View File

@@ -8,6 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A concurrent queue that supports multiple producers and a
//! single consumer.
use container::Container;
use kinds::Send;
use vec::OwnedVector;

98
src/libstd/rt/metrics.rs Normal file
View File

@@ -0,0 +1,98 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use to_str::ToStr;
/// Per-scheduler event counters used for diagnostics and performance
/// inspection. Every field is a monotonically increasing count of how many
/// times the corresponding scheduler event has occurred.
pub struct SchedMetrics {
    // The number of times executing `run_sched_once`.
    turns: uint,
    // The number of turns that received a message.
    messages_received: uint,
    // The number of turns that ran a task from the queue.
    tasks_resumed_from_queue: uint,
    // The number of turns that found no work to perform.
    wasted_turns: uint,
    // The number of times the scheduler went to sleep.
    sleepy_times: uint,
    // Context switches from the scheduler into a task.
    context_switches_sched_to_task: uint,
    // Context switches from a task into the scheduler.
    context_switches_task_to_sched: uint,
    // Context switches from a task to a task.
    context_switches_task_to_task: uint,
    // Message sends that unblock the receiver
    rendezvous_sends: uint,
    // Message sends that do not unblock the receiver
    non_rendezvous_sends: uint,
    // Message receives that do not block the receiver
    rendezvous_recvs: uint,
    // Message receives that block the receiver
    non_rendezvous_recvs: uint,
    // JoinLatch releases that create tombstones
    release_tombstone: uint,
    // JoinLatch releases that do not create tombstones
    release_no_tombstone: uint,
}
impl SchedMetrics {
    /// Create a fresh metrics record with every counter initialized to zero.
    pub fn new() -> SchedMetrics {
        SchedMetrics {
            turns: 0,
            messages_received: 0,
            tasks_resumed_from_queue: 0,
            wasted_turns: 0,
            sleepy_times: 0,
            context_switches_sched_to_task: 0,
            context_switches_task_to_sched: 0,
            context_switches_task_to_task: 0,
            rendezvous_sends: 0,
            non_rendezvous_sends: 0,
            rendezvous_recvs: 0,
            non_rendezvous_recvs: 0,
            release_tombstone: 0,
            release_no_tombstone: 0
        }
    }
}
impl ToStr for SchedMetrics {
    /// Render every counter as a `name: value` line, one counter per line,
    /// in declaration order. Intended for human-readable debug output.
    fn to_str(&self) -> ~str {
        fmt!("turns: %u\n\
              messages_received: %u\n\
              tasks_resumed_from_queue: %u\n\
              wasted_turns: %u\n\
              sleepy_times: %u\n\
              context_switches_sched_to_task: %u\n\
              context_switches_task_to_sched: %u\n\
              context_switches_task_to_task: %u\n\
              rendezvous_sends: %u\n\
              non_rendezvous_sends: %u\n\
              rendezvous_recvs: %u\n\
              non_rendezvous_recvs: %u\n\
              release_tombstone: %u\n\
              release_no_tombstone: %u\n\
              ",
             self.turns,
             self.messages_received,
             self.tasks_resumed_from_queue,
             self.wasted_turns,
             self.sleepy_times,
             self.context_switches_sched_to_task,
             self.context_switches_task_to_sched,
             self.context_switches_task_to_task,
             self.rendezvous_sends,
             self.non_rendezvous_sends,
             self.rendezvous_recvs,
             self.non_rendezvous_recvs,
             self.release_tombstone,
             self.release_no_tombstone
        )
    }
}

View File

@@ -55,8 +55,27 @@ Several modules in `core` are clients of `rt`:
*/
#[doc(hidden)];
#[deny(unused_imports)];
#[deny(unused_mut)];
#[deny(unused_variable)];
#[deny(unused_unsafe)];
use cell::Cell;
use clone::Clone;
use container::Container;
use iter::Times;
use iterator::IteratorUtil;
use option::Some;
use ptr::RawPtr;
use rt::sched::{Scheduler, Shutdown};
use rt::sleeper_list::SleeperList;
use rt::task::Task;
use rt::thread::Thread;
use rt::work_queue::WorkQueue;
use rt::uv::uvio::UvEventLoop;
use unstable::atomics::{AtomicInt, SeqCst};
use unstable::sync::UnsafeAtomicRcBox;
use vec::{OwnedVector, MutableVector};
/// The global (exchange) heap.
pub mod global_heap;
@@ -88,6 +107,9 @@ mod work_queue;
/// A parallel queue.
mod message_queue;
/// A parallel data structure for tracking sleeping schedulers.
mod sleeper_list;
/// Stack segments and caching.
mod stack;
@@ -101,7 +123,7 @@ mod thread;
pub mod env;
/// The local, managed heap
mod local_heap;
pub mod local_heap;
/// The Logger trait and implementations
pub mod logging;
@@ -127,6 +149,20 @@ pub mod local_ptr;
/// Bindings to pthread/windows thread-local storage.
pub mod thread_local_storage;
/// For waiting on child tasks.
pub mod join_latch;
pub mod metrics;
// FIXME #5248 shouldn't be pub
/// Just stuff
pub mod util;
// Global command line argument storage
pub mod args;
// Support for dynamic borrowck
pub mod borrowck;
/// Set up a default runtime configuration, given compiler-supplied arguments.
///
@@ -142,27 +178,128 @@ pub mod thread_local_storage;
/// # Return value
///
/// The return value is used as the process return code. 0 on success, 101 on error.
pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int {
pub fn start(argc: int, argv: **u8, crate_map: *u8, main: ~fn()) -> int {
use self::sched::{Scheduler, Coroutine};
use self::uv::uvio::UvEventLoop;
init(argc, argv, crate_map);
let exit_code = run(main);
cleanup();
init(crate_map);
let loop_ = ~UvEventLoop::new();
let mut sched = ~Scheduler::new(loop_);
let main_task = ~Coroutine::new(&mut sched.stack_pool, main);
sched.enqueue_task(main_task);
sched.run();
return 0;
return exit_code;
}
/// One-time runtime initialization. Currently all this does is set up logging
/// based on the RUST_LOG environment variable.
pub fn init(crate_map: *u8) {
/// One-time runtime initialization.
///
/// Initializes global state, including frobbing
/// the crate's logging flags, registering GC
/// metadata, and storing the process arguments.
pub fn init(argc: int, argv: **u8, crate_map: *u8) {
// XXX: Dereferencing these pointers is not safe.
// Need to propagate the unsafety to `start`.
unsafe {
args::init(argc, argv);
logging::init(crate_map);
rust_update_gc_metadata(crate_map);
}
extern {
fn rust_update_gc_metadata(crate_map: *u8);
}
}
/// One-time runtime cleanup.
pub fn cleanup() {
args::cleanup();
}
/// Execute the main function in a scheduler.
///
/// Configures the runtime according to the environment, by default
/// using a task scheduler with the same number of threads as cores.
/// Returns a process exit code.
pub fn run(main: ~fn()) -> int {
static DEFAULT_ERROR_CODE: int = 101;
let nthreads = util::default_sched_threads();
// The shared list of sleeping schedulers. Schedulers wake each other
// occasionally to do new work.
let sleepers = SleeperList::new();
// The shared work queue. Temporary until work stealing is implemented.
let work_queue = WorkQueue::new();
// The schedulers.
let mut scheds = ~[];
// Handles to the schedulers. When the main task ends these will be
// sent the Shutdown message to terminate the schedulers.
let mut handles = ~[];
for nthreads.times {
// Every scheduler is driven by an I/O event loop.
let loop_ = ~UvEventLoop::new();
let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone());
let handle = sched.make_handle();
scheds.push(sched);
handles.push(handle);
}
// Create a shared cell for transmitting the process exit
// code from the main task to this function.
let exit_code = UnsafeAtomicRcBox::new(AtomicInt::new(0));
let exit_code_clone = exit_code.clone();
// When the main task exits, after all the tasks in the main
// task tree, shut down the schedulers and set the exit code.
let handles = Cell::new(handles);
let on_exit: ~fn(bool) = |exit_success| {
let mut handles = handles.take();
for handles.mut_iter().advance |handle| {
handle.send(Shutdown);
}
unsafe {
let exit_code = if exit_success {
use rt::util;
// If we're exiting successfully, then return the global
// exit status, which can be set programmatically.
util::get_exit_status()
} else {
DEFAULT_ERROR_CODE
};
(*exit_code_clone.get()).store(exit_code, SeqCst);
}
};
// Create and enqueue the main task.
let main_cell = Cell::new(main);
let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool,
main_cell.take());
main_task.on_exit = Some(on_exit);
scheds[0].enqueue_task(main_task);
// Run each scheduler in a thread.
let mut threads = ~[];
while !scheds.is_empty() {
let sched = scheds.pop();
let sched_cell = Cell::new(sched);
let thread = do Thread::start {
let sched = sched_cell.take();
sched.run();
};
threads.push(thread);
}
// Wait for schedulers
{ let _threads = threads; }
// Return the exit code
unsafe {
(*exit_code.get()).load(SeqCst)
}
}
/// Possible contexts in which Rust code may be executing.
@@ -194,8 +331,8 @@ pub fn context() -> RuntimeContext {
return OldTaskContext;
} else {
if Local::exists::<Scheduler>() {
let context = ::cell::Cell::new_empty();
do Local::borrow::<Scheduler> |sched| {
let context = Cell::new_empty();
do Local::borrow::<Scheduler, ()> |sched| {
if sched.in_task_context() {
context.put_back(TaskContext);
} else {
@@ -217,24 +354,20 @@ pub fn context() -> RuntimeContext {
#[test]
fn test_context() {
use unstable::run_in_bare_thread;
use self::sched::{Scheduler, Coroutine};
use rt::uv::uvio::UvEventLoop;
use cell::Cell;
use self::sched::{Scheduler};
use rt::local::Local;
use rt::test::new_test_uv_sched;
assert_eq!(context(), OldTaskContext);
do run_in_bare_thread {
assert_eq!(context(), GlobalContext);
let mut sched = ~UvEventLoop::new_scheduler();
let task = ~do Coroutine::new(&mut sched.stack_pool) {
let mut sched = ~new_test_uv_sched();
let task = ~do Task::new_root(&mut sched.stack_pool) {
assert_eq!(context(), TaskContext);
let sched = Local::take::<Scheduler>();
do sched.deschedule_running_task_and_then() |task| {
do sched.deschedule_running_task_and_then() |sched, task| {
assert_eq!(context(), SchedulerContext);
let task = Cell::new(task);
do Local::borrow::<Scheduler> |sched| {
sched.enqueue_task(task.take());
}
sched.enqueue_task(task);
}
};
sched.enqueue_task(task);

View File

@@ -18,28 +18,69 @@ use rt::uv::uvio;
// XXX: ~object doesn't work currently so these are some placeholder
// types to use instead
pub type EventLoopObject = uvio::UvEventLoop;
pub type RemoteCallbackObject = uvio::UvRemoteCallback;
pub type IoFactoryObject = uvio::UvIoFactory;
pub type RtioTcpStreamObject = uvio::UvTcpStream;
pub type RtioTcpListenerObject = uvio::UvTcpListener;
pub type RtioUdpSocketObject = uvio::UvUdpSocket;
pub trait EventLoop {
fn run(&mut self);
fn callback(&mut self, ~fn());
fn callback_ms(&mut self, ms: u64, ~fn());
fn remote_callback(&mut self, ~fn()) -> ~RemoteCallbackObject;
/// The asynchronous I/O services. Not all event loops may provide one
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactoryObject>;
}
pub trait RemoteCallback {
/// Trigger the remote callback. Note that the number of times the callback
/// is run is not guaranteed. All that is guaranteed is that, after calling 'fire',
/// the callback will be called at least once, but multiple callbacks may be coalesced
/// and callbacks may be called more often than requested. Destruction also triggers the
/// callback.
fn fire(&mut self);
}
pub trait IoFactory {
fn tcp_connect(&mut self, addr: IpAddr) -> Result<~RtioTcpStreamObject, IoError>;
fn tcp_bind(&mut self, addr: IpAddr) -> Result<~RtioTcpListenerObject, IoError>;
fn udp_bind(&mut self, addr: IpAddr) -> Result<~RtioUdpSocketObject, IoError>;
}
pub trait RtioTcpListener {
pub trait RtioTcpListener : RtioSocket {
fn accept(&mut self) -> Result<~RtioTcpStreamObject, IoError>;
fn accept_simultaneously(&self);
fn dont_accept_simultaneously(&self);
}
pub trait RtioTcpStream {
fn read(&mut self, buf: &mut [u8]) -> Result<uint, IoError>;
fn write(&mut self, buf: &[u8]) -> Result<(), IoError>;
pub trait RtioTcpStream : RtioSocket {
fn read(&self, buf: &mut [u8]) -> Result<uint, IoError>;
fn write(&self, buf: &[u8]) -> Result<(), IoError>;
fn peer_name(&self) -> IpAddr;
fn control_congestion(&self);
fn nodelay(&self);
fn keepalive(&self, delay_in_seconds: uint);
fn letdie(&self);
}
pub trait RtioSocket {
fn socket_name(&self) -> IpAddr;
}
pub trait RtioUdpSocket : RtioSocket {
fn recvfrom(&self, buf: &mut [u8]) -> Result<(uint, IpAddr), IoError>;
fn sendto(&self, buf: &[u8], dst: IpAddr) -> Result<(), IoError>;
fn join_multicast(&self, multi: IpAddr);
fn leave_multicast(&self, multi: IpAddr);
fn loop_multicast_locally(&self);
fn dont_loop_multicast_locally(&self);
fn multicast_time_to_live(&self, ttl: int);
fn time_to_live(&self, ttl: int);
fn hear_broadcasts(&self);
fn ignore_broadcasts(&self);
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,59 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Maintains a shared list of sleeping schedulers. Schedulers
//! use this to wake each other up.
use container::Container;
use vec::OwnedVector;
use option::{Option, Some, None};
use cell::Cell;
use unstable::sync::{Exclusive, exclusive};
use rt::sched::SchedHandle;
use clone::Clone;
/// A shared stack of handles to sleeping schedulers. Schedulers push their
/// own handle here before sleeping; a scheduler with spare work pops a handle
/// to wake a peer.
pub struct SleeperList {
    // The handle stack, protected by an `Exclusive` lock so that multiple
    // scheduler threads can share one list (clones share the same stack).
    priv stack: ~Exclusive<~[SchedHandle]>
}
impl SleeperList {
    /// Create a new, empty sleeper list.
    pub fn new() -> SleeperList {
        SleeperList {
            stack: ~exclusive(~[])
        }
    }

    /// Record `handle` as a sleeping scheduler by pushing it onto the shared
    /// stack.
    pub fn push(&mut self, handle: SchedHandle) {
        // The Cell lets the handle be moved out from inside the `with`
        // closure (the closure itself only captures by reference).
        let handle = Cell::new(handle);
        unsafe {
            self.stack.with(|s| s.push(handle.take()));
        }
    }

    /// Remove and return the most recently pushed sleeper handle, or `None`
    /// if no scheduler is currently registered as sleeping.
    pub fn pop(&mut self) -> Option<SchedHandle> {
        unsafe {
            do self.stack.with |s| {
                if !s.is_empty() {
                    Some(s.pop())
                } else {
                    None
                }
            }
        }
    }
}
impl Clone for SleeperList {
    /// Produce another reference to the *same* shared stack: clones observe
    /// each other's pushes and pops (the underlying `Exclusive` is shared,
    /// not deep-copied).
    fn clone(&self) -> SleeperList {
        SleeperList {
            stack: self.stack.clone()
        }
    }
}

View File

@@ -15,20 +15,45 @@
use borrow;
use cast::transmute;
use cleanup;
use libc::{c_void, uintptr_t};
use ptr;
use prelude::*;
use option::{Option, Some, None};
use rt::local::Local;
use rt::logging::StdErrLogger;
use super::local_heap::LocalHeap;
use rt::sched::{Scheduler, SchedHandle};
use rt::join_latch::JoinLatch;
use rt::stack::{StackSegment, StackPool};
use rt::context::Context;
use cell::Cell;
pub struct Task {
heap: LocalHeap,
gc: GarbageCollector,
storage: LocalStorage,
logger: StdErrLogger,
unwinder: Option<Unwinder>,
destroyed: bool
unwinder: Unwinder,
home: Option<SchedHome>,
join_latch: Option<~JoinLatch>,
on_exit: Option<~fn(bool)>,
destroyed: bool,
coroutine: Option<~Coroutine>
}
pub struct Coroutine {
/// The segment of stack on which the task is currently running or
/// if the task is blocked, on which the task will resume
/// execution.
priv current_stack_segment: StackSegment,
/// Always valid if the task is alive and not running.
saved_context: Context
}
pub enum SchedHome {
AnySched,
Sched(SchedHandle)
}
pub struct GarbageCollector;
@@ -39,73 +64,227 @@ pub struct Unwinder {
}
impl Task {
pub fn new() -> Task {
pub fn new_root(stack_pool: &mut StackPool,
start: ~fn()) -> Task {
Task::new_root_homed(stack_pool, AnySched, start)
}
pub fn new_child(&mut self,
stack_pool: &mut StackPool,
start: ~fn()) -> Task {
self.new_child_homed(stack_pool, AnySched, start)
}
pub fn new_root_homed(stack_pool: &mut StackPool,
home: SchedHome,
start: ~fn()) -> Task {
Task {
heap: LocalHeap::new(),
gc: GarbageCollector,
storage: LocalStorage(ptr::null(), None),
logger: StdErrLogger,
unwinder: Some(Unwinder { unwinding: false }),
destroyed: false
unwinder: Unwinder { unwinding: false },
home: Some(home),
join_latch: Some(JoinLatch::new_root()),
on_exit: None,
destroyed: false,
coroutine: Some(~Coroutine::new(stack_pool, start))
}
}
pub fn without_unwinding() -> Task {
pub fn new_child_homed(&mut self,
stack_pool: &mut StackPool,
home: SchedHome,
start: ~fn()) -> Task {
Task {
heap: LocalHeap::new(),
gc: GarbageCollector,
storage: LocalStorage(ptr::null(), None),
logger: StdErrLogger,
unwinder: None,
destroyed: false
home: Some(home),
unwinder: Unwinder { unwinding: false },
join_latch: Some(self.join_latch.get_mut_ref().new_child()),
on_exit: None,
destroyed: false,
coroutine: Some(~Coroutine::new(stack_pool, start))
}
}
pub fn give_home(&mut self, new_home: SchedHome) {
self.home = Some(new_home);
}
pub fn run(&mut self, f: &fn()) {
// This is just an assertion that `run` was called unsafely
// and this instance of Task is still accessible.
do Local::borrow::<Task> |task| {
do Local::borrow::<Task, ()> |task| {
assert!(borrow::ref_eq(task, self));
}
match self.unwinder {
Some(ref mut unwinder) => {
// If there's an unwinder then set up the catch block
unwinder.try(f);
self.unwinder.try(f);
self.destroy();
// Wait for children. Possibly report the exit status.
let local_success = !self.unwinder.unwinding;
let join_latch = self.join_latch.swap_unwrap();
match self.on_exit {
Some(ref on_exit) => {
let success = join_latch.wait(local_success);
(*on_exit)(success);
}
None => {
// Otherwise, just run the body
f()
join_latch.release(local_success);
}
}
self.destroy();
}
/// Must be called manually before finalization to clean up
/// must be called manually before finalization to clean up
/// thread-local resources. Some of the routines here expect
/// Task to be available recursively so this must be
/// called unsafely, without removing Task from
/// thread-local-storage.
fn destroy(&mut self) {
// This is just an assertion that `destroy` was called unsafely
// and this instance of Task is still accessible.
do Local::borrow::<Task> |task| {
do Local::borrow::<Task, ()> |task| {
assert!(borrow::ref_eq(task, self));
}
match self.storage {
LocalStorage(ptr, Some(ref dtor)) => {
(*dtor)(ptr)
}
_ => ()
}
// Destroy remaining boxes
unsafe { cleanup::annihilate(); }
self.destroyed = true;
}
/// Check if *task* is currently home.
pub fn is_home(&self) -> bool {
do Local::borrow::<Scheduler,bool> |sched| {
match self.home {
Some(AnySched) => { false }
Some(Sched(SchedHandle { sched_id: ref id, _ })) => {
*id == sched.sched_id()
}
None => { rtabort!("task home of None") }
}
}
}
pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool {
match self.home {
Some(AnySched) => { false }
Some(Sched(SchedHandle { sched_id: ref id, _ })) => {
*id == sched.sched_id()
}
None => {rtabort!("task home of None") }
}
}
pub fn is_home_using_id(sched_id: uint) -> bool {
do Local::borrow::<Task,bool> |task| {
match task.home {
Some(Sched(SchedHandle { sched_id: ref id, _ })) => {
*id == sched_id
}
Some(AnySched) => { false }
None => { rtabort!("task home of None") }
}
}
}
/// Check if this *task* has a home.
pub fn homed(&self) -> bool {
match self.home {
Some(AnySched) => { false }
Some(Sched(_)) => { true }
None => {
rtabort!("task home of None")
}
}
}
/// On a special scheduler?
pub fn on_special() -> bool {
do Local::borrow::<Scheduler,bool> |sched| {
!sched.run_anything
}
}
}
impl Drop for Task {
fn drop(&self) { assert!(self.destroyed) }
}
// Coroutines represent nothing more than a context and a stack
// segment.
impl Coroutine {
pub fn new(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine {
static MIN_STACK_SIZE: uint = 100000; // XXX: Too much stack
let start = Coroutine::build_start_wrapper(start);
let mut stack = stack_pool.take_segment(MIN_STACK_SIZE);
let initial_context = Context::new(start, &mut stack);
Coroutine {
current_stack_segment: stack,
saved_context: initial_context
}
}
fn build_start_wrapper(start: ~fn()) -> ~fn() {
let start_cell = Cell::new(start);
let wrapper: ~fn() = || {
// First code after swap to this new context. Run our
// cleanup job.
unsafe {
let sched = Local::unsafe_borrow::<Scheduler>();
(*sched).run_cleanup_job();
let sched = Local::unsafe_borrow::<Scheduler>();
let task = (*sched).current_task.get_mut_ref();
do task.run {
// N.B. Removing `start` from the start wrapper
// closure by emptying a cell is critical for
// correctness. The ~Task pointer, and in turn the
// closure used to initialize the first call
// frame, is destroyed in the scheduler context,
// not task context. So any captured closures must
// not contain user-definable dtors that expect to
// be in task context. By moving `start` out of
// the closure, all the user code goes out of
// scope while the task is still running.
let start = start_cell.take();
start();
};
}
let sched = Local::take::<Scheduler>();
sched.terminate_current_task();
};
return wrapper;
}
/// Destroy coroutine and try to reuse stack segment.
pub fn recycle(~self, stack_pool: &mut StackPool) {
match self {
~Coroutine { current_stack_segment, _ } => {
stack_pool.give_segment(current_stack_segment);
}
}
}
}
// Just a sanity check to make sure we are catching a Rust-thrown exception
static UNWIND_TOKEN: uintptr_t = 839147;
@@ -184,8 +363,10 @@ mod test {
fn unwind() {
do run_in_newsched_task() {
let result = spawntask_try(||());
rtdebug!("trying first assert");
assert!(result.is_ok());
let result = spawntask_try(|| fail!());
rtdebug!("trying second assert");
assert!(result.is_err());
}
}
@@ -227,4 +408,67 @@ mod test {
assert!(port.recv() == 10);
}
}
#[test]
fn comm_shared_chan() {
use comm::*;
do run_in_newsched_task() {
let (port, chan) = stream();
let chan = SharedChan::new(chan);
chan.send(10);
assert!(port.recv() == 10);
}
}
#[test]
fn linked_failure() {
do run_in_newsched_task() {
let res = do spawntask_try {
spawntask_random(|| fail!());
};
assert!(res.is_err());
}
}
#[test]
fn heap_cycles() {
use option::{Option, Some, None};
do run_in_newsched_task {
struct List {
next: Option<@mut List>,
}
let a = @mut List { next: None };
let b = @mut List { next: Some(a) };
a.next = Some(b);
}
}
// XXX: This is a copy of test_future_result in std::task.
// It can be removed once the scheduler is turned on by default.
#[test]
fn future_result() {
do run_in_newsched_task {
use option::{Some, None};
use task::*;
let mut result = None;
let mut builder = task();
builder.future_result(|r| result = Some(r));
do builder.spawn {}
assert_eq!(result.unwrap().recv(), Success);
result = None;
let mut builder = task();
builder.future_result(|r| result = Some(r));
builder.unlinked();
do builder.spawn {
fail!();
}
assert_eq!(result.unwrap().recv(), Failure);
}
}
}

View File

@@ -8,75 +8,185 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use uint;
use option::*;
use option::{Some, None};
use cell::Cell;
use result::{Result, Ok, Err};
use super::io::net::ip::{IpAddr, Ipv4};
use rt::task::Task;
use rt::thread::Thread;
use clone::Clone;
use container::Container;
use iterator::IteratorUtil;
use vec::{OwnedVector, MutableVector};
use super::io::net::ip::{IpAddr, Ipv4, Ipv6};
use rt::sched::Scheduler;
use rt::local::Local;
use unstable::run_in_bare_thread;
use rt::thread::Thread;
use rt::task::Task;
use rt::uv::uvio::UvEventLoop;
use rt::work_queue::WorkQueue;
use rt::sleeper_list::SleeperList;
use rt::task::{Sched};
use rt::comm::oneshot;
use result::{Result, Ok, Err};
pub fn new_test_uv_sched() -> Scheduler {
let mut sched = Scheduler::new(~UvEventLoop::new(),
WorkQueue::new(),
SleeperList::new());
// Don't wait for the Shutdown message
sched.no_sleep = true;
return sched;
}
/// Creates a new scheduler in a new thread and runs a task in it,
/// then waits for the scheduler to exit. Failure of the task
/// will abort the process.
pub fn run_in_newsched_task(f: ~fn()) {
use super::sched::*;
use unstable::run_in_bare_thread;
use rt::uv::uvio::UvEventLoop;
let f = Cell::new(f);
do run_in_bare_thread {
let mut sched = ~UvEventLoop::new_scheduler();
let task = ~Coroutine::with_task(&mut sched.stack_pool,
~Task::without_unwinding(),
let mut sched = ~new_test_uv_sched();
let on_exit: ~fn(bool) = |exit_status| rtassert!(exit_status);
let mut task = ~Task::new_root(&mut sched.stack_pool,
f.take());
rtdebug!("newsched_task: %x", to_uint(task));
task.on_exit = Some(on_exit);
sched.enqueue_task(task);
sched.run();
}
}
/// Create more than one scheduler and run a function in a task
/// in one of the schedulers. The schedulers will stay alive
/// until the function `f` returns.
pub fn run_in_mt_newsched_task(f: ~fn()) {
use os;
use from_str::FromStr;
use rt::sched::Shutdown;
use rt::util;
let f_cell = Cell::new(f);
do run_in_bare_thread {
let nthreads = match os::getenv("RUST_TEST_THREADS") {
Some(nstr) => FromStr::from_str(nstr).get(),
None => {
// Using more threads than cores in test code
// to force the OS to preempt them frequently.
// Assuming that this helps stress-test concurrent types.
util::num_cpus() * 2
}
};
let sleepers = SleeperList::new();
let work_queue = WorkQueue::new();
let mut handles = ~[];
let mut scheds = ~[];
for uint::range(0, nthreads) |_| {
let loop_ = ~UvEventLoop::new();
let mut sched = ~Scheduler::new(loop_,
work_queue.clone(),
sleepers.clone());
let handle = sched.make_handle();
handles.push(handle);
scheds.push(sched);
}
let f_cell = Cell::new(f_cell.take());
let handles = Cell::new(handles);
let on_exit: ~fn(bool) = |exit_status| {
let mut handles = handles.take();
// Tell schedulers to exit
for handles.mut_iter().advance |handle| {
handle.send(Shutdown);
}
rtassert!(exit_status);
};
let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool,
f_cell.take());
main_task.on_exit = Some(on_exit);
scheds[0].enqueue_task(main_task);
let mut threads = ~[];
while !scheds.is_empty() {
let sched = scheds.pop();
let sched_cell = Cell::new(sched);
let thread = do Thread::start {
let sched = sched_cell.take();
sched.run();
};
threads.push(thread);
}
// Wait for schedulers
let _threads = threads;
}
}
/// Test tasks will abort on failure instead of unwinding
pub fn spawntask(f: ~fn()) {
use super::sched::*;
let f = Cell::new(f);
let mut sched = Local::take::<Scheduler>();
let task = ~Coroutine::with_task(&mut sched.stack_pool,
~Task::without_unwinding(),
f);
do sched.switch_running_tasks_and_then(task) |task| {
let task = Cell::new(task);
let sched = Local::take::<Scheduler>();
sched.schedule_new_task(task.take());
let task = unsafe {
let sched = Local::unsafe_borrow::<Scheduler>();
rtdebug!("spawntask taking the scheduler from TLS");
do Local::borrow::<Task, ~Task>() |running_task| {
~running_task.new_child(&mut (*sched).stack_pool, f.take())
}
};
rtdebug!("new task pointer: %x", to_uint(task));
let sched = Local::take::<Scheduler>();
rtdebug!("spawntask scheduling the new task");
sched.schedule_task(task);
}
/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_immediately(f: ~fn()) {
use super::sched::*;
let mut sched = Local::take::<Scheduler>();
let task = ~Coroutine::with_task(&mut sched.stack_pool,
~Task::without_unwinding(),
f);
do sched.switch_running_tasks_and_then(task) |task| {
let task = Cell::new(task);
do Local::borrow::<Scheduler> |sched| {
sched.enqueue_task(task.take());
let f = Cell::new(f);
let task = unsafe {
let sched = Local::unsafe_borrow::<Scheduler>();
do Local::borrow::<Task, ~Task>() |running_task| {
~running_task.new_child(&mut (*sched).stack_pool,
f.take())
}
};
let sched = Local::take::<Scheduler>();
do sched.switch_running_tasks_and_then(task) |sched, task| {
sched.enqueue_task(task);
}
}
/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_later(f: ~fn()) {
use super::sched::*;
let f = Cell::new(f);
let task = unsafe {
let sched = Local::unsafe_borrow::<Scheduler>();
do Local::borrow::<Task, ~Task>() |running_task| {
~running_task.new_child(&mut (*sched).stack_pool, f.take())
}
};
let mut sched = Local::take::<Scheduler>();
let task = ~Coroutine::with_task(&mut sched.stack_pool,
~Task::without_unwinding(),
f);
sched.enqueue_task(task);
Local::put(sched);
}
@@ -86,20 +196,25 @@ pub fn spawntask_random(f: ~fn()) {
use super::sched::*;
use rand::{Rand, rng};
let f = Cell::new(f);
let task = unsafe {
let sched = Local::unsafe_borrow::<Scheduler>();
do Local::borrow::<Task, ~Task>() |running_task| {
~running_task.new_child(&mut (*sched).stack_pool,
f.take())
}
};
let mut sched = Local::take::<Scheduler>();
let mut rng = rng();
let run_now: bool = Rand::rand(&mut rng);
let mut sched = Local::take::<Scheduler>();
let task = ~Coroutine::with_task(&mut sched.stack_pool,
~Task::without_unwinding(),
f);
if run_now {
do sched.switch_running_tasks_and_then(task) |task| {
let task = Cell::new(task);
do Local::borrow::<Scheduler> |sched| {
sched.enqueue_task(task.take());
}
do sched.switch_running_tasks_and_then(task) |sched, task| {
sched.enqueue_task(task);
}
} else {
sched.enqueue_task(task);
@@ -107,80 +222,165 @@ pub fn spawntask_random(f: ~fn()) {
}
}
/// Spawn a task, with the current scheduler as home, and queue it to
/// run later.
pub fn spawntask_homed(scheds: &mut ~[~Scheduler], f: ~fn()) {
use super::sched::*;
use rand::{rng, RngUtil};
let mut rng = rng();
/// Spawn a task and wait for it to finish, returning whether it completed successfully or failed
let task = {
let sched = &mut scheds[rng.gen_int_range(0,scheds.len() as int)];
let handle = sched.make_handle();
let home_id = handle.sched_id;
// now that we know where this is going, build a new function
// that can assert it is in the right place
let af: ~fn() = || {
do Local::borrow::<Scheduler,()>() |sched| {
rtdebug!("home_id: %u, runtime loc: %u",
home_id,
sched.sched_id());
assert!(home_id == sched.sched_id());
};
f()
};
~Task::new_root_homed(&mut sched.stack_pool,
Sched(handle),
af)
};
let dest_sched = &mut scheds[rng.gen_int_range(0,scheds.len() as int)];
// enqueue it for future execution
dest_sched.enqueue_task(task);
}
/// Spawn a task and wait for it to finish, returning whether it
/// completed successfully or failed
pub fn spawntask_try(f: ~fn()) -> Result<(), ()> {
use cell::Cell;
use super::sched::*;
use task;
use unstable::finally::Finally;
// Our status variables will be filled in from the scheduler context
let mut failed = false;
let failed_ptr: *mut bool = &mut failed;
let f = Cell::new(f);
// Switch to the scheduler
let f = Cell::new(Cell::new(f));
let sched = Local::take::<Scheduler>();
do sched.deschedule_running_task_and_then() |old_task| {
let old_task = Cell::new(old_task);
let f = f.take();
let mut sched = Local::take::<Scheduler>();
let new_task = ~do Coroutine::new(&mut sched.stack_pool) {
do (|| {
(f.take())()
}).finally {
// Check for failure then resume the parent task
unsafe { *failed_ptr = task::failing(); }
let sched = Local::take::<Scheduler>();
do sched.switch_running_tasks_and_then(old_task.take()) |new_task| {
let new_task = Cell::new(new_task);
do Local::borrow::<Scheduler> |sched| {
sched.enqueue_task(new_task.take());
}
}
let (port, chan) = oneshot();
let chan = Cell::new(chan);
let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status);
let mut new_task = unsafe {
let sched = Local::unsafe_borrow::<Scheduler>();
do Local::borrow::<Task, ~Task> |_running_task| {
// I don't understand why using a child task here fails. I
// think the fail status is propogating back up the task
// tree and triggering a fail for the parent, which we
// aren't correctly expecting.
// ~running_task.new_child(&mut (*sched).stack_pool,
~Task::new_root(&mut (*sched).stack_pool,
f.take())
}
};
new_task.on_exit = Some(on_exit);
sched.resume_task_immediately(new_task);
let sched = Local::take::<Scheduler>();
do sched.switch_running_tasks_and_then(new_task) |sched, old_task| {
sched.enqueue_task(old_task);
}
if !failed { Ok(()) } else { Err(()) }
rtdebug!("enqueued the new task, now waiting on exit_status");
let exit_status = port.recv();
if exit_status { Ok(()) } else { Err(()) }
}
// Spawn a new task in a new scheduler and return a thread handle.
pub fn spawntask_thread(f: ~fn()) -> Thread {
use rt::sched::*;
use rt::uv::uvio::UvEventLoop;
let f = Cell::new(f);
let task = unsafe {
let sched = Local::unsafe_borrow::<Scheduler>();
do Local::borrow::<Task, ~Task>() |running_task| {
~running_task.new_child(&mut (*sched).stack_pool,
f.take())
}
};
let task = Cell::new(task);
let thread = do Thread::start {
let mut sched = ~UvEventLoop::new_scheduler();
let task = ~Coroutine::with_task(&mut sched.stack_pool,
~Task::without_unwinding(),
f.take());
sched.enqueue_task(task);
let mut sched = ~new_test_uv_sched();
sched.enqueue_task(task.take());
sched.run();
};
return thread;
}
/// Get a port number, starting at 9600, for use in tests
pub fn next_test_port() -> u16 {
unsafe {
return rust_dbg_next_port() as u16;
return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16;
}
extern {
fn rust_dbg_next_port() -> ::libc::uintptr_t;
fn rust_dbg_next_port(base: libc::uintptr_t) -> libc::uintptr_t;
}
}
/// Get a unique localhost:port pair starting at 9600
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> IpAddr {
Ipv4(127, 0, 0, 1, next_test_port())
}
/// Get a constant that represents the number of times to repeat stress tests. Default 1.
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> IpAddr {
Ipv6(0, 0, 0, 0, 0, 0, 0, 1, next_test_port())
}
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> uint {
use os;
use str::StrSlice;
use to_str::ToStr;
use vec::ImmutableVector;
let base = 9600u;
let range = 1000;
let bases = [
("32-opt", base + range * 1),
("32-noopt", base + range * 2),
("64-opt", base + range * 3),
("64-noopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
let path = os::getcwd().to_str();
let mut final_base = base;
for bases.iter().advance |&(dir, base)| {
if path.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
/// Get a constant that represents the number of times to repeat
/// stress tests. Default 1.
pub fn stress_factor() -> uint {
use os::getenv;

View File

@@ -16,14 +16,15 @@
use option::*;
use clone::Clone;
use super::rc::RC;
use rt::sched::{Scheduler, Coroutine};
use rt::sched::Scheduler;
use rt::{context, TaskContext, SchedulerContext};
use rt::local::Local;
use rt::task::Task;
use vec::OwnedVector;
use container::Container;
struct TubeState<T> {
blocked_task: Option<~Coroutine>,
blocked_task: Option<~Task>,
buf: ~[T]
}
@@ -72,7 +73,7 @@ impl<T> Tube<T> {
assert!(self.p.refcount() > 1); // There better be somebody to wake us up
assert!((*state).blocked_task.is_none());
let sched = Local::take::<Scheduler>();
do sched.deschedule_running_task_and_then |task| {
do sched.deschedule_running_task_and_then |_, task| {
(*state).blocked_task = Some(task);
}
rtdebug!("waking after tube recv");
@@ -107,11 +108,10 @@ mod test {
let tube_clone = tube.clone();
let tube_clone_cell = Cell::new(tube_clone);
let sched = Local::take::<Scheduler>();
do sched.deschedule_running_task_and_then |task| {
do sched.deschedule_running_task_and_then |sched, task| {
let mut tube_clone = tube_clone_cell.take();
tube_clone.send(1);
let sched = Local::take::<Scheduler>();
sched.resume_task_immediately(task);
sched.enqueue_task(task);
}
assert!(tube.recv() == 1);
@@ -123,21 +123,17 @@ mod test {
do run_in_newsched_task {
let mut tube: Tube<int> = Tube::new();
let tube_clone = tube.clone();
let tube_clone = Cell::new(Cell::new(Cell::new(tube_clone)));
let tube_clone = Cell::new(tube_clone);
let sched = Local::take::<Scheduler>();
do sched.deschedule_running_task_and_then |task| {
let tube_clone = tube_clone.take();
do Local::borrow::<Scheduler> |sched| {
let tube_clone = tube_clone.take();
do sched.deschedule_running_task_and_then |sched, task| {
let tube_clone = Cell::new(tube_clone.take());
do sched.event_loop.callback {
let mut tube_clone = tube_clone.take();
// The task should be blocked on this now and
// sending will wake it up.
tube_clone.send(1);
}
}
let sched = Local::take::<Scheduler>();
sched.resume_task_immediately(task);
sched.enqueue_task(task);
}
assert!(tube.recv() == 1);
@@ -153,14 +149,14 @@ mod test {
let tube_clone = tube.clone();
let tube_clone = Cell::new(tube_clone);
let sched = Local::take::<Scheduler>();
do sched.deschedule_running_task_and_then |task| {
do sched.deschedule_running_task_and_then |sched, task| {
callback_send(tube_clone.take(), 0);
fn callback_send(tube: Tube<int>, i: int) {
if i == 100 { return; }
let tube = Cell::new(Cell::new(tube));
do Local::borrow::<Scheduler> |sched| {
do Local::borrow::<Scheduler, ()> |sched| {
let tube = tube.take();
do sched.event_loop.callback {
let mut tube = tube.take();
@@ -172,8 +168,7 @@ mod test {
}
}
let sched = Local::take::<Scheduler>();
sched.resume_task_immediately(task);
sched.enqueue_task(task);
}
for int::range(0, MAX) |i| {

121
src/libstd/rt/util.rs Normal file
View File

@@ -0,0 +1,121 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use container::Container;
use from_str::FromStr;
use iterator::IteratorUtil;
use libc;
use option::{Some, None};
use os;
use str::StrSlice;
/// Get the number of cores available
pub fn num_cpus() -> uint {
unsafe {
return rust_get_num_cpus();
}
extern {
fn rust_get_num_cpus() -> libc::uintptr_t;
}
}
/// Get's the number of scheduler threads requested by the environment
/// either `RUST_THREADS` or `num_cpus`.
pub fn default_sched_threads() -> uint {
match os::getenv("RUST_THREADS") {
Some(nstr) => FromStr::from_str(nstr).get(),
None => num_cpus()
}
}
pub fn dumb_println(s: &str) {
use io::WriterUtil;
let dbg = ::libc::STDERR_FILENO as ::io::fd_t;
dbg.write_str(s);
dbg.write_str("\n");
}
pub fn abort(msg: &str) -> ! {
let msg = if !msg.is_empty() { msg } else { "aborted" };
let hash = msg.iter().fold(0, |accum, val| accum + (val as uint) );
let quote = match hash % 10 {
0 => "
It was from the artists and poets that the pertinent answers came, and I
know that panic would have broken loose had they been able to compare notes.
As it was, lacking their original letters, I half suspected the compiler of
having asked leading questions, or of having edited the correspondence in
corroboration of what he had latently resolved to see.",
1 => "
There are not many persons who know what wonders are opened to them in the
stories and visions of their youth; for when as children we listen and dream,
we think but half-formed thoughts, and when as men we try to remember, we are
dulled and prosaic with the poison of life. But some of us awake in the night
with strange phantasms of enchanted hills and gardens, of fountains that sing
in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch
down to sleeping cities of bronze and stone, and of shadowy companies of heroes
that ride caparisoned white horses along the edges of thick forests; and then
we know that we have looked back through the ivory gates into that world of
wonder which was ours before we were wise and unhappy.",
2 => "
Instead of the poems I had hoped for, there came only a shuddering blackness
and ineffable loneliness; and I saw at last a fearful truth which no one had
ever dared to breathe before — the unwhisperable secret of secrets — The fact
that this city of stone and stridor is not a sentient perpetuation of Old New
York as London is of Old London and Paris of Old Paris, but that it is in fact
quite dead, its sprawling body imperfectly embalmed and infested with queer
animate things which have nothing to do with it as it was in life.",
3 => "
The ocean ate the last of the land and poured into the smoking gulf, thereby
giving up all it had ever conquered. From the new-flooded lands it flowed
again, uncovering death and decay; and from its ancient and immemorial bed it
trickled loathsomely, uncovering nighted secrets of the years when Time was
young and the gods unborn. Above the waves rose weedy remembered spires. The
moon laid pale lilies of light on dead London, and Paris stood up from its damp
grave to be sanctified with star-dust. Then rose spires and monoliths that were
weedy but not remembered; terrible spires and monoliths of lands that men never
knew were lands...",
4 => "
There was a night when winds from unknown spaces whirled us irresistibly into
limitless vacum beyond all thought and entity. Perceptions of the most
maddeningly untransmissible sort thronged upon us; perceptions of infinity
which at the time convulsed us with joy, yet which are now partly lost to my
memory and partly incapable of presentation to others.",
_ => "You've met with a terrible fate, haven't you?"
};
rterrln!("%s", "");
rterrln!("%s", quote);
rterrln!("%s", "");
rterrln!("fatal runtime error: %s", msg);
unsafe { libc::abort(); }
}
pub fn set_exit_status(code: int) {
unsafe {
return rust_set_exit_status_newrt(code as libc::uintptr_t);
}
extern {
fn rust_set_exit_status_newrt(code: libc::uintptr_t);
}
}
pub fn get_exit_status() -> int {
unsafe {
return rust_get_exit_status_newrt() as int;
}
extern {
fn rust_get_exit_status_newrt() -> libc::uintptr_t;
}
}

105
src/libstd/rt/uv/async.rs Normal file
View File

@@ -0,0 +1,105 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc::{c_int, c_void};
use option::Some;
use rt::uv::uvll;
use rt::uv::uvll::UV_ASYNC;
use rt::uv::{Watcher, Loop, NativeHandle, AsyncCallback, NullCallback};
use rt::uv::WatcherInterop;
use rt::uv::status_to_maybe_uv_error;
pub struct AsyncWatcher(*uvll::uv_async_t);
impl Watcher for AsyncWatcher { }
impl AsyncWatcher {
pub fn new(loop_: &mut Loop, cb: AsyncCallback) -> AsyncWatcher {
unsafe {
let handle = uvll::malloc_handle(UV_ASYNC);
assert!(handle.is_not_null());
let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle);
watcher.install_watcher_data();
let data = watcher.get_watcher_data();
data.async_cb = Some(cb);
assert_eq!(0, uvll::async_init(loop_.native_handle(), handle, async_cb));
return watcher;
}
extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) {
let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle);
let status = status_to_maybe_uv_error(watcher.native_handle(), status);
let data = watcher.get_watcher_data();
let cb = data.async_cb.get_ref();
(*cb)(watcher, status);
}
}
pub fn send(&mut self) {
unsafe {
let handle = self.native_handle();
uvll::async_send(handle);
}
}
pub fn close(self, cb: NullCallback) {
let mut this = self;
let data = this.get_watcher_data();
assert!(data.close_cb.is_none());
data.close_cb = Some(cb);
unsafe {
uvll::close(self.native_handle(), close_cb);
}
extern fn close_cb(handle: *uvll::uv_stream_t) {
let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle);
{
let data = watcher.get_watcher_data();
data.close_cb.swap_unwrap()();
}
watcher.drop_watcher_data();
unsafe { uvll::free_handle(handle as *c_void); }
}
}
}
impl NativeHandle<*uvll::uv_async_t> for AsyncWatcher {
fn from_native_handle(handle: *uvll::uv_async_t) -> AsyncWatcher {
AsyncWatcher(handle)
}
fn native_handle(&self) -> *uvll::uv_async_t {
match self { &AsyncWatcher(ptr) => ptr }
}
}
#[cfg(test)]
mod test {
use super::*;
use rt::uv::Loop;
use unstable::run_in_bare_thread;
use rt::thread::Thread;
use cell::Cell;
#[test]
fn smoke_test() {
do run_in_bare_thread {
let mut loop_ = Loop::new();
let watcher = AsyncWatcher::new(&mut loop_, |w, _| w.close(||()) );
let watcher_cell = Cell::new(watcher);
let _thread = do Thread::start {
let mut watcher = watcher_cell.take();
watcher.send();
};
loop_.run();
loop_.close();
}
}
}

View File

@@ -90,3 +90,65 @@ impl NativeHandle<*uvll::uv_idle_t> for IdleWatcher {
match self { &IdleWatcher(ptr) => ptr }
}
}
#[cfg(test)]
mod test {
use rt::uv::Loop;
use super::*;
use unstable::run_in_bare_thread;
#[test]
#[ignore(reason = "valgrind - loop destroyed before watcher?")]
fn idle_new_then_close() {
do run_in_bare_thread {
let mut loop_ = Loop::new();
let idle_watcher = { IdleWatcher::new(&mut loop_) };
idle_watcher.close(||());
}
}
#[test]
fn idle_smoke_test() {
do run_in_bare_thread {
let mut loop_ = Loop::new();
let mut idle_watcher = { IdleWatcher::new(&mut loop_) };
let mut count = 10;
let count_ptr: *mut int = &mut count;
do idle_watcher.start |idle_watcher, status| {
let mut idle_watcher = idle_watcher;
assert!(status.is_none());
if unsafe { *count_ptr == 10 } {
idle_watcher.stop();
idle_watcher.close(||());
} else {
unsafe { *count_ptr = *count_ptr + 1; }
}
}
loop_.run();
loop_.close();
assert_eq!(count, 10);
}
}
#[test]
fn idle_start_stop_start() {
do run_in_bare_thread {
let mut loop_ = Loop::new();
let mut idle_watcher = { IdleWatcher::new(&mut loop_) };
do idle_watcher.start |idle_watcher, status| {
let mut idle_watcher = idle_watcher;
assert!(status.is_none());
idle_watcher.stop();
do idle_watcher.start |idle_watcher, status| {
assert!(status.is_none());
let mut idle_watcher = idle_watcher;
idle_watcher.stop();
idle_watcher.close(||());
}
}
loop_.run();
loop_.close();
}
}
}

View File

@@ -47,15 +47,17 @@ use libc::{c_void, c_int, size_t, malloc, free};
use cast::transmute;
use ptr::null;
use unstable::finally::Finally;
use rt::io::net::ip::IpAddr;
use rt::io::IoError;
#[cfg(test)] use unstable::run_in_bare_thread;
pub use self::file::FsRequest;
pub use self::net::{StreamWatcher, TcpWatcher};
pub use self::net::{StreamWatcher, TcpWatcher, UdpWatcher};
pub use self::idle::IdleWatcher;
pub use self::timer::TimerWatcher;
pub use self::async::AsyncWatcher;
/// The implementation of `rtio` for libuv
pub mod uvio;
@@ -67,6 +69,7 @@ pub mod file;
pub mod net;
pub mod idle;
pub mod timer;
pub mod async;
/// XXX: Loop(*handle) is buggy with destructors. Normal structs
/// with dtors may not be destructured, but tuple structs can,
@@ -124,6 +127,9 @@ pub type IdleCallback = ~fn(IdleWatcher, Option<UvError>);
pub type ConnectionCallback = ~fn(StreamWatcher, Option<UvError>);
pub type FsCallback = ~fn(FsRequest, Option<UvError>);
pub type TimerCallback = ~fn(TimerWatcher, Option<UvError>);
pub type AsyncCallback = ~fn(AsyncWatcher, Option<UvError>);
pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, IpAddr, uint, Option<UvError>);
pub type UdpSendCallback = ~fn(UdpWatcher, Option<UvError>);
/// Callbacks used by StreamWatchers, set as custom data on the foreign handle
@@ -134,7 +140,10 @@ struct WatcherData {
close_cb: Option<NullCallback>,
alloc_cb: Option<AllocCallback>,
idle_cb: Option<IdleCallback>,
timer_cb: Option<TimerCallback>
timer_cb: Option<TimerCallback>,
async_cb: Option<AsyncCallback>,
udp_recv_cb: Option<UdpReceiveCallback>,
udp_send_cb: Option<UdpSendCallback>
}
pub trait WatcherInterop {
@@ -163,7 +172,10 @@ impl<H, W: Watcher + NativeHandle<*H>> WatcherInterop for W {
close_cb: None,
alloc_cb: None,
idle_cb: None,
timer_cb: None
timer_cb: None,
async_cb: None,
udp_recv_cb: None,
udp_send_cb: None
};
let data = transmute::<~WatcherData, *c_void>(data);
uvll::set_data_for_uv_handle(self.native_handle(), data);
@@ -348,57 +360,3 @@ fn loop_smoke_test() {
loop_.close();
}
}
#[test]
#[ignore(reason = "valgrind - loop destroyed before watcher?")]
fn idle_new_then_close() {
do run_in_bare_thread {
let mut loop_ = Loop::new();
let idle_watcher = { IdleWatcher::new(&mut loop_) };
idle_watcher.close(||());
}
}
#[test]
fn idle_smoke_test() {
do run_in_bare_thread {
let mut loop_ = Loop::new();
let mut idle_watcher = { IdleWatcher::new(&mut loop_) };
let mut count = 10;
let count_ptr: *mut int = &mut count;
do idle_watcher.start |idle_watcher, status| {
let mut idle_watcher = idle_watcher;
assert!(status.is_none());
if unsafe { *count_ptr == 10 } {
idle_watcher.stop();
idle_watcher.close(||());
} else {
unsafe { *count_ptr = *count_ptr + 1; }
}
}
loop_.run();
loop_.close();
assert_eq!(count, 10);
}
}
#[test]
fn idle_start_stop_start() {
do run_in_bare_thread {
let mut loop_ = Loop::new();
let mut idle_watcher = { IdleWatcher::new(&mut loop_) };
do idle_watcher.start |idle_watcher, status| {
let mut idle_watcher = idle_watcher;
assert!(status.is_none());
idle_watcher.stop();
do idle_watcher.start |idle_watcher, status| {
assert!(status.is_none());
let mut idle_watcher = idle_watcher;
idle_watcher.stop();
idle_watcher.close(||());
}
}
loop_.run();
loop_.close();
}
}

View File

@@ -9,33 +9,149 @@
// except according to those terms.
use prelude::*;
use libc::{size_t, ssize_t, c_int, c_void};
use libc::{size_t, ssize_t, c_int, c_void, c_uint};
use rt::uv::uvll;
use rt::uv::uvll::*;
use rt::uv::{AllocCallback, ConnectionCallback, ReadCallback};
use rt::uv::{AllocCallback, ConnectionCallback, ReadCallback, UdpReceiveCallback, UdpSendCallback};
use rt::uv::{Loop, Watcher, Request, UvError, Buf, NativeHandle, NullCallback,
status_to_maybe_uv_error};
use rt::io::net::ip::{IpAddr, Ipv4, Ipv6};
use rt::uv::last_uv_error;
use vec;
use str;
use from_str::{FromStr};
use num;
fn ip4_as_uv_ip4<T>(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T {
match addr {
Ipv4(a, b, c, d, p) => {
enum UvIpAddr {
UvIpv4(*sockaddr_in),
UvIpv6(*sockaddr_in6),
}
fn sockaddr_to_UvIpAddr(addr: *uvll::sockaddr) -> UvIpAddr {
unsafe {
let addr = malloc_ip4_addr(fmt!("%u.%u.%u.%u",
a as uint,
b as uint,
c as uint,
d as uint), p as int);
assert!((is_ip4_addr(addr) || is_ip6_addr(addr)));
assert!(!(is_ip4_addr(addr) && is_ip6_addr(addr)));
match addr {
_ if is_ip4_addr(addr) => UvIpv4(as_sockaddr_in(addr)),
_ if is_ip6_addr(addr) => UvIpv6(as_sockaddr_in6(addr)),
_ => fail!(),
}
}
}
fn ip_as_uv_ip<T>(addr: IpAddr, f: &fn(UvIpAddr) -> T) -> T {
let malloc = match addr {
Ipv4(*) => malloc_ip4_addr,
Ipv6(*) => malloc_ip6_addr,
};
let wrap = match addr {
Ipv4(*) => UvIpv4,
Ipv6(*) => UvIpv6,
};
let ip_str = match addr {
Ipv4(x1, x2, x3, x4, _) =>
fmt!("%u.%u.%u.%u", x1 as uint, x2 as uint, x3 as uint, x4 as uint),
Ipv6(x1, x2, x3, x4, x5, x6, x7, x8, _) =>
fmt!("%x:%x:%x:%x:%x:%x:%x:%x",
x1 as uint, x2 as uint, x3 as uint, x4 as uint,
x5 as uint, x6 as uint, x7 as uint, x8 as uint),
};
let port = match addr {
Ipv4(_, _, _, _, p) | Ipv6(_, _, _, _, _, _, _, _, p) => p as int
};
let free = match addr {
Ipv4(*) => free_ip4_addr,
Ipv6(*) => free_ip6_addr,
};
let addr = unsafe { malloc(ip_str, port) };
do (|| {
f(addr)
f(wrap(addr))
}).finally {
free_ip4_addr(addr);
unsafe { free(addr) };
}
}
fn uv_ip_as_ip<T>(addr: UvIpAddr, f: &fn(IpAddr) -> T) -> T {
let ip_size = match addr {
UvIpv4(*) => 4/*groups of*/ * 3/*digits separated by*/ + 3/*periods*/,
UvIpv6(*) => 8/*groups of*/ * 4/*hex digits separated by*/ + 7 /*colons*/,
};
let ip_name = {
let buf = vec::from_elem(ip_size + 1 /*null terminated*/, 0u8);
unsafe {
match addr {
UvIpv4(addr) => uvll::ip4_name(addr, vec::raw::to_ptr(buf), ip_size as size_t),
UvIpv6(addr) => uvll::ip6_name(addr, vec::raw::to_ptr(buf), ip_size as size_t),
}
};
buf
};
let ip_port = unsafe {
let port = match addr {
UvIpv4(addr) => uvll::ip4_port(addr),
UvIpv6(addr) => uvll::ip6_port(addr),
};
port as u16
};
let ip_str = str::from_bytes_slice(ip_name).trim_right_chars(&'\x00');
let ip = match addr {
UvIpv4(*) => {
let ip: ~[u8] =
ip_str.split_iter('.')
.transform(|s: &str| -> u8 { FromStr::from_str(s).unwrap() })
.collect();
assert_eq!(ip.len(), 4);
Ipv4(ip[0], ip[1], ip[2], ip[3], ip_port)
},
UvIpv6(*) => {
let ip: ~[u16] = {
let expand_shorthand_and_convert = |s: &str| -> ~[~[u16]] {
let convert_each_segment = |s: &str| -> ~[u16] {
let read_hex_segment = |s: &str| -> u16 {
num::FromStrRadix::from_str_radix(s, 16u).unwrap()
};
match s {
"" => ~[],
s => s.split_iter(':').transform(read_hex_segment).collect(),
}
Ipv6 => fail!()
};
s.split_str_iter("::").transform(convert_each_segment).collect()
};
match expand_shorthand_and_convert(ip_str) {
[x] => x, // no shorthand found
[l, r] => l + vec::from_elem(8 - l.len() - r.len(), 0u16) + r, // fill the gap
_ => fail!(), // impossible. only one shorthand allowed.
}
};
assert_eq!(ip.len(), 8);
Ipv6(ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7], ip_port)
},
};
// finally run the closure
f(ip)
}
fn uv_ip_to_ip(addr: UvIpAddr) -> IpAddr {
use util;
uv_ip_as_ip(addr, util::id)
}
#[cfg(test)]
#[test]
fn test_ip4_conversion() {
use rt;
let ip4 = rt::test::next_test_ip4();
assert_eq!(ip4, ip_as_uv_ip(ip4, uv_ip_to_ip));
}
#[cfg(test)]
#[test]
fn test_ip6_conversion() {
use rt;
let ip6 = rt::test::next_test_ip6();
assert_eq!(ip6, ip_as_uv_ip(ip6, uv_ip_to_ip));
}
// uv_stream t is the parent class of uv_tcp_t, uv_pipe_t, uv_tty_t
@@ -51,13 +167,11 @@ impl StreamWatcher {
data.read_cb = Some(cb);
}
let handle = self.native_handle();
unsafe { uvll::read_start(handle, alloc_cb, read_cb); }
unsafe { uvll::read_start(self.native_handle(), alloc_cb, read_cb); }
extern fn alloc_cb(stream: *uvll::uv_stream_t, suggested_size: size_t) -> Buf {
let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream);
let data = stream_watcher.get_watcher_data();
let alloc_cb = data.alloc_cb.get_ref();
let alloc_cb = stream_watcher.get_watcher_data().alloc_cb.get_ref();
return (*alloc_cb)(suggested_size as uint);
}
@@ -65,8 +179,7 @@ impl StreamWatcher {
rtdebug!("buf addr: %x", buf.base as uint);
rtdebug!("buf len: %d", buf.len as int);
let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream);
let data = stream_watcher.get_watcher_data();
let cb = data.read_cb.get_ref();
let cb = stream_watcher.get_watcher_data().read_cb.get_ref();
let status = status_to_maybe_uv_error(stream, nread as c_int);
(*cb)(stream_watcher, nread as int, buf, status);
}
@@ -88,22 +201,15 @@ impl StreamWatcher {
}
let req = WriteRequest::new();
let bufs = [buf];
unsafe {
assert!(0 == uvll::write(req.native_handle(),
self.native_handle(),
bufs, write_cb));
assert_eq!(0, uvll::write(req.native_handle(), self.native_handle(), [buf], write_cb));
}
extern fn write_cb(req: *uvll::uv_write_t, status: c_int) {
let write_request: WriteRequest = NativeHandle::from_native_handle(req);
let mut stream_watcher = write_request.stream();
write_request.delete();
let cb = {
let data = stream_watcher.get_watcher_data();
let cb = data.write_cb.swap_unwrap();
cb
};
let cb = stream_watcher.get_watcher_data().write_cb.swap_unwrap();
let status = status_to_maybe_uv_error(stream_watcher.native_handle(), status);
cb(stream_watcher, status);
}
@@ -112,9 +218,7 @@ impl StreamWatcher {
pub fn accept(&mut self, stream: StreamWatcher) {
let self_handle = self.native_handle() as *c_void;
let stream_handle = stream.native_handle() as *c_void;
unsafe {
assert_eq!(0, uvll::accept(self_handle, stream_handle));
}
assert_eq!(0, unsafe { uvll::accept(self_handle, stream_handle) } );
}
pub fn close(self, cb: NullCallback) {
@@ -129,10 +233,7 @@ impl StreamWatcher {
extern fn close_cb(handle: *uvll::uv_stream_t) {
let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(handle);
{
let data = stream_watcher.get_watcher_data();
data.close_cb.swap_unwrap()();
}
stream_watcher.get_watcher_data().close_cb.swap_unwrap()();
stream_watcher.drop_watcher_data();
unsafe { free_handle(handle as *c_void) }
}
@@ -140,8 +241,7 @@ impl StreamWatcher {
}
impl NativeHandle<*uvll::uv_stream_t> for StreamWatcher {
fn from_native_handle(
handle: *uvll::uv_stream_t) -> StreamWatcher {
fn from_native_handle(handle: *uvll::uv_stream_t) -> StreamWatcher {
StreamWatcher(handle)
}
fn native_handle(&self) -> *uvll::uv_stream_t {
@@ -153,7 +253,7 @@ pub struct TcpWatcher(*uvll::uv_tcp_t);
impl Watcher for TcpWatcher { }
impl TcpWatcher {
pub fn new(loop_: &mut Loop) -> TcpWatcher {
pub fn new(loop_: &Loop) -> TcpWatcher {
unsafe {
let handle = malloc_handle(UV_TCP);
assert!(handle.is_not_null());
@@ -165,22 +265,19 @@ impl TcpWatcher {
}
pub fn bind(&mut self, address: IpAddr) -> Result<(), UvError> {
match address {
Ipv4(*) => {
do ip4_as_uv_ip4(address) |addr| {
do ip_as_uv_ip(address) |addr| {
let result = unsafe {
uvll::tcp_bind(self.native_handle(), addr)
match addr {
UvIpv4(addr) => uvll::tcp_bind(self.native_handle(), addr),
UvIpv6(addr) => uvll::tcp_bind6(self.native_handle(), addr),
}
};
if result == 0 {
Ok(())
} else {
Err(last_uv_error(self))
match result {
0 => Ok(()),
_ => Err(last_uv_error(self)),
}
}
}
_ => fail!()
}
}
pub fn connect(&mut self, address: IpAddr, cb: ConnectionCallback) {
unsafe {
@@ -188,16 +285,15 @@ impl TcpWatcher {
self.get_watcher_data().connect_cb = Some(cb);
let connect_handle = ConnectRequest::new().native_handle();
match address {
Ipv4(*) => {
do ip4_as_uv_ip4(address) |addr| {
rtdebug!("connect_t: %x", connect_handle as uint);
assert!(0 == uvll::tcp_connect(connect_handle,
self.native_handle(),
addr, connect_cb));
}
}
_ => fail!()
do ip_as_uv_ip(address) |addr| {
let result = match addr {
UvIpv4(addr) => uvll::tcp_connect(connect_handle,
self.native_handle(), addr, connect_cb),
UvIpv6(addr) => uvll::tcp_connect6(connect_handle,
self.native_handle(), addr, connect_cb),
};
assert_eq!(0, result);
}
extern fn connect_cb(req: *uvll::uv_connect_t, status: c_int) {
@@ -205,10 +301,7 @@ impl TcpWatcher {
let connect_request: ConnectRequest = NativeHandle::from_native_handle(req);
let mut stream_watcher = connect_request.stream();
connect_request.delete();
let cb: ConnectionCallback = {
let data = stream_watcher.get_watcher_data();
data.connect_cb.swap_unwrap()
};
let cb = stream_watcher.get_watcher_data().connect_cb.swap_unwrap();
let status = status_to_maybe_uv_error(stream_watcher.native_handle(), status);
cb(stream_watcher, status);
}
@@ -225,15 +318,13 @@ impl TcpWatcher {
unsafe {
static BACKLOG: c_int = 128; // XXX should be configurable
// XXX: This can probably fail
assert!(0 == uvll::listen(self.native_handle(),
BACKLOG, connection_cb));
assert_eq!(0, uvll::listen(self.native_handle(), BACKLOG, connection_cb));
}
extern fn connection_cb(handle: *uvll::uv_stream_t, status: c_int) {
rtdebug!("connection_cb");
let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(handle);
let data = stream_watcher.get_watcher_data();
let cb = data.connect_cb.get_ref();
let cb = stream_watcher.get_watcher_data().connect_cb.get_ref();
let status = status_to_maybe_uv_error(handle, status);
(*cb)(stream_watcher, status);
}
@@ -253,6 +344,134 @@ impl NativeHandle<*uvll::uv_tcp_t> for TcpWatcher {
}
}
/// A uv UDP handle, stored as a newtype around the raw `uv_udp_t` pointer.
/// NOTE(review): this type is copied freely below (`let mut this = *self;`),
/// so copies alias the same underlying libuv handle — confirm that is intended.
pub struct UdpWatcher(*uvll::uv_udp_t);

// Marker impl: opts this handle into the shared Watcher machinery
// (watcher-data storage used by the callback trampolines below).
impl Watcher for UdpWatcher { }
impl UdpWatcher {
    /// Allocate and initialize a UDP handle on `loop_`, then attach the
    /// per-watcher data block that the extern callbacks below read from.
    pub fn new(loop_: &Loop) -> UdpWatcher {
        unsafe {
            let handle = malloc_handle(UV_UDP);
            assert!(handle.is_not_null());
            // uv returns 0 on success; anything else here is a bug, not a
            // recoverable error, hence the assert.
            assert_eq!(0, uvll::udp_init(loop_.native_handle(), handle));
            let mut watcher: UdpWatcher = NativeHandle::from_native_handle(handle);
            watcher.install_watcher_data();
            return watcher;
        }
    }

    /// Bind the socket to `address`, dispatching to the v4 or v6 uv binding.
    /// Returns `Err` with the last uv error when uv reports a non-zero code.
    pub fn bind(&self, address: IpAddr) -> Result<(), UvError> {
        do ip_as_uv_ip(address) |addr| {
            let result = unsafe {
                match addr {
                    UvIpv4(addr) => uvll::udp_bind(self.native_handle(), addr, 0u32),
                    UvIpv6(addr) => uvll::udp_bind6(self.native_handle(), addr, 0u32),
                }
            };
            match result {
                0 => Ok(()),
                _ => Err(last_uv_error(self)),
            }
        }
    }

    /// Start receiving datagrams. `alloc` supplies buffers on demand; `cb`
    /// is invoked per datagram. Both are stashed in the watcher data so the
    /// extern trampolines can recover them from the raw handle.
    pub fn recv_start(&self, alloc: AllocCallback, cb: UdpReceiveCallback) {
        {
            // Scoped so the mutable borrow of the watcher data ends before
            // the uv call below.
            let mut this = *self;
            let data = this.get_watcher_data();
            data.alloc_cb = Some(alloc);
            data.udp_recv_cb = Some(cb);
        }

        unsafe { uvll::udp_recv_start(self.native_handle(), alloc_cb, recv_cb); }

        // Trampoline: uv asks for a buffer; delegate to the stored AllocCallback.
        extern fn alloc_cb(handle: *uvll::uv_udp_t, suggested_size: size_t) -> Buf {
            let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle);
            let alloc_cb = udp_watcher.get_watcher_data().alloc_cb.get_ref();
            return (*alloc_cb)(suggested_size as uint);
        }

        // Trampoline: uv delivered (or failed to deliver) a datagram.
        extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf,
                          addr: *uvll::sockaddr, flags: c_uint) {
            // When there's no data to read the recv callback can be a no-op.
            // This can happen if read returns EAGAIN/EWOULDBLOCK. By ignoring
            // this we just drop back to kqueue and wait for the next callback.
            if nread == 0 {
                return;
            }

            rtdebug!("buf addr: %x", buf.base as uint);
            rtdebug!("buf len: %d", buf.len as int);
            let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle);
            let cb = udp_watcher.get_watcher_data().udp_recv_cb.get_ref();
            // Negative nread encodes a uv error; translate it here.
            let status = status_to_maybe_uv_error(handle, nread as c_int);
            let addr = uv_ip_to_ip(sockaddr_to_UvIpAddr(addr));
            (*cb)(udp_watcher, nread as int, buf, addr, flags as uint, status);
        }
    }

    /// Stop delivering datagrams to the recv callback.
    pub fn recv_stop(&self) {
        unsafe { uvll::udp_recv_stop(self.native_handle()); }
    }

    /// Send a single buffer to `address`. `cb` fires when the send request
    /// completes. NOTE(review): `buf`'s memory must stay valid until `cb`
    /// runs — that appears to be the caller's responsibility here; confirm.
    pub fn send(&self, buf: Buf, address: IpAddr, cb: UdpSendCallback) {
        {
            // Only one in-flight send per watcher is supported.
            let mut this = *self;
            let data = this.get_watcher_data();
            assert!(data.udp_send_cb.is_none());
            data.udp_send_cb = Some(cb);
        }

        let req = UdpSendRequest::new();
        do ip_as_uv_ip(address) |addr| {
            let result = unsafe {
                match addr {
                    UvIpv4(addr) => uvll::udp_send(req.native_handle(),
                        self.native_handle(), [buf], addr, send_cb),
                    UvIpv6(addr) => uvll::udp_send6(req.native_handle(),
                        self.native_handle(), [buf], addr, send_cb),
                }
            };
            assert_eq!(0, result);
        }

        // Trampoline: reclaim the request, pull the one-shot callback back
        // out of the watcher data, and invoke it with the translated status.
        extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) {
            let send_request: UdpSendRequest = NativeHandle::from_native_handle(req);
            let mut udp_watcher = send_request.handle();
            send_request.delete();
            let cb = udp_watcher.get_watcher_data().udp_send_cb.swap_unwrap();
            let status = status_to_maybe_uv_error(udp_watcher.native_handle(), status);
            cb(udp_watcher, status);
        }
    }

    /// Close the handle. Consumes `self`; once `cb` has run the watcher
    /// data is dropped and the uv handle memory freed.
    pub fn close(self, cb: NullCallback) {
        {
            let mut this = self;
            let data = this.get_watcher_data();
            assert!(data.close_cb.is_none());
            data.close_cb = Some(cb);
        }

        unsafe { uvll::close(self.native_handle(), close_cb); }

        // Trampoline: run the user's close callback, then release everything.
        extern fn close_cb(handle: *uvll::uv_udp_t) {
            let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle);
            udp_watcher.get_watcher_data().close_cb.swap_unwrap()();
            udp_watcher.drop_watcher_data();
            unsafe { free_handle(handle as *c_void) }
        }
    }
}
// Raw-pointer <-> newtype conversions used by the extern trampolines above.
impl NativeHandle<*uvll::uv_udp_t> for UdpWatcher {
    fn from_native_handle(handle: *uvll::uv_udp_t) -> UdpWatcher {
        UdpWatcher(handle)
    }
    fn native_handle(&self) -> *uvll::uv_udp_t {
        match self { &UdpWatcher(ptr) => ptr }
    }
}
// uv_connect_t is a subclass of uv_req_t
struct ConnectRequest(*uvll::uv_connect_t);
impl Request for ConnectRequest { }
@@ -260,12 +479,9 @@ impl Request for ConnectRequest { }
impl ConnectRequest {
fn new() -> ConnectRequest {
let connect_handle = unsafe {
malloc_req(UV_CONNECT)
};
let connect_handle = unsafe { malloc_req(UV_CONNECT) };
assert!(connect_handle.is_not_null());
let connect_handle = connect_handle as *uvll::uv_connect_t;
ConnectRequest(connect_handle)
ConnectRequest(connect_handle as *uvll::uv_connect_t)
}
fn stream(&self) -> StreamWatcher {
@@ -281,8 +497,7 @@ impl ConnectRequest {
}
impl NativeHandle<*uvll::uv_connect_t> for ConnectRequest {
fn from_native_handle(
handle: *uvll:: uv_connect_t) -> ConnectRequest {
fn from_native_handle(handle: *uvll:: uv_connect_t) -> ConnectRequest {
ConnectRequest(handle)
}
fn native_handle(&self) -> *uvll::uv_connect_t {
@@ -296,12 +511,9 @@ impl Request for WriteRequest { }
impl WriteRequest {
pub fn new() -> WriteRequest {
let write_handle = unsafe {
malloc_req(UV_WRITE)
};
let write_handle = unsafe { malloc_req(UV_WRITE) };
assert!(write_handle.is_not_null());
let write_handle = write_handle as *uvll::uv_write_t;
WriteRequest(write_handle)
WriteRequest(write_handle as *uvll::uv_write_t)
}
pub fn stream(&self) -> StreamWatcher {
@@ -325,6 +537,36 @@ impl NativeHandle<*uvll::uv_write_t> for WriteRequest {
}
}
/// A uv UDP send request (`uv_udp_send_t`), owned for the duration of one
/// `send` and reclaimed in the send callback via `delete`.
pub struct UdpSendRequest(*uvll::uv_udp_send_t);

// Marker impl: participates in the common Request machinery.
impl Request for UdpSendRequest { }

impl UdpSendRequest {
    /// Allocate a fresh send request; freed later by `delete`.
    pub fn new() -> UdpSendRequest {
        let send_handle = unsafe { malloc_req(UV_UDP_SEND) };
        assert!(send_handle.is_not_null());
        UdpSendRequest(send_handle as *uvll::uv_udp_send_t)
    }

    /// Recover the UdpWatcher this request was issued against.
    pub fn handle(&self) -> UdpWatcher {
        let send_request_handle = unsafe {
            uvll::get_udp_handle_from_send_req(self.native_handle())
        };
        NativeHandle::from_native_handle(send_request_handle)
    }

    /// Free the underlying request memory. Consumes `self`.
    pub fn delete(self) {
        unsafe { free_req(self.native_handle() as *c_void) }
    }
}

// Raw-pointer <-> newtype conversions for the send-callback trampoline.
impl NativeHandle<*uvll::uv_udp_send_t> for UdpSendRequest {
    fn from_native_handle(handle: *uvll::uv_udp_send_t) -> UdpSendRequest {
        UdpSendRequest(handle)
    }
    fn native_handle(&self) -> *uvll::uv_udp_send_t {
        match self { &UdpSendRequest(ptr) => ptr }
    }
}
#[cfg(test)]
mod test {
@@ -339,7 +581,7 @@ mod test {
use rt::uv::{vec_from_uv_buf, vec_to_uv_buf, slice_to_uv_buf};
#[test]
fn connect_close() {
fn connect_close_ip4() {
do run_in_bare_thread() {
let mut loop_ = Loop::new();
let mut tcp_watcher = { TcpWatcher::new(&mut loop_) };
@@ -357,7 +599,51 @@ mod test {
}
#[test]
fn listen() {
/// Connecting over IPv6 to a port nobody listens on must report
/// ECONNREFUSED through the connect callback, then close cleanly.
fn connect_close_ip6() {
    do run_in_bare_thread() {
        let mut loop_ = Loop::new();
        let mut tcp_watcher = { TcpWatcher::new(&mut loop_) };
        // Connect to a port where nobody is listening
        let addr = next_test_ip6();
        do tcp_watcher.connect(addr) |stream_watcher, status| {
            rtdebug!("tcp_watcher.connect!");
            assert!(status.is_some());
            assert_eq!(status.get().name(), ~"ECONNREFUSED");
            stream_watcher.close(||());
        }
        loop_.run();
        loop_.close();
    }
}
#[test]
/// Smoke test: an IPv4 UDP watcher can be created, bound, and closed
/// without the loop erroring.
fn udp_bind_close_ip4() {
    do run_in_bare_thread() {
        let mut loop_ = Loop::new();
        let udp_watcher = { UdpWatcher::new(&mut loop_) };
        let addr = next_test_ip4();
        udp_watcher.bind(addr);
        udp_watcher.close(||());
        loop_.run();
        loop_.close();
    }
}
#[test]
/// Same smoke test as `udp_bind_close_ip4`, over IPv6.
fn udp_bind_close_ip6() {
    do run_in_bare_thread() {
        let mut loop_ = Loop::new();
        let udp_watcher = { UdpWatcher::new(&mut loop_) };
        let addr = next_test_ip6();
        udp_watcher.bind(addr);
        udp_watcher.close(||());
        loop_.run();
        loop_.close();
    }
}
#[test]
fn listen_ip4() {
do run_in_bare_thread() {
static MAX: int = 10;
let mut loop_ = Loop::new();
@@ -366,10 +652,82 @@ mod test {
server_tcp_watcher.bind(addr);
let loop_ = loop_;
rtdebug!("listening");
do server_tcp_watcher.listen |server_stream_watcher, status| {
do server_tcp_watcher.listen |mut server_stream_watcher, status| {
rtdebug!("listened!");
assert!(status.is_none());
let mut loop_ = loop_;
let client_tcp_watcher = TcpWatcher::new(&mut loop_);
let mut client_tcp_watcher = client_tcp_watcher.as_stream();
server_stream_watcher.accept(client_tcp_watcher);
let count_cell = Cell::new(0);
let server_stream_watcher = server_stream_watcher;
rtdebug!("starting read");
let alloc: AllocCallback = |size| {
vec_to_uv_buf(vec::from_elem(size, 0))
};
do client_tcp_watcher.read_start(alloc) |stream_watcher, nread, buf, status| {
rtdebug!("i'm reading!");
let buf = vec_from_uv_buf(buf);
let mut count = count_cell.take();
if status.is_none() {
rtdebug!("got %d bytes", nread);
let buf = buf.unwrap();
for buf.slice(0, nread as uint).iter().advance() |byte| {
assert!(*byte == count as u8);
rtdebug!("%u", *byte as uint);
count += 1;
}
} else {
assert_eq!(count, MAX);
do stream_watcher.close {
server_stream_watcher.close(||());
}
}
count_cell.put_back(count);
}
}
let _client_thread = do Thread::start {
rtdebug!("starting client thread");
let mut loop_ = Loop::new();
let mut tcp_watcher = { TcpWatcher::new(&mut loop_) };
do tcp_watcher.connect(addr) |mut stream_watcher, status| {
rtdebug!("connecting");
assert!(status.is_none());
let msg = ~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9];
let buf = slice_to_uv_buf(msg);
let msg_cell = Cell::new(msg);
do stream_watcher.write(buf) |stream_watcher, status| {
rtdebug!("writing");
assert!(status.is_none());
let msg_cell = Cell::new(msg_cell.take());
stream_watcher.close(||ignore(msg_cell.take()));
}
}
loop_.run();
loop_.close();
};
let mut loop_ = loop_;
loop_.run();
loop_.close();
}
}
#[test]
fn listen_ip6() {
do run_in_bare_thread() {
static MAX: int = 10;
let mut loop_ = Loop::new();
let mut server_tcp_watcher = { TcpWatcher::new(&mut loop_) };
let addr = next_test_ip6();
server_tcp_watcher.bind(addr);
let loop_ = loop_;
rtdebug!("listening");
do server_tcp_watcher.listen |mut server_stream_watcher, status| {
rtdebug!("listened!");
assert!(status.is_none());
let mut server_stream_watcher = server_stream_watcher;
let mut loop_ = loop_;
let client_tcp_watcher = TcpWatcher::new(&mut loop_);
let mut client_tcp_watcher = client_tcp_watcher.as_stream();
@@ -409,10 +767,9 @@ mod test {
rtdebug!("starting client thread");
let mut loop_ = Loop::new();
let mut tcp_watcher = { TcpWatcher::new(&mut loop_) };
do tcp_watcher.connect(addr) |stream_watcher, status| {
do tcp_watcher.connect(addr) |mut stream_watcher, status| {
rtdebug!("connecting");
assert!(status.is_none());
let mut stream_watcher = stream_watcher;
let msg = ~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9];
let buf = slice_to_uv_buf(msg);
let msg_cell = Cell::new(msg);
@@ -432,4 +789,122 @@ mod test {
loop_.close();
}
}
#[test]
/// End-to-end IPv4 UDP test: a client thread sends the bytes 0..9 to a
/// server watcher, which checks payload, source address, and flags, then
/// both sides close.
fn udp_recv_ip4() {
    do run_in_bare_thread() {
        static MAX: int = 10;
        let mut loop_ = Loop::new();
        let server_addr = next_test_ip4();
        let client_addr = next_test_ip4();

        let server = UdpWatcher::new(&loop_);
        assert!(server.bind(server_addr).is_ok());

        rtdebug!("starting read");
        let alloc: AllocCallback = |size| {
            vec_to_uv_buf(vec::from_elem(size, 0))
        };

        do server.recv_start(alloc) |server, nread, buf, src, flags, status| {
            // One datagram is enough; stop before validating it.
            server.recv_stop();
            rtdebug!("i'm reading!");
            assert!(status.is_none());
            assert_eq!(flags, 0);
            assert_eq!(src, client_addr);

            let buf = vec_from_uv_buf(buf);
            let mut count = 0;
            rtdebug!("got %d bytes", nread);

            // Payload must be exactly the ascending bytes 0..MAX.
            let buf = buf.unwrap();
            for buf.slice(0, nread as uint).iter().advance() |&byte| {
                assert!(byte == count as u8);
                rtdebug!("%u", byte as uint);
                count += 1;
            }
            assert_eq!(count, MAX);

            server.close(||{});
        }

        // Client runs its own loop on a separate OS thread.
        do Thread::start {
            let mut loop_ = Loop::new();
            let client = UdpWatcher::new(&loop_);
            assert!(client.bind(client_addr).is_ok());

            let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
            let buf = slice_to_uv_buf(msg);
            do client.send(buf, server_addr) |client, status| {
                rtdebug!("writing");
                assert!(status.is_none());
                client.close(||{});
            }

            loop_.run();
            loop_.close();
        };

        loop_.run();
        loop_.close();
    }
}
#[test]
/// IPv6 twin of `udp_recv_ip4`: client thread sends bytes 0..9, server
/// validates payload, source, and flags, then both close.
fn udp_recv_ip6() {
    do run_in_bare_thread() {
        static MAX: int = 10;
        let mut loop_ = Loop::new();
        let server_addr = next_test_ip6();
        let client_addr = next_test_ip6();

        let server = UdpWatcher::new(&loop_);
        assert!(server.bind(server_addr).is_ok());

        rtdebug!("starting read");
        let alloc: AllocCallback = |size| {
            vec_to_uv_buf(vec::from_elem(size, 0))
        };

        do server.recv_start(alloc) |server, nread, buf, src, flags, status| {
            // One datagram is enough; stop before validating it.
            server.recv_stop();
            rtdebug!("i'm reading!");
            assert!(status.is_none());
            assert_eq!(flags, 0);
            assert_eq!(src, client_addr);

            let buf = vec_from_uv_buf(buf);
            let mut count = 0;
            rtdebug!("got %d bytes", nread);

            // Payload must be exactly the ascending bytes 0..MAX.
            let buf = buf.unwrap();
            for buf.slice(0, nread as uint).iter().advance() |&byte| {
                assert!(byte == count as u8);
                rtdebug!("%u", byte as uint);
                count += 1;
            }
            assert_eq!(count, MAX);

            server.close(||{});
        }

        // Client runs its own loop on a separate OS thread.
        do Thread::start {
            let mut loop_ = Loop::new();
            let client = UdpWatcher::new(&loop_);
            assert!(client.bind(client_addr).is_ok());

            let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
            let buf = slice_to_uv_buf(msg);
            do client.send(buf, server_addr) |client, status| {
                rtdebug!("writing");
                assert!(status.is_none());
                client.close(||{});
            }

            loop_.run();
            loop_.close();
        };

        loop_.run();
        loop_.close();
    }
}
}

View File

@@ -12,6 +12,7 @@ use option::*;
use result::*;
use ops::Drop;
use cell::Cell;
use cast;
use cast::transmute;
use clone::Clone;
use rt::io::IoError;
@@ -23,11 +24,15 @@ use rt::sched::Scheduler;
use rt::io::{standard_error, OtherIoError};
use rt::tube::Tube;
use rt::local::Local;
use unstable::sync::{Exclusive, exclusive};
#[cfg(test)] use container::Container;
#[cfg(test)] use uint;
#[cfg(test)] use unstable::run_in_bare_thread;
#[cfg(test)] use rt::test::*;
#[cfg(test)] use rt::test::{spawntask_immediately,
next_test_ip4,
run_in_newsched_task};
pub struct UvEventLoop {
uvio: UvIoFactory
@@ -39,11 +44,6 @@ impl UvEventLoop {
uvio: UvIoFactory(Loop::new())
}
}
/// A convenience constructor
pub fn new_scheduler() -> Scheduler {
Scheduler::new(~UvEventLoop::new())
}
}
impl Drop for UvEventLoop {
@@ -63,9 +63,8 @@ impl EventLoop for UvEventLoop {
fn callback(&mut self, f: ~fn()) {
let mut idle_watcher = IdleWatcher::new(self.uvio.uv_loop());
do idle_watcher.start |idle_watcher, status| {
do idle_watcher.start |mut idle_watcher, status| {
assert!(status.is_none());
let mut idle_watcher = idle_watcher;
idle_watcher.stop();
idle_watcher.close(||());
f();
@@ -81,6 +80,10 @@ impl EventLoop for UvEventLoop {
}
}
fn remote_callback(&mut self, f: ~fn()) -> ~RemoteCallbackObject {
~UvRemoteCallback::new(self.uvio.uv_loop(), f)
}
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactoryObject> {
Some(&mut self.uvio)
}
@@ -100,6 +103,89 @@ fn test_callback_run_once() {
}
}
/// A callback that can be fired from another thread to wake this event
/// loop, built on a uv async handle plus a shared shutdown flag.
pub struct UvRemoteCallback {
    // The uv async handle for triggering the callback
    async: AsyncWatcher,
    // A flag to tell the callback to exit, set from the dtor. This is
    // almost never contested - only in rare races with the dtor.
    exit_flag: Exclusive<bool>
}
impl UvRemoteCallback {
    /// Create the async watcher on `loop_`. Each time the watcher fires it
    /// runs `f`, then checks the shared exit flag; when the dtor has set it,
    /// the handle closes itself from inside the loop.
    pub fn new(loop_: &mut Loop, f: ~fn()) -> UvRemoteCallback {
        let exit_flag = exclusive(false);
        let exit_flag_clone = exit_flag.clone();
        let async = do AsyncWatcher::new(loop_) |watcher, status| {
            assert!(status.is_none());
            f();
            // Read the flag under the same lock the dtor writes it under,
            // so the final fire reliably observes the shutdown request.
            unsafe {
                do exit_flag_clone.with_imm |&should_exit| {
                    if should_exit {
                        watcher.close(||());
                    }
                }
            }
        };
        UvRemoteCallback {
            async: async,
            exit_flag: exit_flag
        }
    }
}
impl RemoteCallback for UvRemoteCallback {
    // Wake the owning loop; safe to call from another thread (that is the
    // point of the uv async handle).
    fn fire(&mut self) { self.async.send() }
}
impl Drop for UvRemoteCallback {
    /// Request shutdown: set the exit flag and fire one last time so the
    /// async callback (running on the loop thread) closes the handle.
    fn drop(&self) {
        unsafe {
            // `drop` takes `&self` in this dialect; we need mutable access
            // for `send`, hence the transmute.
            let this: &mut UvRemoteCallback = cast::transmute_mut(self);
            do this.exit_flag.with |should_exit| {
                // NB: These two things need to happen atomically. Otherwise
                // the event handler could wake up due to a *previous*
                // signal and see the exit flag, destroying the handle
                // before the final send.
                *should_exit = true;
                this.async.send();
            }
        }
    }
}
#[cfg(test)]
mod test_remote {
    use cell::Cell;
    use rt::test::*;
    use rt::thread::Thread;
    use rt::tube::Tube;
    use rt::rtio::EventLoop;
    use rt::local::Local;
    use rt::sched::Scheduler;

    #[test]
    /// A remote callback created on the scheduler's loop can be fired from
    /// a separate OS thread; the callback's send(1) must reach this task.
    fn test_uv_remote() {
        do run_in_newsched_task {
            let mut tube = Tube::new();
            let tube_clone = tube.clone();
            let remote_cell = Cell::new_empty();
            // Build the remote callback while borrowing the local scheduler.
            do Local::borrow::<Scheduler, ()>() |sched| {
                let tube_clone = tube_clone.clone();
                let tube_clone_cell = Cell::new(tube_clone);
                let remote = do sched.event_loop.remote_callback {
                    tube_clone_cell.take().send(1);
                };
                remote_cell.put_back(remote);
            }
            // Fire it from a different thread to exercise the cross-thread path.
            let _thread = do Thread::start {
                remote_cell.take().fire();
            };

            assert!(tube.recv() == 1);
        }
    }
}
pub struct UvIoFactory(Loop);
impl UvIoFactory {
@@ -122,12 +208,10 @@ impl IoFactory for UvIoFactory {
assert!(scheduler.in_task_context());
// Block this task and take ownership, switch to scheduler context
do scheduler.deschedule_running_task_and_then |task| {
do scheduler.deschedule_running_task_and_then |sched, task| {
rtdebug!("connect: entered scheduler context");
do Local::borrow::<Scheduler> |scheduler| {
assert!(!scheduler.in_task_context());
}
assert!(!sched.in_task_context());
let mut tcp_watcher = TcpWatcher::new(self.uv_loop());
let task_cell = Cell::new(task);
@@ -136,7 +220,7 @@ impl IoFactory for UvIoFactory {
rtdebug!("connect: in connect callback");
if status.is_none() {
rtdebug!("status is none");
let res = Ok(~UvTcpStream { watcher: stream_watcher });
let res = Ok(~UvTcpStream(stream_watcher));
// Store the stream in the task's stack
unsafe { (*result_cell_ptr).put_back(res); }
@@ -167,7 +251,7 @@ impl IoFactory for UvIoFactory {
Ok(_) => Ok(~UvTcpListener::new(watcher)),
Err(uverr) => {
let scheduler = Local::take::<Scheduler>();
do scheduler.deschedule_running_task_and_then |task| {
do scheduler.deschedule_running_task_and_then |_, task| {
let task_cell = Cell::new(task);
do watcher.as_stream().close {
let scheduler = Local::take::<Scheduler>();
@@ -178,6 +262,24 @@ impl IoFactory for UvIoFactory {
}
}
}
/// Create and bind a UDP socket. On bind failure the freshly created
/// watcher must still be closed through the event loop, so the task is
/// descheduled until the close callback has run, then the error returned.
fn udp_bind(&mut self, addr: IpAddr) -> Result<~RtioUdpSocketObject, IoError> {
    let /*mut*/ watcher = UdpWatcher::new(self.uv_loop());
    match watcher.bind(addr) {
        Ok(_) => Ok(~UvUdpSocket(watcher)),
        Err(uverr) => {
            // Block this task until the handle is fully closed; the close
            // callback resumes it.
            let scheduler = Local::take::<Scheduler>();
            do scheduler.deschedule_running_task_and_then |_, task| {
                let task_cell = Cell::new(task);
                do watcher.close {
                    let scheduler = Local::take::<Scheduler>();
                    scheduler.resume_task_immediately(task_cell.take());
                }
            }
            Err(uv_error_to_io_error(uverr))
        }
    }
}
}
// FIXME #6090: Prefer newtype structs but Drop doesn't work
@@ -203,7 +305,7 @@ impl Drop for UvTcpListener {
fn drop(&self) {
let watcher = self.watcher();
let scheduler = Local::take::<Scheduler>();
do scheduler.deschedule_running_task_and_then |task| {
do scheduler.deschedule_running_task_and_then |_, task| {
let task_cell = Cell::new(task);
do watcher.as_stream().close {
let scheduler = Local::take::<Scheduler>();
@@ -213,6 +315,11 @@ impl Drop for UvTcpListener {
}
}
impl RtioSocket for UvTcpListener {
    // XXX implement — placeholder required by the trait; calling it aborts.
    fn socket_name(&self) -> IpAddr { fail!(); }
}
impl RtioTcpListener for UvTcpListener {
fn accept(&mut self) -> Result<~RtioTcpStreamObject, IoError> {
@@ -229,15 +336,14 @@ impl RtioTcpListener for UvTcpListener {
let incoming_streams_cell = Cell::new(incoming_streams_cell.take());
let mut server_tcp_watcher = server_tcp_watcher;
do server_tcp_watcher.listen |server_stream_watcher, status| {
do server_tcp_watcher.listen |mut server_stream_watcher, status| {
let maybe_stream = if status.is_none() {
let mut server_stream_watcher = server_stream_watcher;
let mut loop_ = server_stream_watcher.event_loop();
let client_tcp_watcher = TcpWatcher::new(&mut loop_);
let client_tcp_watcher = client_tcp_watcher.as_stream();
// XXX: Need's to be surfaced in interface
server_stream_watcher.accept(client_tcp_watcher);
Ok(~UvTcpStream { watcher: client_tcp_watcher })
Ok(~UvTcpStream(client_tcp_watcher))
} else {
Err(standard_error(OtherIoError))
};
@@ -249,25 +355,22 @@ impl RtioTcpListener for UvTcpListener {
return self.incoming_streams.recv();
}
// XXX implement
fn accept_simultaneously(&self) { fail!(); }
fn dont_accept_simultaneously(&self) { fail!(); }
}
// FIXME #6090: Prefer newtype structs but Drop doesn't work
pub struct UvTcpStream {
watcher: StreamWatcher
}
impl UvTcpStream {
fn watcher(&self) -> StreamWatcher { self.watcher }
}
pub struct UvTcpStream(StreamWatcher);
impl Drop for UvTcpStream {
fn drop(&self) {
rtdebug!("closing tcp stream");
let watcher = self.watcher();
let scheduler = Local::take::<Scheduler>();
do scheduler.deschedule_running_task_and_then |task| {
do scheduler.deschedule_running_task_and_then |_, task| {
let task_cell = Cell::new(task);
do watcher.close {
do self.close {
let scheduler = Local::take::<Scheduler>();
scheduler.resume_task_immediately(task_cell.take());
}
@@ -275,34 +378,35 @@ impl Drop for UvTcpStream {
}
}
impl RtioSocket for UvTcpStream {
// XXX implement
fn socket_name(&self) -> IpAddr { fail!(); }
}
impl RtioTcpStream for UvTcpStream {
fn read(&mut self, buf: &mut [u8]) -> Result<uint, IoError> {
fn read(&self, buf: &mut [u8]) -> Result<uint, IoError> {
let result_cell = Cell::new_empty();
let result_cell_ptr: *Cell<Result<uint, IoError>> = &result_cell;
let scheduler = Local::take::<Scheduler>();
assert!(scheduler.in_task_context());
let watcher = self.watcher();
let buf_ptr: *&mut [u8] = &buf;
do scheduler.deschedule_running_task_and_then |task| {
do scheduler.deschedule_running_task_and_then |sched, task| {
rtdebug!("read: entered scheduler context");
do Local::borrow::<Scheduler> |scheduler| {
assert!(!scheduler.in_task_context());
}
let mut watcher = watcher;
assert!(!sched.in_task_context());
let task_cell = Cell::new(task);
// XXX: We shouldn't reallocate these callbacks every
// call to read
let alloc: AllocCallback = |_| unsafe {
slice_to_uv_buf(*buf_ptr)
};
do watcher.read_start(alloc) |watcher, nread, _buf, status| {
let mut watcher = **self;
do watcher.read_start(alloc) |mut watcher, nread, _buf, status| {
// Stop reading so that no read callbacks are
// triggered before the user calls `read` again.
// XXX: Is there a performance impact to calling
// stop here?
let mut watcher = watcher;
watcher.read_stop();
let result = if status.is_none() {
@@ -323,17 +427,16 @@ impl RtioTcpStream for UvTcpStream {
return result_cell.take();
}
fn write(&mut self, buf: &[u8]) -> Result<(), IoError> {
fn write(&self, buf: &[u8]) -> Result<(), IoError> {
let result_cell = Cell::new_empty();
let result_cell_ptr: *Cell<Result<(), IoError>> = &result_cell;
let scheduler = Local::take::<Scheduler>();
assert!(scheduler.in_task_context());
let watcher = self.watcher();
let buf_ptr: *&[u8] = &buf;
do scheduler.deschedule_running_task_and_then |task| {
let mut watcher = watcher;
do scheduler.deschedule_running_task_and_then |_, task| {
let task_cell = Cell::new(task);
let buf = unsafe { slice_to_uv_buf(*buf_ptr) };
let mut watcher = **self;
do watcher.write(buf) |_watcher, status| {
let result = if status.is_none() {
Ok(())
@@ -351,6 +454,112 @@ impl RtioTcpStream for UvTcpStream {
assert!(!result_cell.is_empty());
return result_cell.take();
}
// XXX implement
fn peer_name(&self) -> IpAddr { fail!(); }
fn control_congestion(&self) { fail!(); }
fn nodelay(&self) { fail!(); }
fn keepalive(&self, _delay_in_seconds: uint) { fail!(); }
fn letdie(&self) { fail!(); }
}
/// Task-facing UDP socket: a newtype over the low-level uv watcher.
pub struct UvUdpSocket(UdpWatcher);

impl Drop for UvUdpSocket {
    /// Closing is asynchronous in uv, so dropping blocks the current task
    /// until the close callback has resumed it.
    fn drop(&self) {
        rtdebug!("closing udp socket");
        let scheduler = Local::take::<Scheduler>();
        do scheduler.deschedule_running_task_and_then |_, task| {
            let task_cell = Cell::new(task);
            do self.close {
                let scheduler = Local::take::<Scheduler>();
                scheduler.resume_task_immediately(task_cell.take());
            }
        }
    }
}
impl RtioSocket for UvUdpSocket {
    // XXX implement — placeholder required by the trait; calling it aborts.
    fn socket_name(&self) -> IpAddr { fail!(); }
}
impl RtioUdpSocket for UvUdpSocket {
/// Blocking receive: deschedule this task, arm a one-shot uv recv into the
/// caller's buffer, and resume the task from the recv callback with
/// (bytes read, source address) or the translated uv error.
fn recvfrom(&self, buf: &mut [u8]) -> Result<(uint, IpAddr), IoError> {
    let result_cell = Cell::new_empty();
    // Raw pointer into this task's stack. Safe only because the task stays
    // descheduled (stack alive, frame frozen) until the callback resumes it.
    let result_cell_ptr: *Cell<Result<(uint, IpAddr), IoError>> = &result_cell;
    let scheduler = Local::take::<Scheduler>();
    assert!(scheduler.in_task_context());
    let buf_ptr: *&mut [u8] = &buf;
    do scheduler.deschedule_running_task_and_then |sched, task| {
        rtdebug!("recvfrom: entered scheduler context");
        assert!(!sched.in_task_context());
        let task_cell = Cell::new(task);
        // Hand uv a view of the caller's buffer rather than allocating.
        let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) };
        do self.recv_start(alloc) |watcher, nread, _buf, addr, flags, status| {
            let _ = flags; // XXX add handling for partials?
            // One-shot semantics: stop before any further datagrams arrive.
            watcher.recv_stop();
            let result = match status {
                None => {
                    assert!(nread >= 0);
                    Ok((nread as uint, addr))
                }
                Some(err) => Err(uv_error_to_io_error(err))
            };
            unsafe { (*result_cell_ptr).put_back(result); }
            let scheduler = Local::take::<Scheduler>();
            scheduler.resume_task_immediately(task_cell.take());
        }
    }
    assert!(!result_cell.is_empty());
    return result_cell.take();
}
/// Blocking send: deschedule this task, issue the uv send from scheduler
/// context, and resume with Ok or the translated uv error once the send
/// callback fires.
fn sendto(&self, buf: &[u8], dst: IpAddr) -> Result<(), IoError> {
    let result_cell = Cell::new_empty();
    // Raw pointer into this task's stack; valid because the task is blocked
    // until the send callback resumes it (same pattern as recvfrom).
    let result_cell_ptr: *Cell<Result<(), IoError>> = &result_cell;
    let scheduler = Local::take::<Scheduler>();
    assert!(scheduler.in_task_context());
    let buf_ptr: *&[u8] = &buf;
    do scheduler.deschedule_running_task_and_then |_, task| {
        let task_cell = Cell::new(task);
        let buf = unsafe { slice_to_uv_buf(*buf_ptr) };
        do self.send(buf, dst) |_watcher, status| {
            let result = match status {
                None => Ok(()),
                Some(err) => Err(uv_error_to_io_error(err)),
            };
            unsafe { (*result_cell_ptr).put_back(result); }
            let scheduler = Local::take::<Scheduler>();
            scheduler.resume_task_immediately(task_cell.take());
        }
    }
    assert!(!result_cell.is_empty());
    return result_cell.take();
}
// XXX implement — trait-mandated socket options not yet wired to uv;
// every one of these aborts if called.
fn join_multicast(&self, _multi: IpAddr) { fail!(); }
fn leave_multicast(&self, _multi: IpAddr) { fail!(); }

fn loop_multicast_locally(&self) { fail!(); }
fn dont_loop_multicast_locally(&self) { fail!(); }

fn multicast_time_to_live(&self, _ttl: int) { fail!(); }
fn time_to_live(&self, _ttl: int) { fail!(); }

fn hear_broadcasts(&self) { fail!(); }
fn ignore_broadcasts(&self) { fail!(); }
#[test]
@@ -365,6 +574,18 @@ fn test_simple_io_no_connect() {
}
}
#[test]
/// The IoFactory can bind a UDP socket at all (no traffic exchanged).
fn test_simple_udp_io_bind_only() {
    do run_in_newsched_task {
        unsafe {
            let io = Local::unsafe_borrow::<IoFactoryObject>();
            let addr = next_test_ip4();
            let maybe_socket = (*io).udp_bind(addr);
            assert!(maybe_socket.is_ok());
        }
    }
}
#[test]
fn test_simple_tcp_server_and_client() {
do run_in_newsched_task {
@@ -375,7 +596,7 @@ fn test_simple_tcp_server_and_client() {
unsafe {
let io = Local::unsafe_borrow::<IoFactoryObject>();
let mut listener = (*io).tcp_bind(addr).unwrap();
let mut stream = listener.accept().unwrap();
let stream = listener.accept().unwrap();
let mut buf = [0, .. 2048];
let nread = stream.read(buf).unwrap();
assert_eq!(nread, 8);
@@ -389,13 +610,44 @@ fn test_simple_tcp_server_and_client() {
do spawntask_immediately {
unsafe {
let io = Local::unsafe_borrow::<IoFactoryObject>();
let mut stream = (*io).tcp_connect(addr).unwrap();
let stream = (*io).tcp_connect(addr).unwrap();
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
}
}
}
}
#[test]
/// Two tasks on the new scheduler: one binds a server socket and checks an
/// 8-byte datagram's contents and source; the other binds a client socket
/// and sends the datagram.
fn test_simple_udp_server_and_client() {
    do run_in_newsched_task {
        let server_addr = next_test_ip4();
        let client_addr = next_test_ip4();

        do spawntask_immediately {
            unsafe {
                let io = Local::unsafe_borrow::<IoFactoryObject>();
                let server_socket = (*io).udp_bind(server_addr).unwrap();
                let mut buf = [0, .. 2048];
                let (nread,src) = server_socket.recvfrom(buf).unwrap();
                assert_eq!(nread, 8);
                for uint::range(0, nread) |i| {
                    rtdebug!("%u", buf[i] as uint);
                    assert_eq!(buf[i], i as u8);
                }
                assert_eq!(src, client_addr);
            }
        }

        do spawntask_immediately {
            unsafe {
                let io = Local::unsafe_borrow::<IoFactoryObject>();
                let client_socket = (*io).udp_bind(client_addr).unwrap();
                client_socket.sendto([0, 1, 2, 3, 4, 5, 6, 7], server_addr);
            }
        }
    }
}
#[test] #[ignore(reason = "busted")]
fn test_read_and_block() {
do run_in_newsched_task {
@@ -404,7 +656,7 @@ fn test_read_and_block() {
do spawntask_immediately {
let io = unsafe { Local::unsafe_borrow::<IoFactoryObject>() };
let mut listener = unsafe { (*io).tcp_bind(addr).unwrap() };
let mut stream = listener.accept().unwrap();
let stream = listener.accept().unwrap();
let mut buf = [0, .. 2048];
let expected = 32;
@@ -424,11 +676,9 @@ fn test_read_and_block() {
// Yield to the other task in hopes that it
// will trigger a read callback while we are
// not ready for it
do scheduler.deschedule_running_task_and_then |task| {
do scheduler.deschedule_running_task_and_then |sched, task| {
let task = Cell::new(task);
do Local::borrow::<Scheduler> |scheduler| {
scheduler.enqueue_task(task.take());
}
sched.enqueue_task(task.take());
}
}
@@ -439,7 +689,7 @@ fn test_read_and_block() {
do spawntask_immediately {
unsafe {
let io = Local::unsafe_borrow::<IoFactoryObject>();
let mut stream = (*io).tcp_connect(addr).unwrap();
let stream = (*io).tcp_connect(addr).unwrap();
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
@@ -460,7 +710,7 @@ fn test_read_read_read() {
unsafe {
let io = Local::unsafe_borrow::<IoFactoryObject>();
let mut listener = (*io).tcp_bind(addr).unwrap();
let mut stream = listener.accept().unwrap();
let stream = listener.accept().unwrap();
let buf = [1, .. 2048];
let mut total_bytes_written = 0;
while total_bytes_written < MAX {
@@ -473,7 +723,7 @@ fn test_read_read_read() {
do spawntask_immediately {
unsafe {
let io = Local::unsafe_borrow::<IoFactoryObject>();
let mut stream = (*io).tcp_connect(addr).unwrap();
let stream = (*io).tcp_connect(addr).unwrap();
let mut buf = [0, .. 2048];
let mut total_bytes_read = 0;
while total_bytes_read < MAX {
@@ -489,3 +739,96 @@ fn test_read_read_read() {
}
}
}
#[test]
/// Two consecutive sends from the same client must arrive as two distinct
/// datagrams, in order, each carrying its own payload byte.
fn test_udp_twice() {
    do run_in_newsched_task {
        let server_addr = next_test_ip4();
        let client_addr = next_test_ip4();

        do spawntask_immediately {
            unsafe {
                let io = Local::unsafe_borrow::<IoFactoryObject>();
                let client = (*io).udp_bind(client_addr).unwrap();
                assert!(client.sendto([1], server_addr).is_ok());
                assert!(client.sendto([2], server_addr).is_ok());
            }
        }

        do spawntask_immediately {
            unsafe {
                let io = Local::unsafe_borrow::<IoFactoryObject>();
                let server = (*io).udp_bind(server_addr).unwrap();
                let mut buf1 = [0];
                let mut buf2 = [0];
                let (nread1, src1) = server.recvfrom(buf1).unwrap();
                let (nread2, src2) = server.recvfrom(buf2).unwrap();
                assert_eq!(nread1, 1);
                assert_eq!(nread2, 1);
                assert_eq!(src1, client_addr);
                assert_eq!(src2, client_addr);
                assert_eq!(buf1[0], 1);
                assert_eq!(buf2[0], 2);
            }
        }
    }
}
#[test]
/// Flow-controlled bulk transfer over four UDP sockets: the server streams
/// 2048-byte datagrams of 1s and waits for a 1-byte ack before each next
/// send; the client acks until it has received MAX bytes, then sends a 0
/// to stop the server.
fn test_udp_many_read() {
    do run_in_newsched_task {
        let server_out_addr = next_test_ip4();
        let server_in_addr = next_test_ip4();
        let client_out_addr = next_test_ip4();
        let client_in_addr = next_test_ip4();

        static MAX: uint = 500_000;

        do spawntask_immediately {
            unsafe {
                let io = Local::unsafe_borrow::<IoFactoryObject>();
                let server_out = (*io).udp_bind(server_out_addr).unwrap();
                let server_in = (*io).udp_bind(server_in_addr).unwrap();
                let msg = [1, .. 2048];
                let mut total_bytes_sent = 0;
                let mut buf = [1];
                // ack byte 1 = keep sending; 0 = client is done.
                while buf[0] == 1 {
                    // send more data
                    assert!(server_out.sendto(msg, client_in_addr).is_ok());
                    total_bytes_sent += msg.len();
                    // check if the client has received enough
                    let res = server_in.recvfrom(buf);
                    assert!(res.is_ok());
                    let (nread, src) = res.unwrap();
                    assert_eq!(nread, 1);
                    assert_eq!(src, client_out_addr);
                }
                assert!(total_bytes_sent >= MAX);
            }
        }

        do spawntask_immediately {
            unsafe {
                let io = Local::unsafe_borrow::<IoFactoryObject>();
                let client_out = (*io).udp_bind(client_out_addr).unwrap();
                let client_in = (*io).udp_bind(client_in_addr).unwrap();
                let mut total_bytes_recv = 0;
                let mut buf = [0, .. 2048];
                while total_bytes_recv < MAX {
                    // ask for more
                    assert!(client_out.sendto([1], server_in_addr).is_ok());
                    // wait for data
                    let res = client_in.recvfrom(buf);
                    assert!(res.is_ok());
                    let (nread, src) = res.unwrap();
                    assert_eq!(src, server_out_addr);
                    total_bytes_recv += nread;
                    for uint::range(0, nread) |i| {
                        assert_eq!(buf[i], 1);
                    }
                }
                // tell the server we're done
                assert!(client_out.sendto([0], server_in_addr).is_ok());
            }
        }
    }
}

View File

@@ -60,17 +60,24 @@ pub type uv_handle_t = c_void;
pub type uv_loop_t = c_void;
pub type uv_idle_t = c_void;
pub type uv_tcp_t = c_void;
pub type uv_udp_t = c_void;
pub type uv_connect_t = c_void;
pub type uv_write_t = c_void;
pub type uv_async_t = c_void;
pub type uv_timer_t = c_void;
pub type uv_stream_t = c_void;
pub type uv_fs_t = c_void;
pub type uv_udp_send_t = c_void;
pub type uv_idle_cb = *u8;
pub type uv_alloc_cb = *u8;
pub type uv_udp_send_cb = *u8;
pub type uv_udp_recv_cb = *u8;
pub type sockaddr = c_void;
pub type sockaddr_in = c_void;
pub type sockaddr_in6 = c_void;
pub type uv_membership = c_void;
#[deriving(Eq)]
pub enum uv_handle_type {
@@ -187,31 +194,88 @@ pub unsafe fn idle_stop(handle: *uv_idle_t) -> c_int {
rust_uv_idle_stop(handle)
}
pub unsafe fn udp_init(loop_handle: *uv_loop_t, handle: *uv_udp_t) -> c_int {
return rust_uv_udp_init(loop_handle, handle);
}
pub unsafe fn udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int {
return rust_uv_udp_bind(server, addr, flags);
}
pub unsafe fn udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int {
return rust_uv_udp_bind6(server, addr, flags);
}
pub unsafe fn udp_send<T>(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t],
addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int {
let buf_ptr = vec::raw::to_ptr(buf_in);
let buf_cnt = buf_in.len() as i32;
return rust_uv_udp_send(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb);
}
pub unsafe fn udp_send6<T>(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t],
addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int {
let buf_ptr = vec::raw::to_ptr(buf_in);
let buf_cnt = buf_in.len() as i32;
return rust_uv_udp_send6(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb);
}
pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: uv_alloc_cb,
on_recv: uv_udp_recv_cb) -> c_int {
return rust_uv_udp_recv_start(server, on_alloc, on_recv);
}
pub unsafe fn udp_recv_stop(server: *uv_udp_t) -> c_int {
return rust_uv_udp_recv_stop(server);
}
pub unsafe fn get_udp_handle_from_send_req(send_req: *uv_udp_send_t) -> *uv_udp_t {
return rust_uv_get_udp_handle_from_send_req(send_req);
}
pub unsafe fn udp_get_sockname(handle: *uv_udp_t, name: *sockaddr_in) -> c_int {
return rust_uv_udp_getsockname(handle, name);
}
pub unsafe fn udp_get_sockname6(handle: *uv_udp_t, name: *sockaddr_in6) -> c_int {
return rust_uv_udp_getsockname6(handle, name);
}
pub unsafe fn udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char,
interface_addr: *c_char, membership: uv_membership) -> c_int {
return rust_uv_udp_set_membership(handle, multicast_addr, interface_addr, membership);
}
pub unsafe fn udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int {
return rust_uv_udp_set_multicast_loop(handle, on);
}
pub unsafe fn udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int {
return rust_uv_udp_set_multicast_ttl(handle, ttl);
}
pub unsafe fn udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int {
return rust_uv_udp_set_broadcast(handle, on);
}
pub unsafe fn tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int {
return rust_uv_tcp_init(loop_handle, handle);
}
// FIXME ref #2064
pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t,
tcp_handle_ptr: *uv_tcp_t,
addr_ptr: *sockaddr_in,
after_connect_cb: *u8) -> c_int {
return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr,
after_connect_cb, addr_ptr);
pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t,
addr_ptr: *sockaddr_in, after_connect_cb: *u8) -> c_int {
return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr);
}
// FIXME ref #2064
pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t,
tcp_handle_ptr: *uv_tcp_t,
addr_ptr: *sockaddr_in6,
after_connect_cb: *u8) -> c_int {
return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr,
after_connect_cb, addr_ptr);
pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t,
addr_ptr: *sockaddr_in6, after_connect_cb: *u8) -> c_int {
return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr);
}
// FIXME ref #2064
pub unsafe fn tcp_bind(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in) -> c_int {
return rust_uv_tcp_bind(tcp_server_ptr, addr_ptr);
}
// FIXME ref #2064
pub unsafe fn tcp_bind6(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in6) -> c_int {
return rust_uv_tcp_bind6(tcp_server_ptr, addr_ptr);
}
@@ -224,6 +288,26 @@ pub unsafe fn tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in6) -
return rust_uv_tcp_getpeername6(tcp_handle_ptr, name);
}
pub unsafe fn tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_in) -> c_int {
return rust_uv_tcp_getsockname(handle, name);
}
pub unsafe fn tcp_getsockname6(handle: *uv_tcp_t, name: *sockaddr_in6) -> c_int {
return rust_uv_tcp_getsockname6(handle, name);
}
pub unsafe fn tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int {
return rust_uv_tcp_nodelay(handle, enable);
}
pub unsafe fn tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int {
return rust_uv_tcp_keepalive(handle, enable, delay);
}
pub unsafe fn tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int {
return rust_uv_tcp_simultaneous_accepts(handle, enable);
}
pub unsafe fn listen<T>(stream: *T, backlog: c_int, cb: *u8) -> c_int {
return rust_uv_listen(stream as *c_void, backlog, cb);
}
@@ -237,7 +321,7 @@ pub unsafe fn write<T>(req: *uv_write_t, stream: *T, buf_in: &[uv_buf_t], cb: *u
let buf_cnt = buf_in.len() as i32;
return rust_uv_write(req as *c_void, stream as *c_void, buf_ptr, buf_cnt, cb);
}
pub unsafe fn read_start(stream: *uv_stream_t, on_alloc: *u8, on_read: *u8) -> c_int {
pub unsafe fn read_start(stream: *uv_stream_t, on_alloc: uv_alloc_cb, on_read: *u8) -> c_int {
return rust_uv_read_start(stream as *c_void, on_alloc, on_read);
}
@@ -281,6 +365,22 @@ pub unsafe fn timer_stop(timer_ptr: *uv_timer_t) -> c_int {
return rust_uv_timer_stop(timer_ptr);
}
pub unsafe fn is_ip4_addr(addr: *sockaddr) -> bool {
match rust_uv_is_ipv4_sockaddr(addr) { 0 => false, _ => true }
}
pub unsafe fn is_ip6_addr(addr: *sockaddr) -> bool {
match rust_uv_is_ipv6_sockaddr(addr) { 0 => false, _ => true }
}
pub unsafe fn as_sockaddr_in(addr: *sockaddr) -> *sockaddr_in {
return rust_uv_sockaddr_as_sockaddr_in(addr);
}
pub unsafe fn as_sockaddr_in6(addr: *sockaddr) -> *sockaddr_in6 {
return rust_uv_sockaddr_as_sockaddr_in6(addr);
}
pub unsafe fn malloc_ip4_addr(ip: &str, port: int) -> *sockaddr_in {
do str::as_c_str(ip) |ip_buf| {
rust_uv_ip4_addrp(ip_buf as *u8, port as libc::c_int)
@@ -300,6 +400,22 @@ pub unsafe fn free_ip6_addr(addr: *sockaddr_in6) {
rust_uv_free_ip6_addr(addr);
}
pub unsafe fn ip4_name(addr: *sockaddr_in, dst: *u8, size: size_t) -> c_int {
return rust_uv_ip4_name(addr, dst, size);
}
pub unsafe fn ip6_name(addr: *sockaddr_in6, dst: *u8, size: size_t) -> c_int {
return rust_uv_ip6_name(addr, dst, size);
}
pub unsafe fn ip4_port(addr: *sockaddr_in) -> c_uint {
return rust_uv_ip4_port(addr);
}
pub unsafe fn ip6_port(addr: *sockaddr_in6) -> c_uint {
return rust_uv_ip6_port(addr);
}
// data access helpers
pub unsafe fn get_loop_for_uv_handle<T>(handle: *T) -> *c_void {
return rust_uv_get_loop_for_uv_handle(handle as *c_void);
@@ -384,16 +500,11 @@ extern {
fn rust_uv_idle_stop(handle: *uv_idle_t) -> c_int;
fn rust_uv_async_send(handle: *uv_async_t);
fn rust_uv_async_init(loop_handle: *c_void,
async_handle: *uv_async_t,
cb: *u8) -> c_int;
fn rust_uv_async_init(loop_handle: *c_void, async_handle: *uv_async_t, cb: *u8) -> c_int;
fn rust_uv_tcp_init(loop_handle: *c_void, handle_ptr: *uv_tcp_t) -> c_int;
// FIXME ref #2604 .. ?
fn rust_uv_buf_init(out_buf: *uv_buf_t, base: *u8, len: size_t);
fn rust_uv_last_error(loop_handle: *c_void) -> uv_err_t;
// FIXME ref #2064
fn rust_uv_strerror(err: *uv_err_t) -> *c_char;
// FIXME ref #2064
fn rust_uv_err_name(err: *uv_err_t) -> *c_char;
fn rust_uv_ip4_addrp(ip: *u8, port: c_int) -> *sockaddr_in;
fn rust_uv_ip6_addrp(ip: *u8, port: c_int) -> *sockaddr_in6;
@@ -403,40 +514,51 @@ extern {
fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: size_t) -> c_int;
fn rust_uv_ip4_port(src: *sockaddr_in) -> c_uint;
fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint;
// FIXME ref #2064
fn rust_uv_tcp_connect(connect_ptr: *uv_connect_t,
tcp_handle_ptr: *uv_tcp_t,
after_cb: *u8,
fn rust_uv_tcp_connect(req: *uv_connect_t, handle: *uv_tcp_t, cb: *u8,
addr: *sockaddr_in) -> c_int;
// FIXME ref #2064
fn rust_uv_tcp_bind(tcp_server: *uv_tcp_t, addr: *sockaddr_in) -> c_int;
// FIXME ref #2064
fn rust_uv_tcp_connect6(connect_ptr: *uv_connect_t,
tcp_handle_ptr: *uv_tcp_t,
after_cb: *u8,
fn rust_uv_tcp_connect6(req: *uv_connect_t, handle: *uv_tcp_t, cb: *u8,
addr: *sockaddr_in6) -> c_int;
// FIXME ref #2064
fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int;
fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t,
name: *sockaddr_in) -> c_int;
fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t,
name: *sockaddr_in6) ->c_int;
fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in) -> c_int;
fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in6) ->c_int;
fn rust_uv_tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_in) -> c_int;
fn rust_uv_tcp_getsockname6(handle: *uv_tcp_t, name: *sockaddr_in6) -> c_int;
fn rust_uv_tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int;
fn rust_uv_tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int;
fn rust_uv_tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int;
fn rust_uv_udp_init(loop_handle: *uv_loop_t, handle_ptr: *uv_udp_t) -> c_int;
fn rust_uv_udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int;
fn rust_uv_udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int;
fn rust_uv_udp_send(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t,
buf_cnt: c_int, addr: *sockaddr_in, cb: *u8) -> c_int;
fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t,
buf_cnt: c_int, addr: *sockaddr_in6, cb: *u8) -> c_int;
fn rust_uv_udp_recv_start(server: *uv_udp_t, on_alloc: *u8, on_recv: *u8) -> c_int;
fn rust_uv_udp_recv_stop(server: *uv_udp_t) -> c_int;
fn rust_uv_get_udp_handle_from_send_req(req: *uv_udp_send_t) -> *uv_udp_t;
fn rust_uv_udp_getsockname(handle: *uv_udp_t, name: *sockaddr_in) -> c_int;
fn rust_uv_udp_getsockname6(handle: *uv_udp_t, name: *sockaddr_in6) -> c_int;
fn rust_uv_udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char,
interface_addr: *c_char, membership: uv_membership) -> c_int;
fn rust_uv_udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int;
fn rust_uv_udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int;
fn rust_uv_udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int;
fn rust_uv_is_ipv4_sockaddr(addr: *sockaddr) -> c_int;
fn rust_uv_is_ipv6_sockaddr(addr: *sockaddr) -> c_int;
fn rust_uv_sockaddr_as_sockaddr_in(addr: *sockaddr) -> *sockaddr_in;
fn rust_uv_sockaddr_as_sockaddr_in6(addr: *sockaddr) -> *sockaddr_in6;
fn rust_uv_listen(stream: *c_void, backlog: c_int, cb: *u8) -> c_int;
fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int;
fn rust_uv_write(req: *c_void,
stream: *c_void,
buf_in: *uv_buf_t,
buf_cnt: c_int,
fn rust_uv_write(req: *c_void, stream: *c_void, buf_in: *uv_buf_t, buf_cnt: c_int,
cb: *u8) -> c_int;
fn rust_uv_read_start(stream: *c_void,
on_alloc: *u8,
on_read: *u8) -> c_int;
fn rust_uv_read_start(stream: *c_void, on_alloc: *u8, on_read: *u8) -> c_int;
fn rust_uv_read_stop(stream: *c_void) -> c_int;
fn rust_uv_timer_init(loop_handle: *c_void,
timer_handle: *uv_timer_t) -> c_int;
fn rust_uv_timer_start(timer_handle: *uv_timer_t,
cb: *u8,
timeout: libc::uint64_t,
fn rust_uv_timer_init(loop_handle: *c_void, timer_handle: *uv_timer_t) -> c_int;
fn rust_uv_timer_start(timer_handle: *uv_timer_t, cb: *u8, timeout: libc::uint64_t,
repeat: libc::uint64_t) -> c_int;
fn rust_uv_timer_stop(handle: *uv_timer_t) -> c_int;

View File

@@ -1,458 +0,0 @@
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use option::*;
use result::*;
use super::io::net::ip::IpAddr;
use super::uv::*;
use super::rtio::*;
use ops::Drop;
use cell::Cell;
use cast::transmute;
use super::sched::{Scheduler, local_sched};
#[cfg(test)] use container::Container;
#[cfg(test)] use uint;
#[cfg(test)] use unstable::run_in_bare_thread;
#[cfg(test)] use super::test::*;
pub struct UvEventLoop {
uvio: UvIoFactory
}
impl UvEventLoop {
pub fn new() -> UvEventLoop {
UvEventLoop {
uvio: UvIoFactory(Loop::new())
}
}
/// A convenience constructor
pub fn new_scheduler() -> Scheduler {
Scheduler::new(~UvEventLoop::new())
}
}
impl Drop for UvEventLoop {
fn drop(&self) {
// XXX: Need mutable finalizer
let this = unsafe {
transmute::<&UvEventLoop, &mut UvEventLoop>(self)
};
this.uvio.uv_loop().close();
}
}
impl EventLoop for UvEventLoop {
fn run(&mut self) {
self.uvio.uv_loop().run();
}
fn callback(&mut self, f: ~fn()) {
let mut idle_watcher = IdleWatcher::new(self.uvio.uv_loop());
do idle_watcher.start |idle_watcher, status| {
assert!(status.is_none());
let mut idle_watcher = idle_watcher;
idle_watcher.stop();
idle_watcher.close();
f();
}
}
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactoryObject> {
Some(&mut self.uvio)
}
}
#[test]
fn test_callback_run_once() {
do run_in_bare_thread {
let mut event_loop = UvEventLoop::new();
let mut count = 0;
let count_ptr: *mut int = &mut count;
do event_loop.callback {
unsafe { *count_ptr += 1 }
}
event_loop.run();
assert!(count == 1);
}
}
pub struct UvIoFactory(Loop);
impl UvIoFactory {
pub fn uv_loop<'a>(&'a mut self) -> &'a mut Loop {
match self { &UvIoFactory(ref mut ptr) => ptr }
}
}
impl IoFactory for UvIoFactory {
// Connect to an address and return a new stream
// NB: This blocks the task waiting on the connection.
// It would probably be better to return a future
fn connect(&mut self, addr: IpAddr) -> Option<~StreamObject> {
// Create a cell in the task to hold the result. We will fill
// the cell before resuming the task.
let result_cell = Cell::new_empty();
let result_cell_ptr: *Cell<Option<~StreamObject>> = &result_cell;
let scheduler = local_sched::take();
assert!(scheduler.in_task_context());
// Block this task and take ownership, switch to scheduler context
do scheduler.deschedule_running_task_and_then |task| {
rtdebug!("connect: entered scheduler context");
do local_sched::borrow |scheduler| {
assert!(!scheduler.in_task_context());
}
let mut tcp_watcher = TcpWatcher::new(self.uv_loop());
let task_cell = Cell::new(task);
// Wait for a connection
do tcp_watcher.connect(addr) |stream_watcher, status| {
rtdebug!("connect: in connect callback");
let maybe_stream = if status.is_none() {
rtdebug!("status is none");
Some(~UvStream(stream_watcher))
} else {
rtdebug!("status is some");
stream_watcher.close(||());
None
};
// Store the stream in the task's stack
unsafe { (*result_cell_ptr).put_back(maybe_stream); }
// Context switch
let scheduler = local_sched::take();
scheduler.resume_task_immediately(task_cell.take());
}
}
assert!(!result_cell.is_empty());
return result_cell.take();
}
fn bind(&mut self, addr: IpAddr) -> Option<~TcpListenerObject> {
let mut watcher = TcpWatcher::new(self.uv_loop());
watcher.bind(addr);
return Some(~UvTcpListener(watcher));
}
}
pub struct UvTcpListener(TcpWatcher);
impl UvTcpListener {
fn watcher(&self) -> TcpWatcher {
match self { &UvTcpListener(w) => w }
}
fn close(&self) {
// XXX: Need to wait until close finishes before returning
self.watcher().as_stream().close(||());
}
}
impl Drop for UvTcpListener {
fn drop(&self) {
// XXX: Again, this never gets called. Use .close() instead
//self.watcher().as_stream().close(||());
}
}
impl TcpListener for UvTcpListener {
fn listen(&mut self) -> Option<~StreamObject> {
rtdebug!("entering listen");
let result_cell = Cell::new_empty();
let result_cell_ptr: *Cell<Option<~StreamObject>> = &result_cell;
let server_tcp_watcher = self.watcher();
let scheduler = local_sched::take();
assert!(scheduler.in_task_context());
do scheduler.deschedule_running_task_and_then |task| {
let task_cell = Cell::new(task);
let mut server_tcp_watcher = server_tcp_watcher;
do server_tcp_watcher.listen |server_stream_watcher, status| {
let maybe_stream = if status.is_none() {
let mut server_stream_watcher = server_stream_watcher;
let mut loop_ = loop_from_watcher(&server_stream_watcher);
let client_tcp_watcher = TcpWatcher::new(&mut loop_).as_stream();
// XXX: Needs to be surfaced in interface
server_stream_watcher.accept(client_tcp_watcher);
Some(~UvStream::new(client_tcp_watcher))
} else {
None
};
unsafe { (*result_cell_ptr).put_back(maybe_stream); }
rtdebug!("resuming task from listen");
// Context switch
let scheduler = local_sched::take();
scheduler.resume_task_immediately(task_cell.take());
}
}
assert!(!result_cell.is_empty());
return result_cell.take();
}
}
pub struct UvStream(StreamWatcher);
impl UvStream {
fn new(watcher: StreamWatcher) -> UvStream {
UvStream(watcher)
}
fn watcher(&self) -> StreamWatcher {
match self { &UvStream(w) => w }
}
// XXX: finalize isn't working for ~UvStream???
fn close(&self) {
// XXX: Need to wait until this finishes before returning
self.watcher().close(||());
}
}
impl Drop for UvStream {
fn drop(&self) {
rtdebug!("closing stream");
//self.watcher().close(||());
}
}
impl Stream for UvStream {
fn read(&mut self, buf: &mut [u8]) -> Result<uint, ()> {
let result_cell = Cell::new_empty();
let result_cell_ptr: *Cell<Result<uint, ()>> = &result_cell;
let scheduler = local_sched::take();
assert!(scheduler.in_task_context());
let watcher = self.watcher();
let buf_ptr: *&mut [u8] = &buf;
do scheduler.deschedule_running_task_and_then |task| {
rtdebug!("read: entered scheduler context");
do local_sched::borrow |scheduler| {
assert!(!scheduler.in_task_context());
}
let mut watcher = watcher;
let task_cell = Cell::new(task);
// XXX: We shouldn't reallocate these callbacks every
// call to read
let alloc: AllocCallback = |_| unsafe {
slice_to_uv_buf(*buf_ptr)
};
do watcher.read_start(alloc) |watcher, nread, _buf, status| {
// Stop reading so that no read callbacks are
// triggered before the user calls `read` again.
// XXX: Is there a performance impact to calling
// stop here?
let mut watcher = watcher;
watcher.read_stop();
let result = if status.is_none() {
assert!(nread >= 0);
Ok(nread as uint)
} else {
Err(())
};
unsafe { (*result_cell_ptr).put_back(result); }
let scheduler = local_sched::take();
scheduler.resume_task_immediately(task_cell.take());
}
}
assert!(!result_cell.is_empty());
return result_cell.take();
}
fn write(&mut self, buf: &[u8]) -> Result<(), ()> {
let result_cell = Cell::new_empty();
let result_cell_ptr: *Cell<Result<(), ()>> = &result_cell;
let scheduler = local_sched::take();
assert!(scheduler.in_task_context());
let watcher = self.watcher();
let buf_ptr: *&[u8] = &buf;
do scheduler.deschedule_running_task_and_then |task| {
let mut watcher = watcher;
let task_cell = Cell::new(task);
let buf = unsafe { &*buf_ptr };
// XXX: OMGCOPIES
let buf = buf.to_vec();
do watcher.write(buf) |_watcher, status| {
let result = if status.is_none() {
Ok(())
} else {
Err(())
};
unsafe { (*result_cell_ptr).put_back(result); }
let scheduler = local_sched::take();
scheduler.resume_task_immediately(task_cell.take());
}
}
assert!(!result_cell.is_empty());
return result_cell.take();
}
}
#[test]
fn test_simple_io_no_connect() {
do run_in_newsched_task {
let io = unsafe { local_sched::unsafe_borrow_io() };
let addr = next_test_ip4();
let maybe_chan = io.connect(addr);
assert!(maybe_chan.is_none());
}
}
#[test]
fn test_simple_tcp_server_and_client() {
do run_in_newsched_task {
let addr = next_test_ip4();
// Start the server first so it's listening when we connect
do spawntask_immediately {
unsafe {
let io = local_sched::unsafe_borrow_io();
let mut listener = io.bind(addr).unwrap();
let mut stream = listener.listen().unwrap();
let mut buf = [0, .. 2048];
let nread = stream.read(buf).unwrap();
assert!(nread == 8);
for uint::range(0, nread) |i| {
rtdebug!("%u", buf[i] as uint);
assert!(buf[i] == i as u8);
}
stream.close();
listener.close();
}
}
do spawntask_immediately {
unsafe {
let io = local_sched::unsafe_borrow_io();
let mut stream = io.connect(addr).unwrap();
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
stream.close();
}
}
}
}
#[test] #[ignore(reason = "busted")]
fn test_read_and_block() {
do run_in_newsched_task {
let addr = next_test_ip4();
do spawntask_immediately {
let io = unsafe { local_sched::unsafe_borrow_io() };
let mut listener = io.bind(addr).unwrap();
let mut stream = listener.listen().unwrap();
let mut buf = [0, .. 2048];
let expected = 32;
let mut current = 0;
let mut reads = 0;
while current < expected {
let nread = stream.read(buf).unwrap();
for uint::range(0, nread) |i| {
let val = buf[i] as uint;
assert!(val == current % 8);
current += 1;
}
reads += 1;
let scheduler = local_sched::take();
// Yield to the other task in hopes that it
// will trigger a read callback while we are
// not ready for it
do scheduler.deschedule_running_task_and_then |task| {
let task = Cell::new(task);
do local_sched::borrow |scheduler| {
scheduler.task_queue.push_back(task.take());
}
}
}
// Make sure we had multiple reads
assert!(reads > 1);
stream.close();
listener.close();
}
do spawntask_immediately {
let io = unsafe { local_sched::unsafe_borrow_io() };
let mut stream = io.connect(addr).unwrap();
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
stream.write([0, 1, 2, 3, 4, 5, 6, 7]);
stream.close();
}
}
}
#[test]
fn test_read_read_read() {
do run_in_newsched_task {
let addr = next_test_ip4();
static MAX: uint = 500000;
do spawntask_immediately {
unsafe {
let io = local_sched::unsafe_borrow_io();
let mut listener = io.bind(addr).unwrap();
let mut stream = listener.listen().unwrap();
let buf = [1, .. 2048];
let mut total_bytes_written = 0;
while total_bytes_written < MAX {
stream.write(buf);
total_bytes_written += buf.len();
}
stream.close();
listener.close();
}
}
do spawntask_immediately {
let io = unsafe { local_sched::unsafe_borrow_io() };
let mut stream = io.connect(addr).unwrap();
let mut buf = [0, .. 2048];
let mut total_bytes_read = 0;
while total_bytes_read < MAX {
let nread = stream.read(buf).unwrap();
rtdebug!("read %u bytes", nread as uint);
total_bytes_read += nread;
for uint::range(0, nread) |i| {
assert!(buf[i] == 1);
}
}
rtdebug!("read %u bytes total", total_bytes_read as uint);
stream.close();
}
}
}

View File

@@ -1,443 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Low-level bindings to the libuv library.
*
* This module contains a set of direct, 'bare-metal' wrappers around
* the libuv C-API.
*
* We're not bothering yet to redefine uv's structs as Rust structs
* because they are quite large and change often between versions.
* The maintenance burden is just too high. Instead we use the uv's
* `uv_handle_size` and `uv_req_size` to find the correct size of the
* structs and allocate them on the heap. This can be revisited later.
*
* There are also a collection of helper functions to ease interacting
* with the low-level API.
*
* As new functionality, existant in uv.h, is added to the rust stdlib,
* the mappings should be added in this module.
*/
#[allow(non_camel_case_types)]; // C types
use libc::{size_t, c_int, c_uint, c_void, c_char, uintptr_t};
use libc::{malloc, free};
use prelude::*;
pub struct uv_err_t {
code: c_int,
sys_errno_: c_int
}
pub struct uv_buf_t {
base: *u8,
len: libc::size_t,
}
pub type uv_handle_t = c_void;
pub type uv_loop_t = c_void;
pub type uv_idle_t = c_void;
pub type uv_tcp_t = c_void;
pub type uv_connect_t = c_void;
pub type uv_write_t = c_void;
pub type uv_async_t = c_void;
pub type uv_timer_t = c_void;
pub type uv_stream_t = c_void;
pub type uv_fs_t = c_void;
pub type uv_idle_cb = *u8;
pub type sockaddr_in = c_void;
pub type sockaddr_in6 = c_void;
#[deriving(Eq)]
pub enum uv_handle_type {
UV_UNKNOWN_HANDLE,
UV_ASYNC,
UV_CHECK,
UV_FS_EVENT,
UV_FS_POLL,
UV_HANDLE,
UV_IDLE,
UV_NAMED_PIPE,
UV_POLL,
UV_PREPARE,
UV_PROCESS,
UV_STREAM,
UV_TCP,
UV_TIMER,
UV_TTY,
UV_UDP,
UV_SIGNAL,
UV_FILE,
UV_HANDLE_TYPE_MAX
}
#[deriving(Eq)]
pub enum uv_req_type {
UV_UNKNOWN_REQ,
UV_REQ,
UV_CONNECT,
UV_WRITE,
UV_SHUTDOWN,
UV_UDP_SEND,
UV_FS,
UV_WORK,
UV_GETADDRINFO,
UV_REQ_TYPE_MAX
}
pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void {
assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX);
let size = rust_uv_handle_size(handle as uint);
let p = malloc(size);
assert!(p.is_not_null());
return p;
}
pub unsafe fn free_handle(v: *c_void) {
free(v)
}
pub unsafe fn malloc_req(req: uv_req_type) -> *c_void {
assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX);
let size = rust_uv_req_size(req as uint);
let p = malloc(size);
assert!(p.is_not_null());
return p;
}
pub unsafe fn free_req(v: *c_void) {
free(v)
}
#[test]
fn handle_sanity_check() {
unsafe {
assert!(UV_HANDLE_TYPE_MAX as uint == rust_uv_handle_type_max());
}
}
#[test]
fn request_sanity_check() {
unsafe {
assert!(UV_REQ_TYPE_MAX as uint == rust_uv_req_type_max());
}
}
pub unsafe fn loop_new() -> *c_void {
return rust_uv_loop_new();
}
pub unsafe fn loop_delete(loop_handle: *c_void) {
rust_uv_loop_delete(loop_handle);
}
pub unsafe fn run(loop_handle: *c_void) {
rust_uv_run(loop_handle);
}
pub unsafe fn close<T>(handle: *T, cb: *u8) {
rust_uv_close(handle as *c_void, cb);
}
pub unsafe fn walk(loop_handle: *c_void, cb: *u8, arg: *c_void) {
rust_uv_walk(loop_handle, cb, arg);
}
pub unsafe fn idle_new() -> *uv_idle_t {
rust_uv_idle_new()
}
pub unsafe fn idle_delete(handle: *uv_idle_t) {
rust_uv_idle_delete(handle)
}
pub unsafe fn idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int {
rust_uv_idle_init(loop_handle, handle)
}
pub unsafe fn idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int {
rust_uv_idle_start(handle, cb)
}
pub unsafe fn idle_stop(handle: *uv_idle_t) -> c_int {
rust_uv_idle_stop(handle)
}
pub unsafe fn tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int {
return rust_uv_tcp_init(loop_handle, handle);
}
// FIXME ref #2064
pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t,
tcp_handle_ptr: *uv_tcp_t,
addr_ptr: *sockaddr_in,
after_connect_cb: *u8) -> c_int {
return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr,
after_connect_cb, addr_ptr);
}
// FIXME ref #2064
pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t,
tcp_handle_ptr: *uv_tcp_t,
addr_ptr: *sockaddr_in6,
after_connect_cb: *u8) -> c_int {
return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr,
after_connect_cb, addr_ptr);
}
// FIXME ref #2064
pub unsafe fn tcp_bind(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in) -> c_int {
return rust_uv_tcp_bind(tcp_server_ptr, addr_ptr);
}
// FIXME ref #2064
pub unsafe fn tcp_bind6(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in6) -> c_int {
return rust_uv_tcp_bind6(tcp_server_ptr, addr_ptr);
}
pub unsafe fn tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in) -> c_int {
return rust_uv_tcp_getpeername(tcp_handle_ptr, name);
}
pub unsafe fn tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in6) ->c_int {
return rust_uv_tcp_getpeername6(tcp_handle_ptr, name);
}
pub unsafe fn listen<T>(stream: *T, backlog: c_int, cb: *u8) -> c_int {
return rust_uv_listen(stream as *c_void, backlog, cb);
}
pub unsafe fn accept(server: *c_void, client: *c_void) -> c_int {
return rust_uv_accept(server as *c_void, client as *c_void);
}
pub unsafe fn write<T>(req: *uv_write_t, stream: *T, buf_in: &[uv_buf_t], cb: *u8) -> c_int {
let buf_ptr = vec::raw::to_ptr(buf_in);
let buf_cnt = buf_in.len() as i32;
return rust_uv_write(req as *c_void, stream as *c_void, buf_ptr, buf_cnt, cb);
}
pub unsafe fn read_start(stream: *uv_stream_t, on_alloc: *u8, on_read: *u8) -> c_int {
return rust_uv_read_start(stream as *c_void, on_alloc, on_read);
}
pub unsafe fn read_stop(stream: *uv_stream_t) -> c_int {
return rust_uv_read_stop(stream as *c_void);
}
pub unsafe fn last_error(loop_handle: *c_void) -> uv_err_t {
return rust_uv_last_error(loop_handle);
}
pub unsafe fn strerror(err: *uv_err_t) -> *c_char {
return rust_uv_strerror(err);
}
pub unsafe fn err_name(err: *uv_err_t) -> *c_char {
return rust_uv_err_name(err);
}
pub unsafe fn async_init(loop_handle: *c_void, async_handle: *uv_async_t, cb: *u8) -> c_int {
return rust_uv_async_init(loop_handle, async_handle, cb);
}
pub unsafe fn async_send(async_handle: *uv_async_t) {
return rust_uv_async_send(async_handle);
}
pub unsafe fn buf_init(input: *u8, len: uint) -> uv_buf_t {
let out_buf = uv_buf_t { base: ptr::null(), len: 0 as size_t };
let out_buf_ptr = ptr::to_unsafe_ptr(&out_buf);
rust_uv_buf_init(out_buf_ptr, input, len as size_t);
return out_buf;
}
pub unsafe fn timer_init(loop_ptr: *c_void, timer_ptr: *uv_timer_t) -> c_int {
return rust_uv_timer_init(loop_ptr, timer_ptr);
}
pub unsafe fn timer_start(timer_ptr: *uv_timer_t, cb: *u8, timeout: uint,
repeat: uint) -> c_int {
return rust_uv_timer_start(timer_ptr, cb, timeout as c_uint, repeat as c_uint);
}
pub unsafe fn timer_stop(timer_ptr: *uv_timer_t) -> c_int {
return rust_uv_timer_stop(timer_ptr);
}
pub unsafe fn malloc_ip4_addr(ip: &str, port: int) -> *sockaddr_in {
do str::as_c_str(ip) |ip_buf| {
rust_uv_ip4_addrp(ip_buf as *u8, port as libc::c_int)
}
}
pub unsafe fn malloc_ip6_addr(ip: &str, port: int) -> *sockaddr_in6 {
do str::as_c_str(ip) |ip_buf| {
rust_uv_ip6_addrp(ip_buf as *u8, port as libc::c_int)
}
}
pub unsafe fn free_ip4_addr(addr: *sockaddr_in) {
rust_uv_free_ip4_addr(addr);
}
pub unsafe fn free_ip6_addr(addr: *sockaddr_in6) {
rust_uv_free_ip6_addr(addr);
}
// data access helpers
pub unsafe fn get_loop_for_uv_handle<T>(handle: *T) -> *c_void {
return rust_uv_get_loop_for_uv_handle(handle as *c_void);
}
pub unsafe fn get_stream_handle_from_connect_req(connect: *uv_connect_t) -> *uv_stream_t {
return rust_uv_get_stream_handle_from_connect_req(connect);
}
pub unsafe fn get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t {
return rust_uv_get_stream_handle_from_write_req(write_req);
}
pub unsafe fn get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void {
rust_uv_get_data_for_uv_loop(loop_ptr)
}
pub unsafe fn set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void) {
rust_uv_set_data_for_uv_loop(loop_ptr, data);
}
pub unsafe fn get_data_for_uv_handle<T>(handle: *T) -> *c_void {
return rust_uv_get_data_for_uv_handle(handle as *c_void);
}
pub unsafe fn set_data_for_uv_handle<T, U>(handle: *T, data: *U) {
rust_uv_set_data_for_uv_handle(handle as *c_void, data as *c_void);
}
pub unsafe fn get_data_for_req<T>(req: *T) -> *c_void {
return rust_uv_get_data_for_req(req as *c_void);
}
pub unsafe fn set_data_for_req<T, U>(req: *T, data: *U) {
rust_uv_set_data_for_req(req as *c_void, data as *c_void);
}
pub unsafe fn get_base_from_buf(buf: uv_buf_t) -> *u8 {
return rust_uv_get_base_from_buf(buf);
}
pub unsafe fn get_len_from_buf(buf: uv_buf_t) -> size_t {
return rust_uv_get_len_from_buf(buf);
}
pub unsafe fn malloc_buf_base_of(suggested_size: size_t) -> *u8 {
return rust_uv_malloc_buf_base_of(suggested_size);
}
pub unsafe fn free_base_of_buf(buf: uv_buf_t) {
rust_uv_free_base_of_buf(buf);
}
/// Format the most recent libuv error on `uv_loop` as a human-readable
/// string: "LIBUV ERROR: name: &lt;err_name&gt; msg: &lt;strerror&gt;".
///
/// Delegates to `get_last_err_data` so the error-extraction logic
/// (last_error / err_name / strerror) lives in exactly one place.
pub unsafe fn get_last_err_info(uv_loop: *c_void) -> ~str {
    let err_data = get_last_err_data(uv_loop);
    return fmt!("LIBUV ERROR: name: %s msg: %s",
                    err_data.err_name, err_data.err_msg);
}
/// Fetch the most recent libuv error on `uv_loop` as structured data:
/// the short error name (e.g. from uv_err_name) and the descriptive
/// message (from uv_strerror), both copied into owned strings.
pub unsafe fn get_last_err_data(uv_loop: *c_void) -> uv_err_data {
    let err = last_error(uv_loop);
    // The C shims take the error struct by pointer.
    let err_ptr = ptr::to_unsafe_ptr(&err);
    let err_name = str::raw::from_c_str(err_name(err_ptr));
    let err_msg = str::raw::from_c_str(strerror(err_ptr));
    uv_err_data { err_name: err_name, err_msg: err_msg }
}
/// Owned, structured form of a libuv error, decoupled from the
/// underlying C `uv_err_t`.
pub struct uv_err_data {
    // Short error name, e.g. "ECONNREFUSED" (from uv_err_name).
    err_name: ~str,
    // Human-readable description (from uv_strerror).
    err_msg: ~str,
}
// Declarations for the C shim layer (rust_uv.cpp) wrapping libuv.
extern {
    // Size/type-count queries used to allocate handle and request
    // memory from the Rust side.
    fn rust_uv_handle_size(type_: uintptr_t) -> size_t;
    fn rust_uv_req_size(type_: uintptr_t) -> size_t;
    fn rust_uv_handle_type_max() -> uintptr_t;
    fn rust_uv_req_type_max() -> uintptr_t;
    // libuv public API
    fn rust_uv_loop_new() -> *c_void;
    fn rust_uv_loop_delete(lp: *c_void);
    fn rust_uv_run(loop_handle: *c_void);
    fn rust_uv_close(handle: *c_void, cb: *u8);
    fn rust_uv_walk(loop_handle: *c_void, cb: *u8, arg: *c_void);
    // Idle handles
    fn rust_uv_idle_new() -> *uv_idle_t;
    fn rust_uv_idle_delete(handle: *uv_idle_t);
    fn rust_uv_idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int;
    fn rust_uv_idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int;
    fn rust_uv_idle_stop(handle: *uv_idle_t) -> c_int;
    // Async wakeups
    fn rust_uv_async_send(handle: *uv_async_t);
    fn rust_uv_async_init(loop_handle: *c_void,
                          async_handle: *uv_async_t,
                          cb: *u8) -> c_int;
    fn rust_uv_tcp_init(loop_handle: *c_void, handle_ptr: *uv_tcp_t) -> c_int;
    // FIXME ref #2604 .. ?
    fn rust_uv_buf_init(out_buf: *uv_buf_t, base: *u8, len: size_t);
    // Error reporting
    fn rust_uv_last_error(loop_handle: *c_void) -> uv_err_t;
    // FIXME ref #2064
    fn rust_uv_strerror(err: *uv_err_t) -> *c_char;
    // FIXME ref #2064
    fn rust_uv_err_name(err: *uv_err_t) -> *c_char;
    // sockaddr allocation / inspection helpers
    fn rust_uv_ip4_addrp(ip: *u8, port: c_int) -> *sockaddr_in;
    fn rust_uv_ip6_addrp(ip: *u8, port: c_int) -> *sockaddr_in6;
    fn rust_uv_free_ip4_addr(addr: *sockaddr_in);
    fn rust_uv_free_ip6_addr(addr: *sockaddr_in6);
    fn rust_uv_ip4_name(src: *sockaddr_in, dst: *u8, size: size_t) -> c_int;
    fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: size_t) -> c_int;
    fn rust_uv_ip4_port(src: *sockaddr_in) -> c_uint;
    fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint;
    // TCP connect/bind, IPv4 and IPv6 variants
    // FIXME ref #2064
    fn rust_uv_tcp_connect(connect_ptr: *uv_connect_t,
                           tcp_handle_ptr: *uv_tcp_t,
                           after_cb: *u8,
                           addr: *sockaddr_in) -> c_int;
    // FIXME ref #2064
    fn rust_uv_tcp_bind(tcp_server: *uv_tcp_t, addr: *sockaddr_in) -> c_int;
    // FIXME ref #2064
    fn rust_uv_tcp_connect6(connect_ptr: *uv_connect_t,
                            tcp_handle_ptr: *uv_tcp_t,
                            after_cb: *u8,
                            addr: *sockaddr_in6) -> c_int;
    // FIXME ref #2064
    fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int;
    fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t,
                               name: *sockaddr_in) -> c_int;
    fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t,
                                name: *sockaddr_in6) ->c_int;
    // Stream I/O
    fn rust_uv_listen(stream: *c_void, backlog: c_int, cb: *u8) -> c_int;
    fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int;
    fn rust_uv_write(req: *c_void,
                     stream: *c_void,
                     buf_in: *uv_buf_t,
                     buf_cnt: c_int,
                     cb: *u8) -> c_int;
    fn rust_uv_read_start(stream: *c_void,
                          on_alloc: *u8,
                          on_read: *u8) -> c_int;
    fn rust_uv_read_stop(stream: *c_void) -> c_int;
    // Timers (timeout/repeat are in milliseconds per libuv convention
    // -- TODO confirm against rust_uv.cpp)
    fn rust_uv_timer_init(loop_handle: *c_void,
                          timer_handle: *uv_timer_t) -> c_int;
    fn rust_uv_timer_start(timer_handle: *uv_timer_t,
                           cb: *u8,
                           timeout: c_uint,
                           repeat: c_uint) -> c_int;
    fn rust_uv_timer_stop(handle: *uv_timer_t) -> c_int;
    // Buffer and handle-data accessors backing the safe wrappers above
    fn rust_uv_malloc_buf_base_of(sug_size: size_t) -> *u8;
    fn rust_uv_free_base_of_buf(buf: uv_buf_t);
    fn rust_uv_get_stream_handle_from_connect_req(connect_req: *uv_connect_t) -> *uv_stream_t;
    fn rust_uv_get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t;
    fn rust_uv_get_loop_for_uv_handle(handle: *c_void) -> *c_void;
    fn rust_uv_get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void;
    fn rust_uv_set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void);
    fn rust_uv_get_data_for_uv_handle(handle: *c_void) -> *c_void;
    fn rust_uv_set_data_for_uv_handle(handle: *c_void, data: *c_void);
    fn rust_uv_get_data_for_req(req: *c_void) -> *c_void;
    fn rust_uv_set_data_for_req(req: *c_void, data: *c_void);
    fn rust_uv_get_base_from_buf(buf: uv_buf_t) -> *u8;
    fn rust_uv_get_len_from_buf(buf: uv_buf_t) -> size_t;
}

View File

@@ -12,7 +12,6 @@
#[allow(missing_doc)];
use option::{Some, None};
use cast;
use gc;
use io;
@@ -151,10 +150,12 @@ impl FailWithCause for &'static str {
// FIXME #4427: Temporary until rt::rt_fail_ goes away
pub fn begin_unwind_(msg: *c_char, file: *c_char, line: size_t) -> ! {
use option::Option;
use cell::Cell;
use either::Left;
use rt::{context, OldTaskContext, TaskContext};
use rt::task::{Task, Unwinder};
use rt::task::Task;
use rt::local::Local;
use rt::logging::Logger;
let context = context();
match context {
@@ -171,24 +172,29 @@ pub fn begin_unwind_(msg: *c_char, file: *c_char, line: size_t) -> ! {
let msg = str::raw::from_c_str(msg);
let file = str::raw::from_c_str(file);
let outmsg = fmt!("%s at line %i of file %s", msg, line as int, file);
let outmsg = fmt!("task failed at '%s', %s:%i",
msg, file, line as int);
// XXX: Logging doesn't work correctly in non-task context because it
// invokes the local heap
if context == TaskContext {
error!(outmsg);
// XXX: Logging doesn't work here - the check to call the log
// function never passes - so calling the log function directly.
let outmsg = Cell::new(outmsg);
do Local::borrow::<Task, ()> |task| {
task.logger.log(Left(outmsg.take()));
}
} else {
rtdebug!("%s", outmsg);
rterrln!("%s", outmsg);
}
gc::cleanup_stack_for_failure();
let task = Local::unsafe_borrow::<Task>();
let unwinder: &mut Option<Unwinder> = &mut (*task).unwinder;
match *unwinder {
Some(ref mut unwinder) => unwinder.begin_unwind(),
None => abort!("failure without unwinder. aborting process")
if (*task).unwinder.unwinding {
rtabort!("unwinding again");
}
(*task).unwinder.begin_unwind();
}
}
}

View File

@@ -497,13 +497,28 @@ pub fn try<T:Send>(f: ~fn() -> T) -> Result<T,()> {
pub fn yield() {
//! Yield control to the task scheduler
use rt::{context, OldTaskContext};
use rt::local::Local;
use rt::sched::Scheduler;
unsafe {
match context() {
OldTaskContext => {
let task_ = rt::rust_get_task();
let killed = rt::rust_task_yield(task_);
if killed && !failing() {
fail!("killed");
}
}
_ => {
// XXX: What does yield really mean in newsched?
let sched = Local::take::<Scheduler>();
do sched.deschedule_running_task_and_then |sched, task| {
sched.enqueue_task(task);
}
}
}
}
}
pub fn failing() -> bool {
@@ -520,20 +535,9 @@ pub fn failing() -> bool {
}
}
_ => {
let mut unwinding = false;
do Local::borrow::<Task> |local| {
unwinding = match local.unwinder {
Some(unwinder) => {
unwinder.unwinding
do Local::borrow::<Task, bool> |local| {
local.unwinder.unwinding
}
None => {
// Because there is no unwinder we can't be unwinding.
// (The process will abort on failure)
false
}
}
}
return unwinding;
}
}
}
@@ -1191,3 +1195,4 @@ fn test_simple_newsched_spawn() {
spawn(||())
}
}

View File

@@ -91,6 +91,7 @@ use uint;
use util;
use unstable::sync::{Exclusive, exclusive};
use rt::local::Local;
use rt::task::Task;
use iterator::IteratorUtil;
#[cfg(test)] use task::default_task_opts;
@@ -581,12 +582,41 @@ pub fn spawn_raw(opts: TaskOpts, f: ~fn()) {
}
}
fn spawn_raw_newsched(_opts: TaskOpts, f: ~fn()) {
fn spawn_raw_newsched(mut opts: TaskOpts, f: ~fn()) {
use rt::sched::*;
let mut sched = Local::take::<Scheduler>();
let task = ~Coroutine::new(&mut sched.stack_pool, f);
sched.schedule_new_task(task);
let f = Cell::new(f);
let mut task = unsafe {
let sched = Local::unsafe_borrow::<Scheduler>();
rtdebug!("unsafe borrowed sched");
if opts.linked {
do Local::borrow::<Task, ~Task>() |running_task| {
~running_task.new_child(&mut (*sched).stack_pool, f.take())
}
} else {
// An unlinked task is a new root in the task tree
~Task::new_root(&mut (*sched).stack_pool, f.take())
}
};
if opts.notify_chan.is_some() {
let notify_chan = opts.notify_chan.swap_unwrap();
let notify_chan = Cell::new(notify_chan);
let on_exit: ~fn(bool) = |success| {
notify_chan.take().send(
if success { Success } else { Failure }
)
};
task.on_exit = Some(on_exit);
}
rtdebug!("spawn about to take scheduler");
let sched = Local::take::<Scheduler>();
rtdebug!("took sched in spawn");
sched.schedule_task(task);
}
fn spawn_raw_oldsched(mut opts: TaskOpts, f: ~fn()) {

View File

@@ -10,29 +10,21 @@
//! Runtime calls emitted by the compiler.
use iterator::IteratorUtil;
use uint;
use cast::transmute;
use libc::{c_char, c_uchar, c_void, size_t, uintptr_t, c_int, STDERR_FILENO};
use managed::raw::BoxRepr;
use libc::{c_char, c_uchar, c_void, size_t, uintptr_t, c_int};
use str;
use sys;
use rt::{context, OldTaskContext};
use rt::task::Task;
use rt::local::Local;
use option::{Option, Some, None};
use io;
use rt::borrowck;
#[allow(non_camel_case_types)]
pub type rust_task = c_void;
pub static FROZEN_BIT: uint = 1 << (uint::bits - 1);
pub static MUT_BIT: uint = 1 << (uint::bits - 2);
static ALL_BITS: uint = FROZEN_BIT | MUT_BIT;
pub mod rustrt {
use unstable::lang::rust_task;
use libc::{c_void, c_char, uintptr_t};
use libc::{c_char, uintptr_t};
pub extern {
#[rust_stack]
@@ -46,15 +38,6 @@ pub mod rustrt {
size: uintptr_t)
-> *c_char;
#[fast_ffi]
unsafe fn rust_upcall_free_noswitch(ptr: *c_char);
#[rust_stack]
fn rust_take_task_borrow_list(task: *rust_task) -> *c_void;
#[rust_stack]
fn rust_set_task_borrow_list(task: *rust_task, map: *c_void);
#[rust_stack]
fn rust_try_get_task() -> *rust_task;
@@ -77,149 +60,6 @@ pub fn fail_bounds_check(file: *c_char, line: size_t,
}
}
#[deriving(Eq)]
struct BorrowRecord {
box: *mut BoxRepr,
file: *c_char,
line: size_t
}
fn try_take_task_borrow_list() -> Option<~[BorrowRecord]> {
unsafe {
let cur_task: *rust_task = rustrt::rust_try_get_task();
if cur_task.is_not_null() {
let ptr = rustrt::rust_take_task_borrow_list(cur_task);
if ptr.is_null() {
None
} else {
let v: ~[BorrowRecord] = transmute(ptr);
Some(v)
}
} else {
None
}
}
}
fn swap_task_borrow_list(f: &fn(~[BorrowRecord]) -> ~[BorrowRecord]) {
unsafe {
let cur_task: *rust_task = rustrt::rust_try_get_task();
if cur_task.is_not_null() {
let mut borrow_list: ~[BorrowRecord] = {
let ptr = rustrt::rust_take_task_borrow_list(cur_task);
if ptr.is_null() { ~[] } else { transmute(ptr) }
};
borrow_list = f(borrow_list);
rustrt::rust_set_task_borrow_list(cur_task, transmute(borrow_list));
}
}
}
pub unsafe fn clear_task_borrow_list() {
// pub because it is used by the box annihilator.
let _ = try_take_task_borrow_list();
}
unsafe fn fail_borrowed(box: *mut BoxRepr, file: *c_char, line: size_t) {
debug_borrow("fail_borrowed: ", box, 0, 0, file, line);
match try_take_task_borrow_list() {
None => { // not recording borrows
let msg = "borrowed";
do str::as_buf(msg) |msg_p, _| {
fail_(msg_p as *c_char, file, line);
}
}
Some(borrow_list) => { // recording borrows
let mut msg = ~"borrowed";
let mut sep = " at ";
for borrow_list.rev_iter().advance |entry| {
if entry.box == box {
msg.push_str(sep);
let filename = str::raw::from_c_str(entry.file);
msg.push_str(filename);
msg.push_str(fmt!(":%u", entry.line as uint));
sep = " and at ";
}
}
do str::as_buf(msg) |msg_p, _| {
fail_(msg_p as *c_char, file, line)
}
}
}
}
/// Because this code is so perf. sensitive, use a static constant so that
/// debug printouts are compiled out most of the time.
static ENABLE_DEBUG: bool = false;
#[inline]
unsafe fn debug_borrow<T>(tag: &'static str,
p: *const T,
old_bits: uint,
new_bits: uint,
filename: *c_char,
line: size_t) {
//! A useful debugging function that prints a pointer + tag + newline
//! without allocating memory.
if ENABLE_DEBUG && ::rt::env::get().debug_borrow {
debug_borrow_slow(tag, p, old_bits, new_bits, filename, line);
}
unsafe fn debug_borrow_slow<T>(tag: &'static str,
p: *const T,
old_bits: uint,
new_bits: uint,
filename: *c_char,
line: size_t) {
let dbg = STDERR_FILENO as io::fd_t;
dbg.write_str(tag);
dbg.write_hex(p as uint);
dbg.write_str(" ");
dbg.write_hex(old_bits);
dbg.write_str(" ");
dbg.write_hex(new_bits);
dbg.write_str(" ");
dbg.write_cstr(filename);
dbg.write_str(":");
dbg.write_hex(line as uint);
dbg.write_str("\n");
}
}
trait DebugPrints {
fn write_hex(&self, val: uint);
unsafe fn write_cstr(&self, str: *c_char);
}
impl DebugPrints for io::fd_t {
fn write_hex(&self, mut i: uint) {
let letters = ['0', '1', '2', '3', '4', '5', '6', '7', '8',
'9', 'a', 'b', 'c', 'd', 'e', 'f'];
static UINT_NIBBLES: uint = ::uint::bytes << 1;
let mut buffer = [0_u8, ..UINT_NIBBLES+1];
let mut c = UINT_NIBBLES;
while c > 0 {
c -= 1;
buffer[c] = letters[i & 0xF] as u8;
i >>= 4;
}
self.write(buffer.slice(0, UINT_NIBBLES));
}
unsafe fn write_cstr(&self, p: *c_char) {
use libc::strlen;
use vec;
let len = strlen(p);
let p: *u8 = transmute(p);
do vec::raw::buf_as_slice(p, len as uint) |s| {
self.write(s);
}
}
}
#[lang="malloc"]
pub unsafe fn local_malloc(td: *c_char, size: uintptr_t) -> *c_char {
match context() {
@@ -228,7 +68,10 @@ pub unsafe fn local_malloc(td: *c_char, size: uintptr_t) -> *c_char {
}
_ => {
let mut alloc = ::ptr::null();
do Local::borrow::<Task> |task| {
do Local::borrow::<Task,()> |task| {
rtdebug!("task pointer: %x, heap pointer: %x",
to_uint(task),
to_uint(&task.heap));
alloc = task.heap.alloc(td as *c_void, size as uint) as *c_char;
}
return alloc;
@@ -241,110 +84,38 @@ pub unsafe fn local_malloc(td: *c_char, size: uintptr_t) -> *c_char {
// problem occurs, call exit instead.
#[lang="free"]
pub unsafe fn local_free(ptr: *c_char) {
match context() {
OldTaskContext => {
rustrt::rust_upcall_free_noswitch(ptr);
}
_ => {
do Local::borrow::<Task> |task| {
task.heap.free(ptr as *c_void);
}
}
}
::rt::local_heap::local_free(ptr);
}
#[lang="borrow_as_imm"]
#[inline]
pub unsafe fn borrow_as_imm(a: *u8, file: *c_char, line: size_t) -> uint {
let a: *mut BoxRepr = transmute(a);
let old_ref_count = (*a).header.ref_count;
let new_ref_count = old_ref_count | FROZEN_BIT;
debug_borrow("borrow_as_imm:", a, old_ref_count, new_ref_count, file, line);
if (old_ref_count & MUT_BIT) != 0 {
fail_borrowed(a, file, line);
}
(*a).header.ref_count = new_ref_count;
old_ref_count
borrowck::borrow_as_imm(a, file, line)
}
#[lang="borrow_as_mut"]
#[inline]
pub unsafe fn borrow_as_mut(a: *u8, file: *c_char, line: size_t) -> uint {
let a: *mut BoxRepr = transmute(a);
let old_ref_count = (*a).header.ref_count;
let new_ref_count = old_ref_count | MUT_BIT | FROZEN_BIT;
debug_borrow("borrow_as_mut:", a, old_ref_count, new_ref_count, file, line);
if (old_ref_count & (MUT_BIT|FROZEN_BIT)) != 0 {
fail_borrowed(a, file, line);
}
(*a).header.ref_count = new_ref_count;
old_ref_count
borrowck::borrow_as_mut(a, file, line)
}
#[lang="record_borrow"]
pub unsafe fn record_borrow(a: *u8, old_ref_count: uint,
file: *c_char, line: size_t) {
if (old_ref_count & ALL_BITS) == 0 {
// was not borrowed before
let a: *mut BoxRepr = transmute(a);
debug_borrow("record_borrow:", a, old_ref_count, 0, file, line);
do swap_task_borrow_list |borrow_list| {
let mut borrow_list = borrow_list;
borrow_list.push(BorrowRecord {box: a, file: file, line: line});
borrow_list
}
}
borrowck::record_borrow(a, old_ref_count, file, line)
}
#[lang="unrecord_borrow"]
pub unsafe fn unrecord_borrow(a: *u8, old_ref_count: uint,
file: *c_char, line: size_t) {
if (old_ref_count & ALL_BITS) == 0 {
// was not borrowed before, so we should find the record at
// the end of the list
let a: *mut BoxRepr = transmute(a);
debug_borrow("unrecord_borrow:", a, old_ref_count, 0, file, line);
do swap_task_borrow_list |borrow_list| {
let mut borrow_list = borrow_list;
assert!(!borrow_list.is_empty());
let br = borrow_list.pop();
if br.box != a || br.file != file || br.line != line {
let err = fmt!("wrong borrow found, br=%?", br);
do str::as_buf(err) |msg_p, _| {
fail_(msg_p as *c_char, file, line)
}
}
borrow_list
}
}
borrowck::unrecord_borrow(a, old_ref_count, file, line)
}
#[lang="return_to_mut"]
#[inline]
pub unsafe fn return_to_mut(a: *u8, orig_ref_count: uint,
file: *c_char, line: size_t) {
// Sometimes the box is null, if it is conditionally frozen.
// See e.g. #4904.
if !a.is_null() {
let a: *mut BoxRepr = transmute(a);
let old_ref_count = (*a).header.ref_count;
let new_ref_count =
(old_ref_count & !ALL_BITS) | (orig_ref_count & ALL_BITS);
debug_borrow("return_to_mut:",
a, old_ref_count, new_ref_count, file, line);
(*a).header.ref_count = new_ref_count;
}
borrowck::return_to_mut(a, orig_ref_count, file, line)
}
#[lang="check_not_borrowed"]
@@ -352,12 +123,7 @@ pub unsafe fn return_to_mut(a: *u8, orig_ref_count: uint,
pub unsafe fn check_not_borrowed(a: *u8,
file: *c_char,
line: size_t) {
let a: *mut BoxRepr = transmute(a);
let ref_count = (*a).header.ref_count;
debug_borrow("check_not_borrowed:", a, ref_count, 0, file, line);
if (ref_count & FROZEN_BIT) != 0 {
fail_borrowed(a, file, line);
}
borrowck::check_not_borrowed(a, file, line)
}
#[lang="strdup_uniq"]
@@ -366,6 +132,11 @@ pub unsafe fn strdup_uniq(ptr: *c_uchar, len: uint) -> ~str {
str::raw::from_buf_len(ptr, len)
}
#[lang="annihilate"]
pub unsafe fn annihilate() {
::cleanup::annihilate()
}
#[lang="start"]
pub fn start(main: *u8, argc: int, argv: **c_char,
crate_map: *u8) -> int {

View File

@@ -205,8 +205,53 @@ extern {
fn rust_unlock_little_lock(lock: rust_little_lock);
}
/* *********************************************************************/
//FIXME: #5042 This should be replaced by proper atomic type
// Minimal atomic uint built directly on the compiler intrinsics
// (see the FIXME #5042 note above this type in the original file).
pub struct AtomicUint {
    priv inner: uint
}
impl AtomicUint {
    pub fn new(val: uint) -> AtomicUint { AtomicUint { inner: val } }
    // The intrinsics are typed over int, so the pointer to self (whose
    // only field is `inner`) is transmuted to the int-pointer they
    // expect and results are cast back to uint.
    pub fn load(&self) -> uint {
        unsafe { intrinsics::atomic_load(cast::transmute(self)) as uint }
    }
    pub fn store(&mut self, val: uint) {
        unsafe { intrinsics::atomic_store(cast::transmute(self), val as int); }
    }
    // Atomic fetch-and-add; returns the value held *before* the add.
    pub fn add(&mut self, val: int) -> uint {
        unsafe { intrinsics::atomic_xadd(cast::transmute(self), val as int) as uint }
    }
    // Compare-and-swap; returns the value observed (== `old` on success).
    pub fn cas(&mut self, old:uint, new: uint) -> uint {
        unsafe { intrinsics::atomic_cxchg(cast::transmute(self), old as int, new as int) as uint }
    }
}
// Minimal atomic int built directly on the compiler intrinsics; unlike
// AtomicUint no transmute is needed because the intrinsics are already
// int-typed.
pub struct AtomicInt {
    priv inner: int
}
impl AtomicInt {
    pub fn new(val: int) -> AtomicInt { AtomicInt { inner: val } }
    pub fn load(&self) -> int {
        unsafe { intrinsics::atomic_load(&self.inner) }
    }
    pub fn store(&mut self, val: int) {
        unsafe { intrinsics::atomic_store(&mut self.inner, val); }
    }
    // Atomic fetch-and-add; returns the value held *before* the add.
    pub fn add(&mut self, val: int) -> int {
        unsafe { intrinsics::atomic_xadd(&mut self.inner, val) }
    }
    // Compare-and-swap; returns the value observed (== `old` on success).
    pub fn cas(&mut self, old: int, new: int) -> int {
        unsafe { intrinsics::atomic_cxchg(&mut self.inner, old, new) }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use comm;
use super::exclusive;
use task;
@@ -262,4 +307,28 @@ mod tests {
}
}
}
#[test]
fn atomic_int_smoke_test() {
let mut i = AtomicInt::new(0);
i.store(10);
assert!(i.load() == 10);
assert!(i.add(1) == 10);
assert!(i.load() == 11);
assert!(i.cas(11, 12) == 11);
assert!(i.cas(11, 13) == 12);
assert!(i.load() == 12);
}
#[test]
fn atomic_uint_smoke_test() {
let mut i = AtomicUint::new(0);
i.store(10);
assert!(i.load() == 10);
assert!(i.add(1) == 10);
assert!(i.load() == 11);
assert!(i.cas(11, 12) == 11);
assert!(i.cas(11, 13) == 12);
assert!(i.load() == 12);
}
}

View File

@@ -20,7 +20,6 @@ use cmp::{Eq, TotalEq, TotalOrd, Ordering, Less, Equal, Greater};
use clone::Clone;
use iterator::{FromIterator, Iterator, IteratorUtil};
use kinds::Copy;
use libc;
use libc::c_void;
use num::Zero;
use option::{None, Option, Some};
@@ -33,17 +32,12 @@ use sys::size_of;
use uint;
use unstable::intrinsics;
#[cfg(stage0)]
use intrinsic::{get_tydesc, TyDesc};
use intrinsic::{get_tydesc};
#[cfg(not(stage0))]
use unstable::intrinsics::{get_tydesc, contains_managed, TyDesc};
use unstable::intrinsics::{get_tydesc, contains_managed};
use vec;
use util;
extern {
#[fast_ffi]
unsafe fn vec_reserve_shared_actual(t: *TyDesc, v: **raw::VecRepr, n: libc::size_t);
}
/// Returns true if two vectors have the same length
pub fn same_length<T, U>(xs: &[T], ys: &[U]) -> bool {
xs.len() == ys.len()
@@ -1139,7 +1133,9 @@ impl<T> OwnedVector<T> for ~[T] {
let td = get_tydesc::<T>();
if ((**ptr).box_header.ref_count ==
managed::raw::RC_MANAGED_UNIQUE) {
vec_reserve_shared_actual(td, ptr as **raw::VecRepr, n as libc::size_t);
// XXX transmute shouldn't be necessary
let td = cast::transmute(td);
::at_vec::raw::reserve_raw(td, ptr, n);
} else {
let alloc = n * sys::nonzero_size_of::<T>();
*ptr = realloc_raw(*ptr as *mut c_void, alloc + size_of::<raw::VecRepr>())
@@ -1169,7 +1165,7 @@ impl<T> OwnedVector<T> for ~[T] {
let ptr: *mut *mut raw::VecRepr = cast::transmute(self);
let td = get_tydesc::<T>();
if contains_managed::<T>() {
vec_reserve_shared_actual(td, ptr as **raw::VecRepr, n as libc::size_t);
::at_vec::raw::reserve_raw(td, ptr, n);
} else {
let alloc = n * sys::nonzero_size_of::<T>();
let size = alloc + size_of::<raw::VecRepr>();

View File

@@ -68,11 +68,10 @@ rust_env_pairs() {
}
#endif
extern "C" CDECL void
vec_reserve_shared_actual(type_desc* ty, rust_vec_box** vp,
size_t n_elts) {
extern "C" CDECL void *
rust_local_realloc(rust_opaque_box *ptr, size_t size) {
rust_task *task = rust_get_current_task();
reserve_vec_exact_shared(task, vp, n_elts * ty->size);
return task->boxed.realloc(ptr, size);
}
extern "C" CDECL size_t
@@ -87,15 +86,10 @@ rand_gen_seed(uint8_t* dest, size_t size) {
extern "C" CDECL void *
rand_new_seeded(uint8_t* seed, size_t seed_size) {
rust_task *task = rust_get_current_task();
rust_rng *rng = (rust_rng *) task->malloc(sizeof(rust_rng),
"rand_new_seeded");
if (!rng) {
task->fail();
return NULL;
}
char *env_seed = task->kernel->env->rust_seed;
rng_init(rng, env_seed, seed, seed_size);
assert(seed != NULL);
rust_rng *rng = (rust_rng *) malloc(sizeof(rust_rng));
assert(rng != NULL && "rng alloc failed");
rng_init(rng, NULL, seed, seed_size);
return rng;
}
@@ -106,8 +100,7 @@ rand_next(rust_rng *rng) {
extern "C" CDECL void
rand_free(rust_rng *rng) {
rust_task *task = rust_get_current_task();
task->free(rng);
free(rng);
}
@@ -594,12 +587,18 @@ rust_log_console_on() {
log_console_on();
}
extern void log_console_off(rust_env *env);
extern void log_console_off();
extern "C" CDECL void
rust_log_console_off() {
rust_task *task = rust_get_current_task();
log_console_off(task->kernel->env);
log_console_off();
}
extern bool should_log_console();
extern "C" CDECL uintptr_t
rust_should_log_console() {
return (uintptr_t)should_log_console();
}
extern "C" CDECL void
@@ -871,6 +870,12 @@ rust_delete_memory_region(memory_region *region) {
delete region;
}
extern "C" CDECL boxed_region*
rust_current_boxed_region() {
rust_task *task = rust_get_current_task();
return &task->boxed;
}
extern "C" CDECL boxed_region*
rust_new_boxed_region(memory_region *region,
uintptr_t poison_on_free) {
@@ -887,6 +892,11 @@ rust_boxed_region_malloc(boxed_region *region, type_desc *td, size_t size) {
return region->malloc(td, size);
}
extern "C" CDECL rust_opaque_box*
rust_boxed_region_realloc(boxed_region *region, rust_opaque_box *ptr, size_t size) {
return region->realloc(ptr, size);
}
extern "C" CDECL void
rust_boxed_region_free(boxed_region *region, rust_opaque_box *box) {
region->free(box);
@@ -919,6 +929,46 @@ rust_running_on_valgrind() {
return RUNNING_ON_VALGRIND;
}
extern int get_num_cpus();
extern "C" CDECL uintptr_t
rust_get_num_cpus() {
return get_num_cpus();
}
static lock_and_signal global_args_lock;
static uintptr_t global_args_ptr = 0;
extern "C" CDECL void
rust_take_global_args_lock() {
global_args_lock.lock();
}
extern "C" CDECL void
rust_drop_global_args_lock() {
global_args_lock.unlock();
}
extern "C" CDECL uintptr_t*
rust_get_global_args_ptr() {
return &global_args_ptr;
}
static lock_and_signal exit_status_lock;
static uintptr_t exit_status = 0;
extern "C" CDECL void
rust_set_exit_status_newrt(uintptr_t code) {
scoped_lock with(exit_status_lock);
exit_status = code;
}
extern "C" CDECL uintptr_t
rust_get_exit_status_newrt() {
scoped_lock with(exit_status_lock);
return exit_status;
}
//
// Local Variables:
// mode: C++

View File

@@ -40,7 +40,7 @@ rust_drop_env_lock() {
}
#if defined(__WIN32__)
static int
int
get_num_cpus() {
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
@@ -48,7 +48,7 @@ get_num_cpus() {
return (int) sysinfo.dwNumberOfProcessors;
}
#elif defined(__BSD__)
static int
int
get_num_cpus() {
/* swiped from http://stackoverflow.com/questions/150355/
programmatically-find-the-number-of-cores-on-a-machine */
@@ -75,7 +75,7 @@ get_num_cpus() {
return numCPU;
}
#elif defined(__GNUC__)
static int
int
get_num_cpus() {
return sysconf(_SC_NPROCESSORS_ONLN);
}

View File

@@ -79,6 +79,11 @@ rust_gc_metadata() {
return (void *)global_safe_points;
}
extern "C" CDECL void
rust_update_gc_metadata(const void* map) {
update_gc_metadata(map);
}
//
// Local Variables:
// mode: C++

View File

@@ -43,11 +43,15 @@ log_console_on() {
* overridden by the environment.
*/
void
log_console_off(rust_env *env) {
log_console_off() {
scoped_lock with(_log_lock);
if (env->logspec == NULL) {
_log_to_console = false;
}
}
bool
should_log_console() {
scoped_lock with(_log_lock);
return _log_to_console;
}
rust_log::rust_log(rust_sched_loop *sched_loop) :

View File

@@ -168,11 +168,11 @@ rust_dbg_extern_identity_TwoDoubles(TwoDoubles u) {
// Generates increasing port numbers for network testing
extern "C" CDECL uintptr_t
rust_dbg_next_port() {
rust_dbg_next_port(uintptr_t base_port) {
static lock_and_signal dbg_port_lock;
static uintptr_t next_port = 9600;
static uintptr_t next_offset = 0;
scoped_lock with(dbg_port_lock);
uintptr_t this_port = next_port;
next_port += 1;
uintptr_t this_port = base_port + next_offset;
next_offset += 1;
return this_port;
}

View File

@@ -57,16 +57,6 @@ vec_data(rust_vec *v) {
return reinterpret_cast<T*>(v->data);
}
inline void reserve_vec_exact_shared(rust_task* task, rust_vec_box** vpp,
size_t size) {
rust_opaque_box** ovpp = (rust_opaque_box**)vpp;
if (size > (*vpp)->body.alloc) {
*vpp = (rust_vec_box*)task->boxed.realloc(
*ovpp, size + sizeof(rust_vec));
(*vpp)->body.alloc = size;
}
}
inline void reserve_vec_exact(rust_vec_box** vpp,
size_t size) {
if (size > (*vpp)->body.alloc) {

View File

@@ -294,6 +294,118 @@ rust_uv_tcp_getpeername6
return uv_tcp_getpeername(handle, (sockaddr*)name, &namelen);
}
extern "C" int
rust_uv_tcp_getsockname
(uv_tcp_t* handle, sockaddr_in* name) {
int namelen = sizeof(sockaddr_in);
return uv_tcp_getsockname(handle, (sockaddr*)name, &namelen);
}
extern "C" int
rust_uv_tcp_getsockname6
(uv_tcp_t* handle, sockaddr_in6* name) {
int namelen = sizeof(sockaddr_in6);
return uv_tcp_getsockname(handle, (sockaddr*)name, &namelen);
}
extern "C" int
rust_uv_tcp_nodelay
(uv_tcp_t* handle, int enable) {
return uv_tcp_nodelay(handle, enable);
}
extern "C" int
rust_uv_tcp_keepalive
(uv_tcp_t* handle, int enable, unsigned int delay) {
return uv_tcp_keepalive(handle, enable, delay);
}
extern "C" int
rust_uv_tcp_simultaneous_accepts
(uv_tcp_t* handle, int enable) {
return uv_tcp_simultaneous_accepts(handle, enable);
}
extern "C" int
rust_uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
return uv_udp_init(loop, handle);
}
extern "C" int
rust_uv_udp_bind(uv_udp_t* server, sockaddr_in* addr_ptr, unsigned flags) {
return uv_udp_bind(server, *addr_ptr, flags);
}
extern "C" int
rust_uv_udp_bind6(uv_udp_t* server, sockaddr_in6* addr_ptr, unsigned flags) {
return uv_udp_bind6(server, *addr_ptr, flags);
}
extern "C" int
rust_uv_udp_send(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in,
int buf_cnt, sockaddr_in* addr_ptr, uv_udp_send_cb cb) {
return uv_udp_send(req, handle, buf_in, buf_cnt, *addr_ptr, cb);
}
extern "C" int
rust_uv_udp_send6(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in,
int buf_cnt, sockaddr_in6* addr_ptr, uv_udp_send_cb cb) {
return uv_udp_send6(req, handle, buf_in, buf_cnt, *addr_ptr, cb);
}
extern "C" int
rust_uv_udp_recv_start(uv_udp_t* server, uv_alloc_cb on_alloc, uv_udp_recv_cb on_read) {
return uv_udp_recv_start(server, on_alloc, on_read);
}
extern "C" int
rust_uv_udp_recv_stop(uv_udp_t* server) {
return uv_udp_recv_stop(server);
}
extern "C" uv_udp_t*
rust_uv_get_udp_handle_from_send_req(uv_udp_send_t* send_req) {
return send_req->handle;
}
extern "C" int
rust_uv_udp_getsockname
(uv_udp_t* handle, sockaddr_in* name) {
int namelen = sizeof(sockaddr_in);
return uv_udp_getsockname(handle, (sockaddr*)name, &namelen);
}
extern "C" int
rust_uv_udp_getsockname6
(uv_udp_t* handle, sockaddr_in6* name) {
int namelen = sizeof(sockaddr_in6);
return uv_udp_getsockname(handle, (sockaddr*)name, &namelen);
}
extern "C" int
rust_uv_udp_set_membership
(uv_udp_t* handle, const char* m_addr, const char* i_addr, uv_membership membership) {
return uv_udp_set_membership(handle, m_addr, i_addr, membership);
}
extern "C" int
rust_uv_udp_set_multicast_loop
(uv_udp_t* handle, int on) {
return uv_udp_set_multicast_loop(handle, on);
}
extern "C" int
rust_uv_udp_set_multicast_ttl
(uv_udp_t* handle, int ttl) {
return uv_udp_set_multicast_ttl(handle, ttl);
}
extern "C" int
rust_uv_udp_set_broadcast
(uv_udp_t* handle, int on) {
return uv_udp_set_broadcast(handle, on);
}
extern "C" int
rust_uv_listen(uv_stream_t* stream, int backlog,
uv_connection_cb cb) {
@@ -546,10 +658,34 @@ extern "C" void
rust_uv_freeaddrinfo(addrinfo* res) {
uv_freeaddrinfo(res);
}
extern "C" int
rust_uv_is_ipv4_sockaddr(sockaddr* addr) {
return addr->sa_family == AF_INET;
}
extern "C" int
rust_uv_is_ipv6_sockaddr(sockaddr* addr) {
return addr->sa_family == AF_INET6;
}
extern "C" sockaddr_in*
rust_uv_sockaddr_as_sockaddr_in(sockaddr* addr) {
// return (sockaddr_in*)addr->sa_data;
return (sockaddr_in*)addr;
}
extern "C" sockaddr_in6*
rust_uv_sockaddr_as_sockaddr_in6(sockaddr* addr) {
//return (sockaddr_in6*)addr->sa_data;
return (sockaddr_in6*)addr;
}
extern "C" bool
rust_uv_is_ipv4_addrinfo(addrinfo* input) {
return input->ai_family == AF_INET;
}
extern "C" bool
rust_uv_is_ipv6_addrinfo(addrinfo* input) {
return input->ai_family == AF_INET6;

View File

@@ -39,6 +39,7 @@ rust_list_dir_wfd_size
rust_list_dir_wfd_fp_buf
rust_log_console_on
rust_log_console_off
rust_should_log_console
rust_set_environ
rust_unset_sigprocmask
rust_sched_current_nonlazy_threads
@@ -54,7 +55,7 @@ rust_get_stack_segment
rust_get_c_stack
rust_log_str
start_task
vec_reserve_shared_actual
rust_local_realloc
task_clear_event_reject
task_wait_event
task_signal_event
@@ -105,6 +106,29 @@ rust_uv_tcp_connect
rust_uv_tcp_bind
rust_uv_tcp_connect6
rust_uv_tcp_bind6
rust_uv_tcp_getsockname
rust_uv_tcp_getsockname6
rust_uv_tcp_nodelay
rust_uv_tcp_keepalive
rust_uv_tcp_simultaneous_accepts
rust_uv_udp_init
rust_uv_udp_bind
rust_uv_udp_bind6
rust_uv_udp_send
rust_uv_udp_send6
rust_uv_udp_recv_start
rust_uv_udp_recv_stop
rust_uv_get_udp_handle_from_send_req
rust_uv_udp_getsockname
rust_uv_udp_getsockname6
rust_uv_udp_set_membership
rust_uv_udp_set_multicast_loop
rust_uv_udp_set_multicast_ttl
rust_uv_udp_set_broadcast
rust_uv_is_ipv4_sockaddr
rust_uv_is_ipv6_sockaddr
rust_uv_sockaddr_as_sockaddr_in
rust_uv_sockaddr_as_sockaddr_in6
rust_uv_listen
rust_uv_accept
rust_uv_write
@@ -178,6 +202,7 @@ rust_task_deref
tdefl_compress_mem_to_heap
tinfl_decompress_mem_to_heap
rust_gc_metadata
rust_update_gc_metadata
rust_uv_ip4_port
rust_uv_ip6_port
rust_uv_tcp_getpeername
@@ -228,6 +253,7 @@ rust_delete_memory_region
rust_new_boxed_region
rust_delete_boxed_region
rust_boxed_region_malloc
rust_boxed_region_realloc
rust_boxed_region_free
rust_try
rust_begin_unwind
@@ -239,3 +265,10 @@ rust_take_env_lock
rust_drop_env_lock
rust_update_log_settings
rust_running_on_valgrind
rust_get_num_cpus
rust_get_global_args_ptr
rust_current_boxed_region
rust_take_global_args_lock
rust_drop_global_args_lock
rust_set_exit_status_newrt
rust_get_exit_status_newrt

View File

@@ -23,7 +23,6 @@ mod rustrt {
pub fn rust_get_sched_id() -> libc::intptr_t;
pub fn rust_get_argc() -> libc::c_int;
pub fn get_task_id() -> libc::intptr_t;
pub fn rust_sched_threads();
pub fn rust_get_task();
}
}
@@ -31,7 +30,6 @@ mod rustrt {
fn calllink01() { unsafe { rustrt::rust_get_sched_id(); } }
fn calllink02() { unsafe { rustrt::rust_get_argc(); } }
fn calllink08() { unsafe { rustrt::get_task_id(); } }
fn calllink09() { unsafe { rustrt::rust_sched_threads(); } }
fn calllink10() { unsafe { rustrt::rust_get_task(); } }
fn runtest(f: extern fn(), frame_backoff: u32) {
@@ -64,7 +62,6 @@ pub fn main() {
calllink01,
calllink02,
calllink08,
calllink09,
calllink10
];
let mut rng = rand::rng();