diff --git a/mk/rt.mk b/mk/rt.mk index 3d5e9cbcb82a1..39679cbed6961 100644 --- a/mk/rt.mk +++ b/mk/rt.mk @@ -207,7 +207,7 @@ LIBUV_MAKEFILE_$(1) := $$(CFG_BUILD_DIR)$$(RT_OUTPUT_DIR_$(1))/libuv/Makefile $$(LIBUV_MAKEFILE_$(1)): $$(LIBUV_DEPS) (cd $(S)src/libuv/ && \ - $$(CFG_PYTHON) ./gyp_uv -f make -Dtarget_arch=$$(LIBUV_ARCH_$(1)) \ + $$(CFG_PYTHON) ./gyp_uv.py -f make -Dtarget_arch=$$(LIBUV_ARCH_$(1)) \ -D ninja \ -DOS=$$(LIBUV_OSTYPE_$(1)) \ -Goutput_dir=$$(@D) --generator-output $$(@D)) @@ -218,7 +218,7 @@ $$(LIBUV_MAKEFILE_$(1)): $$(LIBUV_DEPS) ifdef CFG_WINDOWSY_$(1) $$(LIBUV_LIB_$(1)): $$(LIBUV_DEPS) $$(Q)$$(MAKE) -C $$(S)src/libuv -f Makefile.mingw \ - CFLAGS="$$(CFG_GCCISH_CFLAGS) $$(LIBUV_FLAGS_$$(HOST_$(1))) $$(SNAP_DEFINES)" \ + CC="$$(CC) $$(CFG_GCCISH_CFLAGS) $$(LIBUV_FLAGS_$$(HOST_$(1))) $$(SNAP_DEFINES)" \ AR="$$(AR_$(1))" \ V=$$(VERBOSE) $$(Q)cp $$(S)src/libuv/libuv.a $$@ diff --git a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs index 09736749997be..601cc9f84add0 100644 --- a/src/librustuv/addrinfo.rs +++ b/src/librustuv/addrinfo.rs @@ -8,41 +8,34 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cast::transmute; -use std::cell::Cell; -use std::libc::{c_int, c_void}; -use std::ptr::null; use ai = std::rt::io::net::addrinfo; +use std::libc::c_int; +use std::ptr::null; +use std::rt::BlockedTask; +use std::rt::local::Local; +use std::rt::sched::Scheduler; -use uvll; -use uvll::UV_GETADDRINFO; -use super::{Loop, UvError, NativeHandle, status_to_maybe_uv_error}; use net; +use super::{Loop, UvError, Request, wait_until_woken_after}; +use uvll; -type GetAddrInfoCallback = ~fn(GetAddrInfoRequest, &net::UvAddrInfo, Option); - -pub struct GetAddrInfoRequest(*uvll::uv_getaddrinfo_t); - -pub struct RequestData { - priv getaddrinfo_cb: Option, +struct Addrinfo { + handle: *uvll::addrinfo, } -impl GetAddrInfoRequest { - pub fn new() -> GetAddrInfoRequest { - let req = unsafe { uvll::malloc_req(UV_GETADDRINFO) }; - assert!(req.is_not_null()); - let mut req: GetAddrInfoRequest = NativeHandle::from_native_handle(req); - req.install_req_data(); - return req; - } +struct Ctx { + slot: Option, + status: c_int, + addrinfo: Option, +} - pub fn getaddrinfo(&mut self, loop_: &Loop, node: Option<&str>, - service: Option<&str>, hints: Option, - cb: GetAddrInfoCallback) { +pub struct GetAddrInfoRequest; +impl GetAddrInfoRequest { + pub fn run(loop_: &Loop, node: Option<&str>, service: Option<&str>, + hints: Option) -> Result<~[ai::Info], UvError> { assert!(node.is_some() || service.is_some()); - - let (c_node, c_node_ptr) = match node { + let (_c_node, c_node_ptr) = match node { Some(n) => { let c_node = n.to_c_str(); let c_node_ptr = c_node.with_ref(|r| r); @@ -51,7 +44,7 @@ impl GetAddrInfoRequest { None => (None, null()) }; - let (c_service, c_service_ptr) = match service { + let (_c_service, c_service_ptr) = match service { Some(s) => { let c_service = s.to_c_str(); let c_service_ptr = c_service.with_ref(|r| r); @@ -60,17 +53,6 @@ impl GetAddrInfoRequest { None => (None, null()) }; - let cb = Cell::new(cb); - let wrapper_cb: GetAddrInfoCallback = |req, addrinfo, err| { 
- // Capture some heap values that need to stay alive for the - // getaddrinfo call - let _ = &c_node; - let _ = &c_service; - - let cb = cb.take(); - cb(req, addrinfo, err) - }; - let hint = hints.map(|hint| { let mut flags = 0; do each_ai_flag |cval, aival| { @@ -78,19 +60,6 @@ impl GetAddrInfoRequest { flags |= cval as i32; } } - /* XXX: do we really want to support these? - let socktype = match hint.socktype { - Some(ai::Stream) => uvll::rust_SOCK_STREAM(), - Some(ai::Datagram) => uvll::rust_SOCK_DGRAM(), - Some(ai::Raw) => uvll::rust_SOCK_RAW(), - None => 0, - }; - let protocol = match hint.protocol { - Some(ai::UDP) => uvll::rust_IPPROTO_UDP(), - Some(ai::TCP) => uvll::rust_IPPROTO_TCP(), - _ => 0, - }; - */ let socktype = 0; let protocol = 0; @@ -106,66 +75,48 @@ impl GetAddrInfoRequest { } }); let hint_ptr = hint.as_ref().map_default(null(), |x| x as *uvll::addrinfo); + let mut req = Request::new(uvll::UV_GETADDRINFO); + + return match unsafe { + uvll::uv_getaddrinfo(loop_.handle, req.handle, + getaddrinfo_cb, c_node_ptr, c_service_ptr, + hint_ptr) + } { + 0 => { + req.defuse(); // uv callback now owns this request + let mut cx = Ctx { slot: None, status: 0, addrinfo: None }; + + do wait_until_woken_after(&mut cx.slot) { + req.set_data(&cx); + } - self.get_req_data().getaddrinfo_cb = Some(wrapper_cb); - - unsafe { - assert!(0 == uvll::getaddrinfo(loop_.native_handle(), - self.native_handle(), - getaddrinfo_cb, - c_node_ptr, - c_service_ptr, - hint_ptr)); - } - - extern "C" fn getaddrinfo_cb(req: *uvll::uv_getaddrinfo_t, - status: c_int, - res: *uvll::addrinfo) { - let mut req: GetAddrInfoRequest = NativeHandle::from_native_handle(req); - let err = status_to_maybe_uv_error(status); - let addrinfo = net::UvAddrInfo(res); - let data = req.get_req_data(); - (*data.getaddrinfo_cb.get_ref())(req, &addrinfo, err); - unsafe { - uvll::freeaddrinfo(res); + match cx.status { + 0 => Ok(accum_addrinfo(cx.addrinfo.get_ref())), + n => Err(UvError(n)) + } } - } - } + n => 
Err(UvError(n)) + }; - fn get_loop(&self) -> Loop { - unsafe { - Loop { - handle: uvll::get_loop_from_fs_req(self.native_handle()) - } - } - } - fn install_req_data(&mut self) { - let req = self.native_handle() as *uvll::uv_getaddrinfo_t; - let data = ~RequestData { - getaddrinfo_cb: None - }; - unsafe { - let data = transmute::<~RequestData, *c_void>(data); - uvll::set_data_for_req(req, data); - } - } + extern fn getaddrinfo_cb(req: *uvll::uv_getaddrinfo_t, + status: c_int, + res: *uvll::addrinfo) { + let req = Request::wrap(req); + assert!(status != uvll::ECANCELED); + let cx: &mut Ctx = unsafe { req.get_data() }; + cx.status = status; + cx.addrinfo = Some(Addrinfo { handle: res }); - fn get_req_data<'r>(&'r mut self) -> &'r mut RequestData { - unsafe { - let data = uvll::get_data_for_req(self.native_handle()); - let data = transmute::<&*c_void, &mut ~RequestData>(&data); - return &mut **data; + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(cx.slot.take_unwrap()); } } +} - fn delete(self) { - unsafe { - let data = uvll::get_data_for_req(self.native_handle()); - let _data = transmute::<*c_void, ~RequestData>(data); - uvll::set_data_for_req(self.native_handle(), null::<()>()); - uvll::free_req(self.native_handle()); - } +impl Drop for Addrinfo { + fn drop(&mut self) { + unsafe { uvll::uv_freeaddrinfo(self.handle) } } } @@ -184,15 +135,13 @@ fn each_ai_flag(_f: &fn(c_int, ai::Flag)) { } // Traverse the addrinfo linked list, producing a vector of Rust socket addresses -pub fn accum_addrinfo(addr: &net::UvAddrInfo) -> ~[ai::Info] { +pub fn accum_addrinfo(addr: &Addrinfo) -> ~[ai::Info] { unsafe { - let &net::UvAddrInfo(addr) = addr; - let mut addr = addr; + let mut addr = addr.handle; let mut addrs = ~[]; loop { - let uvaddr = net::sockaddr_to_UvSocketAddr((*addr).ai_addr); - let rustaddr = net::uv_socket_addr_to_socket_addr(uvaddr); + let rustaddr = net::sockaddr_to_socket_addr((*addr).ai_addr); let mut flags = 0; do each_ai_flag 
|cval, aival| { @@ -235,39 +184,27 @@ pub fn accum_addrinfo(addr: &net::UvAddrInfo) -> ~[ai::Info] { } } -impl NativeHandle<*uvll::uv_getaddrinfo_t> for GetAddrInfoRequest { - fn from_native_handle(handle: *uvll::uv_getaddrinfo_t) -> GetAddrInfoRequest { - GetAddrInfoRequest(handle) - } - fn native_handle(&self) -> *uvll::uv_getaddrinfo_t { - match self { &GetAddrInfoRequest(ptr) => ptr } - } -} - #[cfg(test)] mod test { - use Loop; use std::rt::io::net::ip::{SocketAddr, Ipv4Addr}; use super::*; + use super::super::local_loop; #[test] fn getaddrinfo_test() { - let mut loop_ = Loop::new(); - let mut req = GetAddrInfoRequest::new(); - do req.getaddrinfo(&loop_, Some("localhost"), None, None) |_, addrinfo, _| { - let sockaddrs = accum_addrinfo(addrinfo); - let mut found_local = false; - let local_addr = &SocketAddr { - ip: Ipv4Addr(127, 0, 0, 1), - port: 0 - }; - for addr in sockaddrs.iter() { - found_local = found_local || addr.address == *local_addr; + match GetAddrInfoRequest::run(local_loop(), Some("localhost"), None, None) { + Ok(infos) => { + let mut found_local = false; + let local_addr = &SocketAddr { + ip: Ipv4Addr(127, 0, 0, 1), + port: 0 + }; + for addr in infos.iter() { + found_local = found_local || addr.address == *local_addr; + } + assert!(found_local); } - assert!(found_local); + Err(e) => fail!("{:?}", e), } - loop_.run(); - loop_.close(); - req.delete(); } } diff --git a/src/librustuv/async.rs b/src/librustuv/async.rs index 4a1858ee03672..04e7bce5bd181 100644 --- a/src/librustuv/async.rs +++ b/src/librustuv/async.rs @@ -8,76 +8,155 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::cast; use std::libc::c_int; +use std::rt::rtio::{Callback, RemoteCallback}; +use std::unstable::sync::Exclusive; use uvll; -use super::{Watcher, Loop, NativeHandle, AsyncCallback, WatcherInterop}; -use super::status_to_maybe_uv_error; +use super::{Loop, UvHandle}; -pub struct AsyncWatcher(*uvll::uv_async_t); -impl Watcher for AsyncWatcher { } +// The entire point of async is to call into a loop from other threads so it +// does not need to home. +pub struct AsyncWatcher { + handle: *uvll::uv_async_t, + + // A flag to tell the callback to exit, set from the dtor. This is + // almost never contested - only in rare races with the dtor. + exit_flag: Exclusive +} + +struct Payload { + callback: ~Callback, + exit_flag: Exclusive, +} impl AsyncWatcher { - pub fn new(loop_: &mut Loop, cb: AsyncCallback) -> AsyncWatcher { + pub fn new(loop_: &mut Loop, cb: ~Callback) -> AsyncWatcher { + let handle = UvHandle::alloc(None::, uvll::UV_ASYNC); + assert_eq!(unsafe { + uvll::uv_async_init(loop_.handle, handle, async_cb) + }, 0); + let flag = Exclusive::new(false); + let payload = ~Payload { callback: cb, exit_flag: flag.clone() }; unsafe { - let handle = uvll::malloc_handle(uvll::UV_ASYNC); - assert!(handle.is_not_null()); - let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - let data = watcher.get_watcher_data(); - data.async_cb = Some(cb); - assert_eq!(0, uvll::async_init(loop_.native_handle(), handle, async_cb)); - return watcher; + let payload: *u8 = cast::transmute(payload); + uvll::set_data_for_uv_handle(handle, payload); } + return AsyncWatcher { handle: handle, exit_flag: flag, }; + } +} - extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) { - let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle); - let status = status_to_maybe_uv_error(status); - let data = watcher.get_watcher_data(); - let cb = data.async_cb.get_ref(); - (*cb)(watcher, status); - } +impl UvHandle for 
AsyncWatcher { + fn uv_handle(&self) -> *uvll::uv_async_t { self.handle } + unsafe fn from_uv_handle<'a>(_: &'a *uvll::uv_async_t) -> &'a mut AsyncWatcher { + fail!("async watchers can't be built from their handles"); } +} - pub fn send(&mut self) { - unsafe { - let handle = self.native_handle(); - uvll::async_send(handle); - } +extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) { + assert!(status == 0); + let payload: &mut Payload = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + + // The synchronization logic here is subtle. To review, + // the uv async handle type promises that, after it is + // triggered the remote callback is definitely called at + // least once. UvRemoteCallback needs to maintain those + // semantics while also shutting down cleanly from the + // dtor. In our case that means that, when the + // UvRemoteCallback dtor calls `async.send()`, here `f` is + // always called later. + + // In the dtor both the exit flag is set and the async + // callback fired under a lock. Here, before calling `f`, + // we take the lock and check the flag. Because we are + // checking the flag before calling `f`, and the flag is + // set under the same lock as the send, then if the flag + // is set then we're guaranteed to call `f` after the + // final send. + + // If the check was done after `f()` then there would be a + // period between that call and the check where the dtor + // could be called in the other thread, missing the final + // callback while still destroying the handle. 
+ + let should_exit = unsafe { + payload.exit_flag.with_imm(|&should_exit| should_exit) + }; + + payload.callback.call(); + + if should_exit { + unsafe { uvll::uv_close(handle, close_cb) } } } -impl NativeHandle<*uvll::uv_async_t> for AsyncWatcher { - fn from_native_handle(handle: *uvll::uv_async_t) -> AsyncWatcher { - AsyncWatcher(handle) +extern fn close_cb(handle: *uvll::uv_handle_t) { + // drop the payload + let _payload: ~Payload = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + // and then free the handle + unsafe { uvll::free_handle(handle) } +} + +impl RemoteCallback for AsyncWatcher { + fn fire(&mut self) { + unsafe { uvll::uv_async_send(self.handle) } } - fn native_handle(&self) -> *uvll::uv_async_t { - match self { &AsyncWatcher(ptr) => ptr } +} + +impl Drop for AsyncWatcher { + fn drop(&mut self) { + unsafe { + do self.exit_flag.with |should_exit| { + // NB: These two things need to happen atomically. Otherwise + // the event handler could wake up due to a *previous* + // signal and see the exit flag, destroying the handle + // before the final send. + *should_exit = true; + uvll::uv_async_send(self.handle) + } + } } } #[cfg(test)] -mod test { +mod test_remote { + use std::cell::Cell; + use std::rt::rtio::Callback; + use std::rt::thread::Thread; + use std::rt::tube::Tube; use super::*; - use Loop; - use std::unstable::run_in_bare_thread; - use std::rt::thread::Thread; - use std::cell::Cell; + use super::super::local_loop; + // Make sure that we can fire watchers in remote threads and that they + // actually trigger what they say they will. 
#[test] fn smoke_test() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let watcher = AsyncWatcher::new(&mut loop_, |w, _| w.close(||()) ); - let watcher_cell = Cell::new(watcher); - let thread = do Thread::start { - let mut watcher = watcher_cell.take(); - watcher.send(); - }; - loop_.run(); - loop_.close(); - thread.join(); + struct MyCallback(Option>); + impl Callback for MyCallback { + fn call(&mut self) { + // this can get called more than once, but we only want to send + // once + if self.is_some() { + self.take_unwrap().send(1); + } + } } + + let mut tube = Tube::new(); + let cb = ~MyCallback(Some(tube.clone())); + let watcher = Cell::new(AsyncWatcher::new(local_loop(), cb as ~Callback)); + + let thread = do Thread::start { + watcher.take().fire(); + }; + + assert_eq!(tube.recv(), 1); + thread.join(); } } diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index 8c9302e123815..a5848194d05bb 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -8,701 +8,560 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::ptr::null; -use std::c_str; use std::c_str::CString; +use std::c_str; use std::cast::transmute; +use std::cast; +use std::libc::{c_int, c_char, c_void, size_t}; use std::libc; -use std::libc::{c_int, c_char, c_void}; - -use super::{Request, NativeHandle, Loop, FsCallback, Buf, - status_to_maybe_uv_error, UvError}; +use std::rt::BlockedTask; +use std::rt::io::{FileStat, IoError}; +use std::rt::io; +use std::rt::local::Local; +use std::rt::rtio; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::task; +use std::vec; + +use super::{Loop, UvError, uv_error_to_io_error, wait_until_woken_after}; +use uvio::HomingIO; use uvll; -use uvll::*; -pub struct FsRequest(*uvll::uv_fs_t); -impl Request for FsRequest {} +pub struct FsRequest { + req: *uvll::uv_fs_t, + priv fired: bool, +} -pub struct RequestData { - priv complete_cb: Option +pub struct FileWatcher { + priv loop_: Loop, + priv fd: c_int, + priv close: rtio::CloseBehavior, + priv home: SchedHandle, } impl FsRequest { - pub fn new() -> FsRequest { - let fs_req = unsafe { malloc_req(UV_FS) }; - assert!(fs_req.is_not_null()); - let fs_req: FsRequest = NativeHandle::from_native_handle(fs_req); - fs_req - } - - pub fn open(self, loop_: &Loop, path: &CString, flags: int, mode: int, - cb: FsCallback) { - let complete_cb_ptr = { - let mut me = self; - me.req_boilerplate(Some(cb)) - }; - let ret = path.with_ref(|p| unsafe { - uvll::fs_open(loop_.native_handle(), - self.native_handle(), p, flags, mode, complete_cb_ptr) - }); - assert_eq!(ret, 0); - } - - pub fn open_sync(mut self, loop_: &Loop, path: &CString, - flags: int, mode: int) -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let result = path.with_ref(|p| unsafe { - uvll::fs_open(loop_.native_handle(), - self.native_handle(), p, flags, mode, complete_cb_ptr) - }); - self.sync_cleanup(result) - } - - pub fn unlink(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let ret = 
path.with_ref(|p| unsafe { - uvll::fs_unlink(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }); - assert_eq!(ret, 0); - } - - pub fn unlink_sync(mut self, loop_: &Loop, path: &CString) - -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let result = path.with_ref(|p| unsafe { - uvll::fs_unlink(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }); - self.sync_cleanup(result) - } - - pub fn lstat(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let ret = path.with_ref(|p| unsafe { - uvll::uv_fs_lstat(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }); - assert_eq!(ret, 0); - } - - pub fn stat(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let ret = path.with_ref(|p| unsafe { - uvll::fs_stat(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }); - assert_eq!(ret, 0); - } - - pub fn write(mut self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let base_ptr = buf.base as *c_void; - let len = buf.len as uint; - let ret = unsafe { - uvll::fs_write(loop_.native_handle(), self.native_handle(), - fd, base_ptr, - len, offset, complete_cb_ptr) - }; - assert_eq!(ret, 0); - } - pub fn write_sync(mut self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64) - -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let base_ptr = buf.base as *c_void; - let len = buf.len as uint; - let result = unsafe { - uvll::fs_write(loop_.native_handle(), self.native_handle(), - fd, base_ptr, - len, offset, complete_cb_ptr) - }; - self.sync_cleanup(result) - } - - pub fn read(mut self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let buf_ptr = buf.base as *c_void; - let len = buf.len as uint; - let ret 
= unsafe { - uvll::fs_read(loop_.native_handle(), self.native_handle(), - fd, buf_ptr, - len, offset, complete_cb_ptr) - }; - assert_eq!(ret, 0); - } - pub fn read_sync(mut self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64) - -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let buf_ptr = buf.base as *c_void; - let len = buf.len as uint; - let result = unsafe { - uvll::fs_read(loop_.native_handle(), self.native_handle(), - fd, buf_ptr, - len, offset, complete_cb_ptr) - }; - self.sync_cleanup(result) - } - - pub fn close(mut self, loop_: &Loop, fd: c_int, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::fs_close(loop_.native_handle(), self.native_handle(), - fd, complete_cb_ptr) - }, 0); - } - pub fn close_sync(mut self, loop_: &Loop, - fd: c_int) -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let result = unsafe { - uvll::fs_close(loop_.native_handle(), self.native_handle(), - fd, complete_cb_ptr) - }; - self.sync_cleanup(result) - } - - pub fn mkdir(mut self, loop_: &Loop, path: &CString, mode: c_int, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { - uvll::fs_mkdir(loop_.native_handle(), - self.native_handle(), p, mode, complete_cb_ptr) - }), 0); - } - - pub fn rmdir(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { - uvll::fs_rmdir(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }), 0); - } - - pub fn rename(mut self, loop_: &Loop, path: &CString, to: &CString, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::fs_rename(loop_.native_handle(), - self.native_handle(), - path.with_ref(|p| p), - to.with_ref(|p| p), - complete_cb_ptr) - }, 0); - } - - pub fn chmod(mut self, loop_: &Loop, path: &CString, mode: c_int, - cb: 
FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { - uvll::fs_chmod(loop_.native_handle(), self.native_handle(), p, mode, - complete_cb_ptr) - }), 0); - } - - pub fn readdir(mut self, loop_: &Loop, path: &CString, - flags: c_int, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { - uvll::fs_readdir(loop_.native_handle(), - self.native_handle(), p, flags, complete_cb_ptr) - }), 0); - } - - pub fn readlink(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { - uvll::uv_fs_readlink(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }), 0); - } - - pub fn chown(mut self, loop_: &Loop, path: &CString, uid: int, gid: int, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { - uvll::uv_fs_chown(loop_.native_handle(), - self.native_handle(), p, + pub fn open(loop_: &Loop, path: &CString, flags: int, mode: int) + -> Result + { + execute(|req, cb| unsafe { + uvll::uv_fs_open(loop_.handle, + req, path.with_ref(|p| p), flags as c_int, + mode as c_int, cb) + }).map(|req| + FileWatcher::new(*loop_, req.get_result() as c_int, + rtio::CloseSynchronously) + ) + } + + pub fn unlink(loop_: &Loop, path: &CString) -> Result<(), UvError> { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_unlink(loop_.handle, req, path.with_ref(|p| p), + cb) + }) + } + + pub fn lstat(loop_: &Loop, path: &CString) -> Result { + execute(|req, cb| unsafe { + uvll::uv_fs_lstat(loop_.handle, req, path.with_ref(|p| p), + cb) + }).map(|req| req.mkstat()) + } + + pub fn stat(loop_: &Loop, path: &CString) -> Result { + execute(|req, cb| unsafe { + uvll::uv_fs_stat(loop_.handle, req, path.with_ref(|p| p), + cb) + }).map(|req| req.mkstat()) + } + + pub fn write(loop_: &Loop, fd: c_int, buf: &[u8], offset: i64) + -> 
Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_write(loop_.handle, req, + fd, vec::raw::to_ptr(buf) as *c_void, + buf.len() as size_t, offset, cb) + }) + } + + pub fn read(loop_: &Loop, fd: c_int, buf: &mut [u8], offset: i64) + -> Result + { + do execute(|req, cb| unsafe { + uvll::uv_fs_read(loop_.handle, req, + fd, vec::raw::to_ptr(buf) as *c_void, + buf.len() as size_t, offset, cb) + }).map |req| { + req.get_result() as int + } + } + + pub fn mkdir(loop_: &Loop, path: &CString, mode: c_int) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_mkdir(loop_.handle, req, path.with_ref(|p| p), + mode, cb) + }) + } + + pub fn rmdir(loop_: &Loop, path: &CString) -> Result<(), UvError> { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_rmdir(loop_.handle, req, path.with_ref(|p| p), + cb) + }) + } + + pub fn rename(loop_: &Loop, path: &CString, to: &CString) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_rename(loop_.handle, + req, + path.with_ref(|p| p), + to.with_ref(|p| p), + cb) + }) + } + + pub fn chmod(loop_: &Loop, path: &CString, mode: c_int) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_chmod(loop_.handle, req, path.with_ref(|p| p), + mode, cb) + }) + } + + pub fn readdir(loop_: &Loop, path: &CString, flags: c_int) + -> Result<~[Path], UvError> + { + execute(|req, cb| unsafe { + uvll::uv_fs_readdir(loop_.handle, + req, path.with_ref(|p| p), flags, cb) + }).map(|req| unsafe { + let mut paths = ~[]; + let path = CString::new(path.with_ref(|p| p), false); + let parent = Path::new(path); + do c_str::from_c_multistring(req.get_ptr() as *libc::c_char, + Some(req.get_result() as uint)) |rel| { + let p = rel.as_bytes(); + paths.push(parent.join(p.slice_to(rel.len()))); + }; + paths + }) + } + + pub fn readlink(loop_: &Loop, path: &CString) -> Result { + do execute(|req, cb| unsafe { + uvll::uv_fs_readlink(loop_.handle, req, + path.with_ref(|p| p), cb) + }).map |req| { 
+ Path::new(unsafe { + CString::new(req.get_ptr() as *libc::c_char, false) + }) + } + } + + pub fn chown(loop_: &Loop, path: &CString, uid: int, gid: int) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_chown(loop_.handle, + req, path.with_ref(|p| p), uid as uvll::uv_uid_t, gid as uvll::uv_gid_t, - complete_cb_ptr) - }), 0); + cb) + }) } - pub fn truncate(mut self, loop_: &Loop, file: c_int, offset: i64, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_ftruncate(loop_.native_handle(), - self.native_handle(), file, offset, - complete_cb_ptr) - }, 0); + pub fn truncate(loop_: &Loop, file: c_int, offset: i64) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_ftruncate(loop_.handle, req, file, offset, cb) + }) } - pub fn link(mut self, loop_: &Loop, src: &CString, dst: &CString, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_link(loop_.native_handle(), self.native_handle(), + pub fn link(loop_: &Loop, src: &CString, dst: &CString) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_link(loop_.handle, req, src.with_ref(|p| p), dst.with_ref(|p| p), - complete_cb_ptr) - }, 0); + cb) + }) } - pub fn symlink(mut self, loop_: &Loop, src: &CString, dst: &CString, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_symlink(loop_.native_handle(), self.native_handle(), + pub fn symlink(loop_: &Loop, src: &CString, dst: &CString) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_symlink(loop_.handle, req, src.with_ref(|p| p), dst.with_ref(|p| p), - 0, - complete_cb_ptr) - }, 0); - } - - pub fn fsync(mut self, loop_: &Loop, fd: c_int, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_fsync(loop_.native_handle(), self.native_handle(), 
fd, - complete_cb_ptr) - }, 0); - } - - pub fn datasync(mut self, loop_: &Loop, fd: c_int, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_fdatasync(loop_.native_handle(), self.native_handle(), fd, - complete_cb_ptr) - }, 0); - } - - // accessors/utility funcs - fn sync_cleanup(self, result: c_int) - -> Result { - self.cleanup_and_delete(); - match status_to_maybe_uv_error(result as i32) { - Some(err) => Err(err), - None => Ok(result) - } - } - fn req_boilerplate(&mut self, cb: Option) -> *u8 { - let result = match cb { - Some(_) => { - compl_cb as *u8 - }, - None => 0 as *u8 - }; - self.install_req_data(cb); - result - } - pub fn install_req_data(&mut self, cb: Option) { - let fs_req = (self.native_handle()) as *uvll::uv_write_t; - let data = ~RequestData { - complete_cb: cb - }; - unsafe { - let data = transmute::<~RequestData, *c_void>(data); - uvll::set_data_for_req(fs_req, data); - } + 0, cb) + }) } - fn get_req_data<'r>(&'r mut self) -> &'r mut RequestData { - unsafe { - let data = uvll::get_data_for_req((self.native_handle())); - let data = transmute::<&*c_void, &mut ~RequestData>(&data); - &mut **data - } + pub fn fsync(loop_: &Loop, fd: c_int) -> Result<(), UvError> { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_fsync(loop_.handle, req, fd, cb) + }) } - pub fn get_path(&self) -> *c_char { - unsafe { uvll::get_path_from_fs_req(self.native_handle()) } + pub fn datasync(loop_: &Loop, fd: c_int) -> Result<(), UvError> { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_fdatasync(loop_.handle, req, fd, cb) + }) } - pub fn get_result(&self) -> c_int { - unsafe { uvll::get_result_from_fs_req(self.native_handle()) } + pub fn utime(loop_: &Loop, path: &CString, atime: u64, mtime: u64) + -> Result<(), UvError> + { + // libuv takes seconds + let atime = atime as libc::c_double / 1000.0; + let mtime = mtime as libc::c_double / 1000.0; + execute_nop(|req, cb| unsafe { + uvll::uv_fs_utime(loop_.handle, req, 
path.with_ref(|p| p), + atime, mtime, cb) + }) } - pub fn get_loop(&self) -> Loop { - unsafe { Loop{handle:uvll::get_loop_from_fs_req(self.native_handle())} } + pub fn get_result(&self) -> c_int { + unsafe { uvll::get_result_from_fs_req(self.req) } } - pub fn get_stat(&self) -> uv_stat_t { - let stat = uv_stat_t::new(); - unsafe { uvll::populate_stat(self.native_handle(), &stat); } + pub fn get_stat(&self) -> uvll::uv_stat_t { + let stat = uvll::uv_stat_t::new(); + unsafe { uvll::populate_stat(self.req, &stat); } stat } pub fn get_ptr(&self) -> *libc::c_void { + unsafe { uvll::get_ptr_from_fs_req(self.req) } + } + + pub fn mkstat(&self) -> FileStat { + let path = unsafe { uvll::get_path_from_fs_req(self.req) }; + let path = unsafe { Path::new(CString::new(path, false)) }; + let stat = self.get_stat(); + fn to_msec(stat: uvll::uv_timespec_t) -> u64 { + // Be sure to cast to u64 first to prevent overflowing if the tv_sec + // field is a 32-bit integer. + (stat.tv_sec as u64) * 1000 + (stat.tv_nsec as u64) / 1000000 + } + let kind = match (stat.st_mode as c_int) & libc::S_IFMT { + libc::S_IFREG => io::TypeFile, + libc::S_IFDIR => io::TypeDirectory, + libc::S_IFIFO => io::TypeNamedPipe, + libc::S_IFBLK => io::TypeBlockSpecial, + libc::S_IFLNK => io::TypeSymlink, + _ => io::TypeUnknown, + }; + FileStat { + path: path, + size: stat.st_size as u64, + kind: kind, + perm: (stat.st_mode as io::FilePermission) & io::AllPermissions, + created: to_msec(stat.st_birthtim), + modified: to_msec(stat.st_mtim), + accessed: to_msec(stat.st_atim), + unstable: io::UnstableFileStat { + device: stat.st_dev as u64, + inode: stat.st_ino as u64, + rdev: stat.st_rdev as u64, + nlink: stat.st_nlink as u64, + uid: stat.st_uid as u64, + gid: stat.st_gid as u64, + blksize: stat.st_blksize as u64, + blocks: stat.st_blocks as u64, + flags: stat.st_flags as u64, + gen: stat.st_gen as u64, + } + } + } +} + +impl Drop for FsRequest { + fn drop(&mut self) { unsafe { - 
uvll::get_ptr_from_fs_req(self.native_handle()) + if self.fired { + uvll::uv_fs_req_cleanup(self.req); + } + uvll::free_req(self.req); } } +} - pub fn each_path(&mut self, f: &fn(&CString)) { - let ptr = self.get_ptr(); - match self.get_result() { - n if (n <= 0) => {} - n => { - let n_len = n as uint; - // we pass in the len that uv tells us is there - // for the entries and we don't continue past that.. - // it appears that sometimes the multistring isn't - // correctly delimited and we stray into garbage memory? - // in any case, passing Some(n_len) fixes it and ensures - // good results - unsafe { - c_str::from_c_multistring(ptr as *libc::c_char, - Some(n_len), f); +fn execute(f: &fn(*uvll::uv_fs_t, uvll::uv_fs_cb) -> c_int) + -> Result +{ + return do task::unkillable { + let mut req = FsRequest { + fired: false, + req: unsafe { uvll::malloc_req(uvll::UV_FS) } + }; + match f(req.req, fs_cb) { + 0 => { + req.fired = true; + let mut slot = None; + do wait_until_woken_after(&mut slot) { + unsafe { uvll::set_data_for_req(req.req, &slot) } + } + match req.get_result() { + n if n < 0 => Err(UvError(n)), + _ => Ok(req), } } + n => Err(UvError(n)) + } + }; + + extern fn fs_cb(req: *uvll::uv_fs_t) { + let slot: &mut Option = unsafe { + cast::transmute(uvll::get_data_for_req(req)) + }; + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(slot.take_unwrap()); } +} - fn cleanup_and_delete(self) { - unsafe { - let data = uvll::get_data_for_req(self.native_handle()); - let _data = transmute::<*c_void, ~RequestData>(data); - uvll::set_data_for_req(self.native_handle(), null::<()>()); - uvll::fs_req_cleanup(self.native_handle()); - free_req(self.native_handle() as *c_void) +fn execute_nop(f: &fn(*uvll::uv_fs_t, uvll::uv_fs_cb) -> c_int) + -> Result<(), UvError> +{ + execute(f).map(|_| {}) +} + +impl HomingIO for FileWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl FileWatcher { + pub fn new(loop_: Loop, fd: 
c_int, close: rtio::CloseBehavior) -> FileWatcher { + FileWatcher { + loop_: loop_, + fd: fd, + close: close, + home: get_handle_to_current_scheduler!() } } -} -impl NativeHandle<*uvll::uv_fs_t> for FsRequest { - fn from_native_handle(handle: *uvll:: uv_fs_t) -> FsRequest { - FsRequest(handle) + fn base_read(&mut self, buf: &mut [u8], offset: i64) -> Result { + let _m = self.fire_homing_missile(); + let r = FsRequest::read(&self.loop_, self.fd, buf, offset); + r.map_err(uv_error_to_io_error) } - fn native_handle(&self) -> *uvll::uv_fs_t { - match self { &FsRequest(ptr) => ptr } + fn base_write(&mut self, buf: &[u8], offset: i64) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + let r = FsRequest::write(&self.loop_, self.fd, buf, offset); + r.map_err(uv_error_to_io_error) + } + fn seek_common(&mut self, pos: i64, whence: c_int) -> + Result{ + #[fixed_stack_segment]; #[inline(never)]; + unsafe { + match libc::lseek(self.fd, pos as libc::off_t, whence) { + -1 => { + Err(IoError { + kind: io::OtherIoError, + desc: "Failed to lseek.", + detail: None + }) + }, + n => Ok(n as u64) + } + } } } -fn sync_cleanup(result: int) - -> Result { - match status_to_maybe_uv_error(result as i32) { - Some(err) => Err(err), - None => Ok(result) +impl Drop for FileWatcher { + fn drop(&mut self) { + let _m = self.fire_homing_missile(); + match self.close { + rtio::DontClose => {} + rtio::CloseAsynchronously => { + unsafe { + let req = uvll::malloc_req(uvll::UV_FS); + uvll::uv_fs_close(self.loop_.handle, req, self.fd, close_cb); + } + + extern fn close_cb(req: *uvll::uv_fs_t) { + unsafe { + uvll::uv_fs_req_cleanup(req); + uvll::free_req(req); + } + } + } + rtio::CloseSynchronously => { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_close(self.loop_.handle, req, self.fd, cb) + }); + } + } } } -extern fn compl_cb(req: *uv_fs_t) { - let mut req: FsRequest = NativeHandle::from_native_handle(req); - // pull the user cb out of the req data - let cb = { - let data = 
req.get_req_data(); - assert!(data.complete_cb.is_some()); - // option dance, option dance. oooooh yeah. - data.complete_cb.take_unwrap() - }; - // in uv_fs_open calls, the result will be the fd in the - // case of success, otherwise it's -1 indicating an error - let result = req.get_result(); - let status = status_to_maybe_uv_error(result); - // we have a req and status, call the user cb.. - // only giving the user a ref to the FsRequest, as we - // have to clean it up, afterwards (and they aren't really - // reusable, anyways - cb(&mut req, status); - // clean up the req (and its data!) after calling the user cb - req.cleanup_and_delete(); +impl rtio::RtioFileStream for FileWatcher { + fn read(&mut self, buf: &mut [u8]) -> Result { + self.base_read(buf, -1) + } + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + self.base_write(buf, -1) + } + fn pread(&mut self, buf: &mut [u8], offset: u64) -> Result { + self.base_read(buf, offset as i64) + } + fn pwrite(&mut self, buf: &[u8], offset: u64) -> Result<(), IoError> { + self.base_write(buf, offset as i64) + } + fn seek(&mut self, pos: i64, whence: io::SeekStyle) -> Result { + use std::libc::{SEEK_SET, SEEK_CUR, SEEK_END}; + let whence = match whence { + io::SeekSet => SEEK_SET, + io::SeekCur => SEEK_CUR, + io::SeekEnd => SEEK_END + }; + self.seek_common(pos, whence) + } + fn tell(&self) -> Result { + use std::libc::SEEK_CUR; + // this is temporary + let self_ = unsafe { cast::transmute_mut(self) }; + self_.seek_common(0, SEEK_CUR) + } + fn fsync(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + FsRequest::fsync(&self.loop_, self.fd).map_err(uv_error_to_io_error) + } + fn datasync(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + FsRequest::datasync(&self.loop_, self.fd).map_err(uv_error_to_io_error) + } + fn truncate(&mut self, offset: i64) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + let r = FsRequest::truncate(&self.loop_, self.fd, 
offset); + r.map_err(uv_error_to_io_error) + } } #[cfg(test)] mod test { - use super::*; - //use std::rt::test::*; - use std::libc::{STDOUT_FILENO, c_int}; - use std::vec; - use std::str; - use std::unstable::run_in_bare_thread; - use super::super::{Loop, Buf, slice_to_uv_buf}; + use std::libc::c_int; use std::libc::{O_CREAT, O_RDWR, O_RDONLY, S_IWUSR, S_IRUSR}; - - #[test] - fn file_test_full_simple() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let create_flags = O_RDWR | O_CREAT; - let read_flags = O_RDONLY; - // 0644 BZZT! WRONG! 0600! See below. - let mode = S_IWUSR |S_IRUSR; - // these aren't defined in std::libc :( - //map_mode(S_IRGRP) | - //map_mode(S_IROTH); - let path_str = "./tmp/file_full_simple.txt"; - let write_val = "hello".as_bytes().to_owned(); - let write_buf = slice_to_uv_buf(write_val); - let write_buf_ptr: *Buf = &write_buf; - let read_buf_len = 1028; - let read_mem = vec::from_elem(read_buf_len, 0u8); - let read_buf = slice_to_uv_buf(read_mem); - let read_buf_ptr: *Buf = &read_buf; - let open_req = FsRequest::new(); - do open_req.open(&loop_, &path_str.to_c_str(), create_flags as int, - mode as int) |req, uverr| { - assert!(uverr.is_none()); - let fd = req.get_result(); - let buf = unsafe { *write_buf_ptr }; - let write_req = FsRequest::new(); - do write_req.write(&req.get_loop(), fd, buf, -1) |req, uverr| { - let close_req = FsRequest::new(); - do close_req.close(&req.get_loop(), fd) |req, _| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let open_req = FsRequest::new(); - do open_req.open(&loop_, &path_str.to_c_str(), - read_flags as int,0) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let fd = req.get_result(); - let read_buf = unsafe { *read_buf_ptr }; - let read_req = FsRequest::new(); - do read_req.read(&loop_, fd, read_buf, 0) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - // we know nread >=0 because uverr is none.. 
- let nread = req.get_result() as uint; - // nread == 0 would be EOF - if nread > 0 { - let read_str = unsafe { - let read_buf = *read_buf_ptr; - str::from_utf8( - vec::from_buf( - read_buf.base, nread)) - }; - assert!(read_str == ~"hello"); - let close_req = FsRequest::new(); - do close_req.close(&loop_, fd) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = &req.get_loop(); - let unlink_req = FsRequest::new(); - do unlink_req.unlink(loop_, - &path_str.to_c_str()) - |_,uverr| { - assert!(uverr.is_none()); - }; - }; - }; - }; - }; - }; - }; - }; - loop_.run(); - loop_.close(); - } - } + use std::rt::io; + use std::str; + use std::vec; + use super::*; + use l = super::super::local_loop; #[test] fn file_test_full_simple_sync() { - do run_in_bare_thread { - // setup - let mut loop_ = Loop::new(); - let create_flags = O_RDWR | - O_CREAT; - let read_flags = O_RDONLY; - // 0644 - let mode = S_IWUSR | - S_IRUSR; - //S_IRGRP | - //S_IROTH; - let path_str = "./tmp/file_full_simple_sync.txt"; - let write_val = "hello".as_bytes().to_owned(); - let write_buf = slice_to_uv_buf(write_val); + let create_flags = O_RDWR | O_CREAT; + let read_flags = O_RDONLY; + let mode = S_IWUSR | S_IRUSR; + let path_str = "./tmp/file_full_simple_sync.txt"; + + { // open/create - let open_req = FsRequest::new(); - let result = open_req.open_sync(&loop_, &path_str.to_c_str(), - create_flags as int, mode as int); + let result = FsRequest::open(l(), &path_str.to_c_str(), + create_flags as int, mode as int); assert!(result.is_ok()); - let fd = result.unwrap(); + let result = result.unwrap(); + let fd = result.fd; + // write - let write_req = FsRequest::new(); - let result = write_req.write_sync(&loop_, fd, write_buf, -1); - assert!(result.is_ok()); - // close - let close_req = FsRequest::new(); - let result = close_req.close_sync(&loop_, fd); + let result = FsRequest::write(l(), fd, "hello".as_bytes(), -1); assert!(result.is_ok()); + } + + { // re-open - let open_req = FsRequest::new(); - let 
result = open_req.open_sync(&loop_, &path_str.to_c_str(), - read_flags as int,0); + let result = FsRequest::open(l(), &path_str.to_c_str(), + read_flags as int, 0); assert!(result.is_ok()); - let len = 1028; - let fd = result.unwrap(); + let result = result.unwrap(); + let fd = result.fd; + // read - let read_mem: ~[u8] = vec::from_elem(len, 0u8); - let buf = slice_to_uv_buf(read_mem); - let read_req = FsRequest::new(); - let result = read_req.read_sync(&loop_, fd, buf, 0); + let mut read_mem = vec::from_elem(1000, 0u8); + let result = FsRequest::read(l(), fd, read_mem, 0); assert!(result.is_ok()); + let nread = result.unwrap(); - // nread == 0 would be EOF.. we know it's >= zero because otherwise - // the above assert would fail - if nread > 0 { - let read_str = str::from_utf8( - read_mem.slice(0, nread as uint)); - assert!(read_str == ~"hello"); - // close - let close_req = FsRequest::new(); - let result = close_req.close_sync(&loop_, fd); - assert!(result.is_ok()); - // unlink - let unlink_req = FsRequest::new(); - let result = unlink_req.unlink_sync(&loop_, &path_str.to_c_str()); - assert!(result.is_ok()); - } else { fail!("nread was 0.. 
wudn't expectin' that."); } - loop_.close(); + assert!(nread > 0); + let read_str = str::from_utf8(read_mem.slice(0, nread as uint)); + assert_eq!(read_str, ~"hello"); } + // unlink + let result = FsRequest::unlink(l(), &path_str.to_c_str()); + assert!(result.is_ok()); } - fn naive_print(loop_: &Loop, input: &str) { - let write_val = input.as_bytes(); - let write_buf = slice_to_uv_buf(write_val); - let write_req = FsRequest::new(); - write_req.write_sync(loop_, STDOUT_FILENO, write_buf, -1); - } - - #[test] - fn file_test_write_to_stdout() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - naive_print(&loop_, "zanzibar!\n"); - loop_.run(); - loop_.close(); - }; - } #[test] - fn file_test_stat_simple() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let path = "./tmp/file_test_stat_simple.txt"; - let create_flags = O_RDWR | - O_CREAT; - let mode = S_IWUSR | - S_IRUSR; - let write_val = "hello".as_bytes().to_owned(); - let write_buf = slice_to_uv_buf(write_val); - let write_buf_ptr: *Buf = &write_buf; - let open_req = FsRequest::new(); - do open_req.open(&loop_, &path.to_c_str(), create_flags as int, - mode as int) |req, uverr| { - assert!(uverr.is_none()); - let fd = req.get_result(); - let buf = unsafe { *write_buf_ptr }; - let write_req = FsRequest::new(); - do write_req.write(&req.get_loop(), fd, buf, 0) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat_req = FsRequest::new(); - do stat_req.stat(&loop_, &path.to_c_str()) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat = req.get_stat(); - let sz: uint = stat.st_size as uint; - assert!(sz > 0); - let close_req = FsRequest::new(); - do close_req.close(&loop_, fd) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let unlink_req = FsRequest::new(); - do unlink_req.unlink(&loop_, - &path.to_c_str()) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat_req = 
FsRequest::new(); - do stat_req.stat(&loop_, - &path.to_c_str()) |_, uverr| { - // should cause an error because the - // file doesn't exist anymore - assert!(uverr.is_some()); - }; - }; - }; - }; - }; - }; - loop_.run(); - loop_.close(); - } + fn file_test_stat() { + let path = &"./tmp/file_test_stat_simple".to_c_str(); + let create_flags = (O_RDWR | O_CREAT) as int; + let mode = (S_IWUSR | S_IRUSR) as int; + + let result = FsRequest::open(l(), path, create_flags, mode); + assert!(result.is_ok()); + let file = result.unwrap(); + + let result = FsRequest::write(l(), file.fd, "hello".as_bytes(), 0); + assert!(result.is_ok()); + + let result = FsRequest::stat(l(), path); + assert!(result.is_ok()); + assert_eq!(result.unwrap().size, 5); + + fn free(_: T) {} + free(file); + + let result = FsRequest::unlink(l(), path); + assert!(result.is_ok()); } #[test] fn file_test_mk_rm_dir() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let path = "./tmp/mk_rm_dir"; - let mode = S_IWUSR | - S_IRUSR; - let mkdir_req = FsRequest::new(); - do mkdir_req.mkdir(&loop_, &path.to_c_str(), - mode as c_int) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat_req = FsRequest::new(); - do stat_req.stat(&loop_, &path.to_c_str()) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat = req.get_stat(); - naive_print(&loop_, format!("{:?}", stat)); - assert!(stat.is_dir()); - let rmdir_req = FsRequest::new(); - do rmdir_req.rmdir(&loop_, &path.to_c_str()) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat_req = FsRequest::new(); - do stat_req.stat(&loop_, &path.to_c_str()) |_req, uverr| { - assert!(uverr.is_some()); - } - } - } - } - loop_.run(); - loop_.close(); - } + let path = &"./tmp/mk_rm_dir".to_c_str(); + let mode = S_IWUSR | S_IRUSR; + + let result = FsRequest::mkdir(l(), path, mode); + assert!(result.is_ok()); + + let result = FsRequest::stat(l(), path); + assert!(result.is_ok()); + 
assert!(result.unwrap().kind == io::TypeDirectory); + + let result = FsRequest::rmdir(l(), path); + assert!(result.is_ok()); + + let result = FsRequest::stat(l(), path); + assert!(result.is_err()); } + #[test] fn file_test_mkdir_chokes_on_double_create() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let path = "./tmp/double_create_dir"; - let mode = S_IWUSR | - S_IRUSR; - let mkdir_req = FsRequest::new(); - do mkdir_req.mkdir(&loop_, &path.to_c_str(), mode as c_int) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let mkdir_req = FsRequest::new(); - do mkdir_req.mkdir(&loop_, &path.to_c_str(), - mode as c_int) |req,uverr| { - assert!(uverr.is_some()); - let loop_ = req.get_loop(); - let _stat = req.get_stat(); - let rmdir_req = FsRequest::new(); - do rmdir_req.rmdir(&loop_, &path.to_c_str()) |req,uverr| { - assert!(uverr.is_none()); - let _loop = req.get_loop(); - } - } - } - loop_.run(); - loop_.close(); - } + let path = &"./tmp/double_create_dir".to_c_str(); + let mode = S_IWUSR | S_IRUSR; + + let result = FsRequest::stat(l(), path); + assert!(result.is_err(), "{:?}", result); + let result = FsRequest::mkdir(l(), path, mode as c_int); + assert!(result.is_ok(), "{:?}", result); + let result = FsRequest::mkdir(l(), path, mode as c_int); + assert!(result.is_err(), "{:?}", result); + let result = FsRequest::rmdir(l(), path); + assert!(result.is_ok(), "{:?}", result); } + #[test] fn file_test_rmdir_chokes_on_nonexistant_path() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let path = "./tmp/never_existed_dir"; - let rmdir_req = FsRequest::new(); - do rmdir_req.rmdir(&loop_, &path.to_c_str()) |_req, uverr| { - assert!(uverr.is_some()); - } - loop_.run(); - loop_.close(); - } + let path = &"./tmp/never_existed_dir".to_c_str(); + let result = FsRequest::rmdir(l(), path); + assert!(result.is_err()); } } diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index 4f606b5f01f8a..80481498881c4 100644 --- 
a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -8,130 +8,161 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::libc::c_int; +use std::cast; +use std::libc::{c_int, c_void}; use uvll; -use super::{Watcher, Loop, NativeHandle, IdleCallback, status_to_maybe_uv_error}; - -pub struct IdleWatcher(*uvll::uv_idle_t); -impl Watcher for IdleWatcher { } +use super::{Loop, UvHandle}; +use std::rt::rtio::{Callback, PausibleIdleCallback}; + +pub struct IdleWatcher { + handle: *uvll::uv_idle_t, + idle_flag: bool, + closed: bool, + callback: ~Callback, +} impl IdleWatcher { - pub fn new(loop_: &mut Loop) -> IdleWatcher { + pub fn new(loop_: &mut Loop, cb: ~Callback) -> ~IdleWatcher { + let handle = UvHandle::alloc(None::, uvll::UV_IDLE); + assert_eq!(unsafe { + uvll::uv_idle_init(loop_.handle, handle) + }, 0); + let me = ~IdleWatcher { + handle: handle, + idle_flag: false, + closed: false, + callback: cb, + }; + return me.install(); + } + + pub fn onetime(loop_: &mut Loop, f: proc()) { + let handle = UvHandle::alloc(None::, uvll::UV_IDLE); unsafe { - let handle = uvll::malloc_handle(uvll::UV_IDLE); - assert!(handle.is_not_null()); - assert_eq!(uvll::idle_init(loop_.native_handle(), handle), 0); - let mut watcher: IdleWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher + assert_eq!(uvll::uv_idle_init(loop_.handle, handle), 0); + let data: *c_void = cast::transmute(~f); + uvll::set_data_for_uv_handle(handle, data); + assert_eq!(uvll::uv_idle_start(handle, onetime_cb), 0) } - } - pub fn start(&mut self, cb: IdleCallback) { - { - let data = self.get_watcher_data(); - data.idle_cb = Some(cb); + extern fn onetime_cb(handle: *uvll::uv_idle_t, status: c_int) { + assert_eq!(status, 0); + unsafe { + let data = uvll::get_data_for_uv_handle(handle); + let f: ~proc() = cast::transmute(data); + (*f)(); + uvll::uv_idle_stop(handle); + uvll::uv_close(handle, close_cb); + } } 
- unsafe { - assert_eq!(uvll::idle_start(self.native_handle(), idle_cb), 0) + extern fn close_cb(handle: *uvll::uv_handle_t) { + unsafe { uvll::free_handle(handle) } } } +} - pub fn restart(&mut self) { - unsafe { - assert!(self.get_watcher_data().idle_cb.is_some()); - assert_eq!(uvll::idle_start(self.native_handle(), idle_cb), 0) +impl PausibleIdleCallback for IdleWatcher { + fn pause(&mut self) { + if self.idle_flag == true { + assert_eq!(unsafe {uvll::uv_idle_stop(self.handle) }, 0); + self.idle_flag = false; } } - - pub fn stop(&mut self) { - // NB: Not resetting the Rust idle_cb to None here because `stop` is - // likely called from *within* the idle callback, causing a use after - // free - - unsafe { - assert_eq!(uvll::idle_stop(self.native_handle()), 0); + fn resume(&mut self) { + if self.idle_flag == false { + assert_eq!(unsafe { uvll::uv_idle_start(self.handle, idle_cb) }, 0) + self.idle_flag = true; } } } -impl NativeHandle<*uvll::uv_idle_t> for IdleWatcher { - fn from_native_handle(handle: *uvll::uv_idle_t) -> IdleWatcher { - IdleWatcher(handle) - } - fn native_handle(&self) -> *uvll::uv_idle_t { - match self { &IdleWatcher(ptr) => ptr } - } +impl UvHandle for IdleWatcher { + fn uv_handle(&self) -> *uvll::uv_idle_t { self.handle } } extern fn idle_cb(handle: *uvll::uv_idle_t, status: c_int) { - let mut idle_watcher: IdleWatcher = NativeHandle::from_native_handle(handle); - let data = idle_watcher.get_watcher_data(); - let cb: &IdleCallback = data.idle_cb.get_ref(); - let status = status_to_maybe_uv_error(status); - (*cb)(idle_watcher, status); + assert_eq!(status, 0); + let idle: &mut IdleWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; + idle.callback.call(); +} + +impl Drop for IdleWatcher { + fn drop(&mut self) { + self.pause(); + self.close_async_(); + } } #[cfg(test)] mod test { - - use Loop; use super::*; - use std::unstable::run_in_bare_thread; + use std::rt::tube::Tube; + use std::rt::rtio::{Callback, PausibleIdleCallback}; + use 
super::super::local_loop; + + struct MyCallback(Tube, int); + impl Callback for MyCallback { + fn call(&mut self) { + match *self { + MyCallback(ref mut tube, val) => tube.send(val) + } + } + } #[test] - #[ignore(reason = "valgrind - loop destroyed before watcher?")] - fn idle_new_then_close() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let idle_watcher = { IdleWatcher::new(&mut loop_) }; - idle_watcher.close(||()); - } + fn not_used() { + let cb = ~MyCallback(Tube::new(), 1); + let _idle = IdleWatcher::new(local_loop(), cb as ~Callback); } #[test] - fn idle_smoke_test() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; - let mut count = 10; - let count_ptr: *mut int = &mut count; - do idle_watcher.start |idle_watcher, status| { - let mut idle_watcher = idle_watcher; - assert!(status.is_none()); - if unsafe { *count_ptr == 10 } { - idle_watcher.stop(); - idle_watcher.close(||()); - } else { - unsafe { *count_ptr = *count_ptr + 1; } - } - } - loop_.run(); - loop_.close(); - assert_eq!(count, 10); - } + fn smoke_test() { + let mut tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback); + idle.resume(); + tube.recv(); + } + + #[test] #[should_fail] + fn smoke_fail() { + let tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback); + idle.resume(); + fail!(); } #[test] - fn idle_start_stop_start() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; - do idle_watcher.start |idle_watcher, status| { - let mut idle_watcher = idle_watcher; - assert!(status.is_none()); - idle_watcher.stop(); - do idle_watcher.start |idle_watcher, status| { - assert!(status.is_none()); - let mut idle_watcher = idle_watcher; - idle_watcher.stop(); - idle_watcher.close(||()); - } - } - loop_.run(); - 
loop_.close(); - } + fn fun_combinations_of_methods() { + let mut tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback); + idle.resume(); + tube.recv(); + idle.pause(); + idle.resume(); + idle.resume(); + tube.recv(); + idle.pause(); + idle.pause(); + idle.resume(); + tube.recv(); + } + + #[test] + fn pause_pauses() { + let mut tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle1 = IdleWatcher::new(local_loop(), cb as ~Callback); + let cb = ~MyCallback(tube.clone(), 2); + let mut idle2 = IdleWatcher::new(local_loop(), cb as ~Callback); + idle2.resume(); + assert_eq!(tube.recv(), 2); + idle2.pause(); + idle1.resume(); + assert_eq!(tube.recv(), 1); } } diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index f0a607ae35f1c..edb1953b9b1c3 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -45,29 +45,31 @@ via `close` and `delete` methods. #[feature(macro_rules, globs)]; -use std::str::raw::from_c_str; -use std::vec; -use std::ptr; -use std::str; -use std::libc::{c_void, c_int, size_t, malloc, free}; use std::cast::transmute; +use std::cast; +use std::libc::{c_int, malloc}; use std::ptr::null; +use std::ptr; +use std::rt::BlockedTask; +use std::rt::local::Local; +use std::rt::sched::Scheduler; +use std::str::raw::from_c_str; +use std::str; +use std::task; use std::unstable::finally::Finally; -use std::rt::io::net::ip::SocketAddr; -use std::rt::io::signal::Signum; +use std::vec; use std::rt::io::IoError; -//#[cfg(test)] use unstable::run_in_bare_thread; - -pub use self::file::{FsRequest}; -pub use self::net::{StreamWatcher, TcpWatcher, UdpWatcher}; -pub use self::idle::IdleWatcher; -pub use self::timer::TimerWatcher; pub use self::async::AsyncWatcher; +pub use self::file::{FsRequest, FileWatcher}; +pub use self::idle::IdleWatcher; +pub use self::net::{TcpWatcher, TcpListener, TcpAcceptor, UdpWatcher}; +pub use self::pipe::{PipeWatcher, PipeListener, 
PipeAcceptor}; pub use self::process::Process; -pub use self::pipe::Pipe; pub use self::signal::SignalWatcher; +pub use self::timer::TimerWatcher; +pub use self::tty::TtyWatcher; mod macros; @@ -87,177 +89,194 @@ pub mod process; pub mod pipe; pub mod tty; pub mod signal; +pub mod stream; -/// XXX: Loop(*handle) is buggy with destructors. Normal structs -/// with dtors may not be destructured, but tuple structs can, -/// but the results are not correct. -pub struct Loop { - priv handle: *uvll::uv_loop_t -} +/// A type that wraps a uv handle +pub trait UvHandle { + fn uv_handle(&self) -> *T; -pub struct Handle(*uvll::uv_handle_t); + // FIXME(#8888) dummy self + fn alloc(_: Option, ty: uvll::uv_handle_type) -> *T { + unsafe { + let handle = uvll::malloc_handle(ty); + assert!(!handle.is_null()); + handle as *T + } + } -impl Watcher for Handle {} -impl NativeHandle<*uvll::uv_handle_t> for Handle { - fn from_native_handle(h: *uvll::uv_handle_t) -> Handle { Handle(h) } - fn native_handle(&self) -> *uvll::uv_handle_t { **self } -} + unsafe fn from_uv_handle<'a>(h: &'a *T) -> &'a mut Self { + cast::transmute(uvll::get_data_for_uv_handle(*h)) + } -/// The trait implemented by uv 'watchers' (handles). Watchers are -/// non-owning wrappers around the uv handles and are not completely -/// safe - there may be multiple instances for a single underlying -/// handle. Watchers are generally created, then `start`ed, `stop`ed -/// and `close`ed, but due to their complex life cycle may not be -/// entirely memory safe if used in unanticipated patterns. -pub trait Watcher { } + fn install(~self) -> ~Self { + unsafe { + let myptr = cast::transmute::<&~Self, &*u8>(&self); + uvll::set_data_for_uv_handle(self.uv_handle(), *myptr); + } + self + } -pub trait Request { } + fn close_async_(&mut self) { + // we used malloc to allocate all handles, so we must always have at + // least a callback to free all the handles we allocated. 
+ extern fn close_cb(handle: *uvll::uv_handle_t) { + unsafe { uvll::free_handle(handle) } + } -/// A type that wraps a native handle -pub trait NativeHandle { - fn from_native_handle(T) -> Self; - fn native_handle(&self) -> T; -} + unsafe { + uvll::set_data_for_uv_handle(self.uv_handle(), null::<()>()); + uvll::uv_close(self.uv_handle() as *uvll::uv_handle_t, close_cb) + } + } -impl Loop { - pub fn new() -> Loop { - let handle = unsafe { uvll::loop_new() }; - assert!(handle.is_not_null()); - NativeHandle::from_native_handle(handle) + fn close(&mut self) { + let mut slot = None; + + unsafe { + uvll::uv_close(self.uv_handle() as *uvll::uv_handle_t, close_cb); + uvll::set_data_for_uv_handle(self.uv_handle(), ptr::null::<()>()); + + do wait_until_woken_after(&mut slot) { + uvll::set_data_for_uv_handle(self.uv_handle(), &slot); + } + } + + extern fn close_cb(handle: *uvll::uv_handle_t) { + unsafe { + let data = uvll::get_data_for_uv_handle(handle); + uvll::free_handle(handle); + if data == ptr::null() { return } + let slot: &mut Option = cast::transmute(data); + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(slot.take_unwrap()); + } + } } +} - pub fn run(&mut self) { - unsafe { uvll::run(self.native_handle()) }; +pub struct ForbidSwitch { + msg: &'static str, + sched: uint, +} + +impl ForbidSwitch { + fn new(s: &'static str) -> ForbidSwitch { + ForbidSwitch { + msg: s, sched: Local::borrow(|s: &mut Scheduler| s.sched_id()) + } } +} - pub fn close(&mut self) { - unsafe { uvll::loop_delete(self.native_handle()) }; +impl Drop for ForbidSwitch { + fn drop(&mut self) { + assert!(self.sched == Local::borrow(|s: &mut Scheduler| s.sched_id()), + "didnt want a scheduler switch: {}", self.msg); } } -impl NativeHandle<*uvll::uv_loop_t> for Loop { - fn from_native_handle(handle: *uvll::uv_loop_t) -> Loop { - Loop { handle: handle } +pub struct ForbidUnwind { + msg: &'static str, + failing_before: bool, +} + +impl ForbidUnwind { + fn new(s: &'static 
str) -> ForbidUnwind { + ForbidUnwind { + msg: s, failing_before: task::failing(), + } } - fn native_handle(&self) -> *uvll::uv_loop_t { - self.handle +} + +impl Drop for ForbidUnwind { + fn drop(&mut self) { + assert!(self.failing_before == task::failing(), + "didnt want an unwind during: {}", self.msg); } } -// XXX: The uv alloc callback also has a *uv_handle_t arg -pub type AllocCallback = ~fn(uint) -> Buf; -pub type ReadCallback = ~fn(StreamWatcher, int, Buf, Option); -pub type NullCallback = ~fn(); -pub type IdleCallback = ~fn(IdleWatcher, Option); -pub type ConnectionCallback = ~fn(StreamWatcher, Option); -pub type FsCallback = ~fn(&mut FsRequest, Option); -// first int is exit_status, second is term_signal -pub type ExitCallback = ~fn(Process, int, int, Option); -pub type TimerCallback = ~fn(TimerWatcher, Option); -pub type AsyncCallback = ~fn(AsyncWatcher, Option); -pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, SocketAddr, uint, Option); -pub type UdpSendCallback = ~fn(UdpWatcher, Option); -pub type SignalCallback = ~fn(SignalWatcher, Signum); - - -/// Callbacks used by StreamWatchers, set as custom data on the foreign handle. -/// XXX: Would be better not to have all watchers allocate room for all callback types. 
-struct WatcherData { - read_cb: Option, - write_cb: Option, - connect_cb: Option, - close_cb: Option, - alloc_cb: Option, - idle_cb: Option, - timer_cb: Option, - async_cb: Option, - udp_recv_cb: Option, - udp_send_cb: Option, - exit_cb: Option, - signal_cb: Option, +fn wait_until_woken_after(slot: *mut Option, f: &fn()) { + let _f = ForbidUnwind::new("wait_until_woken_after"); + unsafe { + assert!((*slot).is_none()); + let sched: ~Scheduler = Local::take(); + do sched.deschedule_running_task_and_then |_, task| { + f(); + *slot = Some(task); + } + } } -pub trait WatcherInterop { - fn event_loop(&self) -> Loop; - fn install_watcher_data(&mut self); - fn get_watcher_data<'r>(&'r mut self) -> &'r mut WatcherData; - fn drop_watcher_data(&mut self); - fn close(self, cb: NullCallback); - fn close_async(self); +pub struct Request { + handle: *uvll::uv_req_t, + priv defused: bool, } -impl> WatcherInterop for W { - /// Get the uv event loop from a Watcher - fn event_loop(&self) -> Loop { +impl Request { + pub fn new(ty: uvll::uv_req_type) -> Request { unsafe { - let handle = self.native_handle(); - let loop_ = uvll::get_loop_for_uv_handle(handle); - NativeHandle::from_native_handle(loop_) + let handle = uvll::malloc_req(ty); + uvll::set_data_for_req(handle, null::<()>()); + Request::wrap(handle) } } - fn install_watcher_data(&mut self) { - unsafe { - let data = ~WatcherData { - read_cb: None, - write_cb: None, - connect_cb: None, - close_cb: None, - alloc_cb: None, - idle_cb: None, - timer_cb: None, - async_cb: None, - udp_recv_cb: None, - udp_send_cb: None, - exit_cb: None, - signal_cb: None, - }; - let data = transmute::<~WatcherData, *c_void>(data); - uvll::set_data_for_uv_handle(self.native_handle(), data); - } + pub fn wrap(handle: *uvll::uv_req_t) -> Request { + Request { handle: handle, defused: false } } - fn get_watcher_data<'r>(&'r mut self) -> &'r mut WatcherData { - unsafe { - let data = uvll::get_data_for_uv_handle(self.native_handle()); - let data = 
transmute::<&*c_void, &mut ~WatcherData>(&data); - return &mut **data; - } + pub fn set_data(&self, t: *T) { + unsafe { uvll::set_data_for_req(self.handle, t) } } - fn drop_watcher_data(&mut self) { - unsafe { - let data = uvll::get_data_for_uv_handle(self.native_handle()); - let _data = transmute::<*c_void, ~WatcherData>(data); - uvll::set_data_for_uv_handle(self.native_handle(), null::<()>()); - } + pub unsafe fn get_data(&self) -> &'static mut T { + let data = uvll::get_data_for_req(self.handle); + assert!(data != null()); + cast::transmute(data) + } + + // This function should be used when the request handle has been given to an + // underlying uv function, and the uv function has succeeded. This means + // that uv will at some point invoke the callback, and in the meantime we + // can't deallocate the handle because libuv could be using it. + // + // This is still a problem in blocking situations due to linked failure. In + // the connection callback the handle should be re-wrapped with the `wrap` + // function to ensure its destruction. + pub fn defuse(&mut self) { + self.defused = true; } +} - fn close(mut self, cb: NullCallback) { - { - let data = self.get_watcher_data(); - assert!(data.close_cb.is_none()); - data.close_cb = Some(cb); +impl Drop for Request { + fn drop(&mut self) { + if !self.defused { + unsafe { uvll::free_req(self.handle) } } + } +} - unsafe { uvll::close(self.native_handle(), close_cb); } +/// XXX: Loop(*handle) is buggy with destructors. Normal structs +/// with dtors may not be destructured, but tuple structs can, +/// but the results are not correct. 
+pub struct Loop { + priv handle: *uvll::uv_loop_t +} - extern fn close_cb(handle: *uvll::uv_handle_t) { - let mut h: Handle = NativeHandle::from_native_handle(handle); - h.get_watcher_data().close_cb.take_unwrap()(); - h.drop_watcher_data(); - unsafe { uvll::free_handle(handle as *c_void) } - } +impl Loop { + pub fn new() -> Loop { + let handle = unsafe { uvll::loop_new() }; + assert!(handle.is_not_null()); + Loop::wrap(handle) } - fn close_async(self) { - unsafe { uvll::close(self.native_handle(), close_cb); } + pub fn wrap(handle: *uvll::uv_loop_t) -> Loop { Loop { handle: handle } } - extern fn close_cb(handle: *uvll::uv_handle_t) { - let mut h: Handle = NativeHandle::from_native_handle(handle); - h.drop_watcher_data(); - unsafe { uvll::free_handle(handle as *c_void) } - } + pub fn run(&mut self) { + unsafe { uvll::uv_run(self.handle, uvll::RUN_DEFAULT) }; + } + + pub fn close(&mut self) { + unsafe { uvll::uv_loop_delete(self.handle) }; } } @@ -270,7 +289,7 @@ impl UvError { pub fn name(&self) -> ~str { unsafe { let inner = match self { &UvError(a) => a }; - let name_str = uvll::err_name(inner); + let name_str = uvll::uv_err_name(inner); assert!(name_str.is_not_null()); from_c_str(name_str) } @@ -279,7 +298,7 @@ impl UvError { pub fn desc(&self) -> ~str { unsafe { let inner = match self { &UvError(a) => a }; - let desc_str = uvll::strerror(inner); + let desc_str = uvll::uv_strerror(inner); assert!(desc_str.is_not_null()); from_c_str(desc_str) } @@ -309,7 +328,7 @@ pub fn uv_error_to_io_error(uverr: UvError) -> IoError { use std::rt::io::*; // uv error descriptions are static - let c_desc = uvll::strerror(*uverr); + let c_desc = uvll::uv_strerror(*uverr); let desc = str::raw::c_str_to_static_slice(c_desc); let kind = match *uverr { @@ -337,9 +356,8 @@ pub fn uv_error_to_io_error(uverr: UvError) -> IoError { } } -/// Given a uv handle, convert a callback status to a UvError -pub fn status_to_maybe_uv_error(status: c_int) -> Option -{ +/// Given a uv error code, 
convert a callback status to a UvError +pub fn status_to_maybe_uv_error(status: c_int) -> Option { if status >= 0 { None } else { @@ -347,6 +365,10 @@ pub fn status_to_maybe_uv_error(status: c_int) -> Option } } +pub fn status_to_io_result(status: c_int) -> Result<(), IoError> { + if status >= 0 {Ok(())} else {Err(uv_error_to_io_error(UvError(status)))} +} + /// The uv buffer type pub type Buf = uvll::uv_buf_t; @@ -360,65 +382,56 @@ pub fn empty_buf() -> Buf { /// Borrow a slice to a Buf pub fn slice_to_uv_buf(v: &[u8]) -> Buf { let data = vec::raw::to_ptr(v); - unsafe { uvll::buf_init(data, v.len()) } + uvll::uv_buf_t { base: data, len: v.len() as uvll::uv_buf_len_t } } -// XXX: Do these conversions without copying - -/// Transmute an owned vector to a Buf -pub fn vec_to_uv_buf(v: ~[u8]) -> Buf { - #[fixed_stack_segment]; #[inline(never)]; - +#[cfg(test)] +fn local_loop() -> &'static mut Loop { unsafe { - let data = malloc(v.len() as size_t) as *u8; - assert!(data.is_not_null()); - do v.as_imm_buf |b, l| { - let data = data as *mut u8; - ptr::copy_memory(data, b, l) - } - uvll::buf_init(data, v.len()) + cast::transmute(do Local::borrow |sched: &mut Scheduler| { + let mut io = None; + do sched.event_loop.io |i| { + let (_vtable, uvio): (uint, &'static mut uvio::UvIoFactory) = + cast::transmute(i); + io = Some(uvio); + } + io.unwrap() + }.uv_loop()) } } -/// Transmute a Buf that was once a ~[u8] back to ~[u8] -pub fn vec_from_uv_buf(buf: Buf) -> Option<~[u8]> { - #[fixed_stack_segment]; #[inline(never)]; +#[cfg(test)] +mod test { + use std::cast::transmute; + use std::ptr; + use std::unstable::run_in_bare_thread; - if !(buf.len == 0 && buf.base.is_null()) { - let v = unsafe { vec::from_buf(buf.base, buf.len as uint) }; - unsafe { free(buf.base as *c_void) }; - return Some(v); - } else { - // No buffer - uvdebug!("No buffer!"); - return None; - } -} -/* -#[test] -fn test_slice_to_uv_buf() { - let slice = [0, .. 
20]; - let buf = slice_to_uv_buf(slice); + use super::{slice_to_uv_buf, Loop}; - assert!(buf.len == 20); + #[test] + fn test_slice_to_uv_buf() { + let slice = [0, .. 20]; + let buf = slice_to_uv_buf(slice); - unsafe { - let base = transmute::<*u8, *mut u8>(buf.base); - (*base) = 1; - (*ptr::mut_offset(base, 1)) = 2; - } + assert_eq!(buf.len, 20); - assert!(slice[0] == 1); - assert!(slice[1] == 2); -} + unsafe { + let base = transmute::<*u8, *mut u8>(buf.base); + (*base) = 1; + (*ptr::mut_offset(base, 1)) = 2; + } + assert!(slice[0] == 1); + assert!(slice[1] == 2); + } -#[test] -fn loop_smoke_test() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - loop_.run(); - loop_.close(); + + #[test] + fn loop_smoke_test() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + loop_.run(); + loop_.close(); + } } } -*/ diff --git a/src/librustuv/macros.rs b/src/librustuv/macros.rs index cbbed316d83df..90b8263da79fd 100644 --- a/src/librustuv/macros.rs +++ b/src/librustuv/macros.rs @@ -27,6 +27,11 @@ macro_rules! uvdebug ( }) ) +// get a handle for the current scheduler +macro_rules! get_handle_to_current_scheduler( + () => (do Local::borrow |sched: &mut Scheduler| { sched.make_handle() }) +) + pub fn dumb_println(args: &fmt::Arguments) { use std::rt::io::native::stdio::stderr; use std::rt::io::Writer; diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index 0aaa931c9475e..32c9b6c3d1729 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -8,91 +8,82 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::libc::{size_t, ssize_t, c_int, c_void, c_uint}; -use std::vec; +use std::cast; +use std::libc; +use std::libc::{size_t, ssize_t, c_int, c_void, c_uint, c_char}; +use std::ptr; +use std::rt::BlockedTask; +use std::rt::io::IoError; +use std::rt::io::net::ip::{Ipv4Addr, Ipv6Addr, SocketAddr, IpAddr}; +use std::rt::local::Local; +use std::rt::rtio; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::rt::tube::Tube; use std::str; -use std::rt::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr}; +use std::task; +use std::vec; +use stream::StreamWatcher; +use super::{Loop, Request, UvError, Buf, status_to_io_result, + uv_error_to_io_error, UvHandle, slice_to_uv_buf, + wait_until_woken_after}; +use uvio::HomingIO; use uvll; -use uvll::*; -use super::{AllocCallback, ConnectionCallback, ReadCallback, UdpReceiveCallback, - UdpSendCallback, Loop, Watcher, Request, UvError, Buf, NativeHandle, - status_to_maybe_uv_error, empty_buf}; - -pub struct UvAddrInfo(*uvll::addrinfo); - -pub enum UvSocketAddr { - UvIpv4SocketAddr(*sockaddr_in), - UvIpv6SocketAddr(*sockaddr_in6), -} +use uvll::sockaddr; -pub fn sockaddr_to_UvSocketAddr(addr: *uvll::sockaddr) -> UvSocketAddr { - unsafe { - assert!((is_ip4_addr(addr) || is_ip6_addr(addr))); - assert!(!(is_ip4_addr(addr) && is_ip6_addr(addr))); - match addr { - _ if is_ip4_addr(addr) => UvIpv4SocketAddr(addr as *uvll::sockaddr_in), - _ if is_ip6_addr(addr) => UvIpv6SocketAddr(addr as *uvll::sockaddr_in6), - _ => fail!(), - } - } -} +//////////////////////////////////////////////////////////////////////////////// +/// Generic functions related to dealing with sockaddr things +//////////////////////////////////////////////////////////////////////////////// -fn socket_addr_as_uv_socket_addr(addr: SocketAddr, f: &fn(UvSocketAddr) -> T) -> T { +#[fixed_stack_segment] +fn socket_addr_as_sockaddr(addr: SocketAddr, f: &fn(*sockaddr) -> T) -> T { let malloc = match addr.ip { - Ipv4Addr(*) => malloc_ip4_addr, - Ipv6Addr(*) => malloc_ip6_addr, 
- }; - let wrap = match addr.ip { - Ipv4Addr(*) => UvIpv4SocketAddr, - Ipv6Addr(*) => UvIpv6SocketAddr, - }; - let free = match addr.ip { - Ipv4Addr(*) => free_ip4_addr, - Ipv6Addr(*) => free_ip6_addr, + Ipv4Addr(*) => uvll::rust_malloc_ip4_addr, + Ipv6Addr(*) => uvll::rust_malloc_ip6_addr, }; - let addr = unsafe { malloc(addr.ip.to_str(), addr.port as int) }; + let ip = addr.ip.to_str(); + let addr = ip.with_c_str(|p| unsafe { malloc(p, addr.port as c_int) }); do (|| { - f(wrap(addr)) + f(addr) }).finally { - unsafe { free(addr) }; + unsafe { libc::free(addr) }; } } -fn uv_socket_addr_as_socket_addr(addr: UvSocketAddr, f: &fn(SocketAddr) -> T) -> T { - let ip_size = match addr { - UvIpv4SocketAddr(*) => 4/*groups of*/ * 3/*digits separated by*/ + 3/*periods*/, - UvIpv6SocketAddr(*) => 8/*groups of*/ * 4/*hex digits separated by*/ + 7 /*colons*/, - }; - let ip_name = { - let buf = vec::from_elem(ip_size + 1 /*null terminated*/, 0u8); - unsafe { +#[fixed_stack_segment] +pub fn sockaddr_to_socket_addr(addr: *sockaddr) -> SocketAddr { + unsafe { + let ip_size = if uvll::rust_is_ipv4_sockaddr(addr) == 1 { + 4/*groups of*/ * 3/*digits separated by*/ + 3/*periods*/ + } else if uvll::rust_is_ipv6_sockaddr(addr) == 1 { + 8/*groups of*/ * 4/*hex digits separated by*/ + 7 /*colons*/ + } else { + fail!("unknown address?"); + }; + let ip_name = { + let buf = vec::from_elem(ip_size + 1 /*null terminated*/, 0u8); let buf_ptr = vec::raw::to_ptr(buf); - match addr { - UvIpv4SocketAddr(addr) => uvll::ip4_name(addr, buf_ptr, ip_size as size_t), - UvIpv6SocketAddr(addr) => uvll::ip6_name(addr, buf_ptr, ip_size as size_t), + if uvll::rust_is_ipv4_sockaddr(addr) == 1 { + uvll::uv_ip4_name(addr, buf_ptr as *c_char, ip_size as size_t); + } else { + uvll::uv_ip6_name(addr, buf_ptr as *c_char, ip_size as size_t); } + buf }; - buf - }; - let ip_port = unsafe { - let port = match addr { - UvIpv4SocketAddr(addr) => uvll::ip4_port(addr), - UvIpv6SocketAddr(addr) => uvll::ip6_port(addr), + let 
ip_port = { + let port = if uvll::rust_is_ipv4_sockaddr(addr) == 1 { + uvll::rust_ip4_port(addr) + } else { + uvll::rust_ip6_port(addr) + }; + port as u16 }; - port as u16 - }; - let ip_str = str::from_utf8_slice(ip_name).trim_right_chars(&'\x00'); - let ip_addr = FromStr::from_str(ip_str).unwrap(); - - // finally run the closure - f(SocketAddr { ip: ip_addr, port: ip_port }) -} + let ip_str = str::from_utf8_slice(ip_name).trim_right_chars(&'\x00'); + let ip_addr = FromStr::from_str(ip_str).unwrap(); -pub fn uv_socket_addr_to_socket_addr(addr: UvSocketAddr) -> SocketAddr { - use std::util; - uv_socket_addr_as_socket_addr(addr, util::id) + SocketAddr { ip: ip_addr, port: ip_port } + } } #[cfg(test)] @@ -100,7 +91,9 @@ pub fn uv_socket_addr_to_socket_addr(addr: UvSocketAddr) -> SocketAddr { fn test_ip4_conversion() { use std::rt; let ip4 = rt::test::next_test_ip4(); - assert_eq!(ip4, socket_addr_as_uv_socket_addr(ip4, uv_socket_addr_to_socket_addr)); + do socket_addr_as_sockaddr(ip4) |addr| { + assert_eq!(ip4, sockaddr_to_socket_addr(addr)); + } } #[cfg(test)] @@ -108,744 +101,1187 @@ fn test_ip4_conversion() { fn test_ip6_conversion() { use std::rt; let ip6 = rt::test::next_test_ip6(); - assert_eq!(ip6, socket_addr_as_uv_socket_addr(ip6, uv_socket_addr_to_socket_addr)); -} - -// uv_stream_t is the parent class of uv_tcp_t, uv_pipe_t, uv_tty_t -// and uv_file_t -pub struct StreamWatcher(*uvll::uv_stream_t); -impl Watcher for StreamWatcher { } - -impl StreamWatcher { - pub fn read_start(&mut self, alloc: AllocCallback, cb: ReadCallback) { - unsafe { - match uvll::read_start(self.native_handle(), alloc_cb, read_cb) { - 0 => { - let data = self.get_watcher_data(); - data.alloc_cb = Some(alloc); - data.read_cb = Some(cb); - } - n => { - cb(*self, 0, empty_buf(), Some(UvError(n))) - } - } - } + do socket_addr_as_sockaddr(ip6) |addr| { + assert_eq!(ip6, sockaddr_to_socket_addr(addr)); + } +} - extern fn alloc_cb(stream: *uvll::uv_stream_t, suggested_size: size_t) -> Buf { 
- let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream); - let alloc_cb = stream_watcher.get_watcher_data().alloc_cb.get_ref(); - return (*alloc_cb)(suggested_size as uint); - } +enum SocketNameKind { + TcpPeer, + Tcp, + Udp +} - extern fn read_cb(stream: *uvll::uv_stream_t, nread: ssize_t, buf: Buf) { - uvdebug!("buf addr: {}", buf.base); - uvdebug!("buf len: {}", buf.len); - let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream); - let cb = stream_watcher.get_watcher_data().read_cb.get_ref(); - let status = status_to_maybe_uv_error(nread as c_int); - (*cb)(stream_watcher, nread as int, buf, status); - } +#[fixed_stack_segment] +fn socket_name(sk: SocketNameKind, handle: *c_void) -> Result { + unsafe { + let getsockname = match sk { + TcpPeer => uvll::uv_tcp_getpeername, + Tcp => uvll::uv_tcp_getsockname, + Udp => uvll::uv_udp_getsockname, + }; + + // Allocate a sockaddr_storage + // since we don't know if it's ipv4 or ipv6 + let size = uvll::rust_sockaddr_size(); + let name = libc::malloc(size as size_t); + assert!(!name.is_null()); + let mut namelen = size; + + let ret = match getsockname(handle, name, &mut namelen) { + 0 => Ok(sockaddr_to_socket_addr(name)), + n => Err(uv_error_to_io_error(UvError(n))) + }; + libc::free(name); + ret } +} + +//////////////////////////////////////////////////////////////////////////////// +/// TCP implementation +//////////////////////////////////////////////////////////////////////////////// - pub fn read_stop(&mut self) { - // It would be nice to drop the alloc and read callbacks here, - // but read_stop may be called from inside one of them and we - // would end up freeing the in-use environment - let handle = self.native_handle(); - unsafe { assert_eq!(uvll::read_stop(handle), 0); } +pub struct TcpWatcher { + handle: *uvll::uv_tcp_t, + stream: StreamWatcher, + home: SchedHandle, +} + +pub struct TcpListener { + home: SchedHandle, + handle: *uvll::uv_pipe_t, + priv 
closing_task: Option, + priv outgoing: Tube>, +} + +pub struct TcpAcceptor { + listener: ~TcpListener, + priv incoming: Tube>, +} + +// TCP watchers (clients/streams) + +impl TcpWatcher { + pub fn new(loop_: &Loop) -> TcpWatcher { + let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; + assert_eq!(unsafe { + uvll::uv_tcp_init(loop_.handle, handle) + }, 0); + TcpWatcher { + home: get_handle_to_current_scheduler!(), + handle: handle, + stream: StreamWatcher::new(handle), + } } - pub fn write(&mut self, buf: Buf, cb: ConnectionCallback) { - let req = WriteRequest::new(); - return unsafe { - match uvll::write(req.native_handle(), self.native_handle(), - [buf], write_cb) { - 0 => { - let data = self.get_watcher_data(); - assert!(data.write_cb.is_none()); - data.write_cb = Some(cb); - } - n => { - req.delete(); - cb(*self, Some(UvError(n))) + pub fn connect(loop_: &mut Loop, address: SocketAddr) + -> Result + { + struct Ctx { status: c_int, task: Option } + + return do task::unkillable { + let tcp = TcpWatcher::new(loop_); + let ret = do socket_addr_as_sockaddr(address) |addr| { + let mut req = Request::new(uvll::UV_CONNECT); + let result = unsafe { + uvll::uv_tcp_connect(req.handle, tcp.handle, addr, + connect_cb) + }; + match result { + 0 => { + req.defuse(); // uv callback now owns this request + let mut cx = Ctx { status: 0, task: None }; + do wait_until_woken_after(&mut cx.task) { + req.set_data(&cx); + } + match cx.status { + 0 => Ok(()), + n => Err(UvError(n)), + } + } + n => Err(UvError(n)) } + }; + + match ret { + Ok(()) => Ok(tcp), + Err(e) => Err(e), } }; - extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { - let write_request: WriteRequest = NativeHandle::from_native_handle(req); - let mut stream_watcher = write_request.stream(); - write_request.delete(); - let cb = stream_watcher.get_watcher_data().write_cb.take_unwrap(); - let status = status_to_maybe_uv_error(status); - cb(stream_watcher, status); + extern fn connect_cb(req: 
*uvll::uv_connect_t, status: c_int) { + let req = Request::wrap(req); + assert!(status != uvll::ECANCELED); + let cx: &mut Ctx = unsafe { req.get_data() }; + cx.status = status; + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(cx.task.take_unwrap()); } } +} + +impl HomingIO for TcpWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl rtio::RtioSocket for TcpWatcher { + fn socket_name(&mut self) -> Result { + let _m = self.fire_homing_missile(); + socket_name(Tcp, self.handle) + } +} +impl rtio::RtioTcpStream for TcpWatcher { + fn read(&mut self, buf: &mut [u8]) -> Result { + let _m = self.fire_homing_missile(); + self.stream.read(buf).map_err(uv_error_to_io_error) + } - pub fn listen(&mut self, cb: ConnectionCallback) -> Result<(), UvError> { - { - let data = self.get_watcher_data(); - assert!(data.connect_cb.is_none()); - data.connect_cb = Some(cb); - } + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + self.stream.write(buf).map_err(uv_error_to_io_error) + } - return unsafe { - static BACKLOG: c_int = 128; // XXX should be configurable - match uvll::listen(self.native_handle(), BACKLOG, connection_cb) { - 0 => Ok(()), - n => Err(UvError(n)) - } - }; + fn peer_name(&mut self) -> Result { + let _m = self.fire_homing_missile(); + socket_name(TcpPeer, self.handle) + } - extern fn connection_cb(handle: *uvll::uv_stream_t, status: c_int) { - uvdebug!("connection_cb"); - let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(handle); - let cb = stream_watcher.get_watcher_data().connect_cb.get_ref(); - let status = status_to_maybe_uv_error(status); - (*cb)(stream_watcher, status); - } + fn control_congestion(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_nodelay(self.handle, 0 as c_int) + }) } - pub fn accept(&mut self, stream: StreamWatcher) { - let 
self_handle = self.native_handle() as *c_void; - let stream_handle = stream.native_handle() as *c_void; - assert_eq!(0, unsafe { uvll::accept(self_handle, stream_handle) } ); + fn nodelay(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_nodelay(self.handle, 1 as c_int) + }) } -} -impl NativeHandle<*uvll::uv_stream_t> for StreamWatcher { - fn from_native_handle(handle: *uvll::uv_stream_t) -> StreamWatcher { - StreamWatcher(handle) + fn keepalive(&mut self, delay_in_seconds: uint) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_keepalive(self.handle, 1 as c_int, + delay_in_seconds as c_uint) + }) } - fn native_handle(&self) -> *uvll::uv_stream_t { - match self { &StreamWatcher(ptr) => ptr } + + fn letdie(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_keepalive(self.handle, 0 as c_int, 0 as c_uint) + }) } } -pub struct TcpWatcher(*uvll::uv_tcp_t); -impl Watcher for TcpWatcher { } +impl UvHandle for TcpWatcher { + fn uv_handle(&self) -> *uvll::uv_tcp_t { self.stream.handle } +} -impl TcpWatcher { - pub fn new(loop_: &Loop) -> TcpWatcher { - unsafe { - let handle = malloc_handle(UV_TCP); - assert!(handle.is_not_null()); - assert_eq!(0, uvll::tcp_init(loop_.native_handle(), handle)); - let mut watcher: TcpWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher; - } - } - - pub fn bind(&mut self, address: SocketAddr) -> Result<(), UvError> { - do socket_addr_as_uv_socket_addr(address) |addr| { - let result = unsafe { - match addr { - UvIpv4SocketAddr(addr) => uvll::tcp_bind(self.native_handle(), addr), - UvIpv6SocketAddr(addr) => uvll::tcp_bind6(self.native_handle(), addr), - } +impl Drop for TcpWatcher { + fn drop(&mut self) { + let _m = self.fire_homing_missile(); + self.close(); + } +} + +// TCP listeners (unbound 
servers) + +impl TcpListener { + pub fn bind(loop_: &mut Loop, address: SocketAddr) + -> Result<~TcpListener, UvError> + { + do task::unkillable { + let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; + assert_eq!(unsafe { + uvll::uv_tcp_init(loop_.handle, handle) + }, 0); + let l = ~TcpListener { + home: get_handle_to_current_scheduler!(), + handle: handle, + closing_task: None, + outgoing: Tube::new(), }; - match result { - 0 => Ok(()), - _ => Err(UvError(result)), + let res = socket_addr_as_sockaddr(address, |addr| unsafe { + uvll::uv_tcp_bind(l.handle, addr) + }); + match res { + 0 => Ok(l.install()), + n => Err(UvError(n)) } } } +} - pub fn connect(&mut self, address: SocketAddr, cb: ConnectionCallback) { - unsafe { - assert!(self.get_watcher_data().connect_cb.is_none()); - self.get_watcher_data().connect_cb = Some(cb); +impl HomingIO for TcpListener { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} - let connect_handle = ConnectRequest::new().native_handle(); - uvdebug!("connect_t: {}", connect_handle); - do socket_addr_as_uv_socket_addr(address) |addr| { - let result = match addr { - UvIpv4SocketAddr(addr) => uvll::tcp_connect(connect_handle, - self.native_handle(), addr, connect_cb), - UvIpv6SocketAddr(addr) => uvll::tcp_connect6(connect_handle, - self.native_handle(), addr, connect_cb), - }; - assert_eq!(0, result); - } +impl UvHandle for TcpListener { + fn uv_handle(&self) -> *uvll::uv_tcp_t { self.handle } +} - extern fn connect_cb(req: *uvll::uv_connect_t, status: c_int) { - uvdebug!("connect_t: {}", req); - let connect_request: ConnectRequest = NativeHandle::from_native_handle(req); - let mut stream_watcher = connect_request.stream(); - connect_request.delete(); - let cb = stream_watcher.get_watcher_data().connect_cb.take_unwrap(); - let status = status_to_maybe_uv_error(status); - cb(stream_watcher, status); - } +impl rtio::RtioSocket for TcpListener { + fn socket_name(&mut self) -> Result { + let _m = 
self.fire_homing_missile(); + socket_name(Tcp, self.handle) + } +} + +impl rtio::RtioTcpListener for TcpListener { + fn listen(mut ~self) -> Result<~rtio::RtioTcpAcceptor, IoError> { + // create the acceptor object from ourselves + let incoming = self.outgoing.clone(); + let mut acceptor = ~TcpAcceptor { + listener: self, + incoming: incoming, + }; + + let _m = acceptor.fire_homing_missile(); + // XXX: the 128 backlog should be configurable + match unsafe { uvll::uv_listen(acceptor.listener.handle, 128, listen_cb) } { + 0 => Ok(acceptor as ~rtio::RtioTcpAcceptor), + n => Err(uv_error_to_io_error(UvError(n))), + } + } +} + +extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) { + assert!(status != uvll::ECANCELED); + let msg = match status { + 0 => { + let loop_ = Loop::wrap(unsafe { + uvll::get_loop_for_uv_handle(server) + }); + let client = TcpWatcher::new(&loop_); + assert_eq!(unsafe { uvll::uv_accept(server, client.handle) }, 0); + Ok(~client as ~rtio::RtioTcpStream) } + n => Err(uv_error_to_io_error(UvError(n))) + }; + + let tcp: &mut TcpListener = unsafe { UvHandle::from_uv_handle(&server) }; + tcp.outgoing.send(msg); +} + +impl Drop for TcpListener { + fn drop(&mut self) { + let _m = self.fire_homing_missile(); + self.close(); } +} + +extern fn listener_close_cb(handle: *uvll::uv_handle_t) { + let tcp: &mut TcpListener = unsafe { UvHandle::from_uv_handle(&handle) }; + unsafe { uvll::free_handle(handle) } - pub fn as_stream(&self) -> StreamWatcher { - NativeHandle::from_native_handle(self.native_handle() as *uvll::uv_stream_t) + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(tcp.closing_task.take_unwrap()); +} + +// TCP acceptors (bound servers) + +impl HomingIO for TcpAcceptor { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } +} + +impl rtio::RtioSocket for TcpAcceptor { + fn socket_name(&mut self) -> Result { + let _m = self.fire_homing_missile(); + socket_name(Tcp, self.listener.handle) } 
} -impl NativeHandle<*uvll::uv_tcp_t> for TcpWatcher { - fn from_native_handle(handle: *uvll::uv_tcp_t) -> TcpWatcher { - TcpWatcher(handle) +impl rtio::RtioTcpAcceptor for TcpAcceptor { + fn accept(&mut self) -> Result<~rtio::RtioTcpStream, IoError> { + let _m = self.fire_homing_missile(); + self.incoming.recv() } - fn native_handle(&self) -> *uvll::uv_tcp_t { - match self { &TcpWatcher(ptr) => ptr } + + fn accept_simultaneously(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_simultaneous_accepts(self.listener.handle, 1) + }) + } + + fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_simultaneous_accepts(self.listener.handle, 0) + }) } } -pub struct UdpWatcher(*uvll::uv_udp_t); -impl Watcher for UdpWatcher { } +//////////////////////////////////////////////////////////////////////////////// +/// UDP implementation +//////////////////////////////////////////////////////////////////////////////// + +pub struct UdpWatcher { + handle: *uvll::uv_udp_t, + home: SchedHandle, +} impl UdpWatcher { - pub fn new(loop_: &Loop) -> UdpWatcher { - unsafe { - let handle = malloc_handle(UV_UDP); - assert!(handle.is_not_null()); - assert_eq!(0, uvll::udp_init(loop_.native_handle(), handle)); - let mut watcher: UdpWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher; - } - } - - pub fn bind(&mut self, address: SocketAddr) -> Result<(), UvError> { - do socket_addr_as_uv_socket_addr(address) |addr| { - let result = unsafe { - match addr { - UvIpv4SocketAddr(addr) => uvll::udp_bind(self.native_handle(), addr, 0u32), - UvIpv6SocketAddr(addr) => uvll::udp_bind6(self.native_handle(), addr, 0u32), - } + pub fn bind(loop_: &Loop, address: SocketAddr) + -> Result + { + do task::unkillable { + let udp = UdpWatcher { + handle: unsafe { uvll::malloc_handle(uvll::UV_UDP) }, + 
home: get_handle_to_current_scheduler!(), }; + assert_eq!(unsafe { + uvll::uv_udp_init(loop_.handle, udp.handle) + }, 0); + let result = socket_addr_as_sockaddr(address, |addr| unsafe { + uvll::uv_udp_bind(udp.handle, addr, 0u32) + }); match result { - 0 => Ok(()), - _ => Err(UvError(result)), + 0 => Ok(udp), + n => Err(UvError(n)), } } } +} - pub fn recv_start(&mut self, alloc: AllocCallback, cb: UdpReceiveCallback) { - { - let data = self.get_watcher_data(); - data.alloc_cb = Some(alloc); - data.udp_recv_cb = Some(cb); - } +impl UvHandle for UdpWatcher { + fn uv_handle(&self) -> *uvll::uv_udp_t { self.handle } +} - unsafe { uvll::udp_recv_start(self.native_handle(), alloc_cb, recv_cb); } +impl HomingIO for UdpWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl rtio::RtioSocket for UdpWatcher { + fn socket_name(&mut self) -> Result { + let _m = self.fire_homing_missile(); + socket_name(Udp, self.handle) + } +} - extern fn alloc_cb(handle: *uvll::uv_udp_t, suggested_size: size_t) -> Buf { - let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); - let alloc_cb = udp_watcher.get_watcher_data().alloc_cb.get_ref(); - return (*alloc_cb)(suggested_size as uint); +impl rtio::RtioUdpSocket for UdpWatcher { + fn recvfrom(&mut self, buf: &mut [u8]) + -> Result<(uint, SocketAddr), IoError> + { + struct Ctx { + task: Option, + buf: Option, + result: Option<(ssize_t, Option)>, } + let _m = self.fire_homing_missile(); + + let a = match unsafe { + uvll::uv_udp_recv_start(self.handle, alloc_cb, recv_cb) + } { + 0 => { + let mut cx = Ctx { + task: None, + buf: Some(slice_to_uv_buf(buf)), + result: None, + }; + do wait_until_woken_after(&mut cx.task) { + unsafe { uvll::set_data_for_uv_handle(self.handle, &cx) } + } + match cx.result.take_unwrap() { + (n, _) if n < 0 => + Err(uv_error_to_io_error(UvError(n as c_int))), + (n, addr) => Ok((n as uint, addr.unwrap())) + } + } + n => Err(uv_error_to_io_error(UvError(n))) + }; + 
return a; + + extern fn alloc_cb(handle: *uvll::uv_udp_t, + _suggested_size: size_t, + buf: *mut Buf) { + unsafe { + let cx: &mut Ctx = + cast::transmute(uvll::get_data_for_uv_handle(handle)); + *buf = cx.buf.take().expect("recv alloc_cb called more than once") + } + } + + extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: *Buf, + addr: *uvll::sockaddr, _flags: c_uint) { + assert!(nread != uvll::ECANCELED as ssize_t); + let cx: &mut Ctx = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; - extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, - addr: *uvll::sockaddr, flags: c_uint) { // When there's no data to read the recv callback can be a no-op. // This can happen if read returns EAGAIN/EWOULDBLOCK. By ignoring // this we just drop back to kqueue and wait for the next callback. if nread == 0 { - return; + cx.buf = Some(unsafe { *buf }); + return } - uvdebug!("buf addr: {}", buf.base); - uvdebug!("buf len: {}", buf.len); - let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); - let cb = udp_watcher.get_watcher_data().udp_recv_cb.get_ref(); - let status = status_to_maybe_uv_error(nread as c_int); - let addr = uv_socket_addr_to_socket_addr(sockaddr_to_UvSocketAddr(addr)); - (*cb)(udp_watcher, nread as int, buf, addr, flags as uint, status); - } - } - - pub fn recv_stop(&mut self) { - unsafe { uvll::udp_recv_stop(self.native_handle()); } - } - - pub fn send(&mut self, buf: Buf, address: SocketAddr, cb: UdpSendCallback) { - { - let data = self.get_watcher_data(); - assert!(data.udp_send_cb.is_none()); - data.udp_send_cb = Some(cb); - } + unsafe { + assert_eq!(uvll::uv_udp_recv_stop(handle), 0) + } - let req = UdpSendRequest::new(); - do socket_addr_as_uv_socket_addr(address) |addr| { - let result = unsafe { - match addr { - UvIpv4SocketAddr(addr) => uvll::udp_send(req.native_handle(), - self.native_handle(), [buf], addr, send_cb), - UvIpv6SocketAddr(addr) => uvll::udp_send6(req.native_handle(), - 
self.native_handle(), [buf], addr, send_cb), - } + let cx: &mut Ctx = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) }; - assert_eq!(0, result); - } + let addr = if addr == ptr::null() { + None + } else { + Some(sockaddr_to_socket_addr(addr)) + }; + cx.result = Some((nread, addr)); - extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { - let send_request: UdpSendRequest = NativeHandle::from_native_handle(req); - let mut udp_watcher = send_request.handle(); - send_request.delete(); - let cb = udp_watcher.get_watcher_data().udp_send_cb.take_unwrap(); - let status = status_to_maybe_uv_error(status); - cb(udp_watcher, status); + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(cx.task.take_unwrap()); } } -} -impl NativeHandle<*uvll::uv_udp_t> for UdpWatcher { - fn from_native_handle(handle: *uvll::uv_udp_t) -> UdpWatcher { - UdpWatcher(handle) - } - fn native_handle(&self) -> *uvll::uv_udp_t { - match self { &UdpWatcher(ptr) => ptr } - } -} + fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> Result<(), IoError> { + struct Ctx { task: Option, result: c_int } -// uv_connect_t is a subclass of uv_req_t -pub struct ConnectRequest(*uvll::uv_connect_t); -impl Request for ConnectRequest { } + let _m = self.fire_homing_missile(); -impl ConnectRequest { + let mut req = Request::new(uvll::UV_UDP_SEND); + let buf = slice_to_uv_buf(buf); + let result = socket_addr_as_sockaddr(dst, |dst| unsafe { + uvll::uv_udp_send(req.handle, self.handle, [buf], dst, send_cb) + }); - pub fn new() -> ConnectRequest { - let connect_handle = unsafe { malloc_req(UV_CONNECT) }; - assert!(connect_handle.is_not_null()); - ConnectRequest(connect_handle as *uvll::uv_connect_t) - } + return match result { + 0 => { + req.defuse(); // uv callback now owns this request + let mut cx = Ctx { task: None, result: 0 }; + do wait_until_woken_after(&mut cx.task) { + req.set_data(&cx); + } + match cx.result { + 0 => Ok(()), + n => 
Err(uv_error_to_io_error(UvError(n))) + } + } + n => Err(uv_error_to_io_error(UvError(n))) + }; - fn stream(&self) -> StreamWatcher { - unsafe { - let stream_handle = uvll::get_stream_handle_from_connect_req(self.native_handle()); - NativeHandle::from_native_handle(stream_handle) - } - } + extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { + let req = Request::wrap(req); + assert!(status != uvll::ECANCELED); + let cx: &mut Ctx = unsafe { req.get_data() }; + cx.result = status; - fn delete(self) { - unsafe { free_req(self.native_handle() as *c_void) } + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(cx.task.take_unwrap()); + } } -} -impl NativeHandle<*uvll::uv_connect_t> for ConnectRequest { - fn from_native_handle(handle: *uvll:: uv_connect_t) -> ConnectRequest { - ConnectRequest(handle) - } - fn native_handle(&self) -> *uvll::uv_connect_t { - match self { &ConnectRequest(ptr) => ptr } + fn join_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + do multi.to_str().with_c_str |m_addr| { + uvll::uv_udp_set_membership(self.handle, + m_addr, ptr::null(), + uvll::UV_JOIN_GROUP) + } + }) } -} - -pub struct WriteRequest(*uvll::uv_write_t); - -impl Request for WriteRequest { } -impl WriteRequest { - pub fn new() -> WriteRequest { - let write_handle = unsafe { malloc_req(UV_WRITE) }; - assert!(write_handle.is_not_null()); - WriteRequest(write_handle as *uvll::uv_write_t) + fn leave_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + do multi.to_str().with_c_str |m_addr| { + uvll::uv_udp_set_membership(self.handle, + m_addr, ptr::null(), + uvll::UV_LEAVE_GROUP) + } + }) } - pub fn stream(&self) -> StreamWatcher { - unsafe { - let stream_handle = uvll::get_stream_handle_from_write_req(self.native_handle()); - NativeHandle::from_native_handle(stream_handle) - } + fn 
loop_multicast_locally(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_multicast_loop(self.handle, + 1 as c_int) + }) } - pub fn delete(self) { - unsafe { free_req(self.native_handle() as *c_void) } + fn dont_loop_multicast_locally(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_multicast_loop(self.handle, + 0 as c_int) + }) } -} -impl NativeHandle<*uvll::uv_write_t> for WriteRequest { - fn from_native_handle(handle: *uvll:: uv_write_t) -> WriteRequest { - WriteRequest(handle) - } - fn native_handle(&self) -> *uvll::uv_write_t { - match self { &WriteRequest(ptr) => ptr } + fn multicast_time_to_live(&mut self, ttl: int) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_multicast_ttl(self.handle, + ttl as c_int) + }) } -} - -pub struct UdpSendRequest(*uvll::uv_udp_send_t); -impl Request for UdpSendRequest { } -impl UdpSendRequest { - pub fn new() -> UdpSendRequest { - let send_handle = unsafe { malloc_req(UV_UDP_SEND) }; - assert!(send_handle.is_not_null()); - UdpSendRequest(send_handle as *uvll::uv_udp_send_t) + fn time_to_live(&mut self, ttl: int) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_ttl(self.handle, ttl as c_int) + }) } - pub fn handle(&self) -> UdpWatcher { - let send_request_handle = unsafe { - uvll::get_udp_handle_from_send_req(self.native_handle()) - }; - NativeHandle::from_native_handle(send_request_handle) + fn hear_broadcasts(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_broadcast(self.handle, + 1 as c_int) + }) } - pub fn delete(self) { - unsafe { free_req(self.native_handle() as *c_void) } + fn ignore_broadcasts(&mut self) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + 
status_to_io_result(unsafe { + uvll::uv_udp_set_broadcast(self.handle, + 0 as c_int) + }) } } -impl NativeHandle<*uvll::uv_udp_send_t> for UdpSendRequest { - fn from_native_handle(handle: *uvll::uv_udp_send_t) -> UdpSendRequest { - UdpSendRequest(handle) - } - fn native_handle(&self) -> *uvll::uv_udp_send_t { - match self { &UdpSendRequest(ptr) => ptr } +impl Drop for UdpWatcher { + fn drop(&mut self) { + // Send ourselves home to close this handle (blocking while doing so). + let _m = self.fire_homing_missile(); + self.close(); } } +//////////////////////////////////////////////////////////////////////////////// +/// UV request support +//////////////////////////////////////////////////////////////////////////////// + #[cfg(test)] mod test { - use super::*; - use std::util::ignore; use std::cell::Cell; - use std::vec; - use std::unstable::run_in_bare_thread; - use std::rt::thread::Thread; + use std::comm::oneshot; use std::rt::test::*; - use super::super::{Loop, AllocCallback}; - use super::super::{vec_from_uv_buf, vec_to_uv_buf, slice_to_uv_buf}; + use std::rt::rtio::{RtioTcpStream, RtioTcpListener, RtioTcpAcceptor, + RtioUdpSocket}; + use std::task; + + use super::*; + use super::super::local_loop; #[test] fn connect_close_ip4() { - do run_in_bare_thread() { - let mut loop_ = Loop::new(); - let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; - // Connect to a port where nobody is listening - let addr = next_test_ip4(); - do tcp_watcher.connect(addr) |stream_watcher, status| { - uvdebug!("tcp_watcher.connect!"); - assert!(status.is_some()); - assert_eq!(status.unwrap().name(), ~"ECONNREFUSED"); - stream_watcher.close(||()); - } - loop_.run(); - loop_.close(); + match TcpWatcher::connect(local_loop(), next_test_ip4()) { + Ok(*) => fail!(), + Err(e) => assert_eq!(e.name(), ~"ECONNREFUSED"), } } #[test] fn connect_close_ip6() { - do run_in_bare_thread() { - let mut loop_ = Loop::new(); - let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; - // Connect to a 
port where nobody is listening - let addr = next_test_ip6(); - do tcp_watcher.connect(addr) |stream_watcher, status| { - uvdebug!("tcp_watcher.connect!"); - assert!(status.is_some()); - assert_eq!(status.unwrap().name(), ~"ECONNREFUSED"); - stream_watcher.close(||()); - } - loop_.run(); - loop_.close(); + match TcpWatcher::connect(local_loop(), next_test_ip6()) { + Ok(*) => fail!(), + Err(e) => assert_eq!(e.name(), ~"ECONNREFUSED"), } } #[test] fn udp_bind_close_ip4() { - do run_in_bare_thread() { - let mut loop_ = Loop::new(); - let mut udp_watcher = { UdpWatcher::new(&mut loop_) }; - let addr = next_test_ip4(); - udp_watcher.bind(addr); - udp_watcher.close(||()); - loop_.run(); - loop_.close(); + match UdpWatcher::bind(local_loop(), next_test_ip4()) { + Ok(*) => {} + Err(*) => fail!() } } #[test] fn udp_bind_close_ip6() { - do run_in_bare_thread() { - let mut loop_ = Loop::new(); - let mut udp_watcher = { UdpWatcher::new(&mut loop_) }; - let addr = next_test_ip6(); - udp_watcher.bind(addr); - udp_watcher.close(||()); - loop_.run(); - loop_.close(); + match UdpWatcher::bind(local_loop(), next_test_ip6()) { + Ok(*) => {} + Err(*) => fail!() } } #[test] fn listen_ip4() { - do run_in_bare_thread() { - static MAX: int = 10; - let mut loop_ = Loop::new(); - let mut server_tcp_watcher = { TcpWatcher::new(&mut loop_) }; - let addr = next_test_ip4(); - server_tcp_watcher.bind(addr); - let loop_ = loop_; - uvdebug!("listening"); - let mut stream = server_tcp_watcher.as_stream(); - let res = do stream.listen |mut server_stream_watcher, status| { - uvdebug!("listened!"); - assert!(status.is_none()); - let mut loop_ = loop_; - let client_tcp_watcher = TcpWatcher::new(&mut loop_); - let mut client_tcp_watcher = client_tcp_watcher.as_stream(); - server_stream_watcher.accept(client_tcp_watcher); - let count_cell = Cell::new(0); - let server_stream_watcher = server_stream_watcher; - uvdebug!("starting read"); - let alloc: AllocCallback = |size| { - 
vec_to_uv_buf(vec::from_elem(size, 0u8)) - }; - do client_tcp_watcher.read_start(alloc) |stream_watcher, nread, buf, status| { - - uvdebug!("i'm reading!"); - let buf = vec_from_uv_buf(buf); - let mut count = count_cell.take(); - if status.is_none() { - uvdebug!("got {} bytes", nread); - let buf = buf.unwrap(); - for byte in buf.slice(0, nread as uint).iter() { - assert!(*byte == count as u8); - uvdebug!("{}", *byte as uint); - count += 1; - } - } else { - assert_eq!(count, MAX); - do stream_watcher.close { - server_stream_watcher.close(||()); - } + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); + + do spawn { + let w = match TcpListener::bind(local_loop(), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + let mut w = match w.listen() { + Ok(w) => w, Err(e) => fail!("{:?}", e), + }; + chan.take().send(()); + match w.accept() { + Ok(mut stream) => { + let mut buf = [0u8, ..10]; + match stream.read(buf) { + Ok(10) => {} e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); } - count_cell.put_back(count); } - }; + Err(e) => fail!("{:?}", e) + } + } - assert!(res.is_ok()); - - let client_thread = do Thread::start { - uvdebug!("starting client thread"); - let mut loop_ = Loop::new(); - let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; - do tcp_watcher.connect(addr) |mut stream_watcher, status| { - uvdebug!("connecting"); - assert!(status.is_none()); - let msg = ~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9]; - let buf = slice_to_uv_buf(msg); - let msg_cell = Cell::new(msg); - do stream_watcher.write(buf) |stream_watcher, status| { - uvdebug!("writing"); - assert!(status.is_none()); - let msg_cell = Cell::new(msg_cell.take()); - stream_watcher.close(||ignore(msg_cell.take())); + port.recv(); + let mut w = match TcpWatcher::connect(local_loop(), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) + } + } + + 
#[test] + fn listen_ip6() { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip6(); + + do spawn { + let w = match TcpListener::bind(local_loop(), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + let mut w = match w.listen() { + Ok(w) => w, Err(e) => fail!("{:?}", e), + }; + chan.take().send(()); + match w.accept() { + Ok(mut stream) => { + let mut buf = [0u8, ..10]; + match stream.read(buf) { + Ok(10) => {} e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); } } - loop_.run(); - loop_.close(); - }; + Err(e) => fail!("{:?}", e) + } + } - let mut loop_ = loop_; - loop_.run(); - loop_.close(); - client_thread.join(); + port.recv(); + let mut w = match TcpWatcher::connect(local_loop(), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) }; + match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) + } } #[test] - fn listen_ip6() { - do run_in_bare_thread() { - static MAX: int = 10; - let mut loop_ = Loop::new(); - let mut server_tcp_watcher = { TcpWatcher::new(&mut loop_) }; - let addr = next_test_ip6(); - server_tcp_watcher.bind(addr); - let loop_ = loop_; - uvdebug!("listening"); - let mut stream = server_tcp_watcher.as_stream(); - let res = do stream.listen |mut server_stream_watcher, status| { - uvdebug!("listened!"); - assert!(status.is_none()); - let mut loop_ = loop_; - let client_tcp_watcher = TcpWatcher::new(&mut loop_); - let mut client_tcp_watcher = client_tcp_watcher.as_stream(); - server_stream_watcher.accept(client_tcp_watcher); - let count_cell = Cell::new(0); - let server_stream_watcher = server_stream_watcher; - uvdebug!("starting read"); - let alloc: AllocCallback = |size| { - vec_to_uv_buf(vec::from_elem(size, 0u8)) - }; - do client_tcp_watcher.read_start(alloc) - |stream_watcher, nread, buf, status| { - - uvdebug!("i'm reading!"); - let buf = vec_from_uv_buf(buf); - let mut count = count_cell.take(); - if status.is_none() { - uvdebug!("got {} 
bytes", nread); - let buf = buf.unwrap(); - let r = buf.slice(0, nread as uint); - for byte in r.iter() { - assert!(*byte == count as u8); - uvdebug!("{}", *byte as uint); - count += 1; - } - } else { - assert_eq!(count, MAX); - do stream_watcher.close { - server_stream_watcher.close(||()); - } + fn udp_recv_ip4() { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let client = next_test_ip4(); + let server = next_test_ip4(); + + do spawn { + match UdpWatcher::bind(local_loop(), server) { + Ok(mut w) => { + chan.take().send(()); + let mut buf = [0u8, ..10]; + match w.recvfrom(buf) { + Ok((10, addr)) => assert_eq!(addr, client), + e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); } - count_cell.put_back(count); } - }; - assert!(res.is_ok()); - - let client_thread = do Thread::start { - uvdebug!("starting client thread"); - let mut loop_ = Loop::new(); - let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; - do tcp_watcher.connect(addr) |mut stream_watcher, status| { - uvdebug!("connecting"); - assert!(status.is_none()); - let msg = ~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9]; - let buf = slice_to_uv_buf(msg); - let msg_cell = Cell::new(msg); - do stream_watcher.write(buf) |stream_watcher, status| { - uvdebug!("writing"); - assert!(status.is_none()); - let msg_cell = Cell::new(msg_cell.take()); - stream_watcher.close(||ignore(msg_cell.take())); + Err(e) => fail!("{:?}", e) + } + } + + port.recv(); + let mut w = match UdpWatcher::bind(local_loop(), client) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) + } + } + + #[test] + fn udp_recv_ip6() { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let client = next_test_ip6(); + let server = next_test_ip6(); + + do spawn { + match UdpWatcher::bind(local_loop(), server) { + Ok(mut w) => { + chan.take().send(()); + let mut buf = [0u8, ..10]; + match w.recvfrom(buf) { + 
Ok((10, addr)) => assert_eq!(addr, client), + e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); } } - loop_.run(); - loop_.close(); - }; + Err(e) => fail!("{:?}", e) + } + } - let mut loop_ = loop_; - loop_.run(); - loop_.close(); - client_thread.join(); + port.recv(); + let mut w = match UdpWatcher::bind(local_loop(), client) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) } } #[test] - fn udp_recv_ip4() { - do run_in_bare_thread() { - static MAX: int = 10; - let mut loop_ = Loop::new(); - let server_addr = next_test_ip4(); - let client_addr = next_test_ip4(); - - let mut server = UdpWatcher::new(&loop_); - assert!(server.bind(server_addr).is_ok()); - - uvdebug!("starting read"); - let alloc: AllocCallback = |size| { - vec_to_uv_buf(vec::from_elem(size, 0u8)) - }; + fn test_read_read_read() { + use std::rt::rtio::*; + let addr = next_test_ip4(); + static MAX: uint = 5000; + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do spawn { + let listener = TcpListener::bind(local_loop(), addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + chan.take().send(()); + let mut stream = acceptor.accept().unwrap(); + let buf = [1, .. 
2048]; + let mut total_bytes_written = 0; + while total_bytes_written < MAX { + assert!(stream.write(buf).is_ok()); + uvdebug!("wrote bytes"); + total_bytes_written += buf.len(); + } + } - do server.recv_start(alloc) |mut server, nread, buf, src, flags, status| { - server.recv_stop(); - uvdebug!("i'm reading!"); - assert!(status.is_none()); - assert_eq!(flags, 0); - assert_eq!(src, client_addr); - - let buf = vec_from_uv_buf(buf); - let mut count = 0; - uvdebug!("got {} bytes", nread); - - let buf = buf.unwrap(); - for &byte in buf.slice(0, nread as uint).iter() { - assert!(byte == count as u8); - uvdebug!("{}", byte as uint); - count += 1; + do spawn { + port.take().recv(); + let mut stream = TcpWatcher::connect(local_loop(), addr).unwrap(); + let mut buf = [0, .. 2048]; + let mut total_bytes_read = 0; + while total_bytes_read < MAX { + let nread = stream.read(buf).unwrap(); + total_bytes_read += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); } - assert_eq!(count, MAX); + } + uvdebug!("read {} bytes total", total_bytes_read); + } + } + + #[test] + #[ignore(cfg(windows))] // FIXME(#10102) server never sees second packet + fn test_udp_twice() { + let server_addr = next_test_ip4(); + let client_addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do spawn { + let mut client = UdpWatcher::bind(local_loop(), client_addr).unwrap(); + port.take().recv(); + assert!(client.sendto([1], server_addr).is_ok()); + assert!(client.sendto([2], server_addr).is_ok()); + } - server.close(||{}); + let mut server = UdpWatcher::bind(local_loop(), server_addr).unwrap(); + chan.take().send(()); + let mut buf1 = [0]; + let mut buf2 = [0]; + let (nread1, src1) = server.recvfrom(buf1).unwrap(); + let (nread2, src2) = server.recvfrom(buf2).unwrap(); + assert_eq!(nread1, 1); + assert_eq!(nread2, 1); + assert_eq!(src1, client_addr); + assert_eq!(src2, client_addr); + assert_eq!(buf1[0], 1); + assert_eq!(buf2[0], 
2); + } + + #[test] + fn test_udp_many_read() { + let server_out_addr = next_test_ip4(); + let server_in_addr = next_test_ip4(); + let client_out_addr = next_test_ip4(); + let client_in_addr = next_test_ip4(); + static MAX: uint = 500_000; + + let (p1, c1) = oneshot(); + let (p2, c2) = oneshot(); + + let first = Cell::new((p1, c2)); + let second = Cell::new((p2, c1)); + + do spawn { + let l = local_loop(); + let mut server_out = UdpWatcher::bind(l, server_out_addr).unwrap(); + let mut server_in = UdpWatcher::bind(l, server_in_addr).unwrap(); + let (port, chan) = first.take(); + chan.send(()); + port.recv(); + let msg = [1, .. 2048]; + let mut total_bytes_sent = 0; + let mut buf = [1]; + while buf[0] == 1 { + // send more data + assert!(server_out.sendto(msg, client_in_addr).is_ok()); + total_bytes_sent += msg.len(); + // check if the client has received enough + let res = server_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(nread, 1); + assert_eq!(src, client_out_addr); } + assert!(total_bytes_sent >= MAX); + } - let thread = do Thread::start { - let mut loop_ = Loop::new(); - let mut client = UdpWatcher::new(&loop_); - assert!(client.bind(client_addr).is_ok()); - let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let buf = slice_to_uv_buf(msg); - do client.send(buf, server_addr) |client, status| { - uvdebug!("writing"); - assert!(status.is_none()); - client.close(||{}); + do spawn { + let l = local_loop(); + let mut client_out = UdpWatcher::bind(l, client_out_addr).unwrap(); + let mut client_in = UdpWatcher::bind(l, client_in_addr).unwrap(); + let (port, chan) = second.take(); + port.recv(); + chan.send(()); + let mut total_bytes_recv = 0; + let mut buf = [0, .. 
2048]; + while total_bytes_recv < MAX { + // ask for more + assert!(client_out.sendto([1], server_in_addr).is_ok()); + // wait for data + let res = client_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(src, server_out_addr); + total_bytes_recv += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); } + } + // tell the server we're done + assert!(client_out.sendto([0], server_in_addr).is_ok()); + } + } - loop_.run(); - loop_.close(); - }; + #[test] + fn test_read_and_block() { + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do spawn { + let listener = TcpListener::bind(local_loop(), addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + let (port2, chan2) = stream(); + chan.take().send(port2); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 2048]; + + let expected = 32; + let mut current = 0; + let mut reads = 0; + + while current < expected { + let nread = stream.read(buf).unwrap(); + for i in range(0u, nread) { + let val = buf[i] as uint; + assert_eq!(val, current % 8); + current += 1; + } + reads += 1; + + chan2.send(()); + } + + // Make sure we had multiple reads + assert!(reads > 1); + } - loop_.run(); - loop_.close(); - thread.join(); + do spawn { + let port2 = port.take().recv(); + let mut stream = TcpWatcher::connect(local_loop(), addr).unwrap(); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + port2.recv(); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + port2.recv(); } } #[test] - fn udp_recv_ip6() { - do run_in_bare_thread() { - static MAX: int = 10; - let mut loop_ = Loop::new(); - let server_addr = next_test_ip6(); - let client_addr = next_test_ip6(); - - let mut server = UdpWatcher::new(&loop_); - assert!(server.bind(server_addr).is_ok()); - - uvdebug!("starting read"); - let alloc: AllocCallback = |size| { - 
vec_to_uv_buf(vec::from_elem(size, 0u8)) + fn test_simple_tcp_server_and_client_on_diff_threads() { + let addr = next_test_ip4(); + + do task::spawn_sched(task::SingleThreaded) { + let listener = TcpListener::bind(local_loop(), addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 2048]; + let nread = stream.read(buf).unwrap(); + assert_eq!(nread, 8); + for i in range(0u, nread) { + assert_eq!(buf[i], i as u8); + } + } + + do task::spawn_sched(task::SingleThreaded) { + let mut stream = TcpWatcher::connect(local_loop(), addr); + while stream.is_err() { + stream = TcpWatcher::connect(local_loop(), addr); + } + stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]); + } + } + + // On one thread, create a udp socket. Then send that socket to another + // thread and destroy the socket on the remote thread. This should make sure + // that homing kicks in for the socket to go back home to the original + // thread, close itself, and then come back to the last thread. + #[test] + fn test_homing_closes_correctly() { + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do task::spawn_sched(task::SingleThreaded) { + let chan = Cell::new(chan.take()); + let listener = UdpWatcher::bind(local_loop(), next_test_ip4()).unwrap(); + chan.take().send(listener); + } + + do task::spawn_sched(task::SingleThreaded) { + let port = Cell::new(port.take()); + port.take().recv(); + } + } + + // This is a bit of a crufty old test, but it has its uses. 
+ #[test] + fn test_simple_homed_udp_io_bind_then_move_task_then_home_and_close() { + use std::cast; + use std::rt::local::Local; + use std::rt::rtio::{EventLoop, IoFactory}; + use std::rt::sched::Scheduler; + use std::rt::sched::{Shutdown, TaskFromFriend}; + use std::rt::sleeper_list::SleeperList; + use std::rt::task::Task; + use std::rt::task::UnwindResult; + use std::rt::thread::Thread; + use std::rt::work_queue::WorkQueue; + use std::unstable::run_in_bare_thread; + use uvio::UvEventLoop; + + do run_in_bare_thread { + let sleepers = SleeperList::new(); + let work_queue1 = WorkQueue::new(); + let work_queue2 = WorkQueue::new(); + let queues = ~[work_queue1.clone(), work_queue2.clone()]; + + let loop1 = ~UvEventLoop::new() as ~EventLoop; + let mut sched1 = ~Scheduler::new(loop1, work_queue1, queues.clone(), + sleepers.clone()); + let loop2 = ~UvEventLoop::new() as ~EventLoop; + let mut sched2 = ~Scheduler::new(loop2, work_queue2, queues.clone(), + sleepers.clone()); + + let handle1 = Cell::new(sched1.make_handle()); + let handle2 = Cell::new(sched2.make_handle()); + let tasksFriendHandle = Cell::new(sched2.make_handle()); + + let on_exit: ~fn(UnwindResult) = |exit_status| { + handle1.take().send(Shutdown); + handle2.take().send(Shutdown); + assert!(exit_status.is_success()); }; - do server.recv_start(alloc) |mut server, nread, buf, src, flags, status| { - server.recv_stop(); - uvdebug!("i'm reading!"); - assert!(status.is_none()); - assert_eq!(flags, 0); - assert_eq!(src, client_addr); - - let buf = vec_from_uv_buf(buf); - let mut count = 0; - uvdebug!("got {} bytes", nread); - - let buf = buf.unwrap(); - for &byte in buf.slice(0, nread as uint).iter() { - assert!(byte == count as u8); - uvdebug!("{}", byte as uint); - count += 1; + unsafe fn local_io() -> &'static mut IoFactory { + do Local::borrow |sched: &mut Scheduler| { + let mut io = None; + sched.event_loop.io(|i| io = Some(i)); + cast::transmute(io.unwrap()) } - assert_eq!(count, MAX); - - 
server.close(||{}); } - let thread = do Thread::start { - let mut loop_ = Loop::new(); - let mut client = UdpWatcher::new(&loop_); - assert!(client.bind(client_addr).is_ok()); - let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let buf = slice_to_uv_buf(msg); - do client.send(buf, server_addr) |client, status| { - uvdebug!("writing"); - assert!(status.is_none()); - client.close(||{}); + let test_function: ~fn() = || { + let io = unsafe { local_io() }; + let addr = next_test_ip4(); + let maybe_socket = io.udp_bind(addr); + // this socket is bound to this event loop + assert!(maybe_socket.is_ok()); + + // block self on sched1 + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + // unblock task + do task.wake().map |task| { + // send self to sched2 + tasksFriendHandle.take().send(TaskFromFriend(task)); + }; + // sched1 should now sleep since it has nothing else to do + } } + // sched2 will wake up and get the task as we do nothing else, + // the function ends and the socket goes out of scope sched2 + // will start to run the destructor the destructor will first + // block the task, set it's home as sched1, then enqueue it + // sched2 will dequeue the task, see that it has a home, and + // send it to sched1 sched1 will wake up, exec the close + // function on the correct loop, and then we're done + }; - loop_.run(); - loop_.close(); + let mut main_task = ~Task::new_root(&mut sched1.stack_pool, None, + test_function); + main_task.death.on_exit = Some(on_exit); + let main_task = Cell::new(main_task); + + let null_task = Cell::new(~do Task::new_root(&mut sched2.stack_pool, + None) || {}); + + let sched1 = Cell::new(sched1); + let sched2 = Cell::new(sched2); + + let thread1 = do Thread::start { + sched1.take().bootstrap(main_task.take()); + }; + let thread2 = do Thread::start { + sched2.take().bootstrap(null_task.take()); }; - loop_.run(); - loop_.close(); - thread.join(); + 
thread1.join(); + thread2.join(); + } + } + + #[should_fail] #[test] + fn tcp_listener_fail_cleanup() { + let addr = next_test_ip4(); + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let _w = w.listen().unwrap(); + fail!(); + } + + #[should_fail] #[test] + fn tcp_stream_fail_cleanup() { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); + + do task::spawn_unlinked { // please no linked failure + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let mut w = w.listen().unwrap(); + chan.take().send(()); + w.accept(); + } + port.recv(); + let _w = TcpWatcher::connect(local_loop(), addr).unwrap(); + fail!(); + } + + #[should_fail] #[test] + fn udp_listener_fail_cleanup() { + let addr = next_test_ip4(); + let _w = UdpWatcher::bind(local_loop(), addr).unwrap(); + fail!(); + } + + #[should_fail] #[test] + fn udp_fail_other_task() { + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + + // force the handle to be created on a different scheduler, failure in + // the original task will force a homing operation back to this + // scheduler. 
+ do task::spawn_sched(task::SingleThreaded) { + let w = UdpWatcher::bind(local_loop(), addr).unwrap(); + chan.take().send(w); + } + + let _w = port.recv(); + fail!(); + } + + #[should_fail] + #[test] + #[ignore(reason = "linked failure")] + fn linked_failure1() { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); + + do spawn { + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let mut w = w.listen().unwrap(); + chan.take().send(()); + w.accept(); + } + + port.recv(); + fail!(); + } + + #[should_fail] + #[test] + #[ignore(reason = "linked failure")] + fn linked_failure2() { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); + + do spawn { + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let mut w = w.listen().unwrap(); + chan.take().send(()); + let mut buf = [0]; + w.accept().unwrap().read(buf); } + + port.recv(); + let _w = TcpWatcher::connect(local_loop(), addr).unwrap(); + + fail!(); + } + + #[should_fail] + #[test] + #[ignore(reason = "linked failure")] + fn linked_failure3() { + let (port, chan) = stream(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); + + do spawn { + let chan = chan.take(); + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let mut w = w.listen().unwrap(); + chan.send(()); + let mut conn = w.accept().unwrap(); + chan.send(()); + let buf = [0, ..65536]; + conn.write(buf); + } + + port.recv(); + let _w = TcpWatcher::connect(local_loop(), addr).unwrap(); + port.recv(); + fail!(); } } diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index b453da0cc9ea2..c123f916ef23f 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -8,91 +8,327 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::libc; use std::c_str::CString; +use std::libc; +use std::rt::BlockedTask; +use std::rt::io::IoError; +use std::rt::local::Local; +use std::rt::rtio::{RtioPipe, RtioUnixListener, RtioUnixAcceptor}; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::rt::tube::Tube; +use std::task; -use super::{Loop, UvError, Watcher, NativeHandle, status_to_maybe_uv_error}; -use super::ConnectionCallback; -use net; +use stream::StreamWatcher; +use super::{Loop, UvError, UvHandle, Request, uv_error_to_io_error, + wait_until_woken_after}; +use uvio::HomingIO; use uvll; -pub struct Pipe(*uvll::uv_pipe_t); +pub struct PipeWatcher { + stream: StreamWatcher, + home: SchedHandle, + priv defused: bool, +} -impl Watcher for Pipe {} +pub struct PipeListener { + home: SchedHandle, + pipe: *uvll::uv_pipe_t, + priv outgoing: Tube>, +} -impl Pipe { - pub fn new(loop_: &Loop, ipc: bool) -> Pipe { - unsafe { +pub struct PipeAcceptor { + listener: ~PipeListener, + priv incoming: Tube>, +} + +// PipeWatcher implementation and traits + +impl PipeWatcher { + // Creates an uninitialized pipe watcher. The underlying uv pipe is ready to + // get bound to some other source (this is normally a helper method paired + // with another call). 
+ pub fn new(loop_: &Loop, ipc: bool) -> PipeWatcher { + let handle = unsafe { let handle = uvll::malloc_handle(uvll::UV_NAMED_PIPE); - assert!(handle.is_not_null()); + assert!(!handle.is_null()); let ipc = ipc as libc::c_int; - assert_eq!(uvll::pipe_init(loop_.native_handle(), handle, ipc), 0); - let mut ret: Pipe = - NativeHandle::from_native_handle(handle); - ret.install_watcher_data(); - ret + assert_eq!(uvll::uv_pipe_init(loop_.handle, handle, ipc), 0); + handle + }; + PipeWatcher { + stream: StreamWatcher::new(handle), + home: get_handle_to_current_scheduler!(), + defused: false, } } - pub fn as_stream(&self) -> net::StreamWatcher { - net::StreamWatcher(**self as *uvll::uv_stream_t) - } - - #[fixed_stack_segment] #[inline(never)] - pub fn open(&mut self, file: libc::c_int) -> Result<(), UvError> { - match unsafe { uvll::pipe_open(self.native_handle(), file) } { - 0 => Ok(()), + pub fn open(loop_: &Loop, file: libc::c_int) -> Result + { + let pipe = PipeWatcher::new(loop_, false); + match unsafe { uvll::uv_pipe_open(pipe.handle(), file) } { + 0 => Ok(pipe), n => Err(UvError(n)) } } - #[fixed_stack_segment] #[inline(never)] - pub fn bind(&mut self, name: &CString) -> Result<(), UvError> { - do name.with_ref |name| { - match unsafe { uvll::pipe_bind(self.native_handle(), name) } { - 0 => Ok(()), + pub fn connect(loop_: &Loop, name: &CString) -> Result + { + struct Ctx { task: Option, result: libc::c_int, } + return do task::unkillable { + let mut cx = Ctx { task: None, result: 0 }; + let mut req = Request::new(uvll::UV_CONNECT); + let pipe = PipeWatcher::new(loop_, false); + + do wait_until_woken_after(&mut cx.task) { + unsafe { + uvll::uv_pipe_connect(req.handle, + pipe.handle(), + name.with_ref(|p| p), + connect_cb) + } + req.set_data(&cx); + req.defuse(); // uv callback now owns this request + } + match cx.result { + 0 => Ok(pipe), n => Err(UvError(n)) } + + }; + + extern fn connect_cb(req: *uvll::uv_connect_t, status: libc::c_int) {; + let req = 
Request::wrap(req); + assert!(status != uvll::ECANCELED); + let cx: &mut Ctx = unsafe { req.get_data() }; + cx.result = status; + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(cx.task.take_unwrap()); } } - #[fixed_stack_segment] #[inline(never)] - pub fn connect(&mut self, name: &CString, cb: ConnectionCallback) { - { - let data = self.get_watcher_data(); - assert!(data.connect_cb.is_none()); - data.connect_cb = Some(cb); + pub fn handle(&self) -> *uvll::uv_pipe_t { self.stream.handle } + + // Unwraps the underlying uv pipe. This cancels destruction of the pipe and + // allows the pipe to get moved elsewhere + fn unwrap(mut self) -> *uvll::uv_pipe_t { + self.defused = true; + return self.stream.handle; + } +} + +impl RtioPipe for PipeWatcher { + fn read(&mut self, buf: &mut [u8]) -> Result { + let _m = self.fire_homing_missile(); + self.stream.read(buf).map_err(uv_error_to_io_error) + } + + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + self.stream.write(buf).map_err(uv_error_to_io_error) + } +} + +impl HomingIO for PipeWatcher { + fn home<'a>(&'a mut self) -> &'a mut SchedHandle { &mut self.home } +} + +impl UvHandle for PipeWatcher { + fn uv_handle(&self) -> *uvll::uv_pipe_t { self.stream.handle } +} + +impl Drop for PipeWatcher { + fn drop(&mut self) { + if !self.defused { + let _m = self.fire_homing_missile(); + self.close(); } + } +} + +extern fn pipe_close_cb(handle: *uvll::uv_handle_t) { + unsafe { uvll::free_handle(handle) } +} - let connect = net::ConnectRequest::new(); - let name = do name.with_ref |p| { p }; +// PipeListener implementation and traits - unsafe { - uvll::pipe_connect(connect.native_handle(), - self.native_handle(), - name, - connect_cb) +impl PipeListener { + pub fn bind(loop_: &Loop, name: &CString) -> Result<~PipeListener, UvError> { + do task::unkillable { + let pipe = PipeWatcher::new(loop_, false); + match unsafe { + uvll::uv_pipe_bind(pipe.handle(), 
name.with_ref(|p| p)) + } { + 0 => { + // If successful, unwrap the PipeWatcher because we control how + // we close the pipe differently. We can't rely on + // StreamWatcher's default close method. + let p = ~PipeListener { + home: get_handle_to_current_scheduler!(), + pipe: pipe.unwrap(), + outgoing: Tube::new(), + }; + Ok(p.install()) + } + n => Err(UvError(n)) + } } + } +} - extern "C" fn connect_cb(req: *uvll::uv_connect_t, status: libc::c_int) { - let connect_request: net::ConnectRequest = - NativeHandle::from_native_handle(req); - let mut stream_watcher = connect_request.stream(); - connect_request.delete(); +impl RtioUnixListener for PipeListener { + fn listen(mut ~self) -> Result<~RtioUnixAcceptor, IoError> { + // create the acceptor object from ourselves + let incoming = self.outgoing.clone(); + let mut acceptor = ~PipeAcceptor { + listener: self, + incoming: incoming, + }; - let cb = stream_watcher.get_watcher_data().connect_cb.take_unwrap(); - let status = status_to_maybe_uv_error(status); - cb(stream_watcher, status); + let _m = acceptor.fire_homing_missile(); + // XXX: the 128 backlog should be configurable + match unsafe { uvll::uv_listen(acceptor.listener.pipe, 128, listen_cb) } { + 0 => Ok(acceptor as ~RtioUnixAcceptor), + n => Err(uv_error_to_io_error(UvError(n))), } } +} +impl HomingIO for PipeListener { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } } -impl NativeHandle<*uvll::uv_pipe_t> for Pipe { - fn from_native_handle(handle: *uvll::uv_pipe_t) -> Pipe { - Pipe(handle) +impl UvHandle for PipeListener { + fn uv_handle(&self) -> *uvll::uv_pipe_t { self.pipe } +} + +extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { + assert!(status != uvll::ECANCELED); + let msg = match status { + 0 => { + let loop_ = Loop::wrap(unsafe { + uvll::get_loop_for_uv_handle(server) + }); + let client = PipeWatcher::new(&loop_, false); + assert_eq!(unsafe { uvll::uv_accept(server, client.handle()) }, 0); + Ok(~client as 
~RtioPipe) + } + n => Err(uv_error_to_io_error(UvError(n))) + }; + + let pipe: &mut PipeListener = unsafe { UvHandle::from_uv_handle(&server) }; + pipe.outgoing.send(msg); +} + +impl Drop for PipeListener { + fn drop(&mut self) { + let _m = self.fire_homing_missile(); + self.close(); + } +} + +// PipeAcceptor implementation and traits + +impl RtioUnixAcceptor for PipeAcceptor { + fn accept(&mut self) -> Result<~RtioPipe, IoError> { + let _m = self.fire_homing_missile(); + self.incoming.recv() + } +} + +impl HomingIO for PipeAcceptor { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } +} + +#[cfg(test)] +mod tests { + use std::cell::Cell; + use std::comm::oneshot; + use std::rt::rtio::{RtioUnixListener, RtioUnixAcceptor, RtioPipe}; + use std::rt::test::next_test_unix; + use std::task; + + use super::*; + use super::super::local_loop; + + #[test] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work + fn connect_err() { + match PipeWatcher::connect(local_loop(), &"path/to/nowhere".to_c_str()) { + Ok(*) => fail!(), + Err(*) => {} + } + } + + #[test] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work + fn bind_err() { + match PipeListener::bind(local_loop(), &"path/to/nowhere".to_c_str()) { + Ok(*) => fail!(), + Err(e) => assert_eq!(e.name(), ~"EACCES"), + } + } + + #[test] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work + fn bind() { + let p = next_test_unix().to_c_str(); + match PipeListener::bind(local_loop(), &p) { + Ok(*) => {} + Err(*) => fail!(), + } + } + + #[test] #[should_fail] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work + fn bind_fail() { + let p = next_test_unix().to_c_str(); + let _w = PipeListener::bind(local_loop(), &p).unwrap(); + fail!(); } - fn native_handle(&self) -> *uvll::uv_pipe_t { - match self { &Pipe(ptr) => ptr } + + #[test] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work + fn connect() { + let path = next_test_unix(); + let 
path2 = path.clone(); + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + + do spawn { + let p = PipeListener::bind(local_loop(), &path2.to_c_str()).unwrap(); + let mut p = p.listen().unwrap(); + chan.take().send(()); + let mut client = p.accept().unwrap(); + let mut buf = [0]; + assert!(client.read(buf).unwrap() == 1); + assert_eq!(buf[0], 1); + assert!(client.write([2]).is_ok()); + } + port.recv(); + let mut c = PipeWatcher::connect(local_loop(), &path.to_c_str()).unwrap(); + assert!(c.write([1]).is_ok()); + let mut buf = [0]; + assert!(c.read(buf).unwrap() == 1); + assert_eq!(buf[0], 2); + } + + #[test] #[should_fail] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work + fn connect_fail() { + let path = next_test_unix(); + let path2 = path.clone(); + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + + do task::spawn_unlinked { // plz no linked failure + let p = PipeListener::bind(local_loop(), &path2.to_c_str()).unwrap(); + let mut p = p.listen().unwrap(); + chan.take().send(()); + p.accept(); + } + port.recv(); + let _c = PipeWatcher::connect(local_loop(), &path.to_c_str()).unwrap(); + fail!() + } } diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 2d746e329f44a..7e75515972cb8 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -8,59 +8,44 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cell::Cell; +use std::libc::c_int; use std::libc; use std::ptr; -use std::vec; +use std::rt::BlockedTask; +use std::rt::io::IoError; use std::rt::io::process::*; +use std::rt::local::Local; +use std::rt::rtio::RtioProcess; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::vec; -use super::{Watcher, Loop, NativeHandle, UvError}; -use super::{status_to_maybe_uv_error, ExitCallback}; -use uvio::{UvPipeStream, UvUnboundPipe}; +use super::{Loop, UvHandle, UvError, uv_error_to_io_error, + wait_until_woken_after}; +use uvio::HomingIO; use uvll; +use pipe::PipeWatcher; -/// A process wraps the handle of the underlying uv_process_t. -pub struct Process(*uvll::uv_process_t); +pub struct Process { + handle: *uvll::uv_process_t, + home: SchedHandle, -impl Watcher for Process {} + /// Task to wake up (may be null) for when the process exits + to_wake: Option, -impl Process { - /// Creates a new process, ready to spawn inside an event loop - pub fn new() -> Process { - let handle = unsafe { uvll::malloc_handle(uvll::UV_PROCESS) }; - assert!(handle.is_not_null()); - let mut ret: Process = NativeHandle::from_native_handle(handle); - ret.install_watcher_data(); - return ret; - } + /// Collected from the exit_cb + exit_status: Option, + term_signal: Option, +} +impl Process { /// Spawn a new process inside the specified event loop. /// - /// The `config` variable will be passed down to libuv, and the `exit_cb` - /// will be run only once, when the process exits. - /// /// Returns either the corresponding process object or an error which /// occurred. 
- pub fn spawn(&mut self, loop_: &Loop, config: ProcessConfig, - exit_cb: ExitCallback) - -> Result<~[Option<~UvPipeStream>], UvError> + pub fn spawn(loop_: &Loop, config: ProcessConfig) + -> Result<(~Process, ~[Option]), UvError> { let cwd = config.cwd.map(|s| s.to_c_str()); - - extern fn on_exit(p: *uvll::uv_process_t, - exit_status: libc::c_int, - term_signal: libc::c_int) { - let mut p: Process = NativeHandle::from_native_handle(p); - let err = match exit_status { - 0 => None, - _ => status_to_maybe_uv_error(-1) - }; - p.get_watcher_data().exit_cb.take_unwrap()(p, - exit_status as int, - term_signal as int, - err); - } - let io = config.io; let mut stdio = vec::with_capacity::(io.len()); let mut ret_io = vec::with_capacity(io.len()); @@ -73,9 +58,7 @@ impl Process { } } - let exit_cb = Cell::new(exit_cb); - let ret_io = Cell::new(ret_io); - do with_argv(config.program, config.args) |argv| { + let ret = do with_argv(config.program, config.args) |argv| { do with_env(config.env) |envp| { let options = uvll::uv_process_options_t { exit_cb: on_exit, @@ -93,40 +76,52 @@ impl Process { gid: 0, }; + let handle = UvHandle::alloc(None::, uvll::UV_PROCESS); + let process = ~Process { + handle: handle, + home: get_handle_to_current_scheduler!(), + to_wake: None, + exit_status: None, + term_signal: None, + }; match unsafe { - uvll::spawn(loop_.native_handle(), **self, options) + uvll::uv_spawn(loop_.handle, handle, &options) } { - 0 => { - (*self).get_watcher_data().exit_cb = Some(exit_cb.take()); - Ok(ret_io.take()) - } - err => Err(UvError(err)) + 0 => Ok(process.install()), + err => Err(UvError(err)), } } - } - } + }; - /// Sends a signal to this process. 
- /// - /// This is a wrapper around `uv_process_kill` - pub fn kill(&self, signum: int) -> Result<(), UvError> { - match unsafe { - uvll::process_kill(self.native_handle(), signum as libc::c_int) - } { - 0 => Ok(()), - err => Err(UvError(err)) + match ret { + Ok(p) => Ok((p, ret_io)), + Err(e) => Err(e), } } +} + +extern fn on_exit(handle: *uvll::uv_process_t, + exit_status: i64, + term_signal: libc::c_int) { + let p: &mut Process = unsafe { UvHandle::from_uv_handle(&handle) }; - /// Returns the process id of a spawned process - pub fn pid(&self) -> libc::pid_t { - unsafe { uvll::process_pid(**self) as libc::pid_t } + assert!(p.exit_status.is_none()); + assert!(p.term_signal.is_none()); + p.exit_status = Some(exit_status as int); + p.term_signal = Some(term_signal as int); + + match p.to_wake.take() { + Some(task) => { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task); + } + None => {} } } unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t, io: &StdioContainer, - loop_: &Loop) -> Option<~UvPipeStream> { + loop_: &Loop) -> Option { match *io { Ignored => { uvll::set_stdio_container_flags(dst, uvll::STDIO_IGNORE); @@ -145,11 +140,10 @@ unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t, if writable { flags |= uvll::STDIO_WRITABLE_PIPE as libc::c_int; } - let pipe = UvUnboundPipe::new(loop_); - let handle = pipe.pipe.as_stream().native_handle(); + let pipe = PipeWatcher::new(loop_, false); uvll::set_stdio_container_flags(dst, flags); - uvll::set_stdio_container_stream(dst, handle); - Some(~UvPipeStream::new(pipe)) + uvll::set_stdio_container_stream(dst, pipe.handle()); + Some(pipe) } } } @@ -192,11 +186,52 @@ fn with_env(env: Option<&[(~str, ~str)]>, f: &fn(**libc::c_char) -> T) -> T { c_envp.as_imm_buf(|buf, _| f(buf)) } -impl NativeHandle<*uvll::uv_process_t> for Process { - fn from_native_handle(handle: *uvll::uv_process_t) -> Process { - Process(handle) +impl HomingIO for Process { + fn home<'r>(&'r mut self) -> 
&'r mut SchedHandle { &mut self.home } +} + +impl UvHandle for Process { + fn uv_handle(&self) -> *uvll::uv_process_t { self.handle } +} + +impl RtioProcess for Process { + fn id(&self) -> libc::pid_t { + unsafe { uvll::process_pid(self.handle) as libc::pid_t } } - fn native_handle(&self) -> *uvll::uv_process_t { - match self { &Process(ptr) => ptr } + + fn kill(&mut self, signal: int) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + match unsafe { + uvll::uv_process_kill(self.handle, signal as libc::c_int) + } { + 0 => Ok(()), + err => Err(uv_error_to_io_error(UvError(err))) + } + } + + fn wait(&mut self) -> int { + // Make sure (on the home scheduler) that we have an exit status listed + let _m = self.fire_homing_missile(); + match self.exit_status { + Some(*) => {} + None => { + // If there's no exit code previously listed, then the + // process's exit callback has yet to be invoked. We just + // need to deschedule ourselves and wait to be reawoken. + wait_until_woken_after(&mut self.to_wake, || {}); + assert!(self.exit_status.is_some()); + } + } + + // FIXME(#10109): this is wrong + self.exit_status.unwrap() + } +} + +impl Drop for Process { + fn drop(&mut self) { + let _m = self.fire_homing_missile(); + assert!(self.to_wake.is_none()); + self.close(); } } diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index 3fcf449959dba..da2e1d8837c45 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -8,65 +8,93 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cast; use std::libc::c_int; use std::rt::io::signal::Signum; +use std::rt::sched::{SchedHandle, Scheduler}; +use std::comm::{SharedChan, SendDeferred}; +use std::rt::local::Local; +use std::rt::rtio::RtioSignal; -use super::{Loop, NativeHandle, SignalCallback, UvError, Watcher}; +use super::{Loop, UvError, UvHandle}; use uvll; +use uvio::HomingIO; -pub struct SignalWatcher(*uvll::uv_signal_t); +pub struct SignalWatcher { + handle: *uvll::uv_signal_t, + home: SchedHandle, -impl Watcher for SignalWatcher { } + channel: SharedChan, + signal: Signum, +} impl SignalWatcher { - pub fn new(loop_: &mut Loop) -> SignalWatcher { - unsafe { - let handle = uvll::malloc_handle(uvll::UV_SIGNAL); - assert!(handle.is_not_null()); - assert!(0 == uvll::signal_init(loop_.native_handle(), handle)); - let mut watcher: SignalWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher; - } - } - - pub fn start(&mut self, signum: Signum, callback: SignalCallback) - -> Result<(), UvError> - { - return unsafe { - match uvll::signal_start(self.native_handle(), signal_cb, - signum as c_int) { - 0 => { - let data = self.get_watcher_data(); - data.signal_cb = Some(callback); - Ok(()) - } - n => Err(UvError(n)), - } + pub fn new(loop_: &mut Loop, signum: Signum, + channel: SharedChan) -> Result<~SignalWatcher, UvError> { + let s = ~SignalWatcher { + handle: UvHandle::alloc(None::, uvll::UV_SIGNAL), + home: get_handle_to_current_scheduler!(), + channel: channel, + signal: signum, }; + assert_eq!(unsafe { + uvll::uv_signal_init(loop_.handle, s.handle) + }, 0); - extern fn signal_cb(handle: *uvll::uv_signal_t, signum: c_int) { - let mut watcher: SignalWatcher = NativeHandle::from_native_handle(handle); - let data = watcher.get_watcher_data(); - let cb = data.signal_cb.get_ref(); - (*cb)(watcher, unsafe { cast::transmute(signum as int) }); + match unsafe { + uvll::uv_signal_start(s.handle, signal_cb, signum as c_int) + } { + 0 => Ok(s.install()), 
+ n => Err(UvError(n)), } - } - pub fn stop(&mut self) { - unsafe { - uvll::signal_stop(self.native_handle()); - } } } -impl NativeHandle<*uvll::uv_signal_t> for SignalWatcher { - fn from_native_handle(handle: *uvll::uv_signal_t) -> SignalWatcher { - SignalWatcher(handle) +extern fn signal_cb(handle: *uvll::uv_signal_t, signum: c_int) { + let s: &mut SignalWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; + assert_eq!(signum as int, s.signal as int); + s.channel.send_deferred(s.signal); +} + +impl HomingIO for SignalWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl UvHandle for SignalWatcher { + fn uv_handle(&self) -> *uvll::uv_signal_t { self.handle } +} + +impl RtioSignal for SignalWatcher {} + +impl Drop for SignalWatcher { + fn drop(&mut self) { + let _m = self.fire_homing_missile(); + self.close_async_(); } +} + +#[cfg(test)] +mod test { + use super::*; + use std::cell::Cell; + use super::super::local_loop; + use std::rt::io::signal; + use std::comm::{SharedChan, stream}; + + #[test] + fn closing_channel_during_drop_doesnt_kill_everything() { + // see issue #10375, relates to timers as well. + let (port, chan) = stream(); + let chan = SharedChan::new(chan); + let _signal = SignalWatcher::new(local_loop(), signal::Interrupt, + chan); + + let port = Cell::new(port); + do spawn { + port.take().try_recv(); + } - fn native_handle(&self) -> *uvll::uv_signal_t { - match self { &SignalWatcher(ptr) => ptr } + // when we drop the SignalWatcher we're going to destroy the channel, + // which must wake up the task on the other end } } diff --git a/src/librustuv/stream.rs b/src/librustuv/stream.rs new file mode 100644 index 0000000000000..08b307700c7cd --- /dev/null +++ b/src/librustuv/stream.rs @@ -0,0 +1,180 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cast; +use std::libc::{c_int, size_t, ssize_t}; +use std::ptr; +use std::rt::BlockedTask; +use std::rt::local::Local; +use std::rt::sched::Scheduler; + +use super::{UvError, Buf, slice_to_uv_buf, Request, wait_until_woken_after, + ForbidUnwind}; +use uvll; + +// This is a helper structure which is intended to get embedded into other +// Watcher structures. This structure will retain a handle to the underlying +// uv_stream_t instance, and all I/O operations assume that it's already located +// on the appropriate scheduler. +pub struct StreamWatcher { + handle: *uvll::uv_stream_t, + + // Cache the last used uv_write_t so we don't have to allocate a new one on + // every call to uv_write(). Ideally this would be a stack-allocated + // structure, but currently we don't have mappings for all the structures + // defined in libuv, so we're forced to malloc this. + priv last_write_req: Option, +} + +struct ReadContext { + buf: Option, + result: ssize_t, + task: Option, +} + +struct WriteContext { + result: c_int, + task: Option, +} + +impl StreamWatcher { + // Creates a new helper structure which should be then embedded into another + // watcher. This provides the generic read/write methods on streams. + // + // This structure will *not* close the stream when it is dropped. It is up + // to the enclosing structure to be sure to call the close method (which + // will block the task). Note that this is also required to prevent memory + // leaks. + // + // It should also be noted that the `data` field of the underlying uv handle + // will be manipulated on each of the methods called on this watcher. + // Wrappers should ensure to always reset the field to an appropriate value + // if they rely on the field to perform an action.
+ pub fn new(stream: *uvll::uv_stream_t) -> StreamWatcher { + StreamWatcher { + handle: stream, + last_write_req: None, + } + } + + pub fn read(&mut self, buf: &mut [u8]) -> Result { + // This read operation needs to get canceled on an unwind via libuv's + // uv_read_stop function + let _f = ForbidUnwind::new("stream read"); + + // Send off the read request, but don't block until we're sure that the + // read request is queued. + match unsafe { + uvll::uv_read_start(self.handle, alloc_cb, read_cb) + } { + 0 => { + let mut rcx = ReadContext { + buf: Some(slice_to_uv_buf(buf)), + result: 0, + task: None, + }; + do wait_until_woken_after(&mut rcx.task) { + unsafe { + uvll::set_data_for_uv_handle(self.handle, &rcx) + } + } + match rcx.result { + n if n < 0 => Err(UvError(n as c_int)), + n => Ok(n as uint), + } + } + n => Err(UvError(n)) + } + } + + pub fn write(&mut self, buf: &[u8]) -> Result<(), UvError> { + // The ownership of the write request is dubious if this function + // unwinds. I believe that if the write_cb fails to re-schedule the task + // then the write request will be leaked. + let _f = ForbidUnwind::new("stream write"); + + // Prepare the write request, either using a cached one or allocating a + // new one + let mut req = match self.last_write_req.take() { + Some(req) => req, None => Request::new(uvll::UV_WRITE), + }; + req.set_data(ptr::null::<()>()); + + // Send off the request, but be careful to not block until we're sure + // that the write request is queued. If the request couldn't be queued, + // then we should return immediately with an error.
+ match unsafe { + uvll::uv_write(req.handle, self.handle, [slice_to_uv_buf(buf)], + write_cb) + } { + 0 => { + let mut wcx = WriteContext { result: 0, task: None, }; + req.defuse(); // uv callback now owns this request + + do wait_until_woken_after(&mut wcx.task) { + req.set_data(&wcx); + } + self.last_write_req = Some(Request::wrap(req.handle)); + match wcx.result { + 0 => Ok(()), + n => Err(UvError(n)), + } + } + n => Err(UvError(n)), + } + } +} + +// This allocation callback expects to be invoked once and only once. It will +// unwrap the buffer in the ReadContext stored in the stream and return it. This +// will fail if it is called more than once. +extern fn alloc_cb(stream: *uvll::uv_stream_t, _hint: size_t, buf: *mut Buf) { + uvdebug!("alloc_cb"); + unsafe { + let rcx: &mut ReadContext = + cast::transmute(uvll::get_data_for_uv_handle(stream)); + *buf = rcx.buf.take().expect("stream alloc_cb called more than once"); + } +} + +// When a stream has read some data, we will always forcibly stop reading and +// return all the data read (even if it didn't fill the whole buffer). +extern fn read_cb(handle: *uvll::uv_stream_t, nread: ssize_t, _buf: *Buf) { + uvdebug!("read_cb {}", nread); + assert!(nread != uvll::ECANCELED as ssize_t); + let rcx: &mut ReadContext = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + // Stop reading so that no read callbacks are + // triggered before the user calls `read` again. + // XXX: Is there a performance impact to calling + // stop here? + unsafe { assert_eq!(uvll::uv_read_stop(handle), 0); } + rcx.result = nread; + + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(rcx.task.take_unwrap()); +} + +// Unlike reading, the WriteContext is stored in the uv_write_t request. Like +// reading, however, all this does is wake up the blocked task after squirreling +// away the error code as a result. 
+extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { + let mut req = Request::wrap(req); + assert!(status != uvll::ECANCELED); + // Remember to not free the request because it is re-used between writes on + // the same stream. + let wcx: &mut WriteContext = unsafe { req.get_data() }; + wcx.result = status; + req.defuse(); + + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(wcx.task.take_unwrap()); +} diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 9a693f6a27d35..0176399030517 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -8,150 +8,297 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::comm::{oneshot, stream, PortOne, ChanOne, SendDeferred}; use std::libc::c_int; +use std::rt::BlockedTask; +use std::rt::local::Local; +use std::rt::rtio::RtioTimer; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::util; use uvll; -use super::{Watcher, Loop, NativeHandle, TimerCallback, status_to_maybe_uv_error}; +use super::{Loop, UvHandle, ForbidUnwind, ForbidSwitch}; +use uvio::HomingIO; -pub struct TimerWatcher(*uvll::uv_timer_t); -impl Watcher for TimerWatcher { } +pub struct TimerWatcher { + handle: *uvll::uv_timer_t, + home: SchedHandle, + action: Option, +} + +pub enum NextAction { + WakeTask(BlockedTask), + SendOnce(ChanOne<()>), + SendMany(Chan<()>), +} impl TimerWatcher { - pub fn new(loop_: &mut Loop) -> TimerWatcher { - unsafe { - let handle = uvll::malloc_handle(uvll::UV_TIMER); - assert!(handle.is_not_null()); - assert!(0 == uvll::timer_init(loop_.native_handle(), handle)); - let mut watcher: TimerWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher; - } + pub fn new(loop_: &mut Loop) -> ~TimerWatcher { + let handle = UvHandle::alloc(None::, uvll::UV_TIMER); + assert_eq!(unsafe { + uvll::uv_timer_init(loop_.handle, handle) + }, 0); + let me = ~TimerWatcher { + handle: 
handle, + action: None, + home: get_handle_to_current_scheduler!(), + }; + return me.install(); } - pub fn start(&mut self, timeout: u64, repeat: u64, cb: TimerCallback) { - { - let data = self.get_watcher_data(); - data.timer_cb = Some(cb); - } + fn start(&mut self, msecs: u64, period: u64) { + assert_eq!(unsafe { + uvll::uv_timer_start(self.handle, timer_cb, msecs, period) + }, 0) + } - unsafe { - uvll::timer_start(self.native_handle(), timer_cb, timeout, repeat); - } + fn stop(&mut self) { + assert_eq!(unsafe { uvll::uv_timer_stop(self.handle) }, 0) + } +} - extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) { - let mut watcher: TimerWatcher = NativeHandle::from_native_handle(handle); - let data = watcher.get_watcher_data(); - let cb = data.timer_cb.get_ref(); - let status = status_to_maybe_uv_error(status); - (*cb)(watcher, status); +impl HomingIO for TimerWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl UvHandle for TimerWatcher { + fn uv_handle(&self) -> *uvll::uv_timer_t { self.handle } +} + +impl RtioTimer for TimerWatcher { + fn sleep(&mut self, msecs: u64) { + // As with all of the below functions, we must be extra careful when + // destroying the previous action. If the previous action was a channel, + // destroying it could invoke a context switch. For these situations, + // we must temporarily un-home ourselves, then destroy the action, and + // then re-home again. + let missile = self.fire_homing_missile(); + self.stop(); + let _missile = match util::replace(&mut self.action, None) { + None => missile, // no need to do a homing dance + Some(action) => { + util::ignore(missile); // un-home ourself + util::ignore(action); // destroy the previous action + self.fire_homing_missile() // re-home ourself + } + }; + + // If the descheduling operation unwinds after the timer has been + // started, then we need to call stop on the timer.
+ let _f = ForbidUnwind::new("timer"); + + let sched: ~Scheduler = Local::take(); + do sched.deschedule_running_task_and_then |_sched, task| { + self.action = Some(WakeTask(task)); + self.start(msecs, 0); } + self.stop(); } - pub fn stop(&mut self) { - unsafe { - uvll::timer_stop(self.native_handle()); - } + fn oneshot(&mut self, msecs: u64) -> PortOne<()> { + let (port, chan) = oneshot(); + + // similarly to the destructor, we must drop the previous action outside + // of the homing missile + let _prev_action = { + let _m = self.fire_homing_missile(); + self.stop(); + self.start(msecs, 0); + util::replace(&mut self.action, Some(SendOnce(chan))) + }; + + return port; + } + + fn period(&mut self, msecs: u64) -> Port<()> { + let (port, chan) = stream(); + + // similarly to the destructor, we must drop the previous action outside + // of the homing missile + let _prev_action = { + let _m = self.fire_homing_missile(); + self.stop(); + self.start(msecs, msecs); + util::replace(&mut self.action, Some(SendMany(chan))) + }; + + return port; } } -impl NativeHandle<*uvll::uv_timer_t> for TimerWatcher { - fn from_native_handle(handle: *uvll::uv_timer_t) -> TimerWatcher { - TimerWatcher(handle) +extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) { + let _f = ForbidSwitch::new("timer callback can't switch"); + assert_eq!(status, 0); + let timer: &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; + + match timer.action.take_unwrap() { + WakeTask(task) => { + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(task); + } + SendOnce(chan) => chan.send_deferred(()), + SendMany(chan) => { + chan.send_deferred(()); + timer.action = Some(SendMany(chan)); + } } - fn native_handle(&self) -> *uvll::uv_idle_t { - match self { &TimerWatcher(ptr) => ptr } +} + +impl Drop for TimerWatcher { + fn drop(&mut self) { + // note that this drop is a little subtle. 
Dropping a channel which is + // held internally may invoke some scheduling operations. We can't take + // the channel unless we're on the home scheduler, but once we're on the + // home scheduler we should never move. Hence, we take the timer's + // action item and then move it outside of the homing block. + let _action = { + let _m = self.fire_homing_missile(); + self.stop(); + self.close_async_(); + self.action.take() + }; } } #[cfg(test)] mod test { use super::*; - use Loop; - use std::unstable::run_in_bare_thread; + use std::cell::Cell; + use std::rt::rtio::RtioTimer; + use super::super::local_loop; #[test] - fn smoke_test() { - do run_in_bare_thread { - let mut count = 0; - let count_ptr: *mut int = &mut count; - let mut loop_ = Loop::new(); - let mut timer = TimerWatcher::new(&mut loop_); - do timer.start(10, 0) |timer, status| { - assert!(status.is_none()); - unsafe { *count_ptr += 1 }; - timer.close(||()); - } - loop_.run(); - loop_.close(); - assert!(count == 1); - } + fn oneshot() { + let mut timer = TimerWatcher::new(local_loop()); + let port = timer.oneshot(1); + port.recv(); + let port = timer.oneshot(1); + port.recv(); } #[test] - fn start_twice() { - do run_in_bare_thread { - let mut count = 0; - let count_ptr: *mut int = &mut count; - let mut loop_ = Loop::new(); - let mut timer = TimerWatcher::new(&mut loop_); - do timer.start(10, 0) |timer, status| { - let mut timer = timer; - assert!(status.is_none()); - unsafe { *count_ptr += 1 }; - do timer.start(10, 0) |timer, status| { - assert!(status.is_none()); - unsafe { *count_ptr += 1 }; - timer.close(||()); - } - } - loop_.run(); - loop_.close(); - assert!(count == 2); + fn override() { + let mut timer = TimerWatcher::new(local_loop()); + let oport = timer.oneshot(1); + let pport = timer.period(1); + timer.sleep(1); + assert_eq!(oport.try_recv(), None); + assert_eq!(pport.try_recv(), None); + timer.oneshot(1).recv(); + } + + #[test] + fn period() { + let mut timer = TimerWatcher::new(local_loop()); + 
let port = timer.period(1); + port.recv(); + port.recv(); + let port = timer.period(1); + port.recv(); + port.recv(); + } + + #[test] + fn sleep() { + let mut timer = TimerWatcher::new(local_loop()); + timer.sleep(1); + timer.sleep(1); + } + + #[test] #[should_fail] + fn oneshot_fail() { + let mut timer = TimerWatcher::new(local_loop()); + let _port = timer.oneshot(1); + fail!(); + } + + #[test] #[should_fail] + fn period_fail() { + let mut timer = TimerWatcher::new(local_loop()); + let _port = timer.period(1); + fail!(); + } + + #[test] #[should_fail] + fn normal_fail() { + let _timer = TimerWatcher::new(local_loop()); + fail!(); + } + + #[test] + fn closing_channel_during_drop_doesnt_kill_everything() { + // see issue #10375 + let mut timer = TimerWatcher::new(local_loop()); + let timer_port = Cell::new(timer.period(1000)); + + do spawn { + timer_port.take().try_recv(); } + + // when we drop the TimerWatcher we're going to destroy the channel, + // which must wake up the task on the other end } #[test] - fn repeat_stop() { - do run_in_bare_thread { - let mut count = 0; - let count_ptr: *mut int = &mut count; - let mut loop_ = Loop::new(); - let mut timer = TimerWatcher::new(&mut loop_); - do timer.start(1, 2) |timer, status| { - assert!(status.is_none()); - unsafe { - *count_ptr += 1; - - if *count_ptr == 10 { - - // Stop the timer and do something else - let mut timer = timer; - timer.stop(); - // Freeze timer so it can be captured - let timer = timer; - - let mut loop_ = timer.event_loop(); - let mut timer2 = TimerWatcher::new(&mut loop_); - do timer2.start(10, 0) |timer2, _| { - - *count_ptr += 1; - - timer2.close(||()); - - // Restart the original timer - let mut timer = timer; - do timer.start(1, 0) |timer, _| { - *count_ptr += 1; - timer.close(||()); - } - } - } - }; - } - loop_.run(); - loop_.close(); - assert!(count == 12); + fn reset_doesnt_switch_tasks() { + // similar test to the one above. 
+ let mut timer = TimerWatcher::new(local_loop()); + let timer_port = Cell::new(timer.period(1000)); + + do spawn { + timer_port.take().try_recv(); } + + timer.oneshot(1); + } + #[test] + fn reset_doesnt_switch_tasks2() { + // similar test to the one above. + let mut timer = TimerWatcher::new(local_loop()); + let timer_port = Cell::new(timer.period(1000)); + + do spawn { + timer_port.take().try_recv(); + } + + timer.sleep(1); + } + + #[test] + fn sender_goes_away_oneshot() { + let port = { + let mut timer = TimerWatcher::new(local_loop()); + timer.oneshot(1000) + }; + assert_eq!(port.try_recv(), None); } + #[test] + fn sender_goes_away_period() { + let port = { + let mut timer = TimerWatcher::new(local_loop()); + timer.period(1000) + }; + assert_eq!(port.try_recv(), None); + } + + #[test] + fn receiver_goes_away_oneshot() { + let mut timer1 = TimerWatcher::new(local_loop()); + timer1.oneshot(1); + let mut timer2 = TimerWatcher::new(local_loop()); + // while sleeping, the prevous timer should fire and not have its + // callback do something terrible. + timer2.sleep(2); + } + + #[test] + fn receiver_goes_away_period() { + let mut timer1 = TimerWatcher::new(local_loop()); + timer1.period(1); + let mut timer2 = TimerWatcher::new(local_loop()); + // while sleeping, the prevous timer should fire and not have its + // callback do something terrible. + timer2.sleep(2); + } } diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index 65ba09376c14d..d3f001f39312f 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -9,75 +9,110 @@ // except according to those terms. use std::libc; +use std::rt::io::IoError; +use std::rt::local::Local; +use std::rt::rtio::RtioTTY; +use std::rt::sched::{Scheduler, SchedHandle}; -use super::{Watcher, Loop, NativeHandle, UvError}; -use net; +use stream::StreamWatcher; +use super::{Loop, UvError, UvHandle, uv_error_to_io_error}; +use uvio::HomingIO; use uvll; -/// A process wraps the handle of the underlying uv_process_t. 
-pub struct TTY(*uvll::uv_tty_t); - -impl Watcher for TTY {} +pub struct TtyWatcher{ + tty: *uvll::uv_tty_t, + stream: StreamWatcher, + home: SchedHandle, + fd: libc::c_int, +} -impl TTY { - #[fixed_stack_segment] #[inline(never)] - pub fn new(loop_: &Loop, fd: libc::c_int, readable: bool) -> - Result +impl TtyWatcher { + pub fn new(loop_: &Loop, fd: libc::c_int, readable: bool) + -> Result { - let handle = unsafe { uvll::malloc_handle(uvll::UV_TTY) }; - assert!(handle.is_not_null()); + // libuv may succeed in giving us a handle (via uv_tty_init), but if the + // handle isn't actually connected to a terminal there are frequently + // many problems in using it with libuv. To get around this, always + // return a failure if the specified file descriptor isn't actually a + // TTY. + // + // Related: + // - https://github.com/joyent/libuv/issues/982 + // - https://github.com/joyent/libuv/issues/988 + if unsafe { uvll::guess_handle(fd) != uvll::UV_TTY as libc::c_int } { + return Err(UvError(uvll::EBADF)); + } - let ret = unsafe { - uvll::tty_init(loop_.native_handle(), handle, fd as libc::c_int, - readable as libc::c_int) - }; - match ret { + // If this file descriptor is indeed guessed to be a tty, then go ahead + // with attempting to open it as a tty. 
+ let handle = UvHandle::alloc(None::, uvll::UV_TTY); + match unsafe { + uvll::uv_tty_init(loop_.handle, handle, fd as libc::c_int, + readable as libc::c_int) + } { 0 => { - let mut ret: TTY = NativeHandle::from_native_handle(handle); - ret.install_watcher_data(); - Ok(ret) + Ok(TtyWatcher { + tty: handle, + stream: StreamWatcher::new(handle), + home: get_handle_to_current_scheduler!(), + fd: fd, + }) } n => { - unsafe { uvll::free_handle(handle); } + unsafe { uvll::free_handle(handle) } Err(UvError(n)) } } } +} + +impl RtioTTY for TtyWatcher { + fn read(&mut self, buf: &mut [u8]) -> Result { + let _m = self.fire_homing_missile(); + self.stream.read(buf).map_err(uv_error_to_io_error) + } - pub fn as_stream(&self) -> net::StreamWatcher { - net::StreamWatcher(**self as *uvll::uv_stream_t) + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + let _m = self.fire_homing_missile(); + self.stream.write(buf).map_err(uv_error_to_io_error) } - #[fixed_stack_segment] #[inline(never)] - pub fn set_mode(&self, raw: bool) -> Result<(), UvError> { + fn set_raw(&mut self, raw: bool) -> Result<(), IoError> { let raw = raw as libc::c_int; - match unsafe { uvll::tty_set_mode(self.native_handle(), raw) } { + let _m = self.fire_homing_missile(); + match unsafe { uvll::uv_tty_set_mode(self.tty, raw) } { 0 => Ok(()), - n => Err(UvError(n)) + n => Err(uv_error_to_io_error(UvError(n))) } } - #[fixed_stack_segment] #[inline(never)] #[allow(unused_mut)] - pub fn get_winsize(&self) -> Result<(int, int), UvError> { + #[allow(unused_mut)] + fn get_winsize(&mut self) -> Result<(int, int), IoError> { let mut width: libc::c_int = 0; let mut height: libc::c_int = 0; let widthptr: *libc::c_int = &width; let heightptr: *libc::c_int = &width; - match unsafe { uvll::tty_get_winsize(self.native_handle(), - widthptr, heightptr) } { + let _m = self.fire_homing_missile(); + match unsafe { uvll::uv_tty_get_winsize(self.tty, + widthptr, heightptr) } { 0 => Ok((width as int, height as int)), - n => 
Err(UvError(n)) + n => Err(uv_error_to_io_error(UvError(n))) } } } -impl NativeHandle<*uvll::uv_tty_t> for TTY { - fn from_native_handle(handle: *uvll::uv_tty_t) -> TTY { - TTY(handle) - } - fn native_handle(&self) -> *uvll::uv_tty_t { - match self { &TTY(ptr) => ptr } - } +impl UvHandle for TtyWatcher { + fn uv_handle(&self) -> *uvll::uv_tty_t { self.tty } +} + +impl HomingIO for TtyWatcher { + fn home<'a>(&'a mut self) -> &'a mut SchedHandle { &mut self.home } } +impl Drop for TtyWatcher { + fn drop(&mut self) { + let _m = self.fire_homing_missile(); + self.close_async_(); + } +} diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 3f119bc8ccbf0..75ec5f26b336c 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -9,47 +9,32 @@ // except according to those terms. use std::c_str::CString; -use std::cast::transmute; -use std::cast; -use std::cell::Cell; -use std::comm::{SendDeferred, SharedChan, Port, PortOne, GenericChan}; +use std::comm::SharedChan; +use std::libc::c_int; use std::libc; -use std::libc::{c_int, c_uint, c_void, pid_t}; -use std::ptr; -use std::str; -use std::rt::io; +use std::path::Path; use std::rt::io::IoError; -use std::rt::io::net::ip::{SocketAddr, IpAddr}; -use std::rt::io::{standard_error, OtherIoError, SeekStyle, SeekSet, SeekCur, - SeekEnd}; +use std::rt::io::net::ip::SocketAddr; use std::rt::io::process::ProcessConfig; -use std::rt::BlockedTask; +use std::rt::io; use std::rt::local::Local; use std::rt::rtio::*; use std::rt::sched::{Scheduler, SchedHandle}; -use std::rt::tube::Tube; use std::rt::task::Task; -use std::unstable::sync::Exclusive; -use std::libc::{lseek, off_t}; -use std::rt::io::{FileMode, FileAccess, FileStat}; +use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, + S_IRUSR, S_IWUSR}; +use std::rt::io::{FileMode, FileAccess, Open, Append, Truncate, Read, Write, + ReadWrite, FileStat}; use std::rt::io::signal::Signum; -use std::task; +use std::util; use ai = 
std::rt::io::net::addrinfo; #[cfg(test)] use std::unstable::run_in_bare_thread; -#[cfg(test)] use std::rt::test::{spawntask, - next_test_ip4, - run_in_mt_newsched_task}; -#[cfg(test)] use std::rt::comm::oneshot; use super::*; -use idle::IdleWatcher; -use net::{UvIpv4SocketAddr, UvIpv6SocketAddr}; -use addrinfo::{GetAddrInfoRequest, accum_addrinfo}; +use addrinfo::GetAddrInfoRequest; -// XXX we should not be calling uvll functions in here. - -trait HomingIO { +pub trait HomingIO { fn home<'r>(&'r mut self) -> &'r mut SchedHandle; @@ -59,6 +44,13 @@ trait HomingIO { fn go_to_IO_home(&mut self) -> uint { use std::rt::sched::RunOnce; + unsafe { + let task: *mut Task = Local::unsafe_borrow(); + (*task).death.inhibit_kill((*task).unwinder.unwinding); + } + + let _f = ForbidUnwind::new("going home"); + let current_sched_id = do Local::borrow |sched: &mut Scheduler| { sched.sched_id() }; @@ -66,116 +58,81 @@ trait HomingIO { // Only need to invoke a context switch if we're not on the right // scheduler. if current_sched_id != self.home().sched_id { - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - /* FIXME(#8674) if the task was already killed then wake - * will return None. In that case, the home pointer will - * never be set. - * - * RESOLUTION IDEA: Since the task is dead, we should - * just abort the IO action. 
- */ - do task.wake().map |task| { - self.home().send(RunOnce(task)); - }; - } + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + do task.wake().map |task| { + self.home().send(RunOnce(task)); + }; } } + let current_sched_id = do Local::borrow |sched: &mut Scheduler| { + sched.sched_id() + }; + assert!(current_sched_id == self.home().sched_id); self.home().sched_id } - // XXX: dummy self parameter - fn restore_original_home(_: Option, io_home: uint) { - // It would truly be a sad day if we had moved off the home I/O - // scheduler while we were doing I/O. - assert_eq!(Local::borrow(|sched: &mut Scheduler| sched.sched_id()), - io_home); - - // If we were a homed task, then we must send ourselves back to the - // original scheduler. Otherwise, we can just return and keep running - if !Task::on_appropriate_sched() { - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - do task.wake().map |task| { - Scheduler::run_task(task); - }; - } - } - } - } - - fn home_for_io(&mut self, io: &fn(&mut Self) -> A) -> A { - let home = self.go_to_IO_home(); - let a = io(self); // do IO - HomingIO::restore_original_home(None::, home); - a // return the result of the IO + /// Fires a single homing missile, returning another missile targeted back + /// at the original home of this task. In other words, this function will + /// move the local task to its I/O scheduler and then return an RAII wrapper + /// which will return the task home. 
+ fn fire_homing_missile(&mut self) -> HomingMissile { + HomingMissile { io_home: self.go_to_IO_home() } } - fn home_for_io_consume(mut self, io: &fn(Self) -> A) -> A { - let home = self.go_to_IO_home(); - let a = io(self); // do IO - HomingIO::restore_original_home(None::, home); - a // return the result of the IO - } + /// Same as `fire_homing_missile`, but returns the local I/O scheduler as + /// well (the one that was homed to). + fn fire_homing_missile_sched(&mut self) -> (HomingMissile, ~Scheduler) { + // First, transplant ourselves to the home I/O scheduler + let missile = self.fire_homing_missile(); + // Next (must happen next), grab the local I/O scheduler + let io_sched: ~Scheduler = Local::take(); - fn home_for_io_with_sched(&mut self, io_sched: &fn(&mut Self, ~Scheduler) -> A) -> A { - let home = self.go_to_IO_home(); - let a = do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - io_sched(self, scheduler) // do IO and scheduling action - }; - HomingIO::restore_original_home(None::, home); - a // return result of IO + (missile, io_sched) } } -// get a handle for the current scheduler -macro_rules! get_handle_to_current_scheduler( - () => (do Local::borrow |sched: &mut Scheduler| { sched.make_handle() }) -) - -enum SocketNameKind { - TcpPeer, - Tcp, - Udp +/// After a homing operation has been completed, this will return the current +/// task back to its appropriate home (if applicable). The field is used to +/// assert that we are where we think we are. 
+struct HomingMissile { + priv io_home: uint, } -fn socket_name>(sk: SocketNameKind, - handle: U) -> Result { - let getsockname = match sk { - TcpPeer => uvll::tcp_getpeername, - Tcp => uvll::tcp_getsockname, - Udp => uvll::udp_getsockname, - }; - - // Allocate a sockaddr_storage - // since we don't know if it's ipv4 or ipv6 - let r_addr = unsafe { uvll::malloc_sockaddr_storage() }; +impl HomingMissile { + pub fn check(&self, msg: &'static str) { + let local_id = Local::borrow(|sched: &mut Scheduler| sched.sched_id()); + assert!(local_id == self.io_home, "{}", msg); + } +} - let r = unsafe { - getsockname(handle.native_handle() as *c_void, r_addr as *uvll::sockaddr_storage) - }; +impl Drop for HomingMissile { + fn drop(&mut self) { + let f = ForbidUnwind::new("leaving home"); - if r != 0 { - let status = status_to_maybe_uv_error(r); - return Err(uv_error_to_io_error(status.unwrap())); - } + // It would truly be a sad day if we had moved off the home I/O + // scheduler while we were doing I/O. + self.check("task moved away from the home scheduler"); - let addr = unsafe { - if uvll::is_ip6_addr(r_addr as *uvll::sockaddr) { - net::uv_socket_addr_to_socket_addr(UvIpv6SocketAddr(r_addr as *uvll::sockaddr_in6)) - } else { - net::uv_socket_addr_to_socket_addr(UvIpv4SocketAddr(r_addr as *uvll::sockaddr_in)) + // If we were a homed task, then we must send ourselves back to the + // original scheduler. Otherwise, we can just return and keep running + if !Task::on_appropriate_sched() { + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + do task.wake().map |task| { + Scheduler::run_task(task); + }; + } } - }; - - unsafe { uvll::free_sockaddr_storage(r_addr); } - Ok(addr) + util::ignore(f); + unsafe { + let task: *mut Task = Local::unsafe_borrow(); + (*task).death.allow_kill((*task).unwinder.unwinding); + } + } } // Obviously an Event Loop is always home. 
@@ -202,27 +159,16 @@ impl EventLoop for UvEventLoop { self.uvio.uv_loop().run(); } - fn callback(&mut self, f: ~fn()) { - let mut idle_watcher = IdleWatcher::new(self.uvio.uv_loop()); - do idle_watcher.start |mut idle_watcher, status| { - assert!(status.is_none()); - idle_watcher.stop(); - idle_watcher.close(||()); - f(); - } + fn callback(&mut self, f: proc()) { + IdleWatcher::onetime(self.uvio.uv_loop(), f); } - fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback { - let idle_watcher = IdleWatcher::new(self.uvio.uv_loop()); - ~UvPausibleIdleCallback { - watcher: idle_watcher, - idle_flag: false, - closed: false - } as ~PausibleIdleCallback + fn pausible_idle_callback(&mut self, cb: ~Callback) -> ~PausibleIdleCallback { + IdleWatcher::new(self.uvio.uv_loop(), cb) as ~PausibleIdleCallback } - fn remote_callback(&mut self, f: ~fn()) -> ~RemoteCallback { - ~UvRemoteCallback::new(self.uvio.uv_loop(), f) as ~RemoteCallback + fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback { + ~AsyncWatcher::new(self.uvio.uv_loop(), f) as ~RemoteCallback } fn io<'a>(&'a mut self, f: &fn(&'a mut IoFactory)) { @@ -236,44 +182,6 @@ pub extern "C" fn new_loop() -> ~EventLoop { ~UvEventLoop::new() as ~EventLoop } -pub struct UvPausibleIdleCallback { - priv watcher: IdleWatcher, - priv idle_flag: bool, - priv closed: bool -} - -impl PausibleIdleCallback for UvPausibleIdleCallback { - #[inline] - fn start(&mut self, f: ~fn()) { - do self.watcher.start |_idle_watcher, _status| { - f(); - }; - self.idle_flag = true; - } - #[inline] - fn pause(&mut self) { - if self.idle_flag == true { - self.watcher.stop(); - self.idle_flag = false; - } - } - #[inline] - fn resume(&mut self) { - if self.idle_flag == false { - self.watcher.restart(); - self.idle_flag = true; - } - } - #[inline] - fn close(&mut self) { - self.pause(); - if !self.closed { - self.closed = true; - self.watcher.close(||{}); - } - } -} - #[test] fn test_callback_run_once() { do run_in_bare_thread { @@ -288,119 
+196,6 @@ fn test_callback_run_once() { } } -// The entire point of async is to call into a loop from other threads so it does not need to home. -pub struct UvRemoteCallback { - // The uv async handle for triggering the callback - priv async: AsyncWatcher, - // A flag to tell the callback to exit, set from the dtor. This is - // almost never contested - only in rare races with the dtor. - priv exit_flag: Exclusive -} - -impl UvRemoteCallback { - pub fn new(loop_: &mut Loop, f: ~fn()) -> UvRemoteCallback { - let exit_flag = Exclusive::new(false); - let exit_flag_clone = exit_flag.clone(); - let async = do AsyncWatcher::new(loop_) |watcher, status| { - assert!(status.is_none()); - - // The synchronization logic here is subtle. To review, - // the uv async handle type promises that, after it is - // triggered the remote callback is definitely called at - // least once. UvRemoteCallback needs to maintain those - // semantics while also shutting down cleanly from the - // dtor. In our case that means that, when the - // UvRemoteCallback dtor calls `async.send()`, here `f` is - // always called later. - - // In the dtor both the exit flag is set and the async - // callback fired under a lock. Here, before calling `f`, - // we take the lock and check the flag. Because we are - // checking the flag before calling `f`, and the flag is - // set under the same lock as the send, then if the flag - // is set then we're guaranteed to call `f` after the - // final send. - - // If the check was done after `f()` then there would be a - // period between that call and the check where the dtor - // could be called in the other thread, missing the final - // callback while still destroying the handle. 
- - let should_exit = unsafe { - exit_flag_clone.with_imm(|&should_exit| should_exit) - }; - - f(); - - if should_exit { - watcher.close(||()); - } - - }; - UvRemoteCallback { - async: async, - exit_flag: exit_flag - } - } -} - -impl RemoteCallback for UvRemoteCallback { - fn fire(&mut self) { self.async.send() } -} - -impl Drop for UvRemoteCallback { - fn drop(&mut self) { - unsafe { - let this: &mut UvRemoteCallback = cast::transmute_mut(self); - do this.exit_flag.with |should_exit| { - // NB: These two things need to happen atomically. Otherwise - // the event handler could wake up due to a *previous* - // signal and see the exit flag, destroying the handle - // before the final send. - *should_exit = true; - this.async.send(); - } - } - } -} - -#[cfg(test)] -mod test_remote { - use std::cell::Cell; - use std::rt::test::*; - use std::rt::thread::Thread; - use std::rt::tube::Tube; - use std::rt::rtio::EventLoop; - use std::rt::local::Local; - use std::rt::sched::Scheduler; - - #[test] - fn test_uv_remote() { - do run_in_mt_newsched_task { - let mut tube = Tube::new(); - let tube_clone = tube.clone(); - let remote_cell = Cell::new_empty(); - do Local::borrow |sched: &mut Scheduler| { - let tube_clone = tube_clone.clone(); - let tube_clone_cell = Cell::new(tube_clone); - let remote = do sched.event_loop.remote_callback { - // This could be called multiple times - if !tube_clone_cell.is_empty() { - tube_clone_cell.take().send(1); - } - }; - remote_cell.put_back(remote); - } - let thread = do Thread::start { - remote_cell.take().fire(); - }; - - assert!(tube.recv() == 1); - thread.join(); - } - } -} - pub struct UvIoFactory(Loop); impl UvIoFactory { @@ -409,219 +204,47 @@ impl UvIoFactory { } } -/// Helper for a variety of simple uv_fs_* functions that have no ret val. This -/// function takes the loop that it will act on, and then invokes the specified -/// callback in a situation where the task wil be immediately blocked -/// afterwards. 
The `FsCallback` yielded must be invoked to reschedule the task -/// (once the result of the operation is known). -fn uv_fs_helper(loop_: &mut Loop, - retfn: extern "Rust" fn(&mut FsRequest) -> T, - cb: &fn(&mut FsRequest, &mut Loop, FsCallback)) - -> Result { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - let mut new_req = FsRequest::new(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do cb(&mut new_req, loop_) |req, err| { - let res = match err { - None => Ok(retfn(req)), - Some(err) => Err(uv_error_to_io_error(err)) - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - }; - } - } - assert!(!result_cell.is_empty()); - return result_cell.take(); -} - -fn unit(_: &mut FsRequest) {} - -fn fs_mkstat(f: &mut FsRequest) -> FileStat { - let path = unsafe { Path::new(CString::new(f.get_path(), false)) }; - let stat = f.get_stat(); - fn to_msec(stat: uvll::uv_timespec_t) -> u64 { - (stat.tv_sec * 1000 + stat.tv_nsec / 1000000) as u64 - } - let kind = match (stat.st_mode as c_int) & libc::S_IFMT { - libc::S_IFREG => io::TypeFile, - libc::S_IFDIR => io::TypeDirectory, - libc::S_IFIFO => io::TypeNamedPipe, - libc::S_IFBLK => io::TypeBlockSpecial, - libc::S_IFLNK => io::TypeSymlink, - _ => io::TypeUnknown, - }; - FileStat { - path: path, - size: stat.st_size as u64, - kind: kind, - perm: (stat.st_mode as io::FilePermission) & io::AllPermissions, - created: to_msec(stat.st_birthtim), - modified: to_msec(stat.st_mtim), - accessed: to_msec(stat.st_atim), - unstable: io::UnstableFileStat { - device: stat.st_dev as u64, - inode: stat.st_ino as u64, - rdev: stat.st_rdev as u64, - nlink: stat.st_nlink as u64, - uid: stat.st_uid as u64, - gid: stat.st_gid as u64, - blksize: stat.st_blksize as u64, - 
blocks: stat.st_blocks as u64, - flags: stat.st_flags as u64, - gen: stat.st_gen as u64, - } - } -} - impl IoFactory for UvIoFactory { // Connect to an address and return a new stream // NB: This blocks the task waiting on the connection. // It would probably be better to return a future - fn tcp_connect(&mut self, addr: SocketAddr) -> Result<~RtioTcpStream, IoError> { - // Create a cell in the task to hold the result. We will fill - // the cell before resuming the task. - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - - // Block this task and take ownership, switch to scheduler context - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - - let mut tcp = TcpWatcher::new(self.uv_loop()); - let task_cell = Cell::new(task); - - // Wait for a connection - do tcp.connect(addr) |stream, status| { - match status { - None => { - let tcp = NativeHandle::from_native_handle(stream.native_handle()); - let home = get_handle_to_current_scheduler!(); - let res = Ok(~UvTcpStream { watcher: tcp, home: home } - as ~RtioTcpStream); - - // Store the stream in the task's stack - unsafe { (*result_cell_ptr).put_back(res); } - - // Context switch - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - Some(_) => { - let task_cell = Cell::new(task_cell.take()); - do stream.close { - let res = Err(uv_error_to_io_error(status.unwrap())); - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } - } + fn tcp_connect(&mut self, addr: SocketAddr) + -> Result<~RtioTcpStream, IoError> + { + match TcpWatcher::connect(self.uv_loop(), addr) { + Ok(t) => Ok(~t as ~RtioTcpStream), + Err(e) => Err(uv_error_to_io_error(e)), } - - assert!(!result_cell.is_empty()); - return result_cell.take(); } fn 
tcp_bind(&mut self, addr: SocketAddr) -> Result<~RtioTcpListener, IoError> { - let mut watcher = TcpWatcher::new(self.uv_loop()); - match watcher.bind(addr) { - Ok(_) => { - let home = get_handle_to_current_scheduler!(); - Ok(~UvTcpListener::new(watcher, home) as ~RtioTcpListener) - } - Err(uverr) => { - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do watcher.as_stream().close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - Err(uv_error_to_io_error(uverr)) - } - } + match TcpListener::bind(self.uv_loop(), addr) { + Ok(t) => Ok(t as ~RtioTcpListener), + Err(e) => Err(uv_error_to_io_error(e)), } } fn udp_bind(&mut self, addr: SocketAddr) -> Result<~RtioUdpSocket, IoError> { - let mut watcher = UdpWatcher::new(self.uv_loop()); - match watcher.bind(addr) { - Ok(_) => { - let home = get_handle_to_current_scheduler!(); - Ok(~UvUdpSocket { watcher: watcher, home: home } as ~RtioUdpSocket) - } - Err(uverr) => { - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - Err(uv_error_to_io_error(uverr)) - } - } + match UdpWatcher::bind(self.uv_loop(), addr) { + Ok(u) => Ok(~u as ~RtioUdpSocket), + Err(e) => Err(uv_error_to_io_error(e)), } } fn timer_init(&mut self) -> Result<~RtioTimer, IoError> { - let watcher = TimerWatcher::new(self.uv_loop()); - let home = get_handle_to_current_scheduler!(); - Ok(~UvTimer::new(watcher, home) as ~RtioTimer) + Ok(TimerWatcher::new(self.uv_loop()) as ~RtioTimer) } fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>, hint: Option) -> Result<~[ai::Info], 
IoError> { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let host_ptr: *Option<&str> = &host; - let servname_ptr: *Option<&str> = &servname; - let hint_ptr: *Option = &hint; - let addrinfo_req = GetAddrInfoRequest::new(); - let addrinfo_req_cell = Cell::new(addrinfo_req); - - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let mut addrinfo_req = addrinfo_req_cell.take(); - unsafe { - do addrinfo_req.getaddrinfo(self.uv_loop(), - *host_ptr, *servname_ptr, - *hint_ptr) |_, addrinfo, err| { - let res = match err { - None => Ok(accum_addrinfo(addrinfo)), - Some(err) => Err(uv_error_to_io_error(err)) - }; - (*result_cell_ptr).put_back(res); - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } - addrinfo_req.delete(); - assert!(!result_cell.is_empty()); - return result_cell.take(); + let r = GetAddrInfoRequest::run(self.uv_loop(), host, servname, hint); + r.map_err(uv_error_to_io_error) } - fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior) -> ~RtioFileStream { - let loop_ = Loop {handle: self.uv_loop().native_handle()}; - let home = get_handle_to_current_scheduler!(); - ~UvFileStream::new(loop_, fd, close, home) as ~RtioFileStream + fn fs_from_raw_fd(&mut self, fd: c_int, + close: CloseBehavior) -> ~RtioFileStream { + let loop_ = Loop::wrap(self.uv_loop().handle); + ~FileWatcher::new(loop_, fd, close) as ~RtioFileStream } fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess) @@ -639,1918 +262,119 @@ impl IoFactory for UvIoFactory { io::ReadWrite => (flags | libc::O_RDWR | libc::O_CREAT, libc::S_IRUSR | libc::S_IWUSR), }; - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - let open_req = 
file::FsRequest::new(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do open_req.open(self.uv_loop(), path, flags as int, mode as int) - |req,err| { - if err.is_none() { - let loop_ = Loop {handle: req.get_loop().native_handle()}; - let home = get_handle_to_current_scheduler!(); - let fd = req.get_result() as c_int; - let fs = ~UvFileStream::new( - loop_, fd, CloseSynchronously, home) as ~RtioFileStream; - let res = Ok(fs); - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } else { - let res = Err(uv_error_to_io_error(err.unwrap())); - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - }; - }; - }; - assert!(!result_cell.is_empty()); - return result_cell.take(); + + match FsRequest::open(self.uv_loop(), path, flags as int, mode as int) { + Ok(fs) => Ok(~fs as ~RtioFileStream), + Err(e) => Err(uv_error_to_io_error(e)) + } } fn fs_unlink(&mut self, path: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.unlink(l, path, cb) - } + let r = FsRequest::unlink(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) } fn fs_lstat(&mut self, path: &CString) -> Result { - do uv_fs_helper(self.uv_loop(), fs_mkstat) |req, l, cb| { - req.lstat(l, path, cb) - } + let r = FsRequest::lstat(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) } fn fs_stat(&mut self, path: &CString) -> Result { - do uv_fs_helper(self.uv_loop(), fs_mkstat) |req, l, cb| { - req.stat(l, path, cb) - } + let r = FsRequest::stat(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) } fn fs_mkdir(&mut self, path: &CString, perm: io::FilePermission) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.mkdir(l, path, perm as c_int, cb) - } + let r = 
FsRequest::mkdir(self.uv_loop(), path, perm as c_int); + r.map_err(uv_error_to_io_error) } fn fs_rmdir(&mut self, path: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.rmdir(l, path, cb) - } + let r = FsRequest::rmdir(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) } fn fs_rename(&mut self, path: &CString, to: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.rename(l, path, to, cb) - } + let r = FsRequest::rename(self.uv_loop(), path, to); + r.map_err(uv_error_to_io_error) } fn fs_chmod(&mut self, path: &CString, perm: io::FilePermission) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.chmod(l, path, perm as c_int, cb) - } + let r = FsRequest::chmod(self.uv_loop(), path, perm as c_int); + r.map_err(uv_error_to_io_error) } - fn fs_readdir(&mut self, path: &CString, flags: c_int) -> - Result<~[Path], IoError> { - use str::StrSlice; - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let path_cell = Cell::new(path); - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - let stat_req = file::FsRequest::new(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let path = path_cell.take(); - // Don't pick up the null byte - let slice = path.as_bytes().slice(0, path.len()); - let path_parent = Cell::new(Path::new(slice)); - do stat_req.readdir(self.uv_loop(), path, flags) |req,err| { - let parent = path_parent.take(); - let res = match err { - None => { - let mut paths = ~[]; - do req.each_path |rel_path| { - let p = rel_path.as_bytes(); - paths.push(parent.join(p.slice_to(rel_path.len()))); - } - Ok(paths) - }, - Some(e) => { - Err(uv_error_to_io_error(e)) - } - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - 
}; - }; - }; - assert!(!result_cell.is_empty()); - return result_cell.take(); + fn fs_readdir(&mut self, path: &CString, flags: c_int) + -> Result<~[Path], IoError> + { + let r = FsRequest::readdir(self.uv_loop(), path, flags); + r.map_err(uv_error_to_io_error) } fn fs_link(&mut self, src: &CString, dst: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.link(l, src, dst, cb) - } + let r = FsRequest::link(self.uv_loop(), src, dst); + r.map_err(uv_error_to_io_error) } fn fs_symlink(&mut self, src: &CString, dst: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.symlink(l, src, dst, cb) - } + let r = FsRequest::symlink(self.uv_loop(), src, dst); + r.map_err(uv_error_to_io_error) } fn fs_chown(&mut self, path: &CString, uid: int, gid: int) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.chown(l, path, uid, gid, cb) - } + let r = FsRequest::chown(self.uv_loop(), path, uid, gid); + r.map_err(uv_error_to_io_error) } fn fs_readlink(&mut self, path: &CString) -> Result { - fn getlink(f: &mut FsRequest) -> Path { - Path::new(unsafe { CString::new(f.get_ptr() as *libc::c_char, false) }) - } - do uv_fs_helper(self.uv_loop(), getlink) |req, l, cb| { - req.readlink(l, path, cb) - } + let r = FsRequest::readlink(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) + } + fn fs_utime(&mut self, path: &CString, atime: u64, mtime: u64) + -> Result<(), IoError> + { + let r = FsRequest::utime(self.uv_loop(), path, atime, mtime); + r.map_err(uv_error_to_io_error) } fn spawn(&mut self, config: ProcessConfig) -> Result<(~RtioProcess, ~[Option<~RtioPipe>]), IoError> { - // Sadly, we must create the UvProcess before we actually call uv_spawn - // so that the exit_cb can close over it and notify it when the process - // has exited. 
- let mut ret = ~UvProcess { - process: Process::new(), - home: None, - exit_status: None, - term_signal: None, - exit_error: None, - descheduled: None, - }; - let ret_ptr = unsafe { - *cast::transmute::<&~UvProcess, &*mut UvProcess>(&ret) - }; - - // The purpose of this exit callback is to record the data about the - // exit and then wake up the task which may be waiting for the process - // to exit. This is all performed in the current io-loop, and the - // implementation of UvProcess ensures that reading these fields always - // occurs on the current io-loop. - let exit_cb: ExitCallback = |_, exit_status, term_signal, error| { - unsafe { - assert!((*ret_ptr).exit_status.is_none()); - (*ret_ptr).exit_status = Some(exit_status); - (*ret_ptr).term_signal = Some(term_signal); - (*ret_ptr).exit_error = error; - match (*ret_ptr).descheduled.take() { - Some(task) => { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task); - } - None => {} - } - } - }; - - match ret.process.spawn(self.uv_loop(), config, exit_cb) { - Ok(io) => { - // Only now do we actually get a handle to this scheduler. 
- ret.home = Some(get_handle_to_current_scheduler!()); - Ok((ret as ~RtioProcess, - io.move_iter().map(|p| p.map(|p| p as ~RtioPipe)).collect())) - } - Err(uverr) => { - // We still need to close the process handle we created, but - // that's taken care for us in the destructor of UvProcess - Err(uv_error_to_io_error(uverr)) + match Process::spawn(self.uv_loop(), config) { + Ok((p, io)) => { + Ok((p as ~RtioProcess, + io.move_iter().map(|i| i.map(|p| ~p as ~RtioPipe)).collect())) } + Err(e) => Err(uv_error_to_io_error(e)), } } - fn unix_bind(&mut self, path: &CString) -> - Result<~RtioUnixListener, IoError> { - let mut pipe = UvUnboundPipe::new(self.uv_loop()); - match pipe.pipe.bind(path) { - Ok(()) => Ok(~UvUnixListener::new(pipe) as ~RtioUnixListener), + fn unix_bind(&mut self, path: &CString) -> Result<~RtioUnixListener, IoError> + { + match PipeListener::bind(self.uv_loop(), path) { + Ok(p) => Ok(p as ~RtioUnixListener), Err(e) => Err(uv_error_to_io_error(e)), } } fn unix_connect(&mut self, path: &CString) -> Result<~RtioPipe, IoError> { - let pipe = UvUnboundPipe::new(self.uv_loop()); - let mut rawpipe = pipe.pipe; - - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let pipe_cell = Cell::new(pipe); - let pipe_cell_ptr: *Cell = &pipe_cell; - - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do rawpipe.connect(path) |_stream, err| { - let res = match err { - None => { - let pipe = unsafe { (*pipe_cell_ptr).take() }; - Ok(~UvPipeStream::new(pipe) as ~RtioPipe) - } - Some(e) => Err(uv_error_to_io_error(e)), - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + match PipeWatcher::connect(self.uv_loop(), path) { + Ok(p) => Ok(~p as ~RtioPipe), + Err(e) => Err(uv_error_to_io_error(e)), } - - assert!(!result_cell.is_empty()); - 
return result_cell.take(); } fn tty_open(&mut self, fd: c_int, readable: bool) -> Result<~RtioTTY, IoError> { - match tty::TTY::new(self.uv_loop(), fd, readable) { - Ok(tty) => Ok(~UvTTY { - home: get_handle_to_current_scheduler!(), - tty: tty, - fd: fd, - } as ~RtioTTY), + match TtyWatcher::new(self.uv_loop(), fd, readable) { + Ok(tty) => Ok(~tty as ~RtioTTY), Err(e) => Err(uv_error_to_io_error(e)) } } fn pipe_open(&mut self, fd: c_int) -> Result<~RtioPipe, IoError> { - let mut pipe = UvUnboundPipe::new(self.uv_loop()); - match pipe.pipe.open(fd) { - Ok(()) => Ok(~UvPipeStream::new(pipe) as ~RtioPipe), + match PipeWatcher::open(self.uv_loop(), fd) { + Ok(s) => Ok(~s as ~RtioPipe), Err(e) => Err(uv_error_to_io_error(e)) } } fn signal(&mut self, signum: Signum, channel: SharedChan) -> Result<~RtioSignal, IoError> { - let watcher = SignalWatcher::new(self.uv_loop()); - let home = get_handle_to_current_scheduler!(); - let mut signal = ~UvSignal::new(watcher, home); - match signal.watcher.start(signum, |_, _| channel.send_deferred(signum)) { - Ok(()) => Ok(signal as ~RtioSignal), + match SignalWatcher::new(self.uv_loop(), signum, channel) { + Ok(s) => Ok(s as ~RtioSignal), Err(e) => Err(uv_error_to_io_error(e)), } } } - -pub struct UvTcpListener { - priv watcher : TcpWatcher, - priv home: SchedHandle, -} - -impl HomingIO for UvTcpListener { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl UvTcpListener { - fn new(watcher: TcpWatcher, home: SchedHandle) -> UvTcpListener { - UvTcpListener { watcher: watcher, home: home } - } -} - -impl Drop for UvTcpListener { - fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_, task| { - let task = Cell::new(task); - do self_.watcher.as_stream().close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task.take()); - } - } - } - } -} - -impl RtioSocket for UvTcpListener { - fn 
socket_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(Tcp, self_.watcher) - } - } -} - -impl RtioTcpListener for UvTcpListener { - fn listen(~self) -> Result<~RtioTcpAcceptor, IoError> { - do self.home_for_io_consume |self_| { - let acceptor = ~UvTcpAcceptor::new(self_); - let incoming = Cell::new(acceptor.incoming.clone()); - let mut stream = acceptor.listener.watcher.as_stream(); - let res = do stream.listen |mut server, status| { - do incoming.with_mut_ref |incoming| { - let inc = match status { - Some(_) => Err(standard_error(OtherIoError)), - None => { - let inc = TcpWatcher::new(&server.event_loop()); - // first accept call in the callback guarenteed to succeed - server.accept(inc.as_stream()); - let home = get_handle_to_current_scheduler!(); - Ok(~UvTcpStream { watcher: inc, home: home } - as ~RtioTcpStream) - } - }; - incoming.send(inc); - } - }; - match res { - Ok(()) => Ok(acceptor as ~RtioTcpAcceptor), - Err(e) => Err(uv_error_to_io_error(e)), - } - } - } -} - -pub struct UvTcpAcceptor { - priv listener: UvTcpListener, - priv incoming: Tube>, -} - -impl HomingIO for UvTcpAcceptor { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } -} - -impl UvTcpAcceptor { - fn new(listener: UvTcpListener) -> UvTcpAcceptor { - UvTcpAcceptor { listener: listener, incoming: Tube::new() } - } -} - -impl RtioSocket for UvTcpAcceptor { - fn socket_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(Tcp, self_.listener.watcher) - } - } -} - -fn accept_simultaneously(stream: StreamWatcher, a: int) -> Result<(), IoError> { - let r = unsafe { - uvll::tcp_simultaneous_accepts(stream.native_handle(), a as c_int) - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } -} - -impl RtioTcpAcceptor for UvTcpAcceptor { - fn accept(&mut self) -> Result<~RtioTcpStream, IoError> { - do self.home_for_io |self_| { - self_.incoming.recv() - } - } - - fn 
accept_simultaneously(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - accept_simultaneously(self_.listener.watcher.as_stream(), 1) - } - } - - fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - accept_simultaneously(self_.listener.watcher.as_stream(), 0) - } - } -} - -fn read_stream(mut watcher: StreamWatcher, - scheduler: ~Scheduler, - buf: &mut [u8]) -> Result { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - - let uv_buf = slice_to_uv_buf(buf); - do scheduler.deschedule_running_task_and_then |_sched, task| { - let task_cell = Cell::new(task); - // XXX: We shouldn't reallocate these callbacks every - // call to read - let alloc: AllocCallback = |_| uv_buf; - do watcher.read_start(alloc) |mut watcher, nread, _buf, status| { - - // Stop reading so that no read callbacks are - // triggered before the user calls `read` again. - // XXX: Is there a performance impact to calling - // stop here? 
- watcher.read_stop(); - - let result = if status.is_none() { - assert!(nread >= 0); - Ok(nread as uint) - } else { - Err(uv_error_to_io_error(status.unwrap())) - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - result_cell.take() -} - -fn write_stream(mut watcher: StreamWatcher, - scheduler: ~Scheduler, - buf: &[u8]) -> Result<(), IoError> { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let buf_ptr: *&[u8] = &buf; - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - do watcher.write(buf) |_watcher, status| { - let result = if status.is_none() { - Ok(()) - } else { - Err(uv_error_to_io_error(status.unwrap())) - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - result_cell.take() -} - -pub struct UvUnboundPipe { - pipe: Pipe, - priv home: SchedHandle, -} - -impl UvUnboundPipe { - /// Creates a new unbound pipe homed to the current scheduler, placed on the - /// specified event loop - pub fn new(loop_: &Loop) -> UvUnboundPipe { - UvUnboundPipe { - pipe: Pipe::new(loop_, false), - home: get_handle_to_current_scheduler!(), - } - } -} - -impl HomingIO for UvUnboundPipe { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl Drop for UvUnboundPipe { - fn drop(&mut self) { - do self.home_for_io |self_| { - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.pipe.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } -} - 
-pub struct UvPipeStream { - priv inner: UvUnboundPipe, -} - -impl UvPipeStream { - pub fn new(inner: UvUnboundPipe) -> UvPipeStream { - UvPipeStream { inner: inner } - } -} - -impl RtioPipe for UvPipeStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - do self.inner.home_for_io_with_sched |self_, scheduler| { - read_stream(self_.pipe.as_stream(), scheduler, buf) - } - } - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - do self.inner.home_for_io_with_sched |self_, scheduler| { - write_stream(self_.pipe.as_stream(), scheduler, buf) - } - } -} - -pub struct UvTcpStream { - priv watcher: TcpWatcher, - priv home: SchedHandle, -} - -impl HomingIO for UvTcpStream { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl Drop for UvTcpStream { - fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.watcher.as_stream().close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } -} - -impl RtioSocket for UvTcpStream { - fn socket_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(Tcp, self_.watcher) - } - } -} - -impl RtioTcpStream for UvTcpStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - do self.home_for_io_with_sched |self_, scheduler| { - read_stream(self_.watcher.as_stream(), scheduler, buf) - } - } - - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - do self.home_for_io_with_sched |self_, scheduler| { - write_stream(self_.watcher.as_stream(), scheduler, buf) - } - } - - fn peer_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(TcpPeer, self_.watcher) - } - } - - fn control_congestion(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { uvll::tcp_nodelay(self_.watcher.native_handle(), 0 as c_int) }; - - match 
status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn nodelay(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { uvll::tcp_nodelay(self_.watcher.native_handle(), 1 as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn keepalive(&mut self, delay_in_seconds: uint) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::tcp_keepalive(self_.watcher.native_handle(), 1 as c_int, - delay_in_seconds as c_uint) - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn letdie(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::tcp_keepalive(self_.watcher.native_handle(), 0 as c_int, 0 as c_uint) - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } -} - -pub struct UvUdpSocket { - priv watcher: UdpWatcher, - priv home: SchedHandle, -} - -impl HomingIO for UvUdpSocket { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl Drop for UvUdpSocket { - fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } -} - -impl RtioSocket for UvUdpSocket { - fn socket_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(Udp, self_.watcher) - } - } -} - -impl RtioUdpSocket for UvUdpSocket { - fn recvfrom(&mut self, buf: &mut [u8]) -> Result<(uint, SocketAddr), IoError> { - do self.home_for_io_with_sched |self_, scheduler| { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; 
- let uv_buf = slice_to_uv_buf(buf); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let alloc: AllocCallback = |_| uv_buf; - do self_.watcher.recv_start(alloc) |mut watcher, nread, _buf, addr, flags, status| { - let _ = flags; // /XXX add handling for partials? - - watcher.recv_stop(); - - let result = match status { - None => { - assert!(nread >= 0); - Ok((nread as uint, addr)) - } - Some(err) => Err(uv_error_to_io_error(err)), - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - result_cell.take() - } - } - - fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> Result<(), IoError> { - do self.home_for_io_with_sched |self_, scheduler| { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let buf_ptr: *&[u8] = &buf; - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - do self_.watcher.send(buf, dst) |_watcher, status| { - - let result = match status { - None => Ok(()), - Some(err) => Err(uv_error_to_io_error(err)), - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - result_cell.take() - } - } - - fn join_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - do multi.to_str().with_c_str |m_addr| { - uvll::udp_set_membership(self_.watcher.native_handle(), m_addr, - ptr::null(), uvll::UV_JOIN_GROUP) - } - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn leave_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { - do 
self.home_for_io |self_| { - let r = unsafe { - do multi.to_str().with_c_str |m_addr| { - uvll::udp_set_membership(self_.watcher.native_handle(), m_addr, - ptr::null(), uvll::UV_LEAVE_GROUP) - } - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn loop_multicast_locally(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - - let r = unsafe { - uvll::udp_set_multicast_loop(self_.watcher.native_handle(), 1 as c_int) - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn dont_loop_multicast_locally(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - - let r = unsafe { - uvll::udp_set_multicast_loop(self_.watcher.native_handle(), 0 as c_int) - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn multicast_time_to_live(&mut self, ttl: int) -> Result<(), IoError> { - do self.home_for_io |self_| { - - let r = unsafe { - uvll::udp_set_multicast_ttl(self_.watcher.native_handle(), ttl as c_int) - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn time_to_live(&mut self, ttl: int) -> Result<(), IoError> { - do self.home_for_io |self_| { - - let r = unsafe { - uvll::udp_set_ttl(self_.watcher.native_handle(), ttl as c_int) - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn hear_broadcasts(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - - let r = unsafe { - uvll::udp_set_broadcast(self_.watcher.native_handle(), 1 as c_int) - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } - - fn ignore_broadcasts(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - - let r = unsafe { - 
uvll::udp_set_broadcast(self_.watcher.native_handle(), 0 as c_int) - }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } - } - } -} - -pub struct UvTimer { - priv watcher: timer::TimerWatcher, - priv home: SchedHandle, -} - -impl HomingIO for UvTimer { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl UvTimer { - fn new(w: timer::TimerWatcher, home: SchedHandle) -> UvTimer { - UvTimer { watcher: w, home: home } - } -} - -impl Drop for UvTimer { - fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| { - uvdebug!("closing UvTimer"); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } -} - -impl RtioTimer for UvTimer { - fn sleep(&mut self, msecs: u64) { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_sched, task| { - uvdebug!("sleep: entered scheduler context"); - let task_cell = Cell::new(task); - do self_.watcher.start(msecs, 0) |_, status| { - assert!(status.is_none()); - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - self_.watcher.stop(); - } - } - - fn oneshot(&mut self, msecs: u64) -> PortOne<()> { - use std::comm::oneshot; - - let (port, chan) = oneshot(); - let chan = Cell::new(chan); - do self.home_for_io |self_| { - let chan = Cell::new(chan.take()); - do self_.watcher.start(msecs, 0) |_, status| { - assert!(status.is_none()); - assert!(!chan.is_empty()); - chan.take().send_deferred(()); - } - } - - return port; - } - - fn period(&mut self, msecs: u64) -> Port<()> { - use std::comm::stream; - - let (port, chan) = stream(); - let chan = Cell::new(chan); - do self.home_for_io |self_| { - let chan = Cell::new(chan.take()); - do 
self_.watcher.start(msecs, msecs) |_, status| { - assert!(status.is_none()); - do chan.with_ref |chan| { - chan.send_deferred(()); - } - } - } - - return port; - } -} - -pub struct UvFileStream { - priv loop_: Loop, - priv fd: c_int, - priv close: CloseBehavior, - priv home: SchedHandle, -} - -impl HomingIO for UvFileStream { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl UvFileStream { - fn new(loop_: Loop, fd: c_int, close: CloseBehavior, - home: SchedHandle) -> UvFileStream { - UvFileStream { - loop_: loop_, - fd: fd, - close: close, - home: home, - } - } - fn base_read(&mut self, buf: &mut [u8], offset: i64) -> Result { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let buf_ptr: *&mut [u8] = &buf; - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_, task| { - let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - let task_cell = Cell::new(task); - let read_req = file::FsRequest::new(); - do read_req.read(&self_.loop_, self_.fd, buf, offset) |req, uverr| { - let res = match uverr { - None => Ok(req.get_result() as int), - Some(err) => Err(uv_error_to_io_error(err)) - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - result_cell.take() - } - fn base_write(&mut self, buf: &[u8], offset: i64) -> Result<(), IoError> { - do self.nop_req |self_, req, cb| { - req.write(&self_.loop_, self_.fd, slice_to_uv_buf(buf), offset, cb) - } - } - fn seek_common(&mut self, pos: i64, whence: c_int) -> - Result{ - #[fixed_stack_segment]; #[inline(never)]; - unsafe { - match lseek(self.fd, pos as off_t, whence) { - -1 => { - Err(IoError { - kind: OtherIoError, - desc: "Failed to lseek.", - detail: None - }) - }, - n => Ok(n as u64) - } - } - } - fn nop_req(&mut self, f: &fn(&mut UvFileStream, file::FsRequest, FsCallback)) - -> Result<(), 
IoError> { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - do self.home_for_io_with_sched |self_, sched| { - do sched.deschedule_running_task_and_then |_, task| { - let task = Cell::new(task); - let req = file::FsRequest::new(); - do f(self_, req) |_, uverr| { - let res = match uverr { - None => Ok(()), - Some(err) => Err(uv_error_to_io_error(err)) - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task.take()); - } - } - } - result_cell.take() - } -} - -impl Drop for UvFileStream { - fn drop(&mut self) { - match self.close { - DontClose => {} - CloseAsynchronously => { - let close_req = file::FsRequest::new(); - do close_req.close(&self.loop_, self.fd) |_,_| {} - } - CloseSynchronously => { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let close_req = file::FsRequest::new(); - do close_req.close(&self_.loop_, self_.fd) |_,_| { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } - } - } -} - -impl RtioFileStream for UvFileStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - self.base_read(buf, -1) - } - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - self.base_write(buf, -1) - } - fn pread(&mut self, buf: &mut [u8], offset: u64) -> Result { - self.base_read(buf, offset as i64) - } - fn pwrite(&mut self, buf: &[u8], offset: u64) -> Result<(), IoError> { - self.base_write(buf, offset as i64) - } - fn seek(&mut self, pos: i64, whence: SeekStyle) -> Result { - use std::libc::{SEEK_SET, SEEK_CUR, SEEK_END}; - let whence = match whence { - SeekSet => SEEK_SET, - SeekCur => SEEK_CUR, - SeekEnd => SEEK_END - }; - self.seek_common(pos, whence) - } - fn tell(&self) -> Result { - use std::libc::SEEK_CUR; - // this is temporary - let self_ = unsafe { 
cast::transmute::<&UvFileStream, &mut UvFileStream>(self) }; - self_.seek_common(0, SEEK_CUR) - } - fn fsync(&mut self) -> Result<(), IoError> { - do self.nop_req |self_, req, cb| { - req.fsync(&self_.loop_, self_.fd, cb) - } - } - fn datasync(&mut self) -> Result<(), IoError> { - do self.nop_req |self_, req, cb| { - req.datasync(&self_.loop_, self_.fd, cb) - } - } - fn truncate(&mut self, offset: i64) -> Result<(), IoError> { - do self.nop_req |self_, req, cb| { - req.truncate(&self_.loop_, self_.fd, offset, cb) - } - } -} - -pub struct UvProcess { - priv process: process::Process, - - // Sadly, this structure must be created before we return it, so in that - // brief interim the `home` is None. - priv home: Option, - - // All None until the process exits (exit_error may stay None) - priv exit_status: Option, - priv term_signal: Option, - priv exit_error: Option, - - // Used to store which task to wake up from the exit_cb - priv descheduled: Option, -} - -impl HomingIO for UvProcess { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.home.get_mut_ref() } -} - -impl Drop for UvProcess { - fn drop(&mut self) { - let close = |self_: &mut UvProcess| { - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task = Cell::new(task); - do self_.process.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task.take()); - } - } - }; - - // If home is none, then this process never actually successfully - // spawned, so there's no need to switch event loops - if self.home.is_none() { - close(self) - } else { - self.home_for_io(close) - } - } -} - -impl RtioProcess for UvProcess { - fn id(&self) -> pid_t { - self.process.pid() - } - - fn kill(&mut self, signal: int) -> Result<(), IoError> { - do self.home_for_io |self_| { - match self_.process.kill(signal) { - Ok(()) => Ok(()), - Err(uverr) => Err(uv_error_to_io_error(uverr)) - } - } - } - - fn wait(&mut self) -> int { - 
// Make sure (on the home scheduler) that we have an exit status listed - do self.home_for_io |self_| { - match self_.exit_status { - Some(*) => {} - None => { - // If there's no exit code previously listed, then the - // process's exit callback has yet to be invoked. We just - // need to deschedule ourselves and wait to be reawoken. - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - assert!(self_.descheduled.is_none()); - self_.descheduled = Some(task); - } - assert!(self_.exit_status.is_some()); - } - } - } - - self.exit_status.unwrap() - } -} - -pub struct UvUnixListener { - priv inner: UvUnboundPipe -} - -impl HomingIO for UvUnixListener { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.inner.home() } -} - -impl UvUnixListener { - fn new(pipe: UvUnboundPipe) -> UvUnixListener { - UvUnixListener { inner: pipe } - } -} - -impl RtioUnixListener for UvUnixListener { - fn listen(~self) -> Result<~RtioUnixAcceptor, IoError> { - do self.home_for_io_consume |self_| { - let acceptor = ~UvUnixAcceptor::new(self_); - let incoming = Cell::new(acceptor.incoming.clone()); - let mut stream = acceptor.listener.inner.pipe.as_stream(); - let res = do stream.listen |mut server, status| { - do incoming.with_mut_ref |incoming| { - let inc = match status { - Some(e) => Err(uv_error_to_io_error(e)), - None => { - let pipe = UvUnboundPipe::new(&server.event_loop()); - server.accept(pipe.pipe.as_stream()); - Ok(~UvPipeStream::new(pipe) as ~RtioPipe) - } - }; - incoming.send(inc); - } - }; - match res { - Ok(()) => Ok(acceptor as ~RtioUnixAcceptor), - Err(e) => Err(uv_error_to_io_error(e)), - } - } - } -} - -pub struct UvTTY { - tty: tty::TTY, - home: SchedHandle, - fd: c_int, -} - -impl HomingIO for UvTTY { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl Drop for UvTTY { - fn drop(&mut self) { - // TTY handles are used for the logger in a task, so this destructor is - // run when a task 
is destroyed. When a task is being destroyed, a local - // scheduler isn't available, so we can't do the normal "take the - // scheduler and resume once close is done". Instead close operations on - // a TTY are asynchronous. - self.tty.close_async(); - } -} - -impl RtioTTY for UvTTY { - fn read(&mut self, buf: &mut [u8]) -> Result { - do self.home_for_io_with_sched |self_, scheduler| { - read_stream(self_.tty.as_stream(), scheduler, buf) - } - } - - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - do self.home_for_io_with_sched |self_, scheduler| { - write_stream(self_.tty.as_stream(), scheduler, buf) - } - } - - fn set_raw(&mut self, raw: bool) -> Result<(), IoError> { - do self.home_for_io |self_| { - match self_.tty.set_mode(raw) { - Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) - } - } - } - - fn get_winsize(&mut self) -> Result<(int, int), IoError> { - do self.home_for_io |self_| { - match self_.tty.get_winsize() { - Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) - } - } - } - - fn isatty(&self) -> bool { - unsafe { uvll::guess_handle(self.fd) == uvll::UV_TTY as c_int } - } -} - -pub struct UvUnixAcceptor { - listener: UvUnixListener, - incoming: Tube>, -} - -impl HomingIO for UvUnixAcceptor { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } -} - -impl UvUnixAcceptor { - fn new(listener: UvUnixListener) -> UvUnixAcceptor { - UvUnixAcceptor { listener: listener, incoming: Tube::new() } - } -} - -impl RtioUnixAcceptor for UvUnixAcceptor { - fn accept(&mut self) -> Result<~RtioPipe, IoError> { - do self.home_for_io |self_| { - self_.incoming.recv() - } - } - - fn accept_simultaneously(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - accept_simultaneously(self_.listener.inner.pipe.as_stream(), 1) - } - } - - fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - accept_simultaneously(self_.listener.inner.pipe.as_stream(), 0) - } - } -} - -pub 
struct UvSignal { - watcher: signal::SignalWatcher, - home: SchedHandle, -} - -impl HomingIO for UvSignal { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl UvSignal { - fn new(w: signal::SignalWatcher, home: SchedHandle) -> UvSignal { - UvSignal { watcher: w, home: home } - } -} - -impl RtioSignal for UvSignal {} - -impl Drop for UvSignal { - fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| { - uvdebug!("closing UvSignal"); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } -} - -// this function is full of lies -unsafe fn local_io() -> &'static mut IoFactory { - do Local::borrow |sched: &mut Scheduler| { - let mut io = None; - sched.event_loop.io(|i| io = Some(i)); - cast::transmute(io.unwrap()) - } -} - -#[test] -fn test_simple_io_no_connect() { - do run_in_mt_newsched_task { - unsafe { - let io = local_io(); - let addr = next_test_ip4(); - let maybe_chan = io.tcp_connect(addr); - assert!(maybe_chan.is_err()); - } - } -} - -#[test] -fn test_simple_udp_io_bind_only() { - do run_in_mt_newsched_task { - unsafe { - let io = local_io(); - let addr = next_test_ip4(); - let maybe_socket = io.udp_bind(addr); - assert!(maybe_socket.is_ok()); - } - } -} - -#[test] -fn test_simple_homed_udp_io_bind_then_move_task_then_home_and_close() { - use std::rt::sleeper_list::SleeperList; - use std::rt::work_queue::WorkQueue; - use std::rt::thread::Thread; - use std::rt::task::Task; - use std::rt::sched::{Shutdown, TaskFromFriend}; - use std::rt::task::UnwindResult; - do run_in_bare_thread { - let sleepers = SleeperList::new(); - let work_queue1 = WorkQueue::new(); - let work_queue2 = WorkQueue::new(); - let queues = ~[work_queue1.clone(), work_queue2.clone()]; - - let loop1 = ~UvEventLoop::new() as ~EventLoop; - let mut sched1 = 
~Scheduler::new(loop1, work_queue1, queues.clone(), - sleepers.clone()); - let loop2 = ~UvEventLoop::new() as ~EventLoop; - let mut sched2 = ~Scheduler::new(loop2, work_queue2, queues.clone(), - sleepers.clone()); - - let handle1 = Cell::new(sched1.make_handle()); - let handle2 = Cell::new(sched2.make_handle()); - let tasksFriendHandle = Cell::new(sched2.make_handle()); - - let on_exit: ~fn(UnwindResult) = |exit_status| { - handle1.take().send(Shutdown); - handle2.take().send(Shutdown); - assert!(exit_status.is_success()); - }; - - let test_function: ~fn() = || { - let io = unsafe { local_io() }; - let addr = next_test_ip4(); - let maybe_socket = io.udp_bind(addr); - // this socket is bound to this event loop - assert!(maybe_socket.is_ok()); - - // block self on sched1 - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - // unblock task - do task.wake().map |task| { - // send self to sched2 - tasksFriendHandle.take().send(TaskFromFriend(task)); - }; - // sched1 should now sleep since it has nothing else to do - } - } - // sched2 will wake up and get the task - // as we do nothing else, the function ends and the socket goes out of scope - // sched2 will start to run the destructor - // the destructor will first block the task, set it's home as sched1, then enqueue it - // sched2 will dequeue the task, see that it has a home, and send it to sched1 - // sched1 will wake up, exec the close function on the correct loop, and then we're done - }; - - let mut main_task = ~Task::new_root(&mut sched1.stack_pool, None, test_function); - main_task.death.on_exit = Some(on_exit); - let main_task = Cell::new(main_task); - - let null_task = Cell::new(~do Task::new_root(&mut sched2.stack_pool, None) || {}); - - let sched1 = Cell::new(sched1); - let sched2 = Cell::new(sched2); - - let thread1 = do Thread::start { - sched1.take().bootstrap(main_task.take()); - }; - let thread2 = do 
Thread::start { - sched2.take().bootstrap(null_task.take()); - }; - - thread1.join(); - thread2.join(); - } -} - -#[test] -fn test_simple_homed_udp_io_bind_then_move_handle_then_home_and_close() { - use std::rt::sleeper_list::SleeperList; - use std::rt::work_queue::WorkQueue; - use std::rt::thread::Thread; - use std::rt::task::Task; - use std::rt::comm::oneshot; - use std::rt::sched::Shutdown; - use std::rt::task::UnwindResult; - do run_in_bare_thread { - let sleepers = SleeperList::new(); - let work_queue1 = WorkQueue::new(); - let work_queue2 = WorkQueue::new(); - let queues = ~[work_queue1.clone(), work_queue2.clone()]; - - let loop1 = ~UvEventLoop::new() as ~EventLoop; - let mut sched1 = ~Scheduler::new(loop1, work_queue1, queues.clone(), - sleepers.clone()); - let loop2 = ~UvEventLoop::new() as ~EventLoop; - let mut sched2 = ~Scheduler::new(loop2, work_queue2, queues.clone(), - sleepers.clone()); - - let handle1 = Cell::new(sched1.make_handle()); - let handle2 = Cell::new(sched2.make_handle()); - - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - let body1: ~fn() = || { - let io = unsafe { local_io() }; - let addr = next_test_ip4(); - let socket = io.udp_bind(addr); - assert!(socket.is_ok()); - chan.take().send(socket); - }; - - let body2: ~fn() = || { - let socket = port.take().recv(); - assert!(socket.is_ok()); - /* The socket goes out of scope and the destructor is called. 
- * The destructor: - * - sends itself back to sched1 - * - frees the socket - * - resets the home of the task to whatever it was previously - */ - }; - - let on_exit: ~fn(UnwindResult) = |exit| { - handle1.take().send(Shutdown); - handle2.take().send(Shutdown); - assert!(exit.is_success()); - }; - - let task1 = Cell::new(~Task::new_root(&mut sched1.stack_pool, None, body1)); - - let mut task2 = ~Task::new_root(&mut sched2.stack_pool, None, body2); - task2.death.on_exit = Some(on_exit); - let task2 = Cell::new(task2); - - let sched1 = Cell::new(sched1); - let sched2 = Cell::new(sched2); - - let thread1 = do Thread::start { - sched1.take().bootstrap(task1.take()); - }; - let thread2 = do Thread::start { - sched2.take().bootstrap(task2.take()); - }; - - thread1.join(); - thread2.join(); - } -} - -#[test] -fn test_simple_tcp_server_and_client() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - // Start the server first so it's listening when we connect - do spawntask { - unsafe { - let io = local_io(); - let listener = io.tcp_bind(addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - chan.take().send(()); - let mut stream = acceptor.accept().unwrap(); - let mut buf = [0, .. 
2048]; - let nread = stream.read(buf).unwrap(); - assert_eq!(nread, 8); - for i in range(0u, nread) { - uvdebug!("{}", buf[i]); - assert_eq!(buf[i], i as u8); - } - } - } - - do spawntask { - unsafe { - port.take().recv(); - let io = local_io(); - let mut stream = io.tcp_connect(addr).unwrap(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - } - } - } -} - -#[test] -fn test_simple_tcp_server_and_client_on_diff_threads() { - use std::rt::sleeper_list::SleeperList; - use std::rt::work_queue::WorkQueue; - use std::rt::thread::Thread; - use std::rt::task::Task; - use std::rt::sched::{Shutdown}; - use std::rt::task::UnwindResult; - do run_in_bare_thread { - let sleepers = SleeperList::new(); - - let server_addr = next_test_ip4(); - let client_addr = server_addr.clone(); - - let server_work_queue = WorkQueue::new(); - let client_work_queue = WorkQueue::new(); - let queues = ~[server_work_queue.clone(), client_work_queue.clone()]; - - let sloop = ~UvEventLoop::new() as ~EventLoop; - let mut server_sched = ~Scheduler::new(sloop, server_work_queue, - queues.clone(), sleepers.clone()); - let cloop = ~UvEventLoop::new() as ~EventLoop; - let mut client_sched = ~Scheduler::new(cloop, client_work_queue, - queues.clone(), sleepers.clone()); - - let server_handle = Cell::new(server_sched.make_handle()); - let client_handle = Cell::new(client_sched.make_handle()); - - let server_on_exit: ~fn(UnwindResult) = |exit_status| { - server_handle.take().send(Shutdown); - assert!(exit_status.is_success()); - }; - - let client_on_exit: ~fn(UnwindResult) = |exit_status| { - client_handle.take().send(Shutdown); - assert!(exit_status.is_success()); - }; - - let server_fn: ~fn() = || { - let io = unsafe { local_io() }; - let listener = io.tcp_bind(server_addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - let mut stream = acceptor.accept().unwrap(); - let mut buf = [0, .. 
2048]; - let nread = stream.read(buf).unwrap(); - assert_eq!(nread, 8); - for i in range(0u, nread) { - assert_eq!(buf[i], i as u8); - } - }; - - let client_fn: ~fn() = || { - let io = unsafe { local_io() }; - let mut stream = io.tcp_connect(client_addr); - while stream.is_err() { - stream = io.tcp_connect(client_addr); - } - stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]); - }; - - let mut server_task = ~Task::new_root(&mut server_sched.stack_pool, None, server_fn); - server_task.death.on_exit = Some(server_on_exit); - let server_task = Cell::new(server_task); - - let mut client_task = ~Task::new_root(&mut client_sched.stack_pool, None, client_fn); - client_task.death.on_exit = Some(client_on_exit); - let client_task = Cell::new(client_task); - - let server_sched = Cell::new(server_sched); - let client_sched = Cell::new(client_sched); - - let server_thread = do Thread::start { - server_sched.take().bootstrap(server_task.take()); - }; - let client_thread = do Thread::start { - client_sched.take().bootstrap(client_task.take()); - }; - - server_thread.join(); - client_thread.join(); - } -} - -#[test] -fn test_simple_udp_server_and_client() { - do run_in_mt_newsched_task { - let server_addr = next_test_ip4(); - let client_addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - do spawntask { - unsafe { - let io = local_io(); - let mut server_socket = io.udp_bind(server_addr).unwrap(); - chan.take().send(()); - let mut buf = [0, .. 
2048]; - let (nread,src) = server_socket.recvfrom(buf).unwrap(); - assert_eq!(nread, 8); - for i in range(0u, nread) { - uvdebug!("{}", buf[i]); - assert_eq!(buf[i], i as u8); - } - assert_eq!(src, client_addr); - } - } - - do spawntask { - unsafe { - let io = local_io(); - let mut client_socket = io.udp_bind(client_addr).unwrap(); - port.take().recv(); - client_socket.sendto([0, 1, 2, 3, 4, 5, 6, 7], server_addr); - } - } - } -} - -#[test] #[ignore(reason = "busted")] -fn test_read_and_block() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - do spawntask { - let io = unsafe { local_io() }; - let listener = io.tcp_bind(addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - chan.take().send(()); - let mut stream = acceptor.accept().unwrap(); - let mut buf = [0, .. 2048]; - - let expected = 32; - let mut current = 0; - let mut reads = 0; - - while current < expected { - let nread = stream.read(buf).unwrap(); - for i in range(0u, nread) { - let val = buf[i] as uint; - assert_eq!(val, current % 8); - current += 1; - } - reads += 1; - - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - // Yield to the other task in hopes that it - // will trigger a read callback while we are - // not ready for it - do scheduler.deschedule_running_task_and_then |sched, task| { - let task = Cell::new(task); - sched.enqueue_blocked_task(task.take()); - } - } - } - - // Make sure we had multiple reads - assert!(reads > 1); - } - - do spawntask { - unsafe { - port.take().recv(); - let io = local_io(); - let mut stream = io.tcp_connect(addr).unwrap(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - } - } - - } -} - -#[test] -fn test_read_read_read() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - 
static MAX: uint = 500000; - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - do spawntask { - unsafe { - let io = local_io(); - let listener = io.tcp_bind(addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - chan.take().send(()); - let mut stream = acceptor.accept().unwrap(); - let buf = [1, .. 2048]; - let mut total_bytes_written = 0; - while total_bytes_written < MAX { - stream.write(buf); - total_bytes_written += buf.len(); - } - } - } - - do spawntask { - unsafe { - port.take().recv(); - let io = local_io(); - let mut stream = io.tcp_connect(addr).unwrap(); - let mut buf = [0, .. 2048]; - let mut total_bytes_read = 0; - while total_bytes_read < MAX { - let nread = stream.read(buf).unwrap(); - uvdebug!("read {} bytes", nread); - total_bytes_read += nread; - for i in range(0u, nread) { - assert_eq!(buf[i], 1); - } - } - uvdebug!("read {} bytes total", total_bytes_read); - } - } - } -} - -#[test] -#[ignore(cfg(windows))] // FIXME(#10102) the server never sees the second send -fn test_udp_twice() { - do run_in_mt_newsched_task { - let server_addr = next_test_ip4(); - let client_addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - do spawntask { - unsafe { - let io = local_io(); - let mut client = io.udp_bind(client_addr).unwrap(); - port.take().recv(); - assert!(client.sendto([1], server_addr).is_ok()); - assert!(client.sendto([2], server_addr).is_ok()); - } - } - - do spawntask { - unsafe { - let io = local_io(); - let mut server = io.udp_bind(server_addr).unwrap(); - chan.take().send(()); - let mut buf1 = [0]; - let mut buf2 = [0]; - let (nread1, src1) = server.recvfrom(buf1).unwrap(); - let (nread2, src2) = server.recvfrom(buf2).unwrap(); - assert_eq!(nread1, 1); - assert_eq!(nread2, 1); - assert_eq!(src1, client_addr); - assert_eq!(src2, client_addr); - assert_eq!(buf1[0], 1); - assert_eq!(buf2[0], 2); - } - } - } -} - -#[test] -fn 
test_udp_many_read() { - do run_in_mt_newsched_task { - let server_out_addr = next_test_ip4(); - let server_in_addr = next_test_ip4(); - let client_out_addr = next_test_ip4(); - let client_in_addr = next_test_ip4(); - static MAX: uint = 500_000; - - let (p1, c1) = oneshot(); - let (p2, c2) = oneshot(); - - let first = Cell::new((p1, c2)); - let second = Cell::new((p2, c1)); - - do spawntask { - unsafe { - let io = local_io(); - let mut server_out = io.udp_bind(server_out_addr).unwrap(); - let mut server_in = io.udp_bind(server_in_addr).unwrap(); - let (port, chan) = first.take(); - chan.send(()); - port.recv(); - let msg = [1, .. 2048]; - let mut total_bytes_sent = 0; - let mut buf = [1]; - while buf[0] == 1 { - // send more data - assert!(server_out.sendto(msg, client_in_addr).is_ok()); - total_bytes_sent += msg.len(); - // check if the client has received enough - let res = server_in.recvfrom(buf); - assert!(res.is_ok()); - let (nread, src) = res.unwrap(); - assert_eq!(nread, 1); - assert_eq!(src, client_out_addr); - } - assert!(total_bytes_sent >= MAX); - } - } - - do spawntask { - unsafe { - let io = local_io(); - let mut client_out = io.udp_bind(client_out_addr).unwrap(); - let mut client_in = io.udp_bind(client_in_addr).unwrap(); - let (port, chan) = second.take(); - port.recv(); - chan.send(()); - let mut total_bytes_recv = 0; - let mut buf = [0, .. 
2048]; - while total_bytes_recv < MAX { - // ask for more - assert!(client_out.sendto([1], server_in_addr).is_ok()); - // wait for data - let res = client_in.recvfrom(buf); - assert!(res.is_ok()); - let (nread, src) = res.unwrap(); - assert_eq!(src, server_out_addr); - total_bytes_recv += nread; - for i in range(0u, nread) { - assert_eq!(buf[i], 1); - } - } - // tell the server we're done - assert!(client_out.sendto([0], server_in_addr).is_ok()); - } - } - } -} - -#[test] -fn test_timer_sleep_simple() { - do run_in_mt_newsched_task { - unsafe { - let io = local_io(); - let timer = io.timer_init(); - do timer.map |mut t| { t.sleep(1) }; - } - } -} - -fn file_test_uvio_full_simple_impl() { - use std::rt::io::{Open, ReadWrite, Read}; - unsafe { - let io = local_io(); - let write_val = "hello uvio!"; - let path = "./tmp/file_test_uvio_full.txt"; - { - let create_fm = Open; - let create_fa = ReadWrite; - let mut fd = io.fs_open(&path.to_c_str(), create_fm, create_fa).unwrap(); - let write_buf = write_val.as_bytes(); - fd.write(write_buf); - } - { - let ro_fm = Open; - let ro_fa = Read; - let mut fd = io.fs_open(&path.to_c_str(), ro_fm, ro_fa).unwrap(); - let mut read_vec = [0, .. 
1028]; - let nread = fd.read(read_vec).unwrap(); - let read_val = str::from_utf8(read_vec.slice(0, nread as uint)); - assert!(read_val == write_val.to_owned()); - } - io.fs_unlink(&path.to_c_str()); - } -} - -#[test] -fn file_test_uvio_full_simple() { - do run_in_mt_newsched_task { - file_test_uvio_full_simple_impl(); - } -} - -fn uvio_naive_print(input: &str) { - unsafe { - use std::libc::{STDOUT_FILENO}; - let io = local_io(); - { - let mut fd = io.fs_from_raw_fd(STDOUT_FILENO, DontClose); - let write_buf = input.as_bytes(); - fd.write(write_buf); - } - } -} - -#[test] -fn file_test_uvio_write_to_stdout() { - do run_in_mt_newsched_task { - uvio_naive_print("jubilation\n"); - } -} diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 2d850383766f5..c76d03bfe6c33 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -29,11 +29,10 @@ #[allow(non_camel_case_types)]; // C types -use std::libc::{size_t, c_int, c_uint, c_void, c_char, uintptr_t}; +use std::libc::{size_t, c_int, c_uint, c_void, c_char, uintptr_t, c_double}; use std::libc::ssize_t; use std::libc::{malloc, free}; use std::libc; -use std::ptr; use std::vec; pub use self::errors::*; @@ -48,12 +47,14 @@ pub static UNKNOWN: c_int = -4094; pub mod errors { use std::libc::c_int; - pub static EACCES: c_int = -4093; - pub static ECONNREFUSED: c_int = -4079; - pub static ECONNRESET: c_int = -4078; - pub static ENOTCONN: c_int = -4054; - pub static EPIPE: c_int = -4048; - pub static ECONNABORTED: c_int = -4080; + pub static EACCES: c_int = -4092; + pub static ECONNREFUSED: c_int = -4078; + pub static ECONNRESET: c_int = -4077; + pub static ENOTCONN: c_int = -4053; + pub static EPIPE: c_int = -4047; + pub static ECONNABORTED: c_int = -4079; + pub static ECANCELED: c_int = -4081; + pub static EBADF: c_int = -4083; } #[cfg(not(windows))] pub mod errors { @@ -66,6 +67,8 @@ pub mod errors { pub static ENOTCONN: c_int = -libc::ENOTCONN; pub static EPIPE: c_int = -libc::EPIPE; pub static 
ECONNABORTED: c_int = -libc::ECONNABORTED; + pub static ECANCELED : c_int = -libc::ECANCELED; + pub static EBADF : c_int = -libc::EBADF; } pub static PROCESS_SETUID: c_int = 1 << 0; @@ -81,20 +84,32 @@ pub static STDIO_INHERIT_STREAM: c_int = 0x04; pub static STDIO_READABLE_PIPE: c_int = 0x10; pub static STDIO_WRITABLE_PIPE: c_int = 0x20; +#[cfg(unix)] +pub type uv_buf_len_t = libc::size_t; +#[cfg(windows)] +pub type uv_buf_len_t = libc::c_ulong; + // see libuv/include/uv-unix.h #[cfg(unix)] pub struct uv_buf_t { base: *u8, - len: libc::size_t, + len: uv_buf_len_t, } // see libuv/include/uv-win.h #[cfg(windows)] pub struct uv_buf_t { - len: u32, + len: uv_buf_len_t, base: *u8, } +#[repr(C)] +pub enum uv_run_mode { + RUN_DEFAULT = 0, + RUN_ONCE, + RUN_NOWAIT, +} + pub struct uv_process_options_t { exit_cb: uv_exit_cb, file: *libc::c_char, @@ -116,6 +131,7 @@ pub struct uv_stdio_container_t { } pub type uv_handle_t = c_void; +pub type uv_req_t = c_void; pub type uv_loop_t = c_void; pub type uv_idle_t = c_void; pub type uv_tcp_t = c_void; @@ -190,15 +206,16 @@ impl uv_stat_t { pub type uv_idle_cb = extern "C" fn(handle: *uv_idle_t, status: c_int); pub type uv_alloc_cb = extern "C" fn(stream: *uv_stream_t, - suggested_size: size_t) -> uv_buf_t; + suggested_size: size_t, + buf: *mut uv_buf_t); pub type uv_read_cb = extern "C" fn(stream: *uv_stream_t, nread: ssize_t, - buf: uv_buf_t); + buf: *uv_buf_t); pub type uv_udp_send_cb = extern "C" fn(req: *uv_udp_send_t, status: c_int); pub type uv_udp_recv_cb = extern "C" fn(handle: *uv_udp_t, nread: ssize_t, - buf: uv_buf_t, + buf: *uv_buf_t, addr: *sockaddr, flags: c_uint); pub type uv_close_cb = extern "C" fn(handle: *uv_handle_t); @@ -218,16 +235,13 @@ pub type uv_getaddrinfo_cb = extern "C" fn(req: *uv_getaddrinfo_t, status: c_int, res: *addrinfo); pub type uv_exit_cb = extern "C" fn(handle: *uv_process_t, - exit_status: c_int, + exit_status: i64, term_signal: c_int); pub type uv_signal_cb = extern "C" fn(handle: 
*uv_signal_t, signum: c_int); pub type uv_fs_cb = extern "C" fn(req: *uv_fs_t); pub type sockaddr = c_void; -pub type sockaddr_in = c_void; -pub type sockaddr_in6 = c_void; -pub type sockaddr_storage = c_void; #[cfg(unix)] pub type socklen_t = c_int; @@ -276,6 +290,7 @@ pub struct addrinfo { #[cfg(windows)] pub type uv_uid_t = libc::c_uchar; #[cfg(windows)] pub type uv_gid_t = libc::c_uchar; +#[repr(C)] #[deriving(Eq)] pub enum uv_handle_type { UV_UNKNOWN_HANDLE, @@ -299,6 +314,7 @@ pub enum uv_handle_type { UV_HANDLE_TYPE_MAX } +#[repr(C)] #[cfg(unix)] #[deriving(Eq)] pub enum uv_req_type { @@ -316,6 +332,7 @@ pub enum uv_req_type { // uv_req_type may have additional fields defined by UV_REQ_TYPE_PRIVATE. // See UV_REQ_TYPE_PRIVATE at libuv/include/uv-win.h +#[repr(C)] #[cfg(windows)] #[deriving(Eq)] pub enum uv_req_type { @@ -339,6 +356,7 @@ pub enum uv_req_type { UV_REQ_TYPE_MAX } +#[repr(C)] #[deriving(Eq)] pub enum uv_membership { UV_LEAVE_GROUP, @@ -349,7 +367,7 @@ pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void { #[fixed_stack_segment]; #[inline(never)]; assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX); - let size = rust_uv_handle_size(handle as uint); + let size = uv_handle_size(handle); let p = malloc(size); assert!(p.is_not_null()); return p; @@ -365,7 +383,7 @@ pub unsafe fn malloc_req(req: uv_req_type) -> *c_void { #[fixed_stack_segment]; #[inline(never)]; assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX); - let size = rust_uv_req_size(req as uint); + let size = uv_req_size(req); let p = malloc(size); assert!(p.is_not_null()); return p; @@ -400,452 +418,23 @@ pub unsafe fn loop_new() -> *c_void { return rust_uv_loop_new(); } -pub unsafe fn loop_delete(loop_handle: *c_void) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_loop_delete(loop_handle); -} - -pub unsafe fn run(loop_handle: *c_void) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_run(loop_handle); -} - -pub unsafe fn close(handle: *T, 
cb: uv_close_cb) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_close(handle as *c_void, cb); -} - -pub unsafe fn walk(loop_handle: *c_void, cb: uv_walk_cb, arg: *c_void) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_walk(loop_handle, cb, arg); -} - -pub unsafe fn idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_idle_init(loop_handle, handle) -} - -pub unsafe fn idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_idle_start(handle, cb) -} - -pub unsafe fn idle_stop(handle: *uv_idle_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_idle_stop(handle) -} - -pub unsafe fn udp_init(loop_handle: *uv_loop_t, handle: *uv_udp_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_init(loop_handle, handle); -} - -pub unsafe fn udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_bind(server, addr, flags); -} - -pub unsafe fn udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_bind6(server, addr, flags); -} - -pub unsafe fn udp_send(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], - addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - let buf_ptr = vec::raw::to_ptr(buf_in); - let buf_cnt = buf_in.len() as i32; - return rust_uv_udp_send(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); -} - -pub unsafe fn udp_send6(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], - addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - let buf_ptr = vec::raw::to_ptr(buf_in); - let buf_cnt = buf_in.len() as i32; - return rust_uv_udp_send6(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); -} - -pub unsafe fn 
udp_recv_start(server: *uv_udp_t, on_alloc: uv_alloc_cb, - on_recv: uv_udp_recv_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_recv_start(server, on_alloc, on_recv); -} - -pub unsafe fn udp_recv_stop(server: *uv_udp_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_recv_stop(server); -} - pub unsafe fn get_udp_handle_from_send_req(send_req: *uv_udp_send_t) -> *uv_udp_t { #[fixed_stack_segment]; #[inline(never)]; return rust_uv_get_udp_handle_from_send_req(send_req); } -pub unsafe fn udp_getsockname(handle: *uv_udp_t, name: *sockaddr_storage) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_getsockname(handle, name); -} - -pub unsafe fn udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, - interface_addr: *c_char, membership: uv_membership) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_set_membership(handle, multicast_addr, interface_addr, membership as c_int); -} - -pub unsafe fn udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_set_multicast_loop(handle, on); -} - -pub unsafe fn udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_set_multicast_ttl(handle, ttl); -} - -pub unsafe fn udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_set_ttl(handle, ttl); -} - -pub unsafe fn udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_set_broadcast(handle, on); -} - -pub unsafe fn tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_init(loop_handle, handle); -} - -pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, - addr_ptr: 
*sockaddr_in, after_connect_cb: uv_connect_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr); -} - -pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, - addr_ptr: *sockaddr_in6, after_connect_cb: uv_connect_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr); -} - -pub unsafe fn tcp_bind(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_bind(tcp_server_ptr, addr_ptr); -} - -pub unsafe fn tcp_bind6(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in6) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_bind6(tcp_server_ptr, addr_ptr); -} - -pub unsafe fn tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_storage) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_getpeername(tcp_handle_ptr, name); -} - -pub unsafe fn tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_storage) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_getsockname(handle, name); -} - -pub unsafe fn tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_nodelay(handle, enable); -} - -pub unsafe fn tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_keepalive(handle, enable, delay); -} - -pub unsafe fn tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_simultaneous_accepts(handle, enable); -} - -pub unsafe fn listen(stream: *T, backlog: c_int, - cb: uv_connection_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_listen(stream as *c_void, backlog, 
cb); -} - -pub unsafe fn accept(server: *c_void, client: *c_void) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_accept(server as *c_void, client as *c_void); -} - -pub unsafe fn write(req: *uv_write_t, - stream: *T, +pub unsafe fn uv_write(req: *uv_write_t, + stream: *uv_stream_t, buf_in: &[uv_buf_t], cb: uv_write_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; + externfn!(fn uv_write(req: *uv_write_t, stream: *uv_stream_t, + buf_in: *uv_buf_t, buf_cnt: c_int, + cb: uv_write_cb) -> c_int) let buf_ptr = vec::raw::to_ptr(buf_in); let buf_cnt = buf_in.len() as i32; - return rust_uv_write(req as *c_void, stream as *c_void, buf_ptr, buf_cnt, cb); -} -pub unsafe fn read_start(stream: *uv_stream_t, - on_alloc: uv_alloc_cb, - on_read: uv_read_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_read_start(stream as *c_void, on_alloc, on_read); -} - -pub unsafe fn read_stop(stream: *uv_stream_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_read_stop(stream as *c_void); -} - -pub unsafe fn strerror(err: c_int) -> *c_char { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_strerror(err); -} -pub unsafe fn err_name(err: c_int) -> *c_char { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_err_name(err); -} - -pub unsafe fn async_init(loop_handle: *c_void, - async_handle: *uv_async_t, - cb: uv_async_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_async_init(loop_handle, async_handle, cb); -} - -pub unsafe fn async_send(async_handle: *uv_async_t) { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_async_send(async_handle); -} -pub unsafe fn buf_init(input: *u8, len: uint) -> uv_buf_t { - #[fixed_stack_segment]; #[inline(never)]; - - let out_buf = uv_buf_t { base: ptr::null(), len: 0 as size_t }; - let out_buf_ptr = ptr::to_unsafe_ptr(&out_buf); - rust_uv_buf_init(out_buf_ptr, input, len as size_t); - return out_buf; -} - 
-pub unsafe fn timer_init(loop_ptr: *c_void, timer_ptr: *uv_timer_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_timer_init(loop_ptr, timer_ptr); -} -pub unsafe fn timer_start(timer_ptr: *uv_timer_t, - cb: uv_timer_cb, timeout: u64, - repeat: u64) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_timer_start(timer_ptr, cb, timeout, repeat); -} -pub unsafe fn timer_stop(timer_ptr: *uv_timer_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_timer_stop(timer_ptr); -} - -pub unsafe fn is_ip4_addr(addr: *sockaddr) -> bool { - #[fixed_stack_segment]; #[inline(never)]; - - match rust_uv_is_ipv4_sockaddr(addr) { 0 => false, _ => true } -} - -pub unsafe fn is_ip6_addr(addr: *sockaddr) -> bool { - #[fixed_stack_segment]; #[inline(never)]; - - match rust_uv_is_ipv6_sockaddr(addr) { 0 => false, _ => true } -} - -pub unsafe fn malloc_ip4_addr(ip: &str, port: int) -> *sockaddr_in { - #[fixed_stack_segment]; #[inline(never)]; - do ip.with_c_str |ip_buf| { - rust_uv_ip4_addrp(ip_buf as *u8, port as libc::c_int) - } -} -pub unsafe fn malloc_ip6_addr(ip: &str, port: int) -> *sockaddr_in6 { - #[fixed_stack_segment]; #[inline(never)]; - do ip.with_c_str |ip_buf| { - rust_uv_ip6_addrp(ip_buf as *u8, port as libc::c_int) - } -} - -pub unsafe fn malloc_sockaddr_storage() -> *sockaddr_storage { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_malloc_sockaddr_storage() -} - -pub unsafe fn free_sockaddr_storage(ss: *sockaddr_storage) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_free_sockaddr_storage(ss); -} - -pub unsafe fn free_ip4_addr(addr: *sockaddr_in) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_free_ip4_addr(addr); -} - -pub unsafe fn free_ip6_addr(addr: *sockaddr_in6) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_free_ip6_addr(addr); -} - -pub unsafe fn ip4_name(addr: *sockaddr_in, dst: *u8, size: size_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; 
- - return rust_uv_ip4_name(addr, dst, size); -} - -pub unsafe fn ip6_name(addr: *sockaddr_in6, dst: *u8, size: size_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_ip6_name(addr, dst, size); -} - -pub unsafe fn ip4_port(addr: *sockaddr_in) -> c_uint { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_ip4_port(addr); -} - -pub unsafe fn ip6_port(addr: *sockaddr_in6) -> c_uint { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_ip6_port(addr); -} - -pub unsafe fn fs_open(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, flags: int, mode: int, - cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_open(loop_ptr, req, path, flags as c_int, mode as c_int, cb) -} - -pub unsafe fn fs_unlink(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_unlink(loop_ptr, req, path, cb) -} -pub unsafe fn fs_write(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, - len: uint, offset: i64, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_write(loop_ptr, req, fd, buf, len as c_uint, offset, cb) -} -pub unsafe fn fs_read(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, - len: uint, offset: i64, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_read(loop_ptr, req, fd, buf, len as c_uint, offset, cb) -} -pub unsafe fn fs_close(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, - cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_close(loop_ptr, req, fd, cb) -} -pub unsafe fn fs_stat(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_stat(loop_ptr, req, path, cb) -} -pub unsafe fn fs_fstat(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_fstat(loop_ptr, req, fd, cb) -} 
-pub unsafe fn fs_mkdir(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - mode: c_int, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_mkdir(loop_ptr, req, path, mode as c_int, cb) -} -pub unsafe fn fs_rmdir(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_rmdir(loop_ptr, req, path, cb) -} -pub unsafe fn fs_rename(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - to: *c_char, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_rename(loop_ptr, req, path, to, cb) -} -pub unsafe fn fs_chmod(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - mode: c_int, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_chmod(loop_ptr, req, path, mode as c_int, cb) -} -pub unsafe fn fs_readdir(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - flags: c_int, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_readdir(loop_ptr, req, path, flags, cb) -} -pub unsafe fn populate_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_populate_uv_stat(req_in, stat_out) -} -pub unsafe fn fs_req_cleanup(req: *uv_fs_t) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_req_cleanup(req); -} - -pub unsafe fn spawn(loop_ptr: *c_void, result: *uv_process_t, - options: uv_process_options_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_spawn(loop_ptr, result, options); -} - -pub unsafe fn process_kill(p: *uv_process_t, signum: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_process_kill(p, signum); + return uv_write(req, stream, buf_ptr, buf_cnt, cb); } pub unsafe fn process_pid(p: *uv_process_t) -> c_int { @@ -871,11 +460,6 @@ pub unsafe fn set_stdio_container_stream(c: *uv_stdio_container_t, rust_set_stdio_container_stream(c, stream); } -pub unsafe fn pipe_init(loop_ptr: 
*c_void, p: *uv_pipe_t, ipc: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_pipe_init(loop_ptr, p, ipc) -} - // data access helpers pub unsafe fn get_result_from_fs_req(req: *uv_fs_t) -> c_int { #[fixed_stack_segment]; #[inline(never)]; @@ -947,200 +531,56 @@ pub unsafe fn set_data_for_req(req: *T, data: *U) { rust_uv_set_data_for_req(req as *c_void, data as *c_void); } -pub unsafe fn get_base_from_buf(buf: uv_buf_t) -> *u8 { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_get_base_from_buf(buf); -} -pub unsafe fn get_len_from_buf(buf: uv_buf_t) -> size_t { +pub unsafe fn populate_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t) { #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_get_len_from_buf(buf); -} -pub unsafe fn getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t, - getaddrinfo_cb: uv_getaddrinfo_cb, - node: *c_char, service: *c_char, - hints: *addrinfo) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_getaddrinfo(loop_, req, getaddrinfo_cb, node, service, hints); -} -pub unsafe fn freeaddrinfo(ai: *addrinfo) { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_freeaddrinfo(ai); -} -pub unsafe fn pipe_open(pipe: *uv_pipe_t, file: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_pipe_open(pipe, file) -} -pub unsafe fn pipe_bind(pipe: *uv_pipe_t, name: *c_char) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_pipe_bind(pipe, name) -} -pub unsafe fn pipe_connect(req: *uv_connect_t, handle: *uv_pipe_t, - name: *c_char, cb: uv_connect_cb) { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_pipe_connect(req, handle, name, cb) -} -pub unsafe fn tty_init(loop_ptr: *uv_loop_t, tty: *uv_tty_t, fd: c_int, - readable: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_tty_init(loop_ptr, tty, fd, readable) -} -pub unsafe fn tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - 
rust_uv_tty_set_mode(tty, mode) -} -pub unsafe fn tty_get_winsize(tty: *uv_tty_t, width: *c_int, - height: *c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_tty_get_winsize(tty, width, height) + rust_uv_populate_uv_stat(req_in, stat_out) } -// FIXME(#9613) this should return uv_handle_type, not a c_int -pub unsafe fn guess_handle(fd: c_int) -> c_int { +pub unsafe fn guess_handle(handle: c_int) -> c_int { #[fixed_stack_segment]; #[inline(never)]; - rust_uv_guess_handle(fd) -} -pub unsafe fn signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_signal_init(loop_, handle); -} -pub unsafe fn signal_start(handle: *uv_signal_t, - signal_cb: uv_signal_cb, - signum: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_signal_start(handle, signal_cb, signum); -} -pub unsafe fn signal_stop(handle: *uv_signal_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_signal_stop(handle); + rust_uv_guess_handle(handle) } -pub struct uv_err_data { - err_name: ~str, - err_msg: ~str, -} // uv_support is the result of compiling rust_uv.cpp +// +// Note that this is in a cfg'd block so it doesn't get linked during testing. +// There's a bit of a conundrum when testing in that we're actually assuming +// that the tests are running in a uv loop, but they were created from the +// statically linked uv to the original rustuv crate. When we create the test +// executable, on some platforms if we re-link against uv, it actually creates +// second copies of everything. We obviously don't want this, so instead of +// dying horribly during testing, we allow all of the test rustuv's references +// to get resolved to the original rustuv crate. 
#[link_args = "-luv_support -luv"] +#[cfg(not(test))] +extern {} + extern { + fn rust_uv_loop_new() -> *c_void; + + // dealing with sockaddr things + pub fn rust_sockaddr_size() -> c_int; + pub fn rust_malloc_ip4_addr(s: *c_char, port: c_int) -> *sockaddr; + pub fn rust_malloc_ip6_addr(s: *c_char, port: c_int) -> *sockaddr; + pub fn rust_ip4_port(src: *sockaddr) -> c_uint; + pub fn rust_ip6_port(src: *sockaddr) -> c_uint; + pub fn rust_is_ipv4_sockaddr(addr: *sockaddr) -> c_int; + pub fn rust_is_ipv6_sockaddr(addr: *sockaddr) -> c_int; - fn rust_uv_handle_size(type_: uintptr_t) -> size_t; - fn rust_uv_req_size(type_: uintptr_t) -> size_t; fn rust_uv_handle_type_max() -> uintptr_t; fn rust_uv_req_type_max() -> uintptr_t; - - // libuv public API - fn rust_uv_loop_new() -> *c_void; - fn rust_uv_loop_delete(lp: *c_void); - fn rust_uv_run(loop_handle: *c_void); - fn rust_uv_close(handle: *c_void, cb: uv_close_cb); - fn rust_uv_walk(loop_handle: *c_void, cb: uv_walk_cb, arg: *c_void); - - fn rust_uv_idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int; - fn rust_uv_idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int; - fn rust_uv_idle_stop(handle: *uv_idle_t) -> c_int; - - fn rust_uv_async_send(handle: *uv_async_t); - fn rust_uv_async_init(loop_handle: *c_void, - async_handle: *uv_async_t, - cb: uv_async_cb) -> c_int; - fn rust_uv_tcp_init(loop_handle: *c_void, handle_ptr: *uv_tcp_t) -> c_int; - fn rust_uv_buf_init(out_buf: *uv_buf_t, base: *u8, len: size_t); - fn rust_uv_strerror(err: c_int) -> *c_char; - fn rust_uv_err_name(err: c_int) -> *c_char; - fn rust_uv_ip4_addrp(ip: *u8, port: c_int) -> *sockaddr_in; - fn rust_uv_ip6_addrp(ip: *u8, port: c_int) -> *sockaddr_in6; - fn rust_uv_free_ip4_addr(addr: *sockaddr_in); - fn rust_uv_free_ip6_addr(addr: *sockaddr_in6); - fn rust_uv_ip4_name(src: *sockaddr_in, dst: *u8, size: size_t) -> c_int; - fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: size_t) -> c_int; - fn rust_uv_ip4_port(src: *sockaddr_in) 
-> c_uint; - fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint; - fn rust_uv_tcp_connect(req: *uv_connect_t, handle: *uv_tcp_t, - cb: uv_connect_cb, - addr: *sockaddr_in) -> c_int; - fn rust_uv_tcp_bind(tcp_server: *uv_tcp_t, addr: *sockaddr_in) -> c_int; - fn rust_uv_tcp_connect6(req: *uv_connect_t, handle: *uv_tcp_t, - cb: uv_connect_cb, - addr: *sockaddr_in6) -> c_int; - fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int; - fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_storage) -> c_int; - fn rust_uv_tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_storage) -> c_int; - fn rust_uv_tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int; - fn rust_uv_tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int; - fn rust_uv_tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int; - - fn rust_uv_udp_init(loop_handle: *uv_loop_t, handle_ptr: *uv_udp_t) -> c_int; - fn rust_uv_udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int; - fn rust_uv_udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int; - fn rust_uv_udp_send(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, - buf_cnt: c_int, addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int; - fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, - buf_cnt: c_int, addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int; - fn rust_uv_udp_recv_start(server: *uv_udp_t, - on_alloc: uv_alloc_cb, - on_recv: uv_udp_recv_cb) -> c_int; - fn rust_uv_udp_recv_stop(server: *uv_udp_t) -> c_int; fn rust_uv_get_udp_handle_from_send_req(req: *uv_udp_send_t) -> *uv_udp_t; - fn rust_uv_udp_getsockname(handle: *uv_udp_t, name: *sockaddr_storage) -> c_int; - fn rust_uv_udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, - interface_addr: *c_char, membership: c_int) -> c_int; - fn rust_uv_udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int; - fn rust_uv_udp_set_multicast_ttl(handle: 
*uv_udp_t, ttl: c_int) -> c_int; - fn rust_uv_udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int; - fn rust_uv_udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int; - - fn rust_uv_is_ipv4_sockaddr(addr: *sockaddr) -> c_int; - fn rust_uv_is_ipv6_sockaddr(addr: *sockaddr) -> c_int; - fn rust_uv_malloc_sockaddr_storage() -> *sockaddr_storage; - fn rust_uv_free_sockaddr_storage(ss: *sockaddr_storage); - - fn rust_uv_listen(stream: *c_void, backlog: c_int, - cb: uv_connection_cb) -> c_int; - fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int; - fn rust_uv_write(req: *c_void, stream: *c_void, buf_in: *uv_buf_t, buf_cnt: c_int, - cb: uv_write_cb) -> c_int; - fn rust_uv_read_start(stream: *c_void, - on_alloc: uv_alloc_cb, - on_read: uv_read_cb) -> c_int; - fn rust_uv_read_stop(stream: *c_void) -> c_int; - fn rust_uv_timer_init(loop_handle: *c_void, timer_handle: *uv_timer_t) -> c_int; - fn rust_uv_timer_start(timer_handle: *uv_timer_t, cb: uv_timer_cb, timeout: libc::uint64_t, - repeat: libc::uint64_t) -> c_int; - fn rust_uv_timer_stop(handle: *uv_timer_t) -> c_int; - fn rust_uv_fs_open(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - flags: c_int, mode: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_unlink(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - cb: *u8) -> c_int; - fn rust_uv_fs_write(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, - buf: *c_void, len: c_uint, offset: i64, cb: *u8) -> c_int; - fn rust_uv_fs_read(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, - buf: *c_void, len: c_uint, offset: i64, cb: *u8) -> c_int; - fn rust_uv_fs_close(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, - cb: *u8) -> c_int; - fn rust_uv_fs_stat(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, cb: *u8) -> c_int; - fn rust_uv_fs_fstat(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_mkdir(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - mode: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_rmdir(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - 
cb: *u8) -> c_int; - fn rust_uv_fs_rename(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - to: *c_char, cb: *u8) -> c_int; - fn rust_uv_fs_chmod(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - mode: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_readdir(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - flags: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_req_cleanup(req: *uv_fs_t); + fn rust_uv_populate_uv_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t); fn rust_uv_get_result_from_fs_req(req: *uv_fs_t) -> c_int; fn rust_uv_get_ptr_from_fs_req(req: *uv_fs_t) -> *libc::c_void; fn rust_uv_get_path_from_fs_req(req: *uv_fs_t) -> *c_char; fn rust_uv_get_loop_from_fs_req(req: *uv_fs_t) -> *uv_loop_t; fn rust_uv_get_loop_from_getaddrinfo_req(req: *uv_fs_t) -> *uv_loop_t; - - fn rust_uv_get_stream_handle_from_connect_req(connect_req: *uv_connect_t) -> *uv_stream_t; - fn rust_uv_get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t; + fn rust_uv_get_stream_handle_from_connect_req(req: *uv_connect_t) -> *uv_stream_t; + fn rust_uv_get_stream_handle_from_write_req(req: *uv_write_t) -> *uv_stream_t; fn rust_uv_get_loop_for_uv_handle(handle: *c_void) -> *c_void; fn rust_uv_get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void; fn rust_uv_set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void); @@ -1148,72 +588,177 @@ extern { fn rust_uv_set_data_for_uv_handle(handle: *c_void, data: *c_void); fn rust_uv_get_data_for_req(req: *c_void) -> *c_void; fn rust_uv_set_data_for_req(req: *c_void, data: *c_void); - fn rust_uv_get_base_from_buf(buf: uv_buf_t) -> *u8; - fn rust_uv_get_len_from_buf(buf: uv_buf_t) -> size_t; - fn rust_uv_getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t, - getaddrinfo_cb: uv_getaddrinfo_cb, - node: *c_char, service: *c_char, - hints: *addrinfo) -> c_int; - fn rust_uv_freeaddrinfo(ai: *addrinfo); - fn rust_uv_spawn(loop_ptr: *c_void, outptr: *uv_process_t, - options: uv_process_options_t) -> c_int; - fn rust_uv_process_kill(p: *uv_process_t, signum: 
c_int) -> c_int; - fn rust_uv_process_pid(p: *uv_process_t) -> c_int; fn rust_set_stdio_container_flags(c: *uv_stdio_container_t, flags: c_int); fn rust_set_stdio_container_fd(c: *uv_stdio_container_t, fd: c_int); fn rust_set_stdio_container_stream(c: *uv_stdio_container_t, stream: *uv_stream_t); - fn rust_uv_pipe_init(loop_ptr: *c_void, p: *uv_pipe_t, ipc: c_int) -> c_int; - - fn rust_uv_pipe_open(pipe: *uv_pipe_t, file: c_int) -> c_int; - fn rust_uv_pipe_bind(pipe: *uv_pipe_t, name: *c_char) -> c_int; - fn rust_uv_pipe_connect(req: *uv_connect_t, handle: *uv_pipe_t, - name: *c_char, cb: uv_connect_cb); - fn rust_uv_tty_init(loop_ptr: *uv_loop_t, tty: *uv_tty_t, fd: c_int, - readable: c_int) -> c_int; - fn rust_uv_tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int; - fn rust_uv_tty_get_winsize(tty: *uv_tty_t, width: *c_int, - height: *c_int) -> c_int; + fn rust_uv_process_pid(p: *uv_process_t) -> c_int; fn rust_uv_guess_handle(fd: c_int) -> c_int; +} - // XXX: see comments in addrinfo.rs - // These should all really be constants... 
- //#[rust_stack] pub fn rust_SOCK_STREAM() -> c_int; - //#[rust_stack] pub fn rust_SOCK_DGRAM() -> c_int; - //#[rust_stack] pub fn rust_SOCK_RAW() -> c_int; - //#[rust_stack] pub fn rust_IPPROTO_UDP() -> c_int; - //#[rust_stack] pub fn rust_IPPROTO_TCP() -> c_int; - //#[rust_stack] pub fn rust_AI_ADDRCONFIG() -> c_int; - //#[rust_stack] pub fn rust_AI_ALL() -> c_int; - //#[rust_stack] pub fn rust_AI_CANONNAME() -> c_int; - //#[rust_stack] pub fn rust_AI_NUMERICHOST() -> c_int; - //#[rust_stack] pub fn rust_AI_NUMERICSERV() -> c_int; - //#[rust_stack] pub fn rust_AI_PASSIVE() -> c_int; - //#[rust_stack] pub fn rust_AI_V4MAPPED() -> c_int; - - fn rust_uv_signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int; - fn rust_uv_signal_start(handle: *uv_signal_t, - signal_cb: uv_signal_cb, - signum: c_int) -> c_int; - fn rust_uv_signal_stop(handle: *uv_signal_t) -> c_int; +// generic uv functions +externfn!(fn uv_loop_delete(l: *uv_loop_t)) +externfn!(fn uv_handle_size(ty: uv_handle_type) -> size_t) +externfn!(fn uv_req_size(ty: uv_req_type) -> size_t) +externfn!(fn uv_run(l: *uv_loop_t, mode: uv_run_mode) -> c_int) +externfn!(fn uv_close(h: *uv_handle_t, cb: uv_close_cb)) +externfn!(fn uv_walk(l: *uv_loop_t, cb: uv_walk_cb, arg: *c_void)) +externfn!(fn uv_buf_init(base: *c_char, len: c_uint) -> uv_buf_t) +externfn!(fn uv_strerror(err: c_int) -> *c_char) +externfn!(fn uv_err_name(err: c_int) -> *c_char) +externfn!(fn uv_listen(s: *uv_stream_t, backlog: c_int, + cb: uv_connection_cb) -> c_int) +externfn!(fn uv_accept(server: *uv_stream_t, client: *uv_stream_t) -> c_int) +externfn!(fn uv_read_start(stream: *uv_stream_t, + on_alloc: uv_alloc_cb, + on_read: uv_read_cb) -> c_int) +externfn!(fn uv_read_stop(stream: *uv_stream_t) -> c_int) + +// idle bindings +externfn!(fn uv_idle_init(l: *uv_loop_t, i: *uv_idle_t) -> c_int) +externfn!(fn uv_idle_start(i: *uv_idle_t, cb: uv_idle_cb) -> c_int) +externfn!(fn uv_idle_stop(i: *uv_idle_t) -> c_int) + +// async bindings 
+externfn!(fn uv_async_init(l: *uv_loop_t, a: *uv_async_t, + cb: uv_async_cb) -> c_int) +externfn!(fn uv_async_send(a: *uv_async_t)) + +// tcp bindings +externfn!(fn uv_tcp_init(l: *uv_loop_t, h: *uv_tcp_t) -> c_int) +externfn!(fn uv_tcp_connect(c: *uv_connect_t, h: *uv_tcp_t, + addr: *sockaddr, cb: uv_connect_cb) -> c_int) +externfn!(fn uv_tcp_bind(t: *uv_tcp_t, addr: *sockaddr) -> c_int) +externfn!(fn uv_ip4_name(src: *sockaddr, dst: *c_char, + size: size_t) -> c_int) +externfn!(fn uv_ip6_name(src: *sockaddr, dst: *c_char, + size: size_t) -> c_int) +externfn!(fn uv_tcp_nodelay(h: *uv_tcp_t, enable: c_int) -> c_int) +externfn!(fn uv_tcp_keepalive(h: *uv_tcp_t, enable: c_int, + delay: c_uint) -> c_int) +externfn!(fn uv_tcp_simultaneous_accepts(h: *uv_tcp_t, enable: c_int) -> c_int) +externfn!(fn uv_tcp_getsockname(h: *uv_tcp_t, name: *sockaddr, + len: *mut c_int) -> c_int) +externfn!(fn uv_tcp_getpeername(h: *uv_tcp_t, name: *sockaddr, + len: *mut c_int) -> c_int) +externfn!(fn uv_ip4_addr(ip: *c_char, port: c_int, addr: *sockaddr) -> c_int) +externfn!(fn uv_ip6_addr(ip: *c_char, port: c_int, addr: *sockaddr) -> c_int) + +// udp bindings +externfn!(fn uv_udp_init(l: *uv_loop_t, h: *uv_udp_t) -> c_int) +externfn!(fn uv_udp_bind(h: *uv_udp_t, addr: *sockaddr, flags: c_uint) -> c_int) +externfn!(fn uv_udp_recv_start(server: *uv_udp_t, + on_alloc: uv_alloc_cb, + on_recv: uv_udp_recv_cb) -> c_int) +externfn!(fn uv_udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, + interface_addr: *c_char, + membership: uv_membership) -> c_int) +externfn!(fn uv_udp_recv_stop(server: *uv_udp_t) -> c_int) +externfn!(fn uv_udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int) +externfn!(fn uv_udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int) +externfn!(fn uv_udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int) +externfn!(fn uv_udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int) +externfn!(fn uv_udp_getsockname(h: *uv_udp_t, name: *sockaddr, + len: 
*mut c_int) -> c_int) + +pub unsafe fn uv_udp_send(req: *uv_udp_send_t, + handle: *uv_udp_t, + buf_in: &[uv_buf_t], + addr: *sockaddr, + cb: uv_udp_send_cb) -> c_int { + externfn!(fn uv_udp_send(req: *uv_write_t, stream: *uv_stream_t, + buf_in: *uv_buf_t, buf_cnt: c_int, addr: *sockaddr, + cb: uv_udp_send_cb) -> c_int) -} + let buf_ptr = vec::raw::to_ptr(buf_in); + let buf_cnt = buf_in.len() as i32; + return uv_udp_send(req, handle, buf_ptr, buf_cnt, addr, cb); +} + +// timer bindings +externfn!(fn uv_timer_init(l: *uv_loop_t, t: *uv_timer_t) -> c_int) +externfn!(fn uv_timer_start(t: *uv_timer_t, cb: uv_timer_cb, + timeout: libc::uint64_t, + repeat: libc::uint64_t) -> c_int) +externfn!(fn uv_timer_stop(handle: *uv_timer_t) -> c_int) + +// fs operations +externfn!(fn uv_fs_open(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, + flags: c_int, mode: c_int, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_unlink(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_write(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, + len: size_t, offset: i64, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_read(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, + len: size_t, offset: i64, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_close(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_stat(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_fstat(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_mkdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, + mode: c_int, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_rmdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_readdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, + flags: c_int, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_req_cleanup(req: *uv_fs_t)) externfn!(fn uv_fs_fsync(handle: *uv_loop_t, req: *uv_fs_t, file: c_int, - cb: *u8) -> 
c_int) + cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_fdatasync(handle: *uv_loop_t, req: *uv_fs_t, file: c_int, - cb: *u8) -> c_int) + cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_ftruncate(handle: *uv_loop_t, req: *uv_fs_t, file: c_int, - offset: i64, cb: *u8) -> c_int) + offset: i64, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_readlink(handle: *uv_loop_t, req: *uv_fs_t, file: *c_char, - cb: *u8) -> c_int) + cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_symlink(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, - dst: *c_char, flags: c_int, cb: *u8) -> c_int) + dst: *c_char, flags: c_int, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_rename(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, + dst: *c_char, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_utime(handle: *uv_loop_t, req: *uv_fs_t, path: *c_char, + atime: c_double, mtime: c_double, + cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_link(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, - dst: *c_char, cb: *u8) -> c_int) + dst: *c_char, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_chown(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, - uid: uv_uid_t, gid: uv_gid_t, cb: *u8) -> c_int) + uid: uv_uid_t, gid: uv_gid_t, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_chmod(handle: *uv_loop_t, req: *uv_fs_t, path: *c_char, + mode: c_int, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_lstat(handle: *uv_loop_t, req: *uv_fs_t, file: *c_char, - cb: *u8) -> c_int) + cb: uv_fs_cb) -> c_int) + +// getaddrinfo +externfn!(fn uv_getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t, + getaddrinfo_cb: uv_getaddrinfo_cb, + node: *c_char, service: *c_char, + hints: *addrinfo) -> c_int) +externfn!(fn uv_freeaddrinfo(ai: *addrinfo)) + +// process spawning +externfn!(fn uv_spawn(loop_ptr: *uv_loop_t, outptr: *uv_process_t, + options: *uv_process_options_t) -> c_int) +externfn!(fn uv_process_kill(p: *uv_process_t, signum: c_int) -> c_int) + +// pipes +externfn!(fn uv_pipe_init(l: *uv_loop_t, p: *uv_pipe_t, ipc: c_int) -> c_int) +externfn!(fn 
uv_pipe_open(pipe: *uv_pipe_t, file: c_int) -> c_int) +externfn!(fn uv_pipe_bind(pipe: *uv_pipe_t, name: *c_char) -> c_int) +externfn!(fn uv_pipe_connect(req: *uv_connect_t, handle: *uv_pipe_t, + name: *c_char, cb: uv_connect_cb)) + +// tty +externfn!(fn uv_tty_init(l: *uv_loop_t, tty: *uv_tty_t, fd: c_int, + readable: c_int) -> c_int) +externfn!(fn uv_tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int) +externfn!(fn uv_tty_get_winsize(tty: *uv_tty_t, width: *c_int, + height: *c_int) -> c_int) + +// signals +externfn!(fn uv_signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int) +externfn!(fn uv_signal_start(h: *uv_signal_t, cb: uv_signal_cb, + signum: c_int) -> c_int) +externfn!(fn uv_signal_stop(handle: *uv_signal_t) -> c_int) // libuv requires various system libraries to successfully link on some // platforms diff --git a/src/libstd/logging.rs b/src/libstd/logging.rs index 35a3ca3cff05d..1c464110ce051 100644 --- a/src/libstd/logging.rs +++ b/src/libstd/logging.rs @@ -107,14 +107,16 @@ pub fn log(_level: u32, args: &fmt::Arguments) { let optional_task: Option<*mut Task> = Local::try_unsafe_borrow(); match optional_task { Some(local) => { - // Use the available logger - (*local).logger.log(args); - } - None => { - // There is no logger anywhere, just write to stderr - let mut logger = StdErrLogger::new(); - logger.log(args); + match (*local).logger { + // Use the available logger if we have one + Some(ref mut logger) => return logger.log(args), + None => {} + } } + None => {} } + // There is no logger anywhere, just write to stderr + let mut logger = StdErrLogger::new(); + logger.log(args); } } diff --git a/src/libstd/rt/basic.rs b/src/libstd/rt/basic.rs index 86d3f8a52bace..322c58bc2b807 100644 --- a/src/libstd/rt/basic.rs +++ b/src/libstd/rt/basic.rs @@ -15,7 +15,8 @@ use prelude::*; use cast; -use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausibleIdleCallback}; +use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausibleIdleCallback, + 
Callback}; use unstable::sync::Exclusive; use util; @@ -25,9 +26,9 @@ pub fn event_loop() -> ~EventLoop { } struct BasicLoop { - work: ~[~fn()], // pending work - idle: Option<*BasicPausible>, // only one is allowed - remotes: ~[(uint, ~fn())], + work: ~[proc()], // pending work + idle: Option<*mut BasicPausible>, // only one is allowed + remotes: ~[(uint, ~Callback)], next_remote: uint, messages: Exclusive<~[Message]> } @@ -86,8 +87,8 @@ impl BasicLoop { fn message(&mut self, message: Message) { match message { RunRemote(i) => { - match self.remotes.iter().find(|& &(id, _)| id == i) { - Some(&(_, ref f)) => (*f)(), + match self.remotes.mut_iter().find(|& &(id, _)| id == i) { + Some(&(_, ref mut f)) => f.call(), None => unreachable!() } } @@ -106,7 +107,7 @@ impl BasicLoop { match self.idle { Some(idle) => { if (*idle).active { - (*(*idle).work.get_ref())(); + (*idle).work.call(); } } None => {} @@ -144,22 +145,22 @@ impl EventLoop for BasicLoop { } } - fn callback(&mut self, f: ~fn()) { + fn callback(&mut self, f: proc()) { self.work.push(f); } // XXX: Seems like a really weird requirement to have an event loop provide. 
- fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback { - let callback = ~BasicPausible::new(self); + fn pausible_idle_callback(&mut self, cb: ~Callback) -> ~PausibleIdleCallback { + let callback = ~BasicPausible::new(self, cb); rtassert!(self.idle.is_none()); unsafe { - let cb_ptr: &*BasicPausible = cast::transmute(&callback); + let cb_ptr: &*mut BasicPausible = cast::transmute(&callback); self.idle = Some(*cb_ptr); } return callback as ~PausibleIdleCallback; } - fn remote_callback(&mut self, f: ~fn()) -> ~RemoteCallback { + fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback { let id = self.next_remote; self.next_remote += 1; self.remotes.push((id, f)); @@ -203,36 +204,27 @@ impl Drop for BasicRemote { struct BasicPausible { eloop: *mut BasicLoop, - work: Option<~fn()>, + work: ~Callback, active: bool, } impl BasicPausible { - fn new(eloop: &mut BasicLoop) -> BasicPausible { + fn new(eloop: &mut BasicLoop, cb: ~Callback) -> BasicPausible { BasicPausible { active: false, - work: None, + work: cb, eloop: eloop, } } } impl PausibleIdleCallback for BasicPausible { - fn start(&mut self, f: ~fn()) { - rtassert!(!self.active && self.work.is_none()); - self.active = true; - self.work = Some(f); - } fn pause(&mut self) { self.active = false; } fn resume(&mut self) { self.active = true; } - fn close(&mut self) { - self.active = false; - self.work = None; - } } impl Drop for BasicPausible { diff --git a/src/libstd/rt/io/fs.rs b/src/libstd/rt/io/fs.rs index 22d7ea55f3b45..06c07308cf634 100644 --- a/src/libstd/rt/io/fs.rs +++ b/src/libstd/rt/io/fs.rs @@ -587,6 +587,22 @@ pub fn rmdir_recursive(path: &Path) { rmdir(path); } +/// Changes the timestamps for a file's last modification and access time. +/// The file at the path specified will have its last access time set to +/// `atime` and its modification time set to `mtime`. The times specified should +/// be in milliseconds. 
+/// +/// # Errors +/// +/// This function will raise on the `io_error` condition if an error +/// happens. +// FIXME(#10301) these arguments should not be u64 +pub fn change_file_times(path: &Path, atime: u64, mtime: u64) { + do io_raise |io| { + io.fs_utime(&path.to_c_str(), atime, mtime) + }; +} + impl Reader for File { fn read(&mut self, buf: &mut [u8]) -> Option { match self.fd.read(buf) { @@ -704,8 +720,8 @@ mod test { use rt::io; use str; use super::{File, rmdir, mkdir, readdir, rmdir_recursive, mkdir_recursive, - copy, unlink, stat, symlink, link, readlink, chmod, chown, - lstat}; + copy, unlink, stat, symlink, link, readlink, chmod, + lstat, change_file_times}; fn tmpdir() -> Path { use os; @@ -1244,4 +1260,29 @@ mod test { rmdir_recursive(&tmpdir); } + + #[test] + fn utime() { + let tmpdir = tmpdir(); + let path = tmpdir.join("a"); + File::create(&path); + + change_file_times(&path, 1000, 2000); + assert_eq!(path.stat().accessed, 1000); + assert_eq!(path.stat().modified, 2000); + + rmdir_recursive(&tmpdir); + } + + #[test] + fn utime_noexist() { + let tmpdir = tmpdir(); + + match io::result(|| change_file_times(&tmpdir.join("a"), 100, 200)) { + Ok(*) => fail!(), + Err(*) => {} + } + + rmdir_recursive(&tmpdir); + } } diff --git a/src/libstd/rt/io/mod.rs b/src/libstd/rt/io/mod.rs index f01ce5012eb25..ce9504a5b43d9 100644 --- a/src/libstd/rt/io/mod.rs +++ b/src/libstd/rt/io/mod.rs @@ -423,7 +423,11 @@ pub fn ignore_io_error(cb: &fn() -> T) -> T { /// closure if no error occurred. 
pub fn result(cb: &fn() -> T) -> Result { let mut err = None; - let ret = io_error::cond.trap(|e| err = Some(e)).inside(cb); + let ret = io_error::cond.trap(|e| { + if err.is_none() { + err = Some(e); + } + }).inside(cb); match err { Some(e) => Err(e), None => Ok(ret), @@ -1142,8 +1146,9 @@ pub struct FileStat { /// The file permissions currently on the file perm: FilePermission, - // XXX: These time fields are pretty useless without an actual time - // representation, what are the milliseconds relative to? + // FIXME(#10301): These time fields are pretty useless without an actual + // time representation, what are the milliseconds relative + // to? /// The time that the file was created at, in platform-dependent /// milliseconds diff --git a/src/libstd/rt/io/native/file.rs b/src/libstd/rt/io/native/file.rs index 35057f475cf5a..6d4f29182dda6 100644 --- a/src/libstd/rt/io/native/file.rs +++ b/src/libstd/rt/io/native/file.rs @@ -80,18 +80,20 @@ pub type fd_t = libc::c_int; pub struct FileDesc { priv fd: fd_t, + priv close_on_drop: bool, } impl FileDesc { /// Create a `FileDesc` from an open C file descriptor. /// /// The `FileDesc` will take ownership of the specified file descriptor and - /// close it upon destruction. + /// close it upon destruction if the `close_on_drop` flag is true, otherwise + /// it will not close the file descriptor when this `FileDesc` is dropped. /// /// Note that all I/O operations done on this object will be *blocking*, but /// they do not require the runtime to be active. - pub fn new(fd: fd_t) -> FileDesc { - FileDesc { fd: fd } + pub fn new(fd: fd_t, close_on_drop: bool) -> FileDesc { + FileDesc { fd: fd, close_on_drop: close_on_drop } } } @@ -137,7 +139,9 @@ impl Writer for FileDesc { impl Drop for FileDesc { #[fixed_stack_segment] #[inline(never)] fn drop(&mut self) { - unsafe { libc::close(self.fd); } + if self.close_on_drop { + unsafe { libc::close(self.fd); } + } } } @@ -245,8 +249,8 @@ mod tests { // opening or closing files. 
unsafe { let os::Pipe { input, out } = os::pipe(); - let mut reader = FileDesc::new(input); - let mut writer = FileDesc::new(out); + let mut reader = FileDesc::new(input, true); + let mut writer = FileDesc::new(out, true); writer.write(bytes!("test")); let mut buf = [0u8, ..4]; diff --git a/src/libstd/rt/io/native/process.rs b/src/libstd/rt/io/native/process.rs index 0fa454b94d066..f5c39de1bf44e 100644 --- a/src/libstd/rt/io/native/process.rs +++ b/src/libstd/rt/io/native/process.rs @@ -105,9 +105,9 @@ impl Process { Process { pid: res.pid, handle: res.handle, - input: in_pipe.map(|pipe| file::FileDesc::new(pipe.out)), - output: out_pipe.map(|pipe| file::FileDesc::new(pipe.input)), - error: err_pipe.map(|pipe| file::FileDesc::new(pipe.input)), + input: in_pipe.map(|pipe| file::FileDesc::new(pipe.out, true)), + output: out_pipe.map(|pipe| file::FileDesc::new(pipe.input, true)), + error: err_pipe.map(|pipe| file::FileDesc::new(pipe.input, true)), exit_code: None, } } diff --git a/src/libstd/rt/io/native/stdio.rs b/src/libstd/rt/io/native/stdio.rs index 5661725d77baa..ddfbb9a8f8c28 100644 --- a/src/libstd/rt/io/native/stdio.rs +++ b/src/libstd/rt/io/native/stdio.rs @@ -36,10 +36,8 @@ pub struct StdIn { impl StdIn { /// Duplicates the stdin file descriptor, returning an io::Reader - #[fixed_stack_segment] #[inline(never)] pub fn new() -> StdIn { - let fd = unsafe { libc::dup(libc::STDIN_FILENO) }; - StdIn { fd: file::FileDesc::new(fd) } + StdIn { fd: file::FileDesc::new(libc::STDIN_FILENO, false) } } } @@ -54,10 +52,8 @@ pub struct StdOut { impl StdOut { /// Duplicates the specified file descriptor, returning an io::Writer - #[fixed_stack_segment] #[inline(never)] pub fn new(fd: file::fd_t) -> StdOut { - let fd = unsafe { libc::dup(fd) }; - StdOut { fd: file::FileDesc::new(fd) } + StdOut { fd: file::FileDesc::new(fd, false) } } } diff --git a/src/libstd/rt/io/stdio.rs b/src/libstd/rt/io/stdio.rs index d33821a34b1ee..acc2e11f067e6 100644 --- a/src/libstd/rt/io/stdio.rs 
+++ b/src/libstd/rt/io/stdio.rs @@ -33,7 +33,8 @@ use result::{Ok, Err}; use rt::io::buffered::LineBufferedWriter; use rt::rtio::{IoFactory, RtioTTY, RtioFileStream, with_local_io, CloseAsynchronously}; -use super::{Reader, Writer, io_error, IoError, OtherIoError}; +use super::{Reader, Writer, io_error, IoError, OtherIoError, + standard_error, EndOfFile}; // And so begins the tale of acquiring a uv handle to a stdio stream on all // platforms in all situations. Our story begins by splitting the world into two @@ -203,6 +204,15 @@ impl Reader for StdReader { File(ref mut file) => file.read(buf).map(|i| i as uint), }; match ret { + // When reading a piped stdin, libuv will return 0-length reads when + // stdin reaches EOF. For pretty much all other streams it will + // return an actual EOF error, but apparently for stdin it's a + // little different. Hence, here we convert a 0 length read to an + // end-of-file indicator so the caller knows to stop reading. + Ok(0) => { + io_error::cond.raise(standard_error(EndOfFile)); + None + } Ok(amt) => Some(amt as uint), Err(e) => { io_error::cond.raise(e); @@ -277,12 +287,10 @@ impl StdWriter { } } - /// Returns whether this tream is attached to a TTY instance or not. - /// - /// This is similar to libc's isatty() function + /// Returns whether this stream is attached to a TTY instance or not. 
pub fn isatty(&self) -> bool { match self.inner { - TTY(ref tty) => tty.isatty(), + TTY(*) => true, File(*) => false, } } diff --git a/src/libstd/rt/io/timer.rs b/src/libstd/rt/io/timer.rs index 36092dfbe34e6..b0cf7dee10abb 100644 --- a/src/libstd/rt/io/timer.rs +++ b/src/libstd/rt/io/timer.rs @@ -142,14 +142,10 @@ mod test { fn oneshot_twice() { do run_in_mt_newsched_task { let mut timer = Timer::new().unwrap(); - let port1 = timer.oneshot(100000000000); + let port1 = timer.oneshot(10000); let port = timer.oneshot(1); port.recv(); - let port1 = Cell::new(port1); - let ret = do task::try { - port1.take().recv(); - }; - assert!(ret.is_err()); + assert_eq!(port1.try_recv(), None); } } @@ -160,11 +156,7 @@ mod test { let port = timer.oneshot(100000000000); timer.sleep(1); // this should invalidate the port - let port = Cell::new(port); - let ret = do task::try { - port.take().recv(); - }; - assert!(ret.is_err()); + assert_eq!(port.try_recv(), None); } } diff --git a/src/libstd/rt/logging.rs b/src/libstd/rt/logging.rs index cb66d6f6199ae..c37195a7b1553 100644 --- a/src/libstd/rt/logging.rs +++ b/src/libstd/rt/logging.rs @@ -172,20 +172,18 @@ pub trait Logger { /// This logger emits output to the stderr of the process, and contains a lazily /// initialized event-loop driven handle to the stream. pub struct StdErrLogger { - priv handle: Option>, + priv handle: LineBufferedWriter, } impl StdErrLogger { - pub fn new() -> StdErrLogger { StdErrLogger { handle: None } } + pub fn new() -> StdErrLogger { + StdErrLogger { handle: LineBufferedWriter::new(io::stderr()) } + } } impl Logger for StdErrLogger { fn log(&mut self, args: &fmt::Arguments) { - // First time logging? Get a handle to the stderr of this process. 
- if self.handle.is_none() { - self.handle = Some(LineBufferedWriter::new(io::stderr())); - } - fmt::writeln(self.handle.get_mut_ref() as &mut io::Writer, args); + fmt::writeln(&mut self.handle as &mut io::Writer, args); } } diff --git a/src/libstd/rt/macros.rs b/src/libstd/rt/macros.rs index c6ff3427c15f6..3ef57710344dc 100644 --- a/src/libstd/rt/macros.rs +++ b/src/libstd/rt/macros.rs @@ -34,7 +34,7 @@ macro_rules! rtassert ( ( $arg:expr ) => ( { if ::rt::util::ENFORCE_SANITY { if !$arg { - rtabort!("assertion failed: {}", stringify!($arg)); + rtabort!(" assertion failed: {}", stringify!($arg)); } } } ) @@ -42,7 +42,7 @@ macro_rules! rtassert ( macro_rules! rtabort ( - ($($msg:tt)*) => ( { - ::rt::util::abort(format!($($msg)*)); + ($($arg:tt)*) => ( { + ::rt::util::abort(format!($($arg)*)); } ) ) diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index d24de7cbfee51..d623914cdadc9 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -24,11 +24,15 @@ use path::Path; use super::io::{SeekStyle}; use super::io::{FileMode, FileAccess, FileStat, FilePermission}; +pub trait Callback { + fn call(&mut self); +} + pub trait EventLoop { fn run(&mut self); - fn callback(&mut self, ~fn()); - fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback; - fn remote_callback(&mut self, ~fn()) -> ~RemoteCallback; + fn callback(&mut self, proc()); + fn pausible_idle_callback(&mut self, ~Callback) -> ~PausibleIdleCallback; + fn remote_callback(&mut self, ~Callback) -> ~RemoteCallback; /// The asynchronous I/O services. 
Not all event loops may provide one // FIXME(#9382) this is an awful interface @@ -121,6 +125,8 @@ pub trait IoFactory { fn fs_readlink(&mut self, path: &CString) -> Result; fn fs_symlink(&mut self, src: &CString, dst: &CString) -> Result<(), IoError>; fn fs_link(&mut self, src: &CString, dst: &CString) -> Result<(), IoError>; + fn fs_utime(&mut self, src: &CString, atime: u64, mtime: u64) -> + Result<(), IoError>; // misc fn timer_init(&mut self) -> Result<~RtioTimer, IoError>; @@ -209,8 +215,6 @@ pub trait RtioUnixListener { pub trait RtioUnixAcceptor { fn accept(&mut self) -> Result<~RtioPipe, IoError>; - fn accept_simultaneously(&mut self) -> Result<(), IoError>; - fn dont_accept_simultaneously(&mut self) -> Result<(), IoError>; } pub trait RtioTTY { @@ -218,14 +222,11 @@ pub trait RtioTTY { fn write(&mut self, buf: &[u8]) -> Result<(), IoError>; fn set_raw(&mut self, raw: bool) -> Result<(), IoError>; fn get_winsize(&mut self) -> Result<(int, int), IoError>; - fn isatty(&self) -> bool; } pub trait PausibleIdleCallback { - fn start(&mut self, f: ~fn()); fn pause(&mut self); fn resume(&mut self); - fn close(&mut self); } pub trait RtioSignal {} diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index e71cd92589c33..c2e665f490307 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -23,7 +23,7 @@ use super::message_queue::MessageQueue; use rt::kill::BlockedTask; use rt::local_ptr; use rt::local::Local; -use rt::rtio::{RemoteCallback, PausibleIdleCallback}; +use rt::rtio::{RemoteCallback, PausibleIdleCallback, Callback}; use borrow::{to_uint}; use cell::Cell; use rand::{XorShiftRng, Rng, Rand}; @@ -169,7 +169,8 @@ impl Scheduler { pub fn bootstrap(mut ~self, task: ~Task) { // Build an Idle callback. - self.idle_callback = Some(self.event_loop.pausible_idle_callback()); + let cb = ~SchedRunner as ~Callback; + self.idle_callback = Some(self.event_loop.pausible_idle_callback(cb)); // Initialize the TLS key. 
local_ptr::init_tls_key(); @@ -184,7 +185,7 @@ impl Scheduler { // Before starting our first task, make sure the idle callback // is active. As we do not start in the sleep state this is // important. - self.idle_callback.get_mut_ref().start(Scheduler::run_sched_once); + self.idle_callback.get_mut_ref().resume(); // Now, as far as all the scheduler state is concerned, we are // inside the "scheduler" context. So we can act like the @@ -202,7 +203,7 @@ impl Scheduler { // Close the idle callback. let mut sched: ~Scheduler = Local::take(); - sched.idle_callback.get_mut_ref().close(); + sched.idle_callback.take(); // Make one go through the loop to run the close callback. sched.run(); @@ -454,8 +455,7 @@ impl Scheduler { // * Task Routing Functions - Make sure tasks send up in the right // place. - fn process_task(mut ~self, mut task: ~Task, - schedule_fn: SchedulingFn) { + fn process_task(mut ~self, mut task: ~Task, schedule_fn: SchedulingFn) { rtdebug!("processing a task"); let home = task.take_unwrap_home(); @@ -767,7 +767,7 @@ impl Scheduler { } pub fn make_handle(&mut self) -> SchedHandle { - let remote = self.event_loop.remote_callback(Scheduler::run_sched_once); + let remote = self.event_loop.remote_callback(~SchedRunner as ~Callback); return SchedHandle { remote: remote, @@ -779,7 +779,7 @@ impl Scheduler { // Supporting types -type SchedulingFn = ~fn(~Scheduler, ~Task); +type SchedulingFn = extern "Rust" fn (~Scheduler, ~Task); pub enum SchedMessage { Wake, @@ -802,6 +802,14 @@ impl SchedHandle { } } +struct SchedRunner; + +impl Callback for SchedRunner { + fn call(&mut self) { + Scheduler::run_sched_once(); + } +} + struct CleanupJob { task: ~Task, f: UnsafeTaskReceiver diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index cf7c291d189e4..7e374fc602138 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -50,7 +50,7 @@ pub struct Task { heap: LocalHeap, priv gc: GarbageCollector, storage: LocalStorage, - logger: StdErrLogger, + logger: 
Option, unwinder: Unwinder, taskgroup: Option, death: Death, @@ -180,7 +180,7 @@ impl Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(None), - logger: StdErrLogger::new(), + logger: None, unwinder: Unwinder { unwinding: false, cause: None }, taskgroup: None, death: Death::new(), @@ -215,7 +215,7 @@ impl Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(None), - logger: StdErrLogger::new(), + logger: None, unwinder: Unwinder { unwinding: false, cause: None }, taskgroup: None, death: Death::new(), @@ -238,7 +238,7 @@ impl Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(None), - logger: StdErrLogger::new(), + logger: None, unwinder: Unwinder { unwinding: false, cause: None }, taskgroup: None, // FIXME(#7544) make watching optional @@ -320,6 +320,7 @@ impl Task { } None => {} } + self.logger.take(); } } diff --git a/src/libstd/run.rs b/src/libstd/run.rs index 74f4ed3d55e4b..fe23944397d87 100644 --- a/src/libstd/run.rs +++ b/src/libstd/run.rs @@ -436,13 +436,13 @@ mod tests { } fn writeclose(fd: c_int, s: &str) { - let mut writer = file::FileDesc::new(fd); + let mut writer = file::FileDesc::new(fd, true); writer.write(s.as_bytes()); } fn readclose(fd: c_int) -> ~str { let mut res = ~[]; - let mut reader = file::FileDesc::new(fd); + let mut reader = file::FileDesc::new(fd, true); let mut buf = [0, ..1024]; loop { match reader.read(buf) { diff --git a/src/libuv b/src/libuv index d88cf5652a1af..7ac7e0248b347 160000 --- a/src/libuv +++ b/src/libuv @@ -1 +1 @@ -Subproject commit d88cf5652a1afb23939da0bae86c70ec521b9921 +Subproject commit 7ac7e0248b34732e9963cdb8e31f7e612d23d14b diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index a4361f14f69b2..f3be486a25ab2 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -31,244 +31,16 @@ rust_uv_loop_new() { return (void*)uv_loop_new(); } -extern "C" void -rust_uv_loop_delete(uv_loop_t* loop) { - // FIXME: This is a workaround for #1815. 
libev uses realloc(0) to - // free the loop, which valgrind doesn't like. We have suppressions - // to make valgrind ignore them. - // - // Valgrind also has a sanity check when collecting allocation backtraces - // that the stack pointer must be at least 512 bytes into the stack (at - // least 512 bytes of frames must have come before). When this is not - // the case it doesn't collect the backtrace. - // - // Unfortunately, with our spaghetti stacks that valgrind check triggers - // sometimes and we don't get the backtrace for the realloc(0), it - // fails to be suppressed, and it gets reported as 0 bytes lost - // from a malloc with no backtrace. - // - // This pads our stack with some extra space before deleting the loop - alloca(512); - uv_loop_delete(loop); -} - extern "C" void rust_uv_loop_set_data(uv_loop_t* loop, void* data) { loop->data = data; } -extern "C" void -rust_uv_run(uv_loop_t* loop) { - uv_run(loop, UV_RUN_DEFAULT); -} - -extern "C" void -rust_uv_close(uv_handle_t* handle, uv_close_cb cb) { - uv_close(handle, cb); -} - -extern "C" void -rust_uv_walk(uv_loop_t* loop, uv_walk_cb cb, void* arg) { - uv_walk(loop, cb, arg); -} - -extern "C" void -rust_uv_async_send(uv_async_t* handle) { - uv_async_send(handle); -} - -extern "C" int -rust_uv_async_init(uv_loop_t* loop_handle, - uv_async_t* async_handle, - uv_async_cb cb) { - return uv_async_init(loop_handle, async_handle, cb); -} - -extern "C" int -rust_uv_timer_init(uv_loop_t* loop, uv_timer_t* timer) { - return uv_timer_init(loop, timer); -} - -extern "C" int -rust_uv_timer_start(uv_timer_t* the_timer, uv_timer_cb cb, - int64_t timeout, int64_t repeat) { - return uv_timer_start(the_timer, cb, timeout, repeat); -} - -extern "C" int -rust_uv_timer_stop(uv_timer_t* the_timer) { - return uv_timer_stop(the_timer); -} - -extern "C" int -rust_uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) { - return uv_tcp_init(loop, handle); -} - -extern "C" int -rust_uv_tcp_connect(uv_connect_t* connect_ptr, - uv_tcp_t* 
tcp_ptr, - uv_connect_cb cb, - sockaddr_in* addr_ptr) { - // FIXME ref #2064 - sockaddr_in addr = *addr_ptr; - int result = uv_tcp_connect(connect_ptr, tcp_ptr, addr, cb); - return result; -} - -extern "C" int -rust_uv_tcp_bind(uv_tcp_t* tcp_server, sockaddr_in* addr_ptr) { - // FIXME ref #2064 - sockaddr_in addr = *addr_ptr; - return uv_tcp_bind(tcp_server, addr); -} -extern "C" int -rust_uv_tcp_connect6(uv_connect_t* connect_ptr, - uv_tcp_t* tcp_ptr, - uv_connect_cb cb, - sockaddr_in6* addr_ptr) { - // FIXME ref #2064 - sockaddr_in6 addr = *addr_ptr; - int result = uv_tcp_connect6(connect_ptr, tcp_ptr, addr, cb); - return result; -} - -extern "C" int -rust_uv_tcp_bind6 -(uv_tcp_t* tcp_server, sockaddr_in6* addr_ptr) { - // FIXME ref #2064 - sockaddr_in6 addr = *addr_ptr; - return uv_tcp_bind6(tcp_server, addr); -} - -extern "C" int -rust_uv_tcp_getpeername -(uv_tcp_t* handle, sockaddr_storage* name) { - // sockaddr_storage is big enough to hold either - // sockaddr_in or sockaddr_in6 - int namelen = sizeof(sockaddr_in); - return uv_tcp_getpeername(handle, (sockaddr*)name, &namelen); -} - -extern "C" int -rust_uv_tcp_getsockname -(uv_tcp_t* handle, sockaddr_storage* name) { - // sockaddr_storage is big enough to hold either - // sockaddr_in or sockaddr_in6 - int namelen = sizeof(sockaddr_storage); - return uv_tcp_getsockname(handle, (sockaddr*)name, &namelen); -} - -extern "C" int -rust_uv_tcp_nodelay -(uv_tcp_t* handle, int enable) { - return uv_tcp_nodelay(handle, enable); -} - -extern "C" int -rust_uv_tcp_keepalive -(uv_tcp_t* handle, int enable, unsigned int delay) { - return uv_tcp_keepalive(handle, enable, delay); -} - -extern "C" int -rust_uv_tcp_simultaneous_accepts -(uv_tcp_t* handle, int enable) { - return uv_tcp_simultaneous_accepts(handle, enable); -} - -extern "C" int -rust_uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) { - return uv_udp_init(loop, handle); -} - -extern "C" int -rust_uv_udp_bind(uv_udp_t* server, sockaddr_in* addr_ptr, unsigned flags) 
{ - return uv_udp_bind(server, *addr_ptr, flags); -} - -extern "C" int -rust_uv_udp_bind6(uv_udp_t* server, sockaddr_in6* addr_ptr, unsigned flags) { - return uv_udp_bind6(server, *addr_ptr, flags); -} - -extern "C" int -rust_uv_udp_send(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, - int buf_cnt, sockaddr_in* addr_ptr, uv_udp_send_cb cb) { - return uv_udp_send(req, handle, buf_in, buf_cnt, *addr_ptr, cb); -} - -extern "C" int -rust_uv_udp_send6(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, - int buf_cnt, sockaddr_in6* addr_ptr, uv_udp_send_cb cb) { - return uv_udp_send6(req, handle, buf_in, buf_cnt, *addr_ptr, cb); -} - -extern "C" int -rust_uv_udp_recv_start(uv_udp_t* server, uv_alloc_cb on_alloc, uv_udp_recv_cb on_read) { - return uv_udp_recv_start(server, on_alloc, on_read); -} - -extern "C" int -rust_uv_udp_recv_stop(uv_udp_t* server) { - return uv_udp_recv_stop(server); -} - extern "C" uv_udp_t* rust_uv_get_udp_handle_from_send_req(uv_udp_send_t* send_req) { return send_req->handle; } -extern "C" int -rust_uv_udp_getsockname -(uv_udp_t* handle, sockaddr_storage* name) { - // sockaddr_storage is big enough to hold either - // sockaddr_in or sockaddr_in6 - int namelen = sizeof(sockaddr_storage); - return uv_udp_getsockname(handle, (sockaddr*)name, &namelen); -} - -extern "C" int -rust_uv_udp_set_membership -(uv_udp_t* handle, const char* m_addr, const char* i_addr, uv_membership membership) { - return uv_udp_set_membership(handle, m_addr, i_addr, membership); -} - -extern "C" int -rust_uv_udp_set_multicast_loop -(uv_udp_t* handle, int on) { - return uv_udp_set_multicast_loop(handle, on); -} - -extern "C" int -rust_uv_udp_set_multicast_ttl -(uv_udp_t* handle, int ttl) { - return uv_udp_set_multicast_ttl(handle, ttl); -} - -extern "C" int -rust_uv_udp_set_ttl -(uv_udp_t* handle, int ttl) { - return uv_udp_set_ttl(handle, ttl); -} - -extern "C" int -rust_uv_udp_set_broadcast -(uv_udp_t* handle, int on) { - return uv_udp_set_broadcast(handle, 
on); -} - -extern "C" int -rust_uv_listen(uv_stream_t* stream, int backlog, - uv_connection_cb cb) { - return uv_listen(stream, backlog, cb); -} - -extern "C" int -rust_uv_accept(uv_stream_t* server, uv_stream_t* client) { - return uv_accept(server, client); -} - extern "C" uv_stream_t* rust_uv_get_stream_handle_from_connect_req(uv_connect_t* connect) { return connect->handle; @@ -278,11 +50,6 @@ rust_uv_get_stream_handle_from_write_req(uv_write_t* write_req) { return write_req->handle; } -extern "C" void -rust_uv_buf_init(uv_buf_t* out_buf, char* base, size_t len) { - *out_buf = uv_buf_init(base, len); -} - extern "C" uv_loop_t* rust_uv_get_loop_for_uv_handle(uv_handle_t* handle) { return handle->loop; @@ -319,178 +86,50 @@ rust_uv_set_data_for_req(uv_req_t* req, void* data) { req->data = data; } -extern "C" char* -rust_uv_get_base_from_buf(uv_buf_t buf) { - return buf.base; -} - -extern "C" size_t -rust_uv_get_len_from_buf(uv_buf_t buf) { - return buf.len; -} - -extern "C" const char* -rust_uv_strerror(int err) { - return uv_strerror(err); -} - -extern "C" const char* -rust_uv_err_name(int err) { - return uv_err_name(err); -} - -extern "C" int -rust_uv_write(uv_write_t* req, uv_stream_t* handle, - uv_buf_t* bufs, int buf_cnt, - uv_write_cb cb) { - return uv_write(req, handle, bufs, buf_cnt, cb); -} -extern "C" int -rust_uv_read_start(uv_stream_t* stream, uv_alloc_cb on_alloc, - uv_read_cb on_read) { - return uv_read_start(stream, on_alloc, on_read); -} - extern "C" int -rust_uv_read_stop(uv_stream_t* stream) { - return uv_read_stop(stream); -} - -extern "C" struct sockaddr_in -rust_uv_ip4_addr(const char* ip, int port) { - struct sockaddr_in addr = uv_ip4_addr(ip, port); - return addr; -} -extern "C" struct sockaddr_in6 -rust_uv_ip6_addr(const char* ip, int port) { - return uv_ip6_addr(ip, port); -} - -extern "C" struct sockaddr_in* -rust_uv_ip4_addrp(const char* ip, int port) { - struct sockaddr_in addr = uv_ip4_addr(ip, port); - struct sockaddr_in *addrp = 
(sockaddr_in*)malloc(sizeof(struct sockaddr_in)); - assert(addrp); - memcpy(addrp, &addr, sizeof(struct sockaddr_in)); - return addrp; -} -extern "C" struct sockaddr_in6* -rust_uv_ip6_addrp(const char* ip, int port) { - struct sockaddr_in6 addr = uv_ip6_addr(ip, port); - struct sockaddr_in6 *addrp = (sockaddr_in6*)malloc(sizeof(struct sockaddr_in6)); - assert(addrp); - memcpy(addrp, &addr, sizeof(struct sockaddr_in6)); - return addrp; -} - -extern "C" struct sockaddr_storage * -rust_uv_malloc_sockaddr_storage() { - struct sockaddr_storage *ss = (sockaddr_storage *)malloc(sizeof(struct sockaddr_storage)); - return ss; +rust_sockaddr_size() { + return sizeof(struct sockaddr_storage); } -extern "C" void -rust_uv_free_sockaddr_storage(struct sockaddr_storage *ss) { - free(ss); +extern "C" struct sockaddr* +rust_malloc_ip4_addr(char *name, int port) { + struct sockaddr_in *addr = (struct sockaddr_in*) calloc(1, rust_sockaddr_size()); + assert(addr != NULL); + addr->sin_port = htons(port); + assert(uv_inet_pton(AF_INET, name, &addr->sin_addr) == 0); + addr->sin_family = AF_INET; + return (struct sockaddr*) addr; } -extern "C" void -rust_uv_free_ip4_addr(sockaddr_in *addrp) { - free(addrp); +extern "C" struct sockaddr* +rust_malloc_ip6_addr(char *name, int port) { + struct sockaddr_in6 *addr = (struct sockaddr_in6*) calloc(1, rust_sockaddr_size()); + assert(addr != NULL); + addr->sin6_port = htons(port); + assert(uv_inet_pton(AF_INET6, name, &addr->sin6_addr) == 0); + addr->sin6_family = AF_INET6; + return (struct sockaddr*) addr; } -extern "C" void -rust_uv_free_ip6_addr(sockaddr_in6 *addrp) { - free(addrp); -} - -extern "C" int -rust_uv_ip4_name(struct sockaddr_in* src, char* dst, size_t size) { - return uv_ip4_name(src, dst, size); -} -extern "C" int -rust_uv_ip6_name(struct sockaddr_in6* src, char* dst, size_t size) { - int result = uv_ip6_name(src, dst, size); - return result; -} extern "C" unsigned int -rust_uv_ip4_port(struct sockaddr_in* src) { 
+rust_ip4_port(struct sockaddr_in* src) { return ntohs(src->sin_port); } extern "C" unsigned int -rust_uv_ip6_port(struct sockaddr_in6* src) { +rust_ip6_port(struct sockaddr_in6* src) { return ntohs(src->sin6_port); } -extern "C" int -rust_uv_getaddrinfo(uv_loop_t* loop, uv_getaddrinfo_t* handle, - uv_getaddrinfo_cb cb, - char* node, char* service, - addrinfo* hints) { - return uv_getaddrinfo(loop, handle, cb, node, service, hints); -} -extern "C" void -rust_uv_freeaddrinfo(addrinfo* res) { - uv_freeaddrinfo(res); -} - extern "C" int -rust_uv_is_ipv4_sockaddr(sockaddr* addr) { +rust_is_ipv4_sockaddr(sockaddr* addr) { return addr->sa_family == AF_INET; } extern "C" int -rust_uv_is_ipv6_sockaddr(sockaddr* addr) { +rust_is_ipv6_sockaddr(sockaddr* addr) { return addr->sa_family == AF_INET6; } -extern "C" bool -rust_uv_is_ipv4_addrinfo(addrinfo* input) { - return input->ai_family == AF_INET; -} - -extern "C" bool -rust_uv_is_ipv6_addrinfo(addrinfo* input) { - return input->ai_family == AF_INET6; -} -extern "C" addrinfo* -rust_uv_get_next_addrinfo(addrinfo* input) { - return input->ai_next; -} -extern "C" sockaddr_in* -rust_uv_addrinfo_as_sockaddr_in(addrinfo* input) { - return (sockaddr_in*)input->ai_addr; -} -extern "C" sockaddr_in6* -rust_uv_addrinfo_as_sockaddr_in6(addrinfo* input) { - return (sockaddr_in6*)input->ai_addr; -} - -extern "C" int -rust_uv_idle_init(uv_loop_t* loop, uv_idle_t* idle) { - return uv_idle_init(loop, idle); -} - -extern "C" int -rust_uv_idle_start(uv_idle_t* idle, uv_idle_cb cb) { - return uv_idle_start(idle, cb); -} - -extern "C" int -rust_uv_idle_stop(uv_idle_t* idle) { - return uv_idle_stop(idle); -} - -extern "C" size_t -rust_uv_handle_size(uintptr_t type) { - return uv_handle_size((uv_handle_type)type); -} - -extern "C" size_t -rust_uv_req_size(uintptr_t type) { - return uv_req_size((uv_req_type)type); -} - extern "C" uintptr_t rust_uv_handle_type_max() { return UV_HANDLE_TYPE_MAX; @@ -501,33 +140,6 @@ rust_uv_req_type_max() { return 
UV_REQ_TYPE_MAX; } -extern "C" int -rust_uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, - int mode, uv_fs_cb cb) { - return uv_fs_open(loop, req, path, flags, mode, cb); -} -extern "C" int -rust_uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { - return uv_fs_unlink(loop, req, path, cb); -} -extern "C" int -rust_uv_fs_write(uv_loop_t* loop, uv_fs_t* req, uv_file fd, void* buf, - size_t len, int64_t offset, uv_fs_cb cb) { - return uv_fs_write(loop, req, fd, buf, len, offset, cb); -} -extern "C" int -rust_uv_fs_read(uv_loop_t* loop, uv_fs_t* req, uv_file fd, void* buf, - size_t len, int64_t offset, uv_fs_cb cb) { - return uv_fs_read(loop, req, fd, buf, len, offset, cb); -} -extern "C" int -rust_uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) { - return uv_fs_close(loop, req, fd, cb); -} -extern "C" void -rust_uv_fs_req_cleanup(uv_fs_t* req) { - uv_fs_req_cleanup(req); -} extern "C" int rust_uv_get_result_from_fs_req(uv_fs_t* req) { return req->result; @@ -550,15 +162,6 @@ rust_uv_get_loop_from_getaddrinfo_req(uv_getaddrinfo_t* req) { return req->loop; } -extern "C" int -rust_uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { - return uv_fs_stat(loop, req, path, cb); -} -extern "C" int -rust_uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { - return uv_fs_fstat(loop, req, file, cb); -} - extern "C" void rust_uv_populate_uv_stat(uv_fs_t* req_in, uv_stat_t* stat_out) { stat_out->st_dev = req_in->statbuf.st_dev; @@ -583,39 +186,6 @@ rust_uv_populate_uv_stat(uv_fs_t* req_in, uv_stat_t* stat_out) { stat_out->st_birthtim.tv_nsec = req_in->statbuf.st_birthtim.tv_nsec; } -extern "C" int -rust_uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb) { - return uv_fs_mkdir(loop, req, path, mode, cb); -} -extern "C" int -rust_uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { - return uv_fs_rmdir(loop, req, path, 
cb); -} - -extern "C" int -rust_uv_fs_readdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, uv_fs_cb cb) { - return uv_fs_readdir(loop, req, path, flags, cb); -} -extern "C" int -rust_uv_fs_rename(uv_loop_t *loop, uv_fs_t* req, const char *path, - const char *to, uv_fs_cb cb) { - return uv_fs_rename(loop, req, path, to, cb); -} -extern "C" int -rust_uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb) { - return uv_fs_chmod(loop, req, path, mode, cb); -} - -extern "C" int -rust_uv_spawn(uv_loop_t *loop, uv_process_t *p, uv_process_options_t options) { - return uv_spawn(loop, p, options); -} - -extern "C" int -rust_uv_process_kill(uv_process_t *p, int signum) { - return uv_process_kill(p, signum); -} - extern "C" void rust_set_stdio_container_flags(uv_stdio_container_t *c, int flags) { c->flags = (uv_stdio_flags) flags; @@ -636,58 +206,7 @@ rust_uv_process_pid(uv_process_t* p) { return p->pid; } -extern "C" int -rust_uv_pipe_init(uv_loop_t *loop, uv_pipe_t* p, int ipc) { - return uv_pipe_init(loop, p, ipc); -} - -extern "C" int -rust_uv_pipe_open(uv_pipe_t *pipe, int file) { - return uv_pipe_open(pipe, file); -} - -extern "C" int -rust_uv_pipe_bind(uv_pipe_t *pipe, char *name) { - return uv_pipe_bind(pipe, name); -} - -extern "C" void -rust_uv_pipe_connect(uv_connect_t *req, uv_pipe_t *handle, - char *name, uv_connect_cb cb) { - uv_pipe_connect(req, handle, name, cb); -} - -extern "C" int -rust_uv_tty_init(uv_loop_t *loop, uv_tty_t *tty, int fd, int readable) { - return uv_tty_init(loop, tty, fd, readable); -} - -extern "C" int -rust_uv_tty_set_mode(uv_tty_t *tty, int mode) { - return uv_tty_set_mode(tty, mode); -} - -extern "C" int -rust_uv_tty_get_winsize(uv_tty_t *tty, int *width, int *height) { - return uv_tty_get_winsize(tty, width, height); -} - extern "C" int rust_uv_guess_handle(int fd) { - return uv_guess_handle(fd); -} - -extern "C" int -rust_uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) { - return 
uv_signal_init(loop, handle); -} - -extern "C" int -rust_uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) { - return uv_signal_start(handle, signal_cb, signum); -} - -extern "C" int -rust_uv_signal_stop(uv_signal_t* handle) { - return uv_signal_stop(handle); + return uv_guess_handle(fd); } diff --git a/src/test/run-pass/closure-reform.rs b/src/test/run-pass/closure-reform.rs index 18ca64d0f2762..629a807266182 100644 --- a/src/test/run-pass/closure-reform.rs +++ b/src/test/run-pass/closure-reform.rs @@ -67,7 +67,8 @@ pub fn main() { call_that(|x, y| *x + *y - z); call_cramped(|| 1, || unsafe { - cast::transmute(&100) + static a: uint = 100; + cast::transmute(&a) }); // External functions diff --git a/src/test/run-pass/rtio-processes.rs b/src/test/run-pass/rtio-processes.rs index 14595f83ce506..f45889eeb03b6 100644 --- a/src/test/run-pass/rtio-processes.rs +++ b/src/test/run-pass/rtio-processes.rs @@ -23,8 +23,8 @@ // // See #9341 +use std::rt::io; use std::rt::io::process::{Process, ProcessConfig, CreatePipe, Ignored}; -use std::rt::io::{Reader, Writer}; use std::str; #[test] @@ -55,10 +55,10 @@ fn smoke_failure() { cwd: None, io: io, }; - let p = Process::new(args); - assert!(p.is_some()); - let mut p = p.unwrap(); - assert!(p.wait() != 0); + match io::result(|| Process::new(args)) { + Ok(*) => fail!(), + Err(*) => {} + } } #[test]