From 30c885ea52458b361bb8f215c17c384743e6851a Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Sun, 3 Nov 2013 10:39:39 -0800 Subject: [PATCH 01/27] uv: Remove lots of uv/C++ wrappers --- src/librustuv/addrinfo.rs | 14 +- src/librustuv/async.rs | 4 +- src/librustuv/file.rs | 111 +++++-- src/librustuv/idle.rs | 8 +- src/librustuv/lib.rs | 24 +- src/librustuv/net.rs | 28 +- src/librustuv/pipe.rs | 8 +- src/librustuv/process.rs | 4 +- src/librustuv/signal.rs | 8 +- src/librustuv/timer.rs | 6 +- src/librustuv/tty.rs | 10 +- src/librustuv/uvio.rs | 46 ++- src/librustuv/uvll.rs | 642 +++++++++----------------------------- src/rt/rust_uv.cpp | 352 --------------------- 14 files changed, 310 insertions(+), 955 deletions(-) diff --git a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs index 09736749997be..77e70acca8d5e 100644 --- a/src/librustuv/addrinfo.rs +++ b/src/librustuv/addrinfo.rs @@ -110,12 +110,12 @@ impl GetAddrInfoRequest { self.get_req_data().getaddrinfo_cb = Some(wrapper_cb); unsafe { - assert!(0 == uvll::getaddrinfo(loop_.native_handle(), - self.native_handle(), - getaddrinfo_cb, - c_node_ptr, - c_service_ptr, - hint_ptr)); + assert!(0 == uvll::uv_getaddrinfo(loop_.native_handle(), + self.native_handle(), + getaddrinfo_cb, + c_node_ptr, + c_service_ptr, + hint_ptr)); } extern "C" fn getaddrinfo_cb(req: *uvll::uv_getaddrinfo_t, @@ -127,7 +127,7 @@ impl GetAddrInfoRequest { let data = req.get_req_data(); (*data.getaddrinfo_cb.get_ref())(req, &addrinfo, err); unsafe { - uvll::freeaddrinfo(res); + uvll::uv_freeaddrinfo(res); } } } diff --git a/src/librustuv/async.rs b/src/librustuv/async.rs index 4a1858ee03672..79e57db1bf591 100644 --- a/src/librustuv/async.rs +++ b/src/librustuv/async.rs @@ -26,7 +26,7 @@ impl AsyncWatcher { watcher.install_watcher_data(); let data = watcher.get_watcher_data(); data.async_cb = Some(cb); - assert_eq!(0, uvll::async_init(loop_.native_handle(), handle, async_cb)); + assert_eq!(0, uvll::uv_async_init(loop_.native_handle(), handle, async_cb)); return watcher; } @@ -42,7 +42,7 @@ impl AsyncWatcher { pub fn send(&mut self) { unsafe { let handle = self.native_handle(); - uvll::async_send(handle); + uvll::uv_async_send(handle); } } } diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index 8c9302e123815..e3fe6c95bafe7 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -42,8 +42,9 @@ impl FsRequest { me.req_boilerplate(Some(cb)) }; let ret = path.with_ref(|p| unsafe { - uvll::fs_open(loop_.native_handle(), - self.native_handle(), p, flags, mode, complete_cb_ptr) + uvll::uv_fs_open(loop_.native_handle(), + self.native_handle(), p, flags as c_int, + mode as c_int, complete_cb_ptr) }); assert_eq!(ret, 0); } @@ -52,8 +53,9 @@ impl FsRequest { flags: int, mode: int) -> Result { let complete_cb_ptr = self.req_boilerplate(None); let result = path.with_ref(|p| unsafe { - uvll::fs_open(loop_.native_handle(), - self.native_handle(), p, flags, mode, complete_cb_ptr) + uvll::uv_fs_open(loop_.native_handle(), + self.native_handle(), p, flags as c_int, + mode as c_int, complete_cb_ptr) }); self.sync_cleanup(result) } @@ -61,8 +63,8 @@ impl FsRequest { pub fn unlink(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { let complete_cb_ptr = self.req_boilerplate(Some(cb)); let ret = path.with_ref(|p| unsafe { - uvll::fs_unlink(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) + uvll::uv_fs_unlink(loop_.native_handle(), + self.native_handle(), p, complete_cb_ptr) }); assert_eq!(ret, 0); } @@ -71,8 +73,8 @@ impl FsRequest { -> 
Result { let complete_cb_ptr = self.req_boilerplate(None); let result = path.with_ref(|p| unsafe { - uvll::fs_unlink(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) + uvll::uv_fs_unlink(loop_.native_handle(), + self.native_handle(), p, complete_cb_ptr) }); self.sync_cleanup(result) } @@ -89,8 +91,8 @@ impl FsRequest { pub fn stat(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { let complete_cb_ptr = self.req_boilerplate(Some(cb)); let ret = path.with_ref(|p| unsafe { - uvll::fs_stat(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) + uvll::uv_fs_stat(loop_.native_handle(), + self.native_handle(), p, complete_cb_ptr) }); assert_eq!(ret, 0); } @@ -101,9 +103,9 @@ impl FsRequest { let base_ptr = buf.base as *c_void; let len = buf.len as uint; let ret = unsafe { - uvll::fs_write(loop_.native_handle(), self.native_handle(), - fd, base_ptr, - len, offset, complete_cb_ptr) + uvll::uv_fs_write(loop_.native_handle(), self.native_handle(), + fd, base_ptr, + len as c_uint, offset, complete_cb_ptr) }; assert_eq!(ret, 0); } @@ -113,9 +115,9 @@ impl FsRequest { let base_ptr = buf.base as *c_void; let len = buf.len as uint; let result = unsafe { - uvll::fs_write(loop_.native_handle(), self.native_handle(), - fd, base_ptr, - len, offset, complete_cb_ptr) + uvll::uv_fs_write(loop_.native_handle(), self.native_handle(), + fd, base_ptr, + len as c_uint, offset, complete_cb_ptr) }; self.sync_cleanup(result) } @@ -126,9 +128,9 @@ impl FsRequest { let buf_ptr = buf.base as *c_void; let len = buf.len as uint; let ret = unsafe { - uvll::fs_read(loop_.native_handle(), self.native_handle(), - fd, buf_ptr, - len, offset, complete_cb_ptr) + uvll::uv_fs_read(loop_.native_handle(), self.native_handle(), + fd, buf_ptr, + len as c_uint, offset, complete_cb_ptr) }; assert_eq!(ret, 0); } @@ -138,30 +140,44 @@ impl FsRequest { let buf_ptr = buf.base as *c_void; let len = buf.len as uint; let result = unsafe { - uvll::fs_read(loop_.native_handle(), self.native_handle(), - fd, buf_ptr, - len, offset, complete_cb_ptr) + uvll::uv_fs_read(loop_.native_handle(), self.native_handle(), + fd, buf_ptr, + len as c_uint, offset, complete_cb_ptr) }; self.sync_cleanup(result) } +<<<<<<< HEAD pub fn close(mut self, loop_: &Loop, fd: c_int, cb: FsCallback) { let complete_cb_ptr = self.req_boilerplate(Some(cb)); assert_eq!(unsafe { uvll::fs_close(loop_.native_handle(), self.native_handle(), fd, complete_cb_ptr) }, 0); +======= + pub fn close(self, loop_: &Loop, fd: c_int, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = unsafe { + uvll::uv_fs_close(loop_.native_handle(), self.native_handle(), + fd, complete_cb_ptr) + }; + assert_eq!(ret, 0); +>>>>>>> 1850d26... 
Remove lots of uv/C++ wrappers } pub fn close_sync(mut self, loop_: &Loop, fd: c_int) -> Result { let complete_cb_ptr = self.req_boilerplate(None); let result = unsafe { - uvll::fs_close(loop_.native_handle(), self.native_handle(), - fd, complete_cb_ptr) + uvll::uv_fs_close(loop_.native_handle(), self.native_handle(), + fd, complete_cb_ptr) }; self.sync_cleanup(result) } +<<<<<<< HEAD pub fn mkdir(mut self, loop_: &Loop, path: &CString, mode: c_int, cb: FsCallback) { let complete_cb_ptr = self.req_boilerplate(Some(cb)); @@ -198,10 +214,36 @@ impl FsRequest { uvll::fs_chmod(loop_.native_handle(), self.native_handle(), p, mode, complete_cb_ptr) }), 0); +======= + pub fn mkdir(self, loop_: &Loop, path: &CString, mode: int, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = path.with_ref(|p| unsafe { + uvll::uv_fs_mkdir(loop_.native_handle(), + self.native_handle(), p, + mode as c_int, complete_cb_ptr) + }); + assert_eq!(ret, 0); + } + + pub fn rmdir(self, loop_: &Loop, path: &CString, cb: FsCallback) { + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = path.with_ref(|p| unsafe { + uvll::uv_fs_rmdir(loop_.native_handle(), + self.native_handle(), p, complete_cb_ptr) + }); + assert_eq!(ret, 0); +>>>>>>> 1850d26... Remove lots of uv/C++ wrappers } pub fn readdir(mut self, loop_: &Loop, path: &CString, flags: c_int, cb: FsCallback) { +<<<<<<< HEAD let complete_cb_ptr = self.req_boilerplate(Some(cb)); assert_eq!(path.with_ref(|p| unsafe { uvll::fs_readdir(loop_.native_handle(), @@ -276,6 +318,17 @@ impl FsRequest { uvll::uv_fs_fdatasync(loop_.native_handle(), self.native_handle(), fd, complete_cb_ptr) }, 0); +======= + let complete_cb_ptr = { + let mut me = self; + me.req_boilerplate(Some(cb)) + }; + let ret = path.with_ref(|p| unsafe { + uvll::uv_fs_readdir(loop_.native_handle(), + self.native_handle(), p, flags, complete_cb_ptr) + }); + assert_eq!(ret, 0); +>>>>>>> 1850d26... 
Remove lots of uv/C++ wrappers } // accessors/utility funcs @@ -287,12 +340,10 @@ impl FsRequest { None => Ok(result) } } - fn req_boilerplate(&mut self, cb: Option) -> *u8 { + fn req_boilerplate(&mut self, cb: Option) -> uvll::uv_fs_cb { let result = match cb { - Some(_) => { - compl_cb as *u8 - }, - None => 0 as *u8 + Some(_) => compl_cb, + None => 0 as uvll::uv_fs_cb }; self.install_req_data(cb); result @@ -365,7 +416,7 @@ impl FsRequest { let data = uvll::get_data_for_req(self.native_handle()); let _data = transmute::<*c_void, ~RequestData>(data); uvll::set_data_for_req(self.native_handle(), null::<()>()); - uvll::fs_req_cleanup(self.native_handle()); + uvll::uv_fs_req_cleanup(self.native_handle()); free_req(self.native_handle() as *c_void) } } diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index 4f606b5f01f8a..7c9b0ff461ccd 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -21,7 +21,7 @@ impl IdleWatcher { unsafe { let handle = uvll::malloc_handle(uvll::UV_IDLE); assert!(handle.is_not_null()); - assert_eq!(uvll::idle_init(loop_.native_handle(), handle), 0); + assert_eq!(uvll::uv_idle_init(loop_.native_handle(), handle), 0); let mut watcher: IdleWatcher = NativeHandle::from_native_handle(handle); watcher.install_watcher_data(); return watcher @@ -35,14 +35,14 @@ impl IdleWatcher { } unsafe { - assert_eq!(uvll::idle_start(self.native_handle(), idle_cb), 0) + assert_eq!(uvll::uv_idle_start(self.native_handle(), idle_cb), 0) } } pub fn restart(&mut self) { unsafe { assert!(self.get_watcher_data().idle_cb.is_some()); - assert_eq!(uvll::idle_start(self.native_handle(), idle_cb), 0) + assert_eq!(uvll::uv_idle_start(self.native_handle(), idle_cb), 0) } } @@ -52,7 +52,7 @@ impl IdleWatcher { // free unsafe { - assert_eq!(uvll::idle_stop(self.native_handle()), 0); + assert_eq!(uvll::uv_idle_stop(self.native_handle()), 0); } } } diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index f0a607ae35f1c..64aea4f01744d 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -49,7 +49,7 @@ use std::str::raw::from_c_str; use std::vec; use std::ptr; use std::str; -use std::libc::{c_void, c_int, size_t, malloc, free}; +use std::libc::{c_void, c_int, size_t, malloc, free, c_char, c_uint}; use std::cast::transmute; use std::ptr::null; use std::unstable::finally::Finally; @@ -127,11 +127,11 @@ impl Loop { } pub fn run(&mut self) { - unsafe { uvll::run(self.native_handle()) }; + unsafe { uvll::uv_run(self.native_handle(), uvll::RUN_DEFAULT) }; } pub fn close(&mut self) { - unsafe { uvll::loop_delete(self.native_handle()) }; + unsafe { uvll::uv_loop_delete(self.native_handle()) }; } } @@ -240,7 +240,9 @@ impl> WatcherInterop for W { data.close_cb = Some(cb); } - unsafe { uvll::close(self.native_handle(), close_cb); } + unsafe { + uvll::uv_close(self.native_handle() as *uvll::uv_handle_t, close_cb); + } extern fn close_cb(handle: *uvll::uv_handle_t) { let mut h: Handle = NativeHandle::from_native_handle(handle); @@ -251,7 +253,9 @@ impl> WatcherInterop for W { } fn close_async(self) { - unsafe { uvll::close(self.native_handle(), close_cb); } + unsafe { + uvll::uv_close(self.native_handle() as *uvll::uv_handle_t, close_cb); + } extern fn close_cb(handle: *uvll::uv_handle_t) { let mut h: Handle = NativeHandle::from_native_handle(handle); @@ -270,7 +274,7 @@ impl UvError { pub fn name(&self) -> ~str { unsafe { let inner = match self { &UvError(a) => a }; - let name_str = uvll::err_name(inner); + let name_str = uvll::uv_err_name(inner); assert!(name_str.is_not_null()); 
from_c_str(name_str) } @@ -279,7 +283,7 @@ impl UvError { pub fn desc(&self) -> ~str { unsafe { let inner = match self { &UvError(a) => a }; - let desc_str = uvll::strerror(inner); + let desc_str = uvll::uv_strerror(inner); assert!(desc_str.is_not_null()); from_c_str(desc_str) } @@ -309,7 +313,7 @@ pub fn uv_error_to_io_error(uverr: UvError) -> IoError { use std::rt::io::*; // uv error descriptions are static - let c_desc = uvll::strerror(*uverr); + let c_desc = uvll::uv_strerror(*uverr); let desc = str::raw::c_str_to_static_slice(c_desc); let kind = match *uverr { @@ -360,7 +364,7 @@ pub fn empty_buf() -> Buf { /// Borrow a slice to a Buf pub fn slice_to_uv_buf(v: &[u8]) -> Buf { let data = vec::raw::to_ptr(v); - unsafe { uvll::buf_init(data, v.len()) } + unsafe { uvll::uv_buf_init(data as *c_char, v.len() as c_uint) } } // XXX: Do these conversions without copying @@ -376,7 +380,7 @@ pub fn vec_to_uv_buf(v: ~[u8]) -> Buf { let data = data as *mut u8; ptr::copy_memory(data, b, l) } - uvll::buf_init(data, v.len()) + uvll::uv_buf_init(data as *c_char, v.len() as c_uint) } } diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index 0aaa931c9475e..e9f3f2bba4c5e 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::libc::{size_t, ssize_t, c_int, c_void, c_uint}; +use std::libc::{size_t, ssize_t, c_int, c_void, c_uint, c_char}; use std::vec; use std::str; use std::rt::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr}; @@ -70,8 +70,10 @@ fn uv_socket_addr_as_socket_addr(addr: UvSocketAddr, f: &fn(SocketAddr) -> T) unsafe { let buf_ptr = vec::raw::to_ptr(buf); match addr { - UvIpv4SocketAddr(addr) => uvll::ip4_name(addr, buf_ptr, ip_size as size_t), - UvIpv6SocketAddr(addr) => uvll::ip6_name(addr, buf_ptr, ip_size as size_t), + UvIpv4SocketAddr(addr) => + uvll::uv_ip4_name(addr, buf_ptr as *c_char, ip_size as size_t), + UvIpv6SocketAddr(addr) => + uvll::uv_ip6_name(addr, buf_ptr as *c_char, ip_size as size_t), } }; buf @@ -119,7 +121,7 @@ impl Watcher for StreamWatcher { } impl StreamWatcher { pub fn read_start(&mut self, alloc: AllocCallback, cb: ReadCallback) { unsafe { - match uvll::read_start(self.native_handle(), alloc_cb, read_cb) { + match uvll::uv_read_start(self.native_handle(), alloc_cb, read_cb) { 0 => { let data = self.get_watcher_data(); data.alloc_cb = Some(alloc); @@ -152,14 +154,14 @@ impl StreamWatcher { // but read_stop may be called from inside one of them and we // would end up freeing the in-use environment let handle = self.native_handle(); - unsafe { assert_eq!(uvll::read_stop(handle), 0); } + unsafe { assert_eq!(uvll::uv_read_stop(handle), 0); } } pub fn write(&mut self, buf: Buf, cb: ConnectionCallback) { let req = WriteRequest::new(); return unsafe { - match uvll::write(req.native_handle(), self.native_handle(), - [buf], write_cb) { + match uvll::uv_write(req.native_handle(), self.native_handle(), + [buf], write_cb) { 0 => { let data = self.get_watcher_data(); assert!(data.write_cb.is_none()); @@ -192,7 +194,7 @@ impl StreamWatcher { return unsafe { static BACKLOG: c_int = 128; // XXX should be configurable - match uvll::listen(self.native_handle(), BACKLOG, connection_cb) { + match uvll::uv_listen(self.native_handle(), BACKLOG, connection_cb) { 0 => Ok(()), n => Err(UvError(n)) } @@ -210,7 +212,7 @@ impl StreamWatcher { pub fn accept(&mut self, stream: StreamWatcher) { let self_handle = self.native_handle() as *c_void; let stream_handle 
= stream.native_handle() as *c_void; - assert_eq!(0, unsafe { uvll::accept(self_handle, stream_handle) } ); + assert_eq!(0, unsafe { uvll::uv_accept(self_handle, stream_handle) } ); } } @@ -231,7 +233,7 @@ impl TcpWatcher { unsafe { let handle = malloc_handle(UV_TCP); assert!(handle.is_not_null()); - assert_eq!(0, uvll::tcp_init(loop_.native_handle(), handle)); + assert_eq!(0, uvll::uv_tcp_init(loop_.native_handle(), handle)); let mut watcher: TcpWatcher = NativeHandle::from_native_handle(handle); watcher.install_watcher_data(); return watcher; @@ -304,7 +306,7 @@ impl UdpWatcher { unsafe { let handle = malloc_handle(UV_UDP); assert!(handle.is_not_null()); - assert_eq!(0, uvll::udp_init(loop_.native_handle(), handle)); + assert_eq!(0, uvll::uv_udp_init(loop_.native_handle(), handle)); let mut watcher: UdpWatcher = NativeHandle::from_native_handle(handle); watcher.install_watcher_data(); return watcher; @@ -333,7 +335,7 @@ impl UdpWatcher { data.udp_recv_cb = Some(cb); } - unsafe { uvll::udp_recv_start(self.native_handle(), alloc_cb, recv_cb); } + unsafe { uvll::uv_udp_recv_start(self.native_handle(), alloc_cb, recv_cb); } extern fn alloc_cb(handle: *uvll::uv_udp_t, suggested_size: size_t) -> Buf { let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); @@ -361,7 +363,7 @@ impl UdpWatcher { } pub fn recv_stop(&mut self) { - unsafe { uvll::udp_recv_stop(self.native_handle()); } + unsafe { uvll::uv_udp_recv_stop(self.native_handle()); } } pub fn send(&mut self, buf: Buf, address: SocketAddr, cb: UdpSendCallback) { diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index b453da0cc9ea2..0b65c55636d40 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -26,7 +26,7 @@ impl Pipe { let handle = uvll::malloc_handle(uvll::UV_NAMED_PIPE); assert!(handle.is_not_null()); let ipc = ipc as libc::c_int; - assert_eq!(uvll::pipe_init(loop_.native_handle(), handle, ipc), 0); + assert_eq!(uvll::uv_pipe_init(loop_.native_handle(), handle, ipc), 0); let mut ret: Pipe = NativeHandle::from_native_handle(handle); ret.install_watcher_data(); @@ -40,7 +40,7 @@ impl Pipe { #[fixed_stack_segment] #[inline(never)] pub fn open(&mut self, file: libc::c_int) -> Result<(), UvError> { - match unsafe { uvll::pipe_open(self.native_handle(), file) } { + match unsafe { uvll::uv_pipe_open(self.native_handle(), file) } { 0 => Ok(()), n => Err(UvError(n)) } @@ -49,7 +49,7 @@ impl Pipe { #[fixed_stack_segment] #[inline(never)] pub fn bind(&mut self, name: &CString) -> Result<(), UvError> { do name.with_ref |name| { - match unsafe { uvll::pipe_bind(self.native_handle(), name) } { + match unsafe { uvll::uv_pipe_bind(self.native_handle(), name) } { 0 => Ok(()), n => Err(UvError(n)) } @@ -68,7 +68,7 @@ impl Pipe { let name = do name.with_ref |p| { p }; unsafe { - uvll::pipe_connect(connect.native_handle(), + uvll::uv_pipe_connect(connect.native_handle(), self.native_handle(), name, connect_cb) diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 2d746e329f44a..ce281b656d39f 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -94,7 +94,7 @@ impl Process { }; match unsafe { - uvll::spawn(loop_.native_handle(), **self, options) + uvll::uv_spawn(loop_.native_handle(), **self, options) } { 0 => { (*self).get_watcher_data().exit_cb = Some(exit_cb.take()); @@ -111,7 +111,7 @@ impl Process { /// This is a wrapper around `uv_process_kill` pub fn kill(&self, signum: int) -> Result<(), UvError> { match unsafe { - uvll::process_kill(self.native_handle(), signum as 
libc::c_int) + uvll::uv_process_kill(self.native_handle(), signum as libc::c_int) } { 0 => Ok(()), err => Err(UvError(err)) diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index 3fcf449959dba..d5774b5aaab35 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -24,7 +24,7 @@ impl SignalWatcher { unsafe { let handle = uvll::malloc_handle(uvll::UV_SIGNAL); assert!(handle.is_not_null()); - assert!(0 == uvll::signal_init(loop_.native_handle(), handle)); + assert!(0 == uvll::uv_signal_init(loop_.native_handle(), handle)); let mut watcher: SignalWatcher = NativeHandle::from_native_handle(handle); watcher.install_watcher_data(); return watcher; @@ -35,8 +35,8 @@ impl SignalWatcher { -> Result<(), UvError> { return unsafe { - match uvll::signal_start(self.native_handle(), signal_cb, - signum as c_int) { + match uvll::uv_signal_start(self.native_handle(), signal_cb, + signum as c_int) { 0 => { let data = self.get_watcher_data(); data.signal_cb = Some(callback); @@ -56,7 +56,7 @@ impl SignalWatcher { pub fn stop(&mut self) { unsafe { - uvll::signal_stop(self.native_handle()); + uvll::uv_signal_stop(self.native_handle()); } } } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 9a693f6a27d35..4fc4934bf650a 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -21,7 +21,7 @@ impl TimerWatcher { unsafe { let handle = uvll::malloc_handle(uvll::UV_TIMER); assert!(handle.is_not_null()); - assert!(0 == uvll::timer_init(loop_.native_handle(), handle)); + assert!(0 == uvll::uv_timer_init(loop_.native_handle(), handle)); let mut watcher: TimerWatcher = NativeHandle::from_native_handle(handle); watcher.install_watcher_data(); return watcher; @@ -35,7 +35,7 @@ impl TimerWatcher { } unsafe { - uvll::timer_start(self.native_handle(), timer_cb, timeout, repeat); + uvll::uv_timer_start(self.native_handle(), timer_cb, timeout, repeat); } extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) { @@ -49,7 +49,7 @@ impl TimerWatcher { pub fn stop(&mut self) { unsafe { - uvll::timer_stop(self.native_handle()); + uvll::uv_timer_stop(self.native_handle()); } } } diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index 65ba09376c14d..ad5f5043737f2 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -28,8 +28,8 @@ impl TTY { assert!(handle.is_not_null()); let ret = unsafe { - uvll::tty_init(loop_.native_handle(), handle, fd as libc::c_int, - readable as libc::c_int) + uvll::uv_tty_init(loop_.native_handle(), handle, fd as libc::c_int, + readable as libc::c_int) }; match ret { 0 => { @@ -51,7 +51,7 @@ impl TTY { #[fixed_stack_segment] #[inline(never)] pub fn set_mode(&self, raw: bool) -> Result<(), UvError> { let raw = raw as libc::c_int; - match unsafe { uvll::tty_set_mode(self.native_handle(), raw) } { + match unsafe { uvll::uv_tty_set_mode(self.native_handle(), raw) } { 0 => Ok(()), n => Err(UvError(n)) } @@ -64,8 +64,8 @@ impl TTY { let widthptr: *libc::c_int = &width; let heightptr: *libc::c_int = &width; - match unsafe { uvll::tty_get_winsize(self.native_handle(), - widthptr, heightptr) } { + match unsafe { uvll::uv_tty_get_winsize(self.native_handle(), + widthptr, heightptr) } { 0 => Ok((width as int, height as int)), n => Err(UvError(n)) } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 3f119bc8ccbf0..b4382ab4cee25 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -990,7 +990,7 @@ impl RtioSocket for UvTcpAcceptor { fn accept_simultaneously(stream: StreamWatcher, a: int) -> Result<(), IoError> { let r 
= unsafe { - uvll::tcp_simultaneous_accepts(stream.native_handle(), a as c_int) + uvll::uv_tcp_simultaneous_accepts(stream.native_handle(), a as c_int) }; match status_to_maybe_uv_error(r) { @@ -1194,7 +1194,9 @@ impl RtioTcpStream for UvTcpStream { fn control_congestion(&mut self) -> Result<(), IoError> { do self.home_for_io |self_| { - let r = unsafe { uvll::tcp_nodelay(self_.watcher.native_handle(), 0 as c_int) }; + let r = unsafe { + uvll::uv_tcp_nodelay(self_.watcher.native_handle(), 0 as c_int) + }; match status_to_maybe_uv_error(r) { Some(err) => Err(uv_error_to_io_error(err)), @@ -1205,7 +1207,9 @@ impl RtioTcpStream for UvTcpStream { fn nodelay(&mut self) -> Result<(), IoError> { do self.home_for_io |self_| { - let r = unsafe { uvll::tcp_nodelay(self_.watcher.native_handle(), 1 as c_int) }; + let r = unsafe { + uvll::uv_tcp_nodelay(self_.watcher.native_handle(), 1 as c_int) + }; match status_to_maybe_uv_error(r) { Some(err) => Err(uv_error_to_io_error(err)), @@ -1217,8 +1221,8 @@ impl RtioTcpStream for UvTcpStream { fn keepalive(&mut self, delay_in_seconds: uint) -> Result<(), IoError> { do self.home_for_io |self_| { let r = unsafe { - uvll::tcp_keepalive(self_.watcher.native_handle(), 1 as c_int, - delay_in_seconds as c_uint) + uvll::uv_tcp_keepalive(self_.watcher.native_handle(), 1 as c_int, + delay_in_seconds as c_uint) }; match status_to_maybe_uv_error(r) { @@ -1231,7 +1235,8 @@ impl RtioTcpStream for UvTcpStream { fn letdie(&mut self) -> Result<(), IoError> { do self.home_for_io |self_| { let r = unsafe { - uvll::tcp_keepalive(self_.watcher.native_handle(), 0 as c_int, 0 as c_uint) + uvll::uv_tcp_keepalive(self_.watcher.native_handle(), + 0 as c_int, 0 as c_uint) }; match status_to_maybe_uv_error(r) { @@ -1338,8 +1343,9 @@ impl RtioUdpSocket for UvUdpSocket { do self.home_for_io |self_| { let r = unsafe { do multi.to_str().with_c_str |m_addr| { - uvll::udp_set_membership(self_.watcher.native_handle(), m_addr, - ptr::null(), uvll::UV_JOIN_GROUP) + uvll::uv_udp_set_membership(self_.watcher.native_handle(), + m_addr, ptr::null(), + uvll::UV_JOIN_GROUP) } }; @@ -1354,8 +1360,9 @@ impl RtioUdpSocket for UvUdpSocket { do self.home_for_io |self_| { let r = unsafe { do multi.to_str().with_c_str |m_addr| { - uvll::udp_set_membership(self_.watcher.native_handle(), m_addr, - ptr::null(), uvll::UV_LEAVE_GROUP) + uvll::uv_udp_set_membership(self_.watcher.native_handle(), + m_addr, ptr::null(), + uvll::UV_LEAVE_GROUP) } }; @@ -1370,7 +1377,8 @@ impl RtioUdpSocket for UvUdpSocket { do self.home_for_io |self_| { let r = unsafe { - uvll::udp_set_multicast_loop(self_.watcher.native_handle(), 1 as c_int) + uvll::uv_udp_set_multicast_loop(self_.watcher.native_handle(), + 1 as c_int) }; match status_to_maybe_uv_error(r) { @@ -1384,7 +1392,8 @@ impl RtioUdpSocket for UvUdpSocket { do self.home_for_io |self_| { let r = unsafe { - uvll::udp_set_multicast_loop(self_.watcher.native_handle(), 0 as c_int) + uvll::uv_udp_set_multicast_loop(self_.watcher.native_handle(), + 0 as c_int) }; match status_to_maybe_uv_error(r) { @@ -1398,7 +1407,8 @@ impl RtioUdpSocket for UvUdpSocket { do self.home_for_io |self_| { let r = unsafe { - uvll::udp_set_multicast_ttl(self_.watcher.native_handle(), ttl as c_int) + uvll::uv_udp_set_multicast_ttl(self_.watcher.native_handle(), + ttl as c_int) }; match status_to_maybe_uv_error(r) { @@ -1412,7 +1422,7 @@ impl RtioUdpSocket for UvUdpSocket { do self.home_for_io |self_| { let r = unsafe { - uvll::udp_set_ttl(self_.watcher.native_handle(), ttl as c_int) + 
uvll::uv_udp_set_ttl(self_.watcher.native_handle(), ttl as c_int) }; match status_to_maybe_uv_error(r) { @@ -1426,7 +1436,8 @@ impl RtioUdpSocket for UvUdpSocket { do self.home_for_io |self_| { let r = unsafe { - uvll::udp_set_broadcast(self_.watcher.native_handle(), 1 as c_int) + uvll::uv_udp_set_broadcast(self_.watcher.native_handle(), + 1 as c_int) }; match status_to_maybe_uv_error(r) { @@ -1440,7 +1451,8 @@ impl RtioUdpSocket for UvUdpSocket { do self.home_for_io |self_| { let r = unsafe { - uvll::udp_set_broadcast(self_.watcher.native_handle(), 0 as c_int) + uvll::uv_udp_set_broadcast(self_.watcher.native_handle(), + 0 as c_int) }; match status_to_maybe_uv_error(r) { @@ -1861,7 +1873,7 @@ impl RtioTTY for UvTTY { } fn isatty(&self) -> bool { - unsafe { uvll::guess_handle(self.fd) == uvll::UV_TTY as c_int } + unsafe { uvll::uv_guess_handle(self.fd) == uvll::UV_TTY } } } diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 2d850383766f5..120a69fb24498 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -33,7 +33,6 @@ use std::libc::{size_t, c_int, c_uint, c_void, c_char, uintptr_t}; use std::libc::ssize_t; use std::libc::{malloc, free}; use std::libc; -use std::ptr; use std::vec; pub use self::errors::*; @@ -95,6 +94,13 @@ pub struct uv_buf_t { base: *u8, } +#[repr(C)] +pub enum uv_run_mode { + RUN_DEFAULT = 0, + RUN_ONCE, + RUN_NOWAIT, +} + pub struct uv_process_options_t { exit_cb: uv_exit_cb, file: *libc::c_char, @@ -276,6 +282,7 @@ pub struct addrinfo { #[cfg(windows)] pub type uv_uid_t = libc::c_uchar; #[cfg(windows)] pub type uv_gid_t = libc::c_uchar; +#[repr(C)] #[deriving(Eq)] pub enum uv_handle_type { UV_UNKNOWN_HANDLE, @@ -299,6 +306,7 @@ pub enum uv_handle_type { UV_HANDLE_TYPE_MAX } +#[repr(C)] #[cfg(unix)] #[deriving(Eq)] pub enum uv_req_type { @@ -316,6 +324,7 @@ pub enum uv_req_type { // uv_req_type may have additional fields defined by UV_REQ_TYPE_PRIVATE. 
// See UV_REQ_TYPE_PRIVATE at libuv/include/uv-win.h +#[repr(C)] #[cfg(windows)] #[deriving(Eq)] pub enum uv_req_type { @@ -339,6 +348,7 @@ pub enum uv_req_type { UV_REQ_TYPE_MAX } +#[repr(C)] #[deriving(Eq)] pub enum uv_membership { UV_LEAVE_GROUP, @@ -349,7 +359,7 @@ pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void { #[fixed_stack_segment]; #[inline(never)]; assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX); - let size = rust_uv_handle_size(handle as uint); + let size = uv_handle_size(handle); let p = malloc(size); assert!(p.is_not_null()); return p; @@ -365,7 +375,7 @@ pub unsafe fn malloc_req(req: uv_req_type) -> *c_void { #[fixed_stack_segment]; #[inline(never)]; assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX); - let size = rust_uv_req_size(req as uint); + let size = uv_req_size(req); let p = malloc(size); assert!(p.is_not_null()); return p; @@ -400,54 +410,6 @@ pub unsafe fn loop_new() -> *c_void { return rust_uv_loop_new(); } -pub unsafe fn loop_delete(loop_handle: *c_void) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_loop_delete(loop_handle); -} - -pub unsafe fn run(loop_handle: *c_void) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_run(loop_handle); -} - -pub unsafe fn close(handle: *T, cb: uv_close_cb) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_close(handle as *c_void, cb); -} - -pub unsafe fn walk(loop_handle: *c_void, cb: uv_walk_cb, arg: *c_void) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_walk(loop_handle, cb, arg); -} - -pub unsafe fn idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_idle_init(loop_handle, handle) -} - -pub unsafe fn idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_idle_start(handle, cb) -} - -pub unsafe fn idle_stop(handle: *uv_idle_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_idle_stop(handle) -} - -pub unsafe fn udp_init(loop_handle: *uv_loop_t, handle: *uv_udp_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_init(loop_handle, handle); -} - pub unsafe fn udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int { #[fixed_stack_segment]; #[inline(never)]; @@ -478,19 +440,6 @@ pub unsafe fn udp_send6(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], return rust_uv_udp_send6(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); } -pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: uv_alloc_cb, - on_recv: uv_udp_recv_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_recv_start(server, on_alloc, on_recv); -} - -pub unsafe fn udp_recv_stop(server: *uv_udp_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_recv_stop(server); -} - pub unsafe fn get_udp_handle_from_send_req(send_req: *uv_udp_send_t) -> *uv_udp_t { #[fixed_stack_segment]; #[inline(never)]; @@ -503,43 +452,6 @@ pub unsafe fn udp_getsockname(handle: *uv_udp_t, name: *sockaddr_storage) -> c_i return rust_uv_udp_getsockname(handle, name); } -pub unsafe fn udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, - interface_addr: *c_char, membership: uv_membership) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_set_membership(handle, multicast_addr, interface_addr, membership as c_int); -} - -pub unsafe fn udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int { - #[fixed_stack_segment]; 
#[inline(never)]; - - return rust_uv_udp_set_multicast_loop(handle, on); -} - -pub unsafe fn udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_set_multicast_ttl(handle, ttl); -} - -pub unsafe fn udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_set_ttl(handle, ttl); -} - -pub unsafe fn udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_set_broadcast(handle, on); -} - -pub unsafe fn tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_init(loop_handle, handle); -} - pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in, after_connect_cb: uv_connect_cb) -> c_int { #[fixed_stack_segment]; #[inline(never)]; @@ -578,108 +490,17 @@ pub unsafe fn tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_storage) -> c_i return rust_uv_tcp_getsockname(handle, name); } -pub unsafe fn tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_nodelay(handle, enable); -} - -pub unsafe fn tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_keepalive(handle, enable, delay); -} - -pub unsafe fn tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_simultaneous_accepts(handle, enable); -} - -pub unsafe fn listen(stream: *T, backlog: c_int, - cb: uv_connection_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_listen(stream as *c_void, backlog, cb); -} - -pub unsafe fn accept(server: *c_void, client: *c_void) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_accept(server as *c_void, client as *c_void); -} - -pub unsafe fn write(req: *uv_write_t, - stream: *T, +pub unsafe fn uv_write(req: *uv_write_t, + stream: *uv_stream_t, buf_in: &[uv_buf_t], cb: uv_write_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; + externfn!(fn uv_write(req: *uv_write_t, stream: *uv_stream_t, + buf_in: *uv_buf_t, buf_cnt: c_int, + cb: uv_write_cb) -> c_int) let buf_ptr = vec::raw::to_ptr(buf_in); let buf_cnt = buf_in.len() as i32; - return rust_uv_write(req as *c_void, stream as *c_void, buf_ptr, buf_cnt, cb); -} -pub unsafe fn read_start(stream: *uv_stream_t, - on_alloc: uv_alloc_cb, - on_read: uv_read_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_read_start(stream as *c_void, on_alloc, on_read); -} - -pub unsafe fn read_stop(stream: *uv_stream_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_read_stop(stream as *c_void); -} - -pub unsafe fn strerror(err: c_int) -> *c_char { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_strerror(err); -} -pub unsafe fn err_name(err: c_int) -> *c_char { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_err_name(err); -} - -pub unsafe fn async_init(loop_handle: *c_void, - async_handle: *uv_async_t, - cb: uv_async_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_async_init(loop_handle, async_handle, cb); -} - -pub unsafe fn async_send(async_handle: *uv_async_t) { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_async_send(async_handle); -} 
-pub unsafe fn buf_init(input: *u8, len: uint) -> uv_buf_t { - #[fixed_stack_segment]; #[inline(never)]; - - let out_buf = uv_buf_t { base: ptr::null(), len: 0 as size_t }; - let out_buf_ptr = ptr::to_unsafe_ptr(&out_buf); - rust_uv_buf_init(out_buf_ptr, input, len as size_t); - return out_buf; -} - -pub unsafe fn timer_init(loop_ptr: *c_void, timer_ptr: *uv_timer_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_timer_init(loop_ptr, timer_ptr); -} -pub unsafe fn timer_start(timer_ptr: *uv_timer_t, - cb: uv_timer_cb, timeout: u64, - repeat: u64) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_timer_start(timer_ptr, cb, timeout, repeat); -} -pub unsafe fn timer_stop(timer_ptr: *uv_timer_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_timer_stop(timer_ptr); + return uv_write(req, stream, buf_ptr, buf_cnt, cb); } pub unsafe fn is_ip4_addr(addr: *sockaddr) -> bool { @@ -731,18 +552,6 @@ pub unsafe fn free_ip6_addr(addr: *sockaddr_in6) { rust_uv_free_ip6_addr(addr); } -pub unsafe fn ip4_name(addr: *sockaddr_in, dst: *u8, size: size_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_ip4_name(addr, dst, size); -} - -pub unsafe fn ip6_name(addr: *sockaddr_in6, dst: *u8, size: size_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_ip6_name(addr, dst, size); -} - pub unsafe fn ip4_port(addr: *sockaddr_in) -> c_uint { #[fixed_stack_segment]; #[inline(never)]; @@ -755,99 +564,6 @@ pub unsafe fn ip6_port(addr: *sockaddr_in6) -> c_uint { return rust_uv_ip6_port(addr); } -pub unsafe fn fs_open(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, flags: int, mode: int, - cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_open(loop_ptr, req, path, flags as c_int, mode as c_int, cb) -} - -pub unsafe fn fs_unlink(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_unlink(loop_ptr, req, path, cb) -} -pub unsafe fn fs_write(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, - len: uint, offset: i64, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_write(loop_ptr, req, fd, buf, len as c_uint, offset, cb) -} -pub unsafe fn fs_read(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, - len: uint, offset: i64, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_read(loop_ptr, req, fd, buf, len as c_uint, offset, cb) -} -pub unsafe fn fs_close(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, - cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_close(loop_ptr, req, fd, cb) -} -pub unsafe fn fs_stat(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_stat(loop_ptr, req, path, cb) -} -pub unsafe fn fs_fstat(loop_ptr: *uv_loop_t, req: *uv_fs_t, fd: c_int, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_fstat(loop_ptr, req, fd, cb) -} -pub unsafe fn fs_mkdir(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - mode: c_int, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_mkdir(loop_ptr, req, path, mode as c_int, cb) -} -pub unsafe fn fs_rmdir(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_rmdir(loop_ptr, req, path, cb) -} -pub unsafe fn fs_rename(loop_ptr: 
*uv_loop_t, req: *uv_fs_t, path: *c_char, - to: *c_char, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_rename(loop_ptr, req, path, to, cb) -} -pub unsafe fn fs_chmod(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - mode: c_int, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_chmod(loop_ptr, req, path, mode as c_int, cb) -} -pub unsafe fn fs_readdir(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, - flags: c_int, cb: *u8) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_readdir(loop_ptr, req, path, flags, cb) -} -pub unsafe fn populate_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_populate_uv_stat(req_in, stat_out) -} -pub unsafe fn fs_req_cleanup(req: *uv_fs_t) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_fs_req_cleanup(req); -} - -pub unsafe fn spawn(loop_ptr: *c_void, result: *uv_process_t, - options: uv_process_options_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_spawn(loop_ptr, result, options); -} - -pub unsafe fn process_kill(p: *uv_process_t, signum: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_process_kill(p, signum); -} - pub unsafe fn process_pid(p: *uv_process_t) -> c_int { #[fixed_stack_segment]; #[inline(never)]; return rust_uv_process_pid(p); @@ -871,11 +587,6 @@ pub unsafe fn set_stdio_container_stream(c: *uv_stdio_container_t, rust_set_stdio_container_stream(c, stream); } -pub unsafe fn pipe_init(loop_ptr: *c_void, p: *uv_pipe_t, ipc: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_pipe_init(loop_ptr, p, ipc) -} - // data access helpers pub unsafe fn get_result_from_fs_req(req: *uv_fs_t) -> c_int { #[fixed_stack_segment]; #[inline(never)]; @@ -947,114 +658,24 @@ pub unsafe fn set_data_for_req(req: *T, data: *U) { rust_uv_set_data_for_req(req as *c_void, data as *c_void); } -pub unsafe fn get_base_from_buf(buf: uv_buf_t) -> *u8 { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_get_base_from_buf(buf); -} -pub unsafe fn get_len_from_buf(buf: uv_buf_t) -> size_t { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_get_len_from_buf(buf); -} -pub unsafe fn getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t, - getaddrinfo_cb: uv_getaddrinfo_cb, - node: *c_char, service: *c_char, - hints: *addrinfo) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_getaddrinfo(loop_, req, getaddrinfo_cb, node, service, hints); -} -pub unsafe fn freeaddrinfo(ai: *addrinfo) { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_freeaddrinfo(ai); -} -pub unsafe fn pipe_open(pipe: *uv_pipe_t, file: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_pipe_open(pipe, file) -} -pub unsafe fn pipe_bind(pipe: *uv_pipe_t, name: *c_char) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_pipe_bind(pipe, name) -} -pub unsafe fn pipe_connect(req: *uv_connect_t, handle: *uv_pipe_t, - name: *c_char, cb: uv_connect_cb) { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_pipe_connect(req, handle, name, cb) -} -pub unsafe fn tty_init(loop_ptr: *uv_loop_t, tty: *uv_tty_t, fd: c_int, - readable: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_tty_init(loop_ptr, tty, fd, readable) -} -pub unsafe fn tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_tty_set_mode(tty, mode) -} -pub unsafe fn 
tty_get_winsize(tty: *uv_tty_t, width: *c_int, - height: *c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - rust_uv_tty_get_winsize(tty, width, height) -} -// FIXME(#9613) this should return uv_handle_type, not a c_int -pub unsafe fn guess_handle(fd: c_int) -> c_int { +pub unsafe fn populate_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t) { #[fixed_stack_segment]; #[inline(never)]; - rust_uv_guess_handle(fd) -} -pub unsafe fn signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_signal_init(loop_, handle); -} -pub unsafe fn signal_start(handle: *uv_signal_t, - signal_cb: uv_signal_cb, - signum: c_int) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_signal_start(handle, signal_cb, signum); -} -pub unsafe fn signal_stop(handle: *uv_signal_t) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - return rust_uv_signal_stop(handle); + rust_uv_populate_uv_stat(req_in, stat_out) } -pub struct uv_err_data { - err_name: ~str, - err_msg: ~str, -} // uv_support is the result of compiling rust_uv.cpp #[link_args = "-luv_support -luv"] extern { + fn rust_uv_loop_new() -> *c_void; - fn rust_uv_handle_size(type_: uintptr_t) -> size_t; - fn rust_uv_req_size(type_: uintptr_t) -> size_t; fn rust_uv_handle_type_max() -> uintptr_t; fn rust_uv_req_type_max() -> uintptr_t; - - // libuv public API - fn rust_uv_loop_new() -> *c_void; - fn rust_uv_loop_delete(lp: *c_void); - fn rust_uv_run(loop_handle: *c_void); - fn rust_uv_close(handle: *c_void, cb: uv_close_cb); - fn rust_uv_walk(loop_handle: *c_void, cb: uv_walk_cb, arg: *c_void); - - fn rust_uv_idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int; - fn rust_uv_idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int; - fn rust_uv_idle_stop(handle: *uv_idle_t) -> c_int; - - fn rust_uv_async_send(handle: *uv_async_t); - fn rust_uv_async_init(loop_handle: *c_void, - async_handle: *uv_async_t, - cb: uv_async_cb) -> c_int; - fn rust_uv_tcp_init(loop_handle: *c_void, handle_ptr: *uv_tcp_t) -> c_int; - fn rust_uv_buf_init(out_buf: *uv_buf_t, base: *u8, len: size_t); - fn rust_uv_strerror(err: c_int) -> *c_char; - fn rust_uv_err_name(err: c_int) -> *c_char; fn rust_uv_ip4_addrp(ip: *u8, port: c_int) -> *sockaddr_in; fn rust_uv_ip6_addrp(ip: *u8, port: c_int) -> *sockaddr_in6; fn rust_uv_free_ip4_addr(addr: *sockaddr_in); fn rust_uv_free_ip6_addr(addr: *sockaddr_in6); - fn rust_uv_ip4_name(src: *sockaddr_in, dst: *u8, size: size_t) -> c_int; - fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: size_t) -> c_int; fn rust_uv_ip4_port(src: *sockaddr_in) -> c_uint; fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint; fn rust_uv_tcp_connect(req: *uv_connect_t, handle: *uv_tcp_t, @@ -1067,80 +688,26 @@ extern { fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int; fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_storage) -> c_int; fn rust_uv_tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_storage) -> c_int; - fn rust_uv_tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int; - fn rust_uv_tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int; - fn rust_uv_tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int; - - fn rust_uv_udp_init(loop_handle: *uv_loop_t, handle_ptr: *uv_udp_t) -> c_int; fn rust_uv_udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int; fn rust_uv_udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int; fn rust_uv_udp_send(req: 
*uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, buf_cnt: c_int, addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int; fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, buf_cnt: c_int, addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int; - fn rust_uv_udp_recv_start(server: *uv_udp_t, - on_alloc: uv_alloc_cb, - on_recv: uv_udp_recv_cb) -> c_int; - fn rust_uv_udp_recv_stop(server: *uv_udp_t) -> c_int; fn rust_uv_get_udp_handle_from_send_req(req: *uv_udp_send_t) -> *uv_udp_t; fn rust_uv_udp_getsockname(handle: *uv_udp_t, name: *sockaddr_storage) -> c_int; - fn rust_uv_udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, - interface_addr: *c_char, membership: c_int) -> c_int; - fn rust_uv_udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int; - fn rust_uv_udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int; - fn rust_uv_udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int; - fn rust_uv_udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int; - fn rust_uv_is_ipv4_sockaddr(addr: *sockaddr) -> c_int; fn rust_uv_is_ipv6_sockaddr(addr: *sockaddr) -> c_int; fn rust_uv_malloc_sockaddr_storage() -> *sockaddr_storage; fn rust_uv_free_sockaddr_storage(ss: *sockaddr_storage); - - fn rust_uv_listen(stream: *c_void, backlog: c_int, - cb: uv_connection_cb) -> c_int; - fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int; - fn rust_uv_write(req: *c_void, stream: *c_void, buf_in: *uv_buf_t, buf_cnt: c_int, - cb: uv_write_cb) -> c_int; - fn rust_uv_read_start(stream: *c_void, - on_alloc: uv_alloc_cb, - on_read: uv_read_cb) -> c_int; - fn rust_uv_read_stop(stream: *c_void) -> c_int; - fn rust_uv_timer_init(loop_handle: *c_void, timer_handle: *uv_timer_t) -> c_int; - fn rust_uv_timer_start(timer_handle: *uv_timer_t, cb: uv_timer_cb, timeout: libc::uint64_t, - repeat: libc::uint64_t) -> c_int; - fn rust_uv_timer_stop(handle: *uv_timer_t) -> c_int; - fn rust_uv_fs_open(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - flags: c_int, mode: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_unlink(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - cb: *u8) -> c_int; - fn rust_uv_fs_write(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, - buf: *c_void, len: c_uint, offset: i64, cb: *u8) -> c_int; - fn rust_uv_fs_read(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, - buf: *c_void, len: c_uint, offset: i64, cb: *u8) -> c_int; - fn rust_uv_fs_close(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, - cb: *u8) -> c_int; - fn rust_uv_fs_stat(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, cb: *u8) -> c_int; - fn rust_uv_fs_fstat(loop_ptr: *c_void, req: *uv_fs_t, fd: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_mkdir(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - mode: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_rmdir(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - cb: *u8) -> c_int; - fn rust_uv_fs_rename(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - to: *c_char, cb: *u8) -> c_int; - fn rust_uv_fs_chmod(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - mode: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_readdir(loop_ptr: *c_void, req: *uv_fs_t, path: *c_char, - flags: c_int, cb: *u8) -> c_int; - fn rust_uv_fs_req_cleanup(req: *uv_fs_t); fn rust_uv_populate_uv_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t); fn rust_uv_get_result_from_fs_req(req: *uv_fs_t) -> c_int; fn rust_uv_get_ptr_from_fs_req(req: *uv_fs_t) -> *libc::c_void; fn rust_uv_get_path_from_fs_req(req: *uv_fs_t) -> *c_char; fn rust_uv_get_loop_from_fs_req(req: *uv_fs_t) -> *uv_loop_t; fn 
rust_uv_get_loop_from_getaddrinfo_req(req: *uv_fs_t) -> *uv_loop_t; - - fn rust_uv_get_stream_handle_from_connect_req(connect_req: *uv_connect_t) -> *uv_stream_t; - fn rust_uv_get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t; + fn rust_uv_get_stream_handle_from_connect_req(req: *uv_connect_t) -> *uv_stream_t; + fn rust_uv_get_stream_handle_from_write_req(req: *uv_write_t) -> *uv_stream_t; fn rust_uv_get_loop_for_uv_handle(handle: *c_void) -> *c_void; fn rust_uv_get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void; fn rust_uv_set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void); @@ -1148,56 +715,94 @@ extern { fn rust_uv_set_data_for_uv_handle(handle: *c_void, data: *c_void); fn rust_uv_get_data_for_req(req: *c_void) -> *c_void; fn rust_uv_set_data_for_req(req: *c_void, data: *c_void); - fn rust_uv_get_base_from_buf(buf: uv_buf_t) -> *u8; - fn rust_uv_get_len_from_buf(buf: uv_buf_t) -> size_t; - fn rust_uv_getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t, - getaddrinfo_cb: uv_getaddrinfo_cb, - node: *c_char, service: *c_char, - hints: *addrinfo) -> c_int; - fn rust_uv_freeaddrinfo(ai: *addrinfo); - fn rust_uv_spawn(loop_ptr: *c_void, outptr: *uv_process_t, - options: uv_process_options_t) -> c_int; - fn rust_uv_process_kill(p: *uv_process_t, signum: c_int) -> c_int; - fn rust_uv_process_pid(p: *uv_process_t) -> c_int; fn rust_set_stdio_container_flags(c: *uv_stdio_container_t, flags: c_int); fn rust_set_stdio_container_fd(c: *uv_stdio_container_t, fd: c_int); fn rust_set_stdio_container_stream(c: *uv_stdio_container_t, stream: *uv_stream_t); - fn rust_uv_pipe_init(loop_ptr: *c_void, p: *uv_pipe_t, ipc: c_int) -> c_int; - - fn rust_uv_pipe_open(pipe: *uv_pipe_t, file: c_int) -> c_int; - fn rust_uv_pipe_bind(pipe: *uv_pipe_t, name: *c_char) -> c_int; - fn rust_uv_pipe_connect(req: *uv_connect_t, handle: *uv_pipe_t, - name: *c_char, cb: uv_connect_cb); - fn rust_uv_tty_init(loop_ptr: *uv_loop_t, tty: *uv_tty_t, fd: c_int, - readable: c_int) -> c_int; - fn rust_uv_tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int; - fn rust_uv_tty_get_winsize(tty: *uv_tty_t, width: *c_int, - height: *c_int) -> c_int; - fn rust_uv_guess_handle(fd: c_int) -> c_int; - - // XXX: see comments in addrinfo.rs - // These should all really be constants... 
- //#[rust_stack] pub fn rust_SOCK_STREAM() -> c_int; - //#[rust_stack] pub fn rust_SOCK_DGRAM() -> c_int; - //#[rust_stack] pub fn rust_SOCK_RAW() -> c_int; - //#[rust_stack] pub fn rust_IPPROTO_UDP() -> c_int; - //#[rust_stack] pub fn rust_IPPROTO_TCP() -> c_int; - //#[rust_stack] pub fn rust_AI_ADDRCONFIG() -> c_int; - //#[rust_stack] pub fn rust_AI_ALL() -> c_int; - //#[rust_stack] pub fn rust_AI_CANONNAME() -> c_int; - //#[rust_stack] pub fn rust_AI_NUMERICHOST() -> c_int; - //#[rust_stack] pub fn rust_AI_NUMERICSERV() -> c_int; - //#[rust_stack] pub fn rust_AI_PASSIVE() -> c_int; - //#[rust_stack] pub fn rust_AI_V4MAPPED() -> c_int; - - fn rust_uv_signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int; - fn rust_uv_signal_start(handle: *uv_signal_t, - signal_cb: uv_signal_cb, - signum: c_int) -> c_int; - fn rust_uv_signal_stop(handle: *uv_signal_t) -> c_int; - } + +// generic uv functions +externfn!(fn uv_loop_delete(l: *uv_loop_t)) +externfn!(fn uv_handle_size(ty: uv_handle_type) -> size_t) +externfn!(fn uv_req_size(ty: uv_req_type) -> size_t) +externfn!(fn uv_run(l: *uv_loop_t, mode: uv_run_mode) -> c_int) +externfn!(fn uv_close(h: *uv_handle_t, cb: uv_close_cb)) +externfn!(fn uv_walk(l: *uv_loop_t, cb: uv_walk_cb, arg: *c_void)) +externfn!(fn uv_buf_init(base: *c_char, len: c_uint) -> uv_buf_t) +externfn!(fn uv_strerror(err: c_int) -> *c_char) +externfn!(fn uv_err_name(err: c_int) -> *c_char) +externfn!(fn uv_listen(s: *uv_stream_t, backlog: c_int, + cb: uv_connection_cb) -> c_int) +externfn!(fn uv_accept(server: *uv_stream_t, client: *uv_stream_t) -> c_int) +externfn!(fn uv_read_start(stream: *uv_stream_t, + on_alloc: uv_alloc_cb, + on_read: uv_read_cb) -> c_int) +externfn!(fn uv_read_stop(stream: *uv_stream_t) -> c_int) + +// idle bindings +externfn!(fn uv_idle_init(l: *uv_loop_t, i: *uv_idle_t) -> c_int) +externfn!(fn uv_idle_start(i: *uv_idle_t, cb: uv_idle_cb) -> c_int) +externfn!(fn uv_idle_stop(i: *uv_idle_t) -> c_int) + +// async bindings +externfn!(fn uv_async_init(l: *uv_loop_t, a: *uv_async_t, + cb: uv_async_cb) -> c_int) +externfn!(fn uv_async_send(a: *uv_async_t)) + +// tcp bindings +externfn!(fn uv_tcp_init(l: *uv_loop_t, h: *uv_tcp_t) -> c_int) +externfn!(fn uv_ip4_name(src: *sockaddr_in, dst: *c_char, + size: size_t) -> c_int) +externfn!(fn uv_ip6_name(src: *sockaddr_in6, dst: *c_char, + size: size_t) -> c_int) +externfn!(fn uv_tcp_nodelay(h: *uv_tcp_t, enable: c_int) -> c_int) +externfn!(fn uv_tcp_keepalive(h: *uv_tcp_t, enable: c_int, + delay: c_uint) -> c_int) +externfn!(fn uv_tcp_simultaneous_accepts(h: *uv_tcp_t, enable: c_int) -> c_int) + +// udp bindings +externfn!(fn uv_udp_init(l: *uv_loop_t, h: *uv_udp_t) -> c_int) +externfn!(fn uv_udp_recv_start(server: *uv_udp_t, + on_alloc: uv_alloc_cb, + on_recv: uv_udp_recv_cb) -> c_int) +externfn!(fn uv_udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, + interface_addr: *c_char, + membership: uv_membership) -> c_int) +externfn!(fn uv_udp_recv_stop(server: *uv_udp_t) -> c_int) +externfn!(fn uv_udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int) +externfn!(fn uv_udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int) +externfn!(fn uv_udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int) +externfn!(fn uv_udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int) + +// timer bindings +externfn!(fn uv_timer_init(l: *uv_loop_t, t: *uv_timer_t) -> c_int) +externfn!(fn uv_timer_start(t: *uv_timer_t, cb: uv_timer_cb, + timeout: libc::uint64_t, + repeat: libc::uint64_t) -> c_int) 
+externfn!(fn uv_timer_stop(handle: *uv_timer_t) -> c_int) + +// fs operations +externfn!(fn uv_fs_open(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, + flags: c_int, mode: c_int, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_unlink(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_write(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, + len: c_uint, offset: i64, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_read(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, + len: c_uint, offset: i64, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_close(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_stat(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_fstat(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_mkdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, + mode: c_int, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_rmdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, + cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_readdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, + flags: c_int, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_req_cleanup(req: *uv_fs_t)) externfn!(fn uv_fs_fsync(handle: *uv_loop_t, req: *uv_fs_t, file: c_int, cb: *u8) -> c_int) externfn!(fn uv_fs_fdatasync(handle: *uv_loop_t, req: *uv_fs_t, file: c_int, @@ -1215,6 +820,39 @@ externfn!(fn uv_fs_chown(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, externfn!(fn uv_fs_lstat(handle: *uv_loop_t, req: *uv_fs_t, file: *c_char, cb: *u8) -> c_int) +// getaddrinfo +externfn!(fn uv_getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t, + getaddrinfo_cb: uv_getaddrinfo_cb, + node: *c_char, service: *c_char, + hints: *addrinfo) -> c_int) +externfn!(fn uv_freeaddrinfo(ai: *addrinfo)) + +// process spawning +externfn!(fn uv_spawn(loop_ptr: *uv_loop_t, outptr: *uv_process_t, + options: uv_process_options_t) -> c_int) +externfn!(fn uv_process_kill(p: *uv_process_t, signum: c_int) -> c_int) + +// pipes +externfn!(fn uv_pipe_init(l: *uv_loop_t, p: *uv_pipe_t, ipc: c_int) -> c_int) +externfn!(fn uv_pipe_open(pipe: *uv_pipe_t, file: c_int) -> c_int) +externfn!(fn uv_pipe_bind(pipe: *uv_pipe_t, name: *c_char) -> c_int) +externfn!(fn uv_pipe_connect(req: *uv_connect_t, handle: *uv_pipe_t, + name: *c_char, cb: uv_connect_cb)) + +// tty +externfn!(fn uv_tty_init(l: *uv_loop_t, tty: *uv_tty_t, fd: c_int, + readable: c_int) -> c_int) +externfn!(fn uv_tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int) +externfn!(fn uv_tty_get_winsize(tty: *uv_tty_t, width: *c_int, + height: *c_int) -> c_int) +externfn!(fn uv_guess_handle(fd: c_int) -> uv_handle_type) + +// signals +externfn!(fn uv_signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int) +externfn!(fn uv_signal_start(h: *uv_signal_t, cb: uv_signal_cb, + signum: c_int) -> c_int) +externfn!(fn uv_signal_stop(handle: *uv_signal_t) -> c_int) + // libuv requires various system libraries to successfully link on some // platforms #[cfg(target_os = "linux")] diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index a4361f14f69b2..09aa806891ace 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -31,80 +31,11 @@ rust_uv_loop_new() { return (void*)uv_loop_new(); } -extern "C" void -rust_uv_loop_delete(uv_loop_t* loop) { - // FIXME: This is a workaround for #1815. libev uses realloc(0) to - // free the loop, which valgrind doesn't like. We have suppressions - // to make valgrind ignore them. 
- // - // Valgrind also has a sanity check when collecting allocation backtraces - // that the stack pointer must be at least 512 bytes into the stack (at - // least 512 bytes of frames must have come before). When this is not - // the case it doesn't collect the backtrace. - // - // Unfortunately, with our spaghetti stacks that valgrind check triggers - // sometimes and we don't get the backtrace for the realloc(0), it - // fails to be suppressed, and it gets reported as 0 bytes lost - // from a malloc with no backtrace. - // - // This pads our stack with some extra space before deleting the loop - alloca(512); - uv_loop_delete(loop); -} - extern "C" void rust_uv_loop_set_data(uv_loop_t* loop, void* data) { loop->data = data; } -extern "C" void -rust_uv_run(uv_loop_t* loop) { - uv_run(loop, UV_RUN_DEFAULT); -} - -extern "C" void -rust_uv_close(uv_handle_t* handle, uv_close_cb cb) { - uv_close(handle, cb); -} - -extern "C" void -rust_uv_walk(uv_loop_t* loop, uv_walk_cb cb, void* arg) { - uv_walk(loop, cb, arg); -} - -extern "C" void -rust_uv_async_send(uv_async_t* handle) { - uv_async_send(handle); -} - -extern "C" int -rust_uv_async_init(uv_loop_t* loop_handle, - uv_async_t* async_handle, - uv_async_cb cb) { - return uv_async_init(loop_handle, async_handle, cb); -} - -extern "C" int -rust_uv_timer_init(uv_loop_t* loop, uv_timer_t* timer) { - return uv_timer_init(loop, timer); -} - -extern "C" int -rust_uv_timer_start(uv_timer_t* the_timer, uv_timer_cb cb, - int64_t timeout, int64_t repeat) { - return uv_timer_start(the_timer, cb, timeout, repeat); -} - -extern "C" int -rust_uv_timer_stop(uv_timer_t* the_timer) { - return uv_timer_stop(the_timer); -} - -extern "C" int -rust_uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) { - return uv_tcp_init(loop, handle); -} - extern "C" int rust_uv_tcp_connect(uv_connect_t* connect_ptr, uv_tcp_t* tcp_ptr, @@ -159,29 +90,6 @@ rust_uv_tcp_getsockname return uv_tcp_getsockname(handle, (sockaddr*)name, &namelen); } -extern "C" int -rust_uv_tcp_nodelay -(uv_tcp_t* handle, int enable) { - return uv_tcp_nodelay(handle, enable); -} - -extern "C" int -rust_uv_tcp_keepalive -(uv_tcp_t* handle, int enable, unsigned int delay) { - return uv_tcp_keepalive(handle, enable, delay); -} - -extern "C" int -rust_uv_tcp_simultaneous_accepts -(uv_tcp_t* handle, int enable) { - return uv_tcp_simultaneous_accepts(handle, enable); -} - -extern "C" int -rust_uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) { - return uv_udp_init(loop, handle); -} - extern "C" int rust_uv_udp_bind(uv_udp_t* server, sockaddr_in* addr_ptr, unsigned flags) { return uv_udp_bind(server, *addr_ptr, flags); @@ -204,16 +112,6 @@ rust_uv_udp_send6(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, return uv_udp_send6(req, handle, buf_in, buf_cnt, *addr_ptr, cb); } -extern "C" int -rust_uv_udp_recv_start(uv_udp_t* server, uv_alloc_cb on_alloc, uv_udp_recv_cb on_read) { - return uv_udp_recv_start(server, on_alloc, on_read); -} - -extern "C" int -rust_uv_udp_recv_stop(uv_udp_t* server) { - return uv_udp_recv_stop(server); -} - extern "C" uv_udp_t* rust_uv_get_udp_handle_from_send_req(uv_udp_send_t* send_req) { return send_req->handle; @@ -228,47 +126,6 @@ rust_uv_udp_getsockname return uv_udp_getsockname(handle, (sockaddr*)name, &namelen); } -extern "C" int -rust_uv_udp_set_membership -(uv_udp_t* handle, const char* m_addr, const char* i_addr, uv_membership membership) { - return uv_udp_set_membership(handle, m_addr, i_addr, membership); -} - -extern "C" int -rust_uv_udp_set_multicast_loop -(uv_udp_t* 
handle, int on) { - return uv_udp_set_multicast_loop(handle, on); -} - -extern "C" int -rust_uv_udp_set_multicast_ttl -(uv_udp_t* handle, int ttl) { - return uv_udp_set_multicast_ttl(handle, ttl); -} - -extern "C" int -rust_uv_udp_set_ttl -(uv_udp_t* handle, int ttl) { - return uv_udp_set_ttl(handle, ttl); -} - -extern "C" int -rust_uv_udp_set_broadcast -(uv_udp_t* handle, int on) { - return uv_udp_set_broadcast(handle, on); -} - -extern "C" int -rust_uv_listen(uv_stream_t* stream, int backlog, - uv_connection_cb cb) { - return uv_listen(stream, backlog, cb); -} - -extern "C" int -rust_uv_accept(uv_stream_t* server, uv_stream_t* client) { - return uv_accept(server, client); -} - extern "C" uv_stream_t* rust_uv_get_stream_handle_from_connect_req(uv_connect_t* connect) { return connect->handle; @@ -319,43 +176,6 @@ rust_uv_set_data_for_req(uv_req_t* req, void* data) { req->data = data; } -extern "C" char* -rust_uv_get_base_from_buf(uv_buf_t buf) { - return buf.base; -} - -extern "C" size_t -rust_uv_get_len_from_buf(uv_buf_t buf) { - return buf.len; -} - -extern "C" const char* -rust_uv_strerror(int err) { - return uv_strerror(err); -} - -extern "C" const char* -rust_uv_err_name(int err) { - return uv_err_name(err); -} - -extern "C" int -rust_uv_write(uv_write_t* req, uv_stream_t* handle, - uv_buf_t* bufs, int buf_cnt, - uv_write_cb cb) { - return uv_write(req, handle, bufs, buf_cnt, cb); -} -extern "C" int -rust_uv_read_start(uv_stream_t* stream, uv_alloc_cb on_alloc, - uv_read_cb on_read) { - return uv_read_start(stream, on_alloc, on_read); -} - -extern "C" int -rust_uv_read_stop(uv_stream_t* stream) { - return uv_read_stop(stream); -} - extern "C" struct sockaddr_in rust_uv_ip4_addr(const char* ip, int port) { struct sockaddr_in addr = uv_ip4_addr(ip, port); @@ -403,16 +223,6 @@ extern "C" void rust_uv_free_ip6_addr(sockaddr_in6 *addrp) { free(addrp); } - -extern "C" int -rust_uv_ip4_name(struct sockaddr_in* src, char* dst, size_t size) { - return uv_ip4_name(src, dst, size); -} -extern "C" int -rust_uv_ip6_name(struct sockaddr_in6* src, char* dst, size_t size) { - int result = uv_ip6_name(src, dst, size); - return result; -} extern "C" unsigned int rust_uv_ip4_port(struct sockaddr_in* src) { return ntohs(src->sin_port); @@ -422,18 +232,6 @@ rust_uv_ip6_port(struct sockaddr_in6* src) { return ntohs(src->sin6_port); } -extern "C" int -rust_uv_getaddrinfo(uv_loop_t* loop, uv_getaddrinfo_t* handle, - uv_getaddrinfo_cb cb, - char* node, char* service, - addrinfo* hints) { - return uv_getaddrinfo(loop, handle, cb, node, service, hints); -} -extern "C" void -rust_uv_freeaddrinfo(addrinfo* res) { - uv_freeaddrinfo(res); -} - extern "C" int rust_uv_is_ipv4_sockaddr(sockaddr* addr) { return addr->sa_family == AF_INET; @@ -466,31 +264,6 @@ rust_uv_addrinfo_as_sockaddr_in6(addrinfo* input) { return (sockaddr_in6*)input->ai_addr; } -extern "C" int -rust_uv_idle_init(uv_loop_t* loop, uv_idle_t* idle) { - return uv_idle_init(loop, idle); -} - -extern "C" int -rust_uv_idle_start(uv_idle_t* idle, uv_idle_cb cb) { - return uv_idle_start(idle, cb); -} - -extern "C" int -rust_uv_idle_stop(uv_idle_t* idle) { - return uv_idle_stop(idle); -} - -extern "C" size_t -rust_uv_handle_size(uintptr_t type) { - return uv_handle_size((uv_handle_type)type); -} - -extern "C" size_t -rust_uv_req_size(uintptr_t type) { - return uv_req_size((uv_req_type)type); -} - extern "C" uintptr_t rust_uv_handle_type_max() { return UV_HANDLE_TYPE_MAX; @@ -501,33 +274,6 @@ rust_uv_req_type_max() { return UV_REQ_TYPE_MAX; } -extern "C" int 
-rust_uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, - int mode, uv_fs_cb cb) { - return uv_fs_open(loop, req, path, flags, mode, cb); -} -extern "C" int -rust_uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { - return uv_fs_unlink(loop, req, path, cb); -} -extern "C" int -rust_uv_fs_write(uv_loop_t* loop, uv_fs_t* req, uv_file fd, void* buf, - size_t len, int64_t offset, uv_fs_cb cb) { - return uv_fs_write(loop, req, fd, buf, len, offset, cb); -} -extern "C" int -rust_uv_fs_read(uv_loop_t* loop, uv_fs_t* req, uv_file fd, void* buf, - size_t len, int64_t offset, uv_fs_cb cb) { - return uv_fs_read(loop, req, fd, buf, len, offset, cb); -} -extern "C" int -rust_uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) { - return uv_fs_close(loop, req, fd, cb); -} -extern "C" void -rust_uv_fs_req_cleanup(uv_fs_t* req) { - uv_fs_req_cleanup(req); -} extern "C" int rust_uv_get_result_from_fs_req(uv_fs_t* req) { return req->result; @@ -550,15 +296,6 @@ rust_uv_get_loop_from_getaddrinfo_req(uv_getaddrinfo_t* req) { return req->loop; } -extern "C" int -rust_uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { - return uv_fs_stat(loop, req, path, cb); -} -extern "C" int -rust_uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { - return uv_fs_fstat(loop, req, file, cb); -} - extern "C" void rust_uv_populate_uv_stat(uv_fs_t* req_in, uv_stat_t* stat_out) { stat_out->st_dev = req_in->statbuf.st_dev; @@ -583,39 +320,6 @@ rust_uv_populate_uv_stat(uv_fs_t* req_in, uv_stat_t* stat_out) { stat_out->st_birthtim.tv_nsec = req_in->statbuf.st_birthtim.tv_nsec; } -extern "C" int -rust_uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb) { - return uv_fs_mkdir(loop, req, path, mode, cb); -} -extern "C" int -rust_uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { - return uv_fs_rmdir(loop, req, path, cb); -} - -extern "C" int -rust_uv_fs_readdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, uv_fs_cb cb) { - return uv_fs_readdir(loop, req, path, flags, cb); -} -extern "C" int -rust_uv_fs_rename(uv_loop_t *loop, uv_fs_t* req, const char *path, - const char *to, uv_fs_cb cb) { - return uv_fs_rename(loop, req, path, to, cb); -} -extern "C" int -rust_uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb) { - return uv_fs_chmod(loop, req, path, mode, cb); -} - -extern "C" int -rust_uv_spawn(uv_loop_t *loop, uv_process_t *p, uv_process_options_t options) { - return uv_spawn(loop, p, options); -} - -extern "C" int -rust_uv_process_kill(uv_process_t *p, int signum) { - return uv_process_kill(p, signum); -} - extern "C" void rust_set_stdio_container_flags(uv_stdio_container_t *c, int flags) { c->flags = (uv_stdio_flags) flags; @@ -635,59 +339,3 @@ extern "C" int rust_uv_process_pid(uv_process_t* p) { return p->pid; } - -extern "C" int -rust_uv_pipe_init(uv_loop_t *loop, uv_pipe_t* p, int ipc) { - return uv_pipe_init(loop, p, ipc); -} - -extern "C" int -rust_uv_pipe_open(uv_pipe_t *pipe, int file) { - return uv_pipe_open(pipe, file); -} - -extern "C" int -rust_uv_pipe_bind(uv_pipe_t *pipe, char *name) { - return uv_pipe_bind(pipe, name); -} - -extern "C" void -rust_uv_pipe_connect(uv_connect_t *req, uv_pipe_t *handle, - char *name, uv_connect_cb cb) { - uv_pipe_connect(req, handle, name, cb); -} - -extern "C" int -rust_uv_tty_init(uv_loop_t *loop, uv_tty_t *tty, int fd, int readable) { - return uv_tty_init(loop, tty, fd, 
readable); -} - -extern "C" int -rust_uv_tty_set_mode(uv_tty_t *tty, int mode) { - return uv_tty_set_mode(tty, mode); -} - -extern "C" int -rust_uv_tty_get_winsize(uv_tty_t *tty, int *width, int *height) { - return uv_tty_get_winsize(tty, width, height); -} - -extern "C" int -rust_uv_guess_handle(int fd) { - return uv_guess_handle(fd); -} - -extern "C" int -rust_uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) { - return uv_signal_init(loop, handle); -} - -extern "C" int -rust_uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) { - return uv_signal_start(handle, signal_cb, signum); -} - -extern "C" int -rust_uv_signal_stop(uv_signal_t* handle) { - return uv_signal_stop(handle); -} From 4bcde6bc068b8962d368fc70bd68cb21f5d1013c Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Sun, 3 Nov 2013 11:02:19 -0800 Subject: [PATCH 02/27] uv: Provide a helper fn to Result<(), IoError> --- src/librustuv/lib.rs | 9 +++-- src/librustuv/uvio.rs | 84 +++++++------------------------------------ 2 files changed, 19 insertions(+), 74 deletions(-) diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 64aea4f01744d..6aa8723a4017d 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -341,9 +341,8 @@ pub fn uv_error_to_io_error(uverr: UvError) -> IoError { } } -/// Given a uv handle, convert a callback status to a UvError -pub fn status_to_maybe_uv_error(status: c_int) -> Option -{ +/// Given a uv error code, convert a callback status to a UvError +pub fn status_to_maybe_uv_error(status: c_int) -> Option { if status >= 0 { None } else { @@ -351,6 +350,10 @@ pub fn status_to_maybe_uv_error(status: c_int) -> Option } } +pub fn status_to_io_result(status: c_int) -> Result<(), IoError> { + if status >= 0 {Ok(())} else {Err(uv_error_to_io_error(UvError(status)))} +} + /// The uv buffer type pub type Buf = uvll::uv_buf_t; diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index b4382ab4cee25..1dbc7d71543e1 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -992,11 +992,7 @@ fn accept_simultaneously(stream: StreamWatcher, a: int) -> Result<(), IoError> { let r = unsafe { uvll::uv_tcp_simultaneous_accepts(stream.native_handle(), a as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } impl RtioTcpAcceptor for UvTcpAcceptor { @@ -1197,11 +1193,7 @@ impl RtioTcpStream for UvTcpStream { let r = unsafe { uvll::uv_tcp_nodelay(self_.watcher.native_handle(), 0 as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } @@ -1210,11 +1202,7 @@ impl RtioTcpStream for UvTcpStream { let r = unsafe { uvll::uv_tcp_nodelay(self_.watcher.native_handle(), 1 as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } @@ -1224,11 +1212,7 @@ impl RtioTcpStream for UvTcpStream { uvll::uv_tcp_keepalive(self_.watcher.native_handle(), 1 as c_int, delay_in_seconds as c_uint) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } @@ -1238,11 +1222,7 @@ impl RtioTcpStream for UvTcpStream { uvll::uv_tcp_keepalive(self_.watcher.native_handle(), 0 as c_int, 0 as c_uint) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } } @@ -1348,11 +1328,7 
@@ impl RtioUdpSocket for UvUdpSocket { uvll::UV_JOIN_GROUP) } }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } @@ -1365,100 +1341,66 @@ impl RtioUdpSocket for UvUdpSocket { uvll::UV_LEAVE_GROUP) } }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } fn loop_multicast_locally(&mut self) -> Result<(), IoError> { do self.home_for_io |self_| { - let r = unsafe { uvll::uv_udp_set_multicast_loop(self_.watcher.native_handle(), 1 as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } fn dont_loop_multicast_locally(&mut self) -> Result<(), IoError> { do self.home_for_io |self_| { - let r = unsafe { uvll::uv_udp_set_multicast_loop(self_.watcher.native_handle(), 0 as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } fn multicast_time_to_live(&mut self, ttl: int) -> Result<(), IoError> { do self.home_for_io |self_| { - let r = unsafe { uvll::uv_udp_set_multicast_ttl(self_.watcher.native_handle(), ttl as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } fn time_to_live(&mut self, ttl: int) -> Result<(), IoError> { do self.home_for_io |self_| { - let r = unsafe { uvll::uv_udp_set_ttl(self_.watcher.native_handle(), ttl as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } fn hear_broadcasts(&mut self) -> Result<(), IoError> { do self.home_for_io |self_| { - let r = unsafe { uvll::uv_udp_set_broadcast(self_.watcher.native_handle(), 1 as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } fn ignore_broadcasts(&mut self) -> Result<(), IoError> { do self.home_for_io |self_| { - let r = unsafe { uvll::uv_udp_set_broadcast(self_.watcher.native_handle(), 0 as c_int) }; - - match status_to_maybe_uv_error(r) { - Some(err) => Err(uv_error_to_io_error(err)), - None => Ok(()) - } + status_to_io_result(r) } } } From 653406fcf78aff6bfc4a7c025a8176919d708565 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Sun, 3 Nov 2013 11:26:08 -0800 Subject: [PATCH 03/27] uv: Remove closure-based home_for_io for raii Using an raii wrapper means that there's no need for a '_self' variant and we can greatly reduce the amount of 'self_'-named variables. --- src/librustuv/uvio.rs | 711 +++++++++++++++++++----------------------- 1 file changed, 326 insertions(+), 385 deletions(-) diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 1dbc7d71543e1..bf8358070dcdf 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -86,12 +86,39 @@ trait HomingIO { self.home().sched_id } - // XXX: dummy self parameter - fn restore_original_home(_: Option, io_home: uint) { + /// Fires a single homing missile, returning another missile targeted back + /// at the original home of this task. In other words, this function will + /// move the local task to its I/O scheduler and then return an RAII wrapper + /// which will return the task home. 
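    // Editorial sketch (not an added hunk): the RAII guard collapses the old
    // closure-based call sites. A method such as `socket_name` goes from
    //
    //     do self.home_for_io |self_| {
    //         socket_name(Tcp, self_.watcher)
    //     }
    //
    // to
    //
    //     let _m = self.fire_homing_missile(); // hop to the I/O scheduler
    //     socket_name(Tcp, self.watcher)       // `_m` drops here and sends the
    //                                          // task back to its home scheduler
    //
    // which is why the `self_` rebinding disappears throughout this file.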
+ fn fire_homing_missile(&mut self) -> HomingMissile { + HomingMissile { io_home: self.go_to_IO_home() } + } + + /// Same as `fire_homing_missile`, but returns the local I/O scheduler as + /// well (the one that was homed to). + fn fire_homing_missile_sched(&mut self) -> (HomingMissile, ~Scheduler) { + // First, transplant ourselves to the home I/O scheduler + let missile = self.fire_homing_missile(); + // Next (must happen next), grab the local I/O scheduler + let io_sched: ~Scheduler = Local::take(); + + (missile, io_sched) + } +} + +/// After a homing operation has been completed, this will return the current +/// task back to its appropriate home (if applicable). The field is used to +/// assert that we are where we think we are. +struct HomingMissile { + priv io_home: uint, +} + +impl Drop for HomingMissile { + fn drop(&mut self) { // It would truly be a sad day if we had moved off the home I/O // scheduler while we were doing I/O. assert_eq!(Local::borrow(|sched: &mut Scheduler| sched.sched_id()), - io_home); + self.io_home); // If we were a homed task, then we must send ourselves back to the // original scheduler. Otherwise, we can just return and keep running @@ -106,30 +133,6 @@ trait HomingIO { } } } - - fn home_for_io(&mut self, io: &fn(&mut Self) -> A) -> A { - let home = self.go_to_IO_home(); - let a = io(self); // do IO - HomingIO::restore_original_home(None::, home); - a // return the result of the IO - } - - fn home_for_io_consume(mut self, io: &fn(Self) -> A) -> A { - let home = self.go_to_IO_home(); - let a = io(self); // do IO - HomingIO::restore_original_home(None::, home); - a // return the result of the IO - } - - fn home_for_io_with_sched(&mut self, io_sched: &fn(&mut Self, ~Scheduler) -> A) -> A { - let home = self.go_to_IO_home(); - let a = do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - io_sched(self, scheduler) // do IO and scheduling action - }; - HomingIO::restore_original_home(None::, home); - a // return result of IO - } } // get a handle for the current scheduler @@ -915,13 +918,12 @@ impl UvTcpListener { impl Drop for UvTcpListener { fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_, task| { - let task = Cell::new(task); - do self_.watcher.as_stream().close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task.take()); - } + let (_m, sched) = self.fire_homing_missile_sched(); + do sched.deschedule_running_task_and_then |_, task| { + let task = Cell::new(task); + do self.watcher.as_stream().close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task.take()); } } } @@ -929,38 +931,36 @@ impl Drop for UvTcpListener { impl RtioSocket for UvTcpListener { fn socket_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(Tcp, self_.watcher) - } + let _m = self.fire_homing_missile(); + socket_name(Tcp, self.watcher) } } impl RtioTcpListener for UvTcpListener { - fn listen(~self) -> Result<~RtioTcpAcceptor, IoError> { - do self.home_for_io_consume |self_| { - let acceptor = ~UvTcpAcceptor::new(self_); - let incoming = Cell::new(acceptor.incoming.clone()); - let mut stream = acceptor.listener.watcher.as_stream(); - let res = do stream.listen |mut server, status| { - do incoming.with_mut_ref |incoming| { - let inc = match status { - Some(_) => Err(standard_error(OtherIoError)), - None => { - let inc = TcpWatcher::new(&server.event_loop()); - // first accept 
call in the callback guarenteed to succeed - server.accept(inc.as_stream()); - let home = get_handle_to_current_scheduler!(); - Ok(~UvTcpStream { watcher: inc, home: home } - as ~RtioTcpStream) - } - }; - incoming.send(inc); - } - }; - match res { - Ok(()) => Ok(acceptor as ~RtioTcpAcceptor), - Err(e) => Err(uv_error_to_io_error(e)), + fn listen(mut ~self) -> Result<~RtioTcpAcceptor, IoError> { + let _m = self.fire_homing_missile(); + let acceptor = ~UvTcpAcceptor::new(*self); + let incoming = Cell::new(acceptor.incoming.clone()); + let mut stream = acceptor.listener.watcher.as_stream(); + let res = do stream.listen |mut server, status| { + do incoming.with_mut_ref |incoming| { + let inc = match status { + Some(_) => Err(standard_error(OtherIoError)), + None => { + let inc = TcpWatcher::new(&server.event_loop()); + // first accept call in the callback guarenteed to succeed + server.accept(inc.as_stream()); + let home = get_handle_to_current_scheduler!(); + Ok(~UvTcpStream { watcher: inc, home: home } + as ~RtioTcpStream) + } + }; + incoming.send(inc); } + }; + match res { + Ok(()) => Ok(acceptor as ~RtioTcpAcceptor), + Err(e) => Err(uv_error_to_io_error(e)), } } } @@ -982,9 +982,8 @@ impl UvTcpAcceptor { impl RtioSocket for UvTcpAcceptor { fn socket_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(Tcp, self_.listener.watcher) - } + let _m = self.fire_homing_missile(); + socket_name(Tcp, self.listener.watcher) } } @@ -997,21 +996,18 @@ fn accept_simultaneously(stream: StreamWatcher, a: int) -> Result<(), IoError> { impl RtioTcpAcceptor for UvTcpAcceptor { fn accept(&mut self) -> Result<~RtioTcpStream, IoError> { - do self.home_for_io |self_| { - self_.incoming.recv() - } + let _m = self.fire_homing_missile(); + self.incoming.recv() } fn accept_simultaneously(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - accept_simultaneously(self_.listener.watcher.as_stream(), 1) - } + let _m = self.fire_homing_missile(); + accept_simultaneously(self.listener.watcher.as_stream(), 1) } fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - accept_simultaneously(self_.listener.watcher.as_stream(), 0) - } + let _m = self.fire_homing_missile(); + accept_simultaneously(self.listener.watcher.as_stream(), 0) } } @@ -1102,14 +1098,12 @@ impl HomingIO for UvUnboundPipe { impl Drop for UvUnboundPipe { fn drop(&mut self) { - do self.home_for_io |self_| { - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.pipe.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + let (_m, sched) = self.fire_homing_missile_sched(); + do sched.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self.pipe.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } } @@ -1127,14 +1121,12 @@ impl UvPipeStream { impl RtioPipe for UvPipeStream { fn read(&mut self, buf: &mut [u8]) -> Result { - do self.inner.home_for_io_with_sched |self_, scheduler| { - read_stream(self_.pipe.as_stream(), scheduler, buf) - } + let (_m, scheduler) = self.inner.fire_homing_missile_sched(); + read_stream(self.inner.pipe.as_stream(), scheduler, buf) } fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - do self.inner.home_for_io_with_sched |self_, scheduler| { - write_stream(self_.pipe.as_stream(), scheduler, 
buf) - } + let (_m, scheduler) = self.inner.fire_homing_missile_sched(); + write_stream(self.inner.pipe.as_stream(), scheduler, buf) } } @@ -1149,13 +1141,12 @@ impl HomingIO for UvTcpStream { impl Drop for UvTcpStream { fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.watcher.as_stream().close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + let (_m, sched) = self.fire_homing_missile_sched(); + do sched.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self.watcher.as_stream().close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } } @@ -1163,67 +1154,55 @@ impl Drop for UvTcpStream { impl RtioSocket for UvTcpStream { fn socket_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(Tcp, self_.watcher) - } + let _m = self.fire_homing_missile(); + socket_name(Tcp, self.watcher) } } impl RtioTcpStream for UvTcpStream { fn read(&mut self, buf: &mut [u8]) -> Result { - do self.home_for_io_with_sched |self_, scheduler| { - read_stream(self_.watcher.as_stream(), scheduler, buf) - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + read_stream(self.watcher.as_stream(), scheduler, buf) } fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - do self.home_for_io_with_sched |self_, scheduler| { - write_stream(self_.watcher.as_stream(), scheduler, buf) - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + write_stream(self.watcher.as_stream(), scheduler, buf) } fn peer_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(TcpPeer, self_.watcher) - } + let _m = self.fire_homing_missile(); + socket_name(TcpPeer, self.watcher) } fn control_congestion(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_tcp_nodelay(self_.watcher.native_handle(), 0 as c_int) - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_nodelay(self.watcher.native_handle(), 0 as c_int) + }) } fn nodelay(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_tcp_nodelay(self_.watcher.native_handle(), 1 as c_int) - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_nodelay(self.watcher.native_handle(), 1 as c_int) + }) } fn keepalive(&mut self, delay_in_seconds: uint) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_tcp_keepalive(self_.watcher.native_handle(), 1 as c_int, - delay_in_seconds as c_uint) - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_keepalive(self.watcher.native_handle(), 1 as c_int, + delay_in_seconds as c_uint) + }) } fn letdie(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_tcp_keepalive(self_.watcher.native_handle(), - 0 as c_int, 0 as c_uint) - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_tcp_keepalive(self.watcher.native_handle(), + 0 as c_int, 0 as c_uint) + }) } } @@ -1238,13 +1217,12 @@ impl HomingIO for UvUdpSocket { impl Drop for UvUdpSocket { fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| 
{ - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self.watcher.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } } @@ -1252,156 +1230,138 @@ impl Drop for UvUdpSocket { impl RtioSocket for UvUdpSocket { fn socket_name(&mut self) -> Result { - do self.home_for_io |self_| { - socket_name(Udp, self_.watcher) - } + let _m = self.fire_homing_missile(); + socket_name(Udp, self.watcher) } } impl RtioUdpSocket for UvUdpSocket { fn recvfrom(&mut self, buf: &mut [u8]) -> Result<(uint, SocketAddr), IoError> { - do self.home_for_io_with_sched |self_, scheduler| { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let uv_buf = slice_to_uv_buf(buf); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let alloc: AllocCallback = |_| uv_buf; - do self_.watcher.recv_start(alloc) |mut watcher, nread, _buf, addr, flags, status| { - let _ = flags; // /XXX add handling for partials? + let (_m, scheduler) = self.fire_homing_missile_sched(); + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; - watcher.recv_stop(); + let buf_ptr: *&mut [u8] = &buf; + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; + do self.watcher.recv_start(alloc) |mut watcher, nread, _buf, addr, flags, status| { + let _ = flags; // /XXX add handling for partials? 
- let result = match status { - None => { - assert!(nread >= 0); - Ok((nread as uint, addr)) - } - Some(err) => Err(uv_error_to_io_error(err)), - }; + watcher.recv_stop(); - unsafe { (*result_cell_ptr).put_back(result); } + let result = match status { + None => { + assert!(nread >= 0); + Ok((nread as uint, addr)) + } + Some(err) => Err(uv_error_to_io_error(err)), + }; - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } + unsafe { (*result_cell_ptr).put_back(result); } - assert!(!result_cell.is_empty()); - result_cell.take() + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); + } } + + assert!(!result_cell.is_empty()); + result_cell.take() } fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> Result<(), IoError> { - do self.home_for_io_with_sched |self_, scheduler| { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let buf_ptr: *&[u8] = &buf; - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - do self_.watcher.send(buf, dst) |_watcher, status| { + let (_m, scheduler) = self.fire_homing_missile_sched(); + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let buf_ptr: *&[u8] = &buf; + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; + do self.watcher.send(buf, dst) |_watcher, status| { - let result = match status { - None => Ok(()), - Some(err) => Err(uv_error_to_io_error(err)), - }; + let result = match status { + None => Ok(()), + Some(err) => Err(uv_error_to_io_error(err)), + }; - unsafe { (*result_cell_ptr).put_back(result); } + unsafe { (*result_cell_ptr).put_back(result); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); } - - assert!(!result_cell.is_empty()); - result_cell.take() } + + assert!(!result_cell.is_empty()); + result_cell.take() } fn join_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - do multi.to_str().with_c_str |m_addr| { - uvll::uv_udp_set_membership(self_.watcher.native_handle(), - m_addr, ptr::null(), - uvll::UV_JOIN_GROUP) - } - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + do multi.to_str().with_c_str |m_addr| { + uvll::uv_udp_set_membership(self.watcher.native_handle(), + m_addr, ptr::null(), + uvll::UV_JOIN_GROUP) + } + }) } fn leave_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - do multi.to_str().with_c_str |m_addr| { - uvll::uv_udp_set_membership(self_.watcher.native_handle(), - m_addr, ptr::null(), - uvll::UV_LEAVE_GROUP) - } - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + do multi.to_str().with_c_str |m_addr| { + uvll::uv_udp_set_membership(self.watcher.native_handle(), + m_addr, ptr::null(), + uvll::UV_LEAVE_GROUP) + } + }) } fn loop_multicast_locally(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_udp_set_multicast_loop(self_.watcher.native_handle(), - 1 as c_int) - }; - status_to_io_result(r) - } + let _m = 
self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_multicast_loop(self.watcher.native_handle(), + 1 as c_int) + }) } fn dont_loop_multicast_locally(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_udp_set_multicast_loop(self_.watcher.native_handle(), - 0 as c_int) - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_multicast_loop(self.watcher.native_handle(), + 0 as c_int) + }) } fn multicast_time_to_live(&mut self, ttl: int) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_udp_set_multicast_ttl(self_.watcher.native_handle(), - ttl as c_int) - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_multicast_ttl(self.watcher.native_handle(), + ttl as c_int) + }) } fn time_to_live(&mut self, ttl: int) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_udp_set_ttl(self_.watcher.native_handle(), ttl as c_int) - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_ttl(self.watcher.native_handle(), ttl as c_int) + }) } fn hear_broadcasts(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_udp_set_broadcast(self_.watcher.native_handle(), - 1 as c_int) - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_broadcast(self.watcher.native_handle(), + 1 as c_int) + }) } fn ignore_broadcasts(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - let r = unsafe { - uvll::uv_udp_set_broadcast(self_.watcher.native_handle(), - 0 as c_int) - }; - status_to_io_result(r) - } + let _m = self.fire_homing_missile(); + status_to_io_result(unsafe { + uvll::uv_udp_set_broadcast(self.watcher.native_handle(), + 0 as c_int) + }) } } @@ -1422,14 +1382,13 @@ impl UvTimer { impl Drop for UvTimer { fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| { - uvdebug!("closing UvTimer"); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + uvdebug!("closing UvTimer"); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self.watcher.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } } @@ -1437,18 +1396,17 @@ impl Drop for UvTimer { impl RtioTimer for UvTimer { fn sleep(&mut self, msecs: u64) { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_sched, task| { - uvdebug!("sleep: entered scheduler context"); - let task_cell = Cell::new(task); - do self_.watcher.start(msecs, 0) |_, status| { - assert!(status.is_none()); - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + do scheduler.deschedule_running_task_and_then |_sched, task| { + uvdebug!("sleep: entered scheduler context"); + let task_cell = Cell::new(task); + do self.watcher.start(msecs, 0) |_, status| { + assert!(status.is_none()); + let scheduler: ~Scheduler 
= Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); } - self_.watcher.stop(); } + self.watcher.stop(); } fn oneshot(&mut self, msecs: u64) -> PortOne<()> { @@ -1456,13 +1414,11 @@ impl RtioTimer for UvTimer { let (port, chan) = oneshot(); let chan = Cell::new(chan); - do self.home_for_io |self_| { - let chan = Cell::new(chan.take()); - do self_.watcher.start(msecs, 0) |_, status| { - assert!(status.is_none()); - assert!(!chan.is_empty()); - chan.take().send_deferred(()); - } + let _m = self.fire_homing_missile(); + do self.watcher.start(msecs, 0) |_, status| { + assert!(status.is_none()); + assert!(!chan.is_empty()); + chan.take().send_deferred(()); } return port; @@ -1473,13 +1429,11 @@ impl RtioTimer for UvTimer { let (port, chan) = stream(); let chan = Cell::new(chan); - do self.home_for_io |self_| { - let chan = Cell::new(chan.take()); - do self_.watcher.start(msecs, msecs) |_, status| { - assert!(status.is_none()); - do chan.with_ref |chan| { - chan.send_deferred(()); - } + let _m = self.fire_homing_missile(); + do self.watcher.start(msecs, msecs) |_, status| { + assert!(status.is_none()); + do chan.with_ref |chan| { + chan.send_deferred(()); } } @@ -1512,20 +1466,19 @@ impl UvFileStream { let result_cell = Cell::new_empty(); let result_cell_ptr: *Cell> = &result_cell; let buf_ptr: *&mut [u8] = &buf; - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_, task| { - let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - let task_cell = Cell::new(task); - let read_req = file::FsRequest::new(); - do read_req.read(&self_.loop_, self_.fd, buf, offset) |req, uverr| { - let res = match uverr { - None => Ok(req.get_result() as int), - Some(err) => Err(uv_error_to_io_error(err)) - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + do scheduler.deschedule_running_task_and_then |_, task| { + let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; + let task_cell = Cell::new(task); + let read_req = file::FsRequest::new(); + do read_req.read(&self.loop_, self.fd, buf, offset) |req, uverr| { + let res = match uverr { + None => Ok(req.get_result() as int), + Some(err) => Err(uv_error_to_io_error(err)) + }; + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } result_cell.take() @@ -1555,19 +1508,18 @@ impl UvFileStream { -> Result<(), IoError> { let result_cell = Cell::new_empty(); let result_cell_ptr: *Cell> = &result_cell; - do self.home_for_io_with_sched |self_, sched| { - do sched.deschedule_running_task_and_then |_, task| { - let task = Cell::new(task); - let req = file::FsRequest::new(); - do f(self_, req) |_, uverr| { - let res = match uverr { - None => Ok(()), - Some(err) => Err(uv_error_to_io_error(err)) - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task.take()); - } + let (_m, sched) = self.fire_homing_missile_sched(); + do sched.deschedule_running_task_and_then |_, task| { + let task = Cell::new(task); + let req = file::FsRequest::new(); + do f(self_, req) |_, uverr| { + let res = match uverr { + None => Ok(()), + Some(err) => Err(uv_error_to_io_error(err)) + }; + unsafe { (*result_cell_ptr).put_back(res); } + let scheduler: ~Scheduler = Local::take(); + 
scheduler.resume_blocked_task_immediately(task.take()); } } result_cell.take() @@ -1583,14 +1535,13 @@ impl Drop for UvFileStream { do close_req.close(&self.loop_, self.fd) |_,_| {} } CloseSynchronously => { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let close_req = file::FsRequest::new(); - do close_req.close(&self_.loop_, self_.fd) |_,_| { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + let close_req = file::FsRequest::new(); + do close_req.close(&self.loop_, self.fd) |_,_| { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } } @@ -1623,7 +1574,7 @@ impl RtioFileStream for UvFileStream { fn tell(&self) -> Result { use std::libc::SEEK_CUR; // this is temporary - let self_ = unsafe { cast::transmute::<&UvFileStream, &mut UvFileStream>(self) }; + let self_ = unsafe { cast::transmute_mut(self) }; self_.seek_common(0, SEEK_CUR) } fn fsync(&mut self) -> Result<(), IoError> { @@ -1681,7 +1632,8 @@ impl Drop for UvProcess { if self.home.is_none() { close(self) } else { - self.home_for_io(close) + let _m = self.fire_homing_missile(); + close(self) } } } @@ -1692,30 +1644,28 @@ impl RtioProcess for UvProcess { } fn kill(&mut self, signal: int) -> Result<(), IoError> { - do self.home_for_io |self_| { - match self_.process.kill(signal) { - Ok(()) => Ok(()), - Err(uverr) => Err(uv_error_to_io_error(uverr)) - } + let _m = self.fire_homing_missile(); + match self.process.kill(signal) { + Ok(()) => Ok(()), + Err(uverr) => Err(uv_error_to_io_error(uverr)) } } fn wait(&mut self) -> int { // Make sure (on the home scheduler) that we have an exit status listed - do self.home_for_io |self_| { - match self_.exit_status { - Some(*) => {} - None => { - // If there's no exit code previously listed, then the - // process's exit callback has yet to be invoked. We just - // need to deschedule ourselves and wait to be reawoken. - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - assert!(self_.descheduled.is_none()); - self_.descheduled = Some(task); - } - assert!(self_.exit_status.is_some()); + let _m = self.fire_homing_missile(); + match self.exit_status { + Some(*) => {} + None => { + // If there's no exit code previously listed, then the + // process's exit callback has yet to be invoked. We just + // need to deschedule ourselves and wait to be reawoken. 
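                // (Editorial note, not part of the patch: the task parks itself
                // in `self.descheduled` below; the process exit callback, which
                // is not shown in this hunk, records `exit_status` and resumes
                // the parked task, which is why the assertion after the
                // deschedule block can hold.)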
+ let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + assert!(self.descheduled.is_none()); + self.descheduled = Some(task); } + assert!(self.exit_status.is_some()); } } @@ -1738,28 +1688,27 @@ impl UvUnixListener { } impl RtioUnixListener for UvUnixListener { - fn listen(~self) -> Result<~RtioUnixAcceptor, IoError> { - do self.home_for_io_consume |self_| { - let acceptor = ~UvUnixAcceptor::new(self_); - let incoming = Cell::new(acceptor.incoming.clone()); - let mut stream = acceptor.listener.inner.pipe.as_stream(); - let res = do stream.listen |mut server, status| { - do incoming.with_mut_ref |incoming| { - let inc = match status { - Some(e) => Err(uv_error_to_io_error(e)), - None => { - let pipe = UvUnboundPipe::new(&server.event_loop()); - server.accept(pipe.pipe.as_stream()); - Ok(~UvPipeStream::new(pipe) as ~RtioPipe) - } - }; - incoming.send(inc); - } - }; - match res { - Ok(()) => Ok(acceptor as ~RtioUnixAcceptor), - Err(e) => Err(uv_error_to_io_error(e)), + fn listen(mut ~self) -> Result<~RtioUnixAcceptor, IoError> { + let _m = self.fire_homing_missile(); + let acceptor = ~UvUnixAcceptor::new(*self); + let incoming = Cell::new(acceptor.incoming.clone()); + let mut stream = acceptor.listener.inner.pipe.as_stream(); + let res = do stream.listen |mut server, status| { + do incoming.with_mut_ref |incoming| { + let inc = match status { + Some(e) => Err(uv_error_to_io_error(e)), + None => { + let pipe = UvUnboundPipe::new(&server.event_loop()); + server.accept(pipe.pipe.as_stream()); + Ok(~UvPipeStream::new(pipe) as ~RtioPipe) + } + }; + incoming.send(inc); } + }; + match res { + Ok(()) => Ok(acceptor as ~RtioUnixAcceptor), + Err(e) => Err(uv_error_to_io_error(e)), } } } @@ -1787,30 +1736,26 @@ impl Drop for UvTTY { impl RtioTTY for UvTTY { fn read(&mut self, buf: &mut [u8]) -> Result { - do self.home_for_io_with_sched |self_, scheduler| { - read_stream(self_.tty.as_stream(), scheduler, buf) - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + read_stream(self.tty.as_stream(), scheduler, buf) } fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - do self.home_for_io_with_sched |self_, scheduler| { - write_stream(self_.tty.as_stream(), scheduler, buf) - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + write_stream(self.tty.as_stream(), scheduler, buf) } fn set_raw(&mut self, raw: bool) -> Result<(), IoError> { - do self.home_for_io |self_| { - match self_.tty.set_mode(raw) { - Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) - } + let _m = self.fire_homing_missile(); + match self.tty.set_mode(raw) { + Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) } } fn get_winsize(&mut self) -> Result<(int, int), IoError> { - do self.home_for_io |self_| { - match self_.tty.get_winsize() { - Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) - } + let _m = self.fire_homing_missile(); + match self.tty.get_winsize() { + Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) } } @@ -1836,21 +1781,18 @@ impl UvUnixAcceptor { impl RtioUnixAcceptor for UvUnixAcceptor { fn accept(&mut self) -> Result<~RtioPipe, IoError> { - do self.home_for_io |self_| { - self_.incoming.recv() - } + let _m = self.fire_homing_missile(); + self.incoming.recv() } fn accept_simultaneously(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - accept_simultaneously(self_.listener.inner.pipe.as_stream(), 1) - } + let _m = self.fire_homing_missile(); + accept_simultaneously(self.listener.inner.pipe.as_stream(), 1) } fn 
dont_accept_simultaneously(&mut self) -> Result<(), IoError> { - do self.home_for_io |self_| { - accept_simultaneously(self_.listener.inner.pipe.as_stream(), 0) - } + let _m = self.fire_homing_missile(); + accept_simultaneously(self.listener.inner.pipe.as_stream(), 0) } } @@ -1873,14 +1815,13 @@ impl RtioSignal for UvSignal {} impl Drop for UvSignal { fn drop(&mut self) { - do self.home_for_io_with_sched |self_, scheduler| { - uvdebug!("closing UvSignal"); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self_.watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + let (_m, scheduler) = self.fire_homing_missile_sched(); + uvdebug!("closing UvSignal"); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do self.watcher.close { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task_cell.take()); } } } From 24b42234185427c5141b03103d8ce640538ba500 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 1 Nov 2013 09:36:21 -0700 Subject: [PATCH 04/27] Migrate uv timer bindings away from ~fn() --- src/librustuv/lib.rs | 41 ++++++++++++- src/librustuv/macros.rs | 5 ++ src/librustuv/timer.rs | 131 ++++++++++++++++++++++++++++++---------- src/librustuv/uvio.rs | 91 ++-------------------------- src/librustuv/uvll.rs | 5 ++ src/rt/rust_uv.cpp | 5 -- 6 files changed, 151 insertions(+), 127 deletions(-) diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 6aa8723a4017d..3d0ea4e6d1b89 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -45,6 +45,7 @@ via `close` and `delete` methods. #[feature(macro_rules, globs)]; +use std::cast; use std::str::raw::from_c_str; use std::vec; use std::ptr; @@ -119,6 +120,42 @@ pub trait NativeHandle { fn native_handle(&self) -> T; } +/// A type that wraps a uv handle +pub trait UvHandle { + fn uv_handle(&self) -> *T; + + // FIXME(#8888) dummy self + fn alloc(_: Option, ty: uvll::uv_handle_type) -> *T { + unsafe { + let handle = uvll::malloc_handle(ty); + assert!(!handle.is_null()); + handle as *T + } + } + + unsafe fn from_uv_handle<'a>(h: &'a *T) -> &'a mut Self { + cast::transmute(uvll::get_data_for_uv_handle(*h)) + } + + fn install(~self) -> ~Self { + unsafe { + let myptr = cast::transmute::<&~Self, *u8>(&self); + uvll::set_data_for_uv_handle(self.uv_handle(), myptr); + } + self + } + + fn close_async_(&mut self) { + // we used malloc to allocate all handles, so we must always have at + // least a callback to free all the handles we allocated. 
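        // Editorial sketch (not an added hunk): the lifecycle this trait sets
        // up mirrors what the timer binding in this patch does:
        //
        //     let handle = UvHandle::alloc(None::<TimerWatcher>, uvll::UV_TIMER);
        //     // ... uv_timer_init(loop, handle) ...
        //     let me = ~TimerWatcher { handle: handle, /* ... */ };
        //     me.install();   // stash the box's address in the handle's data field
        //
        // and a C callback later recovers the Rust object with
        //
        //     let me: &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) };
        //
        // `close_async_` is the matching teardown: `close_cb` below frees the
        // malloc'd uv handle once libuv has finished closing it.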
+ extern fn close_cb(handle: *uvll::uv_handle_t) { + unsafe { uvll::free_handle(handle) } + } + + unsafe { uvll::close(self.uv_handle(), close_cb) } + } +} + impl Loop { pub fn new() -> Loop { let handle = unsafe { uvll::loop_new() }; @@ -367,7 +404,7 @@ pub fn empty_buf() -> Buf { /// Borrow a slice to a Buf pub fn slice_to_uv_buf(v: &[u8]) -> Buf { let data = vec::raw::to_ptr(v); - unsafe { uvll::uv_buf_init(data as *c_char, v.len() as c_uint) } + uvll::uv_buf_t { base: data, len: v.len() as uvll::uv_buf_len_t } } // XXX: Do these conversions without copying @@ -383,7 +420,7 @@ pub fn vec_to_uv_buf(v: ~[u8]) -> Buf { let data = data as *mut u8; ptr::copy_memory(data, b, l) } - uvll::uv_buf_init(data as *c_char, v.len() as c_uint) + uvll::uv_buf_t { base: data, len: v.len() as uvll::uv_buf_len_t } } } diff --git a/src/librustuv/macros.rs b/src/librustuv/macros.rs index cbbed316d83df..90b8263da79fd 100644 --- a/src/librustuv/macros.rs +++ b/src/librustuv/macros.rs @@ -27,6 +27,11 @@ macro_rules! uvdebug ( }) ) +// get a handle for the current scheduler +macro_rules! get_handle_to_current_scheduler( + () => (do Local::borrow |sched: &mut Scheduler| { sched.make_handle() }) +) + pub fn dumb_println(args: &fmt::Arguments) { use std::rt::io::native::stdio::stderr; use std::rt::io::Writer; diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 4fc4934bf650a..f89a6c5e5c57f 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -8,58 +8,123 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::cell::Cell; +use std::comm::{oneshot, stream, PortOne, ChanOne}; use std::libc::c_int; +use std::rt::BlockedTask; +use std::rt::local::Local; +use std::rt::rtio::RtioTimer; +use std::rt::sched::{Scheduler, SchedHandle}; use uvll; -use super::{Watcher, Loop, NativeHandle, TimerCallback, status_to_maybe_uv_error}; +use super::{Loop, NativeHandle, UvHandle}; +use uvio::HomingIO; -pub struct TimerWatcher(*uvll::uv_timer_t); -impl Watcher for TimerWatcher { } +pub struct TimerWatcher { + handle: *uvll::uv_timer_t, + home: SchedHandle, + action: Option, +} + +pub enum NextAction { + WakeTask(BlockedTask), + SendOnce(ChanOne<()>), + SendMany(Chan<()>), +} impl TimerWatcher { - pub fn new(loop_: &mut Loop) -> TimerWatcher { - unsafe { - let handle = uvll::malloc_handle(uvll::UV_TIMER); - assert!(handle.is_not_null()); - assert!(0 == uvll::uv_timer_init(loop_.native_handle(), handle)); - let mut watcher: TimerWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher; - } + pub fn new(loop_: &mut Loop) -> ~TimerWatcher { + let handle = UvHandle::alloc(None::, uvll::UV_TIMER); + assert_eq!(unsafe { + uvll::timer_init(loop_.native_handle(), handle) + }, 0); + let me = ~TimerWatcher { + handle: handle, + action: None, + home: get_handle_to_current_scheduler!(), + }; + return me.install(); } - pub fn start(&mut self, timeout: u64, repeat: u64, cb: TimerCallback) { - { - let data = self.get_watcher_data(); - data.timer_cb = Some(cb); - } + fn start(&mut self, msecs: u64, period: u64) { + assert_eq!(unsafe { + uvll::timer_start(self.handle, timer_cb, msecs, period) + }, 0) + } + + fn stop(&mut self) { + assert_eq!(unsafe { uvll::timer_stop(self.handle) }, 0) + } +} + +impl HomingIO for TimerWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl UvHandle for TimerWatcher { + fn uv_handle(&self) -> *uvll::uv_timer_t { self.handle } +} - unsafe { - 
uvll::uv_timer_start(self.native_handle(), timer_cb, timeout, repeat); +impl RtioTimer for TimerWatcher { + fn sleep(&mut self, msecs: u64) { + do self.home_for_io_with_sched |self_, scheduler| { + do scheduler.deschedule_running_task_and_then |_sched, task| { + self_.action = Some(WakeTask(task)); + self_.start(msecs, 0); + } + self_.stop(); } + } + + fn oneshot(&mut self, msecs: u64) -> PortOne<()> { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); - extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) { - let mut watcher: TimerWatcher = NativeHandle::from_native_handle(handle); - let data = watcher.get_watcher_data(); - let cb = data.timer_cb.get_ref(); - let status = status_to_maybe_uv_error(status); - (*cb)(watcher, status); + do self.home_for_io |self_| { + self_.action = Some(SendOnce(chan.take())); + self_.start(msecs, 0); } + + return port; } - pub fn stop(&mut self) { - unsafe { - uvll::uv_timer_stop(self.native_handle()); + fn period(&mut self, msecs: u64) -> Port<()> { + let (port, chan) = stream(); + let chan = Cell::new(chan); + + do self.home_for_io |self_| { + self_.action = Some(SendMany(chan.take())); + self_.start(msecs, msecs); } + + return port; } } -impl NativeHandle<*uvll::uv_timer_t> for TimerWatcher { - fn from_native_handle(handle: *uvll::uv_timer_t) -> TimerWatcher { - TimerWatcher(handle) +extern fn timer_cb(handle: *uvll::uv_timer_t, _status: c_int) { + let handle = handle as *uvll::uv_handle_t; + let foo: &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; + + match foo.action.take_unwrap() { + WakeTask(task) => { + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(task); + } + SendOnce(chan) => chan.send(()), + SendMany(chan) => { + chan.send(()); + foo.action = Some(SendMany(chan)); + } } - fn native_handle(&self) -> *uvll::uv_idle_t { - match self { &TimerWatcher(ptr) => ptr } +} + +impl Drop for TimerWatcher { + fn drop(&mut self) { + do self.home_for_io |self_| { + self_.action = None; + self_.stop(); + self_.close_async_(); + } } } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index bf8358070dcdf..5e67e79c020f6 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -12,8 +12,8 @@ use std::c_str::CString; use std::cast::transmute; use std::cast; use std::cell::Cell; -use std::comm::{SendDeferred, SharedChan, Port, PortOne, GenericChan}; -use std::libc; +use std::clone::Clone; +use std::comm::{SendDeferred, SharedChan, GenericChan}; use std::libc::{c_int, c_uint, c_void, pid_t}; use std::ptr; use std::str; @@ -49,7 +49,7 @@ use addrinfo::{GetAddrInfoRequest, accum_addrinfo}; // XXX we should not be calling uvll functions in here. -trait HomingIO { +pub trait HomingIO { fn home<'r>(&'r mut self) -> &'r mut SchedHandle; @@ -135,11 +135,6 @@ impl Drop for HomingMissile { } } -// get a handle for the current scheduler -macro_rules! 
get_handle_to_current_scheduler( - () => (do Local::borrow |sched: &mut Scheduler| { sched.make_handle() }) -) - enum SocketNameKind { TcpPeer, Tcp, @@ -581,9 +576,7 @@ impl IoFactory for UvIoFactory { } fn timer_init(&mut self) -> Result<~RtioTimer, IoError> { - let watcher = TimerWatcher::new(self.uv_loop()); - let home = get_handle_to_current_scheduler!(); - Ok(~UvTimer::new(watcher, home) as ~RtioTimer) + Ok(TimerWatcher::new(self.uv_loop()) as ~RtioTimer) } fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>, @@ -1365,82 +1358,6 @@ impl RtioUdpSocket for UvUdpSocket { } } -pub struct UvTimer { - priv watcher: timer::TimerWatcher, - priv home: SchedHandle, -} - -impl HomingIO for UvTimer { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl UvTimer { - fn new(w: timer::TimerWatcher, home: SchedHandle) -> UvTimer { - UvTimer { watcher: w, home: home } - } -} - -impl Drop for UvTimer { - fn drop(&mut self) { - let (_m, scheduler) = self.fire_homing_missile_sched(); - uvdebug!("closing UvTimer"); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self.watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } -} - -impl RtioTimer for UvTimer { - fn sleep(&mut self, msecs: u64) { - let (_m, scheduler) = self.fire_homing_missile_sched(); - do scheduler.deschedule_running_task_and_then |_sched, task| { - uvdebug!("sleep: entered scheduler context"); - let task_cell = Cell::new(task); - do self.watcher.start(msecs, 0) |_, status| { - assert!(status.is_none()); - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - self.watcher.stop(); - } - - fn oneshot(&mut self, msecs: u64) -> PortOne<()> { - use std::comm::oneshot; - - let (port, chan) = oneshot(); - let chan = Cell::new(chan); - let _m = self.fire_homing_missile(); - do self.watcher.start(msecs, 0) |_, status| { - assert!(status.is_none()); - assert!(!chan.is_empty()); - chan.take().send_deferred(()); - } - - return port; - } - - fn period(&mut self, msecs: u64) -> Port<()> { - use std::comm::stream; - - let (port, chan) = stream(); - let chan = Cell::new(chan); - let _m = self.fire_homing_missile(); - do self.watcher.start(msecs, msecs) |_, status| { - assert!(status.is_none()); - do chan.with_ref |chan| { - chan.send_deferred(()); - } - } - - return port; - } -} - pub struct UvFileStream { priv loop_: Loop, priv fd: c_int, diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 120a69fb24498..3028546972fe6 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -80,6 +80,11 @@ pub static STDIO_INHERIT_STREAM: c_int = 0x04; pub static STDIO_READABLE_PIPE: c_int = 0x10; pub static STDIO_WRITABLE_PIPE: c_int = 0x20; +#[cfg(unix)] +pub type uv_buf_len_t = libc::size_t; +#[cfg(windows)] +pub type uv_buf_len_t = u32; + // see libuv/include/uv-unix.h #[cfg(unix)] pub struct uv_buf_t { diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index 09aa806891ace..2745c6062e6e4 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -135,11 +135,6 @@ rust_uv_get_stream_handle_from_write_req(uv_write_t* write_req) { return write_req->handle; } -extern "C" void -rust_uv_buf_init(uv_buf_t* out_buf, char* base, size_t len) { - *out_buf = uv_buf_init(base, len); -} - extern "C" uv_loop_t* rust_uv_get_loop_for_uv_handle(uv_handle_t* handle) { return handle->loop; From 
ceab326e82dfba2f3cd513926c023dea1af4b1c2 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 1 Nov 2013 10:26:43 -0700 Subject: [PATCH 05/27] Migrate uv process bindings away from ~fn() --- src/librustuv/lib.rs | 11 +-- src/librustuv/process.rs | 166 +++++++++++++++++++++++++-------------- src/librustuv/timer.rs | 6 +- src/librustuv/uvio.rs | 132 ++----------------------------- 4 files changed, 115 insertions(+), 200 deletions(-) diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 3d0ea4e6d1b89..66abca5924f2f 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -139,8 +139,8 @@ pub trait UvHandle { fn install(~self) -> ~Self { unsafe { - let myptr = cast::transmute::<&~Self, *u8>(&self); - uvll::set_data_for_uv_handle(self.uv_handle(), myptr); + let myptr = cast::transmute::<&~Self, &*u8>(&self); + uvll::set_data_for_uv_handle(self.uv_handle(), *myptr); } self } @@ -188,9 +188,6 @@ pub type NullCallback = ~fn(); pub type IdleCallback = ~fn(IdleWatcher, Option); pub type ConnectionCallback = ~fn(StreamWatcher, Option); pub type FsCallback = ~fn(&mut FsRequest, Option); -// first int is exit_status, second is term_signal -pub type ExitCallback = ~fn(Process, int, int, Option); -pub type TimerCallback = ~fn(TimerWatcher, Option); pub type AsyncCallback = ~fn(AsyncWatcher, Option); pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, SocketAddr, uint, Option); pub type UdpSendCallback = ~fn(UdpWatcher, Option); @@ -206,11 +203,9 @@ struct WatcherData { close_cb: Option, alloc_cb: Option, idle_cb: Option, - timer_cb: Option, async_cb: Option, udp_recv_cb: Option, udp_send_cb: Option, - exit_cb: Option, signal_cb: Option, } @@ -242,11 +237,9 @@ impl> WatcherInterop for W { close_cb: None, alloc_cb: None, idle_cb: None, - timer_cb: None, async_cb: None, udp_recv_cb: None, udp_send_cb: None, - exit_cb: None, signal_cb: None, }; let data = transmute::<~WatcherData, *c_void>(data); diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index ce281b656d39f..96b08b3f88b6e 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -9,58 +9,42 @@ // except according to those terms. use std::cell::Cell; +use std::libc::c_int; use std::libc; use std::ptr; -use std::vec; +use std::rt::BlockedTask; +use std::rt::io::IoError; use std::rt::io::process::*; +use std::rt::local::Local; +use std::rt::rtio::RtioProcess; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::vec; -use super::{Watcher, Loop, NativeHandle, UvError}; -use super::{status_to_maybe_uv_error, ExitCallback}; -use uvio::{UvPipeStream, UvUnboundPipe}; +use super::{Loop, NativeHandle, UvHandle, UvError, uv_error_to_io_error}; +use uvio::{HomingIO, UvPipeStream, UvUnboundPipe}; use uvll; -/// A process wraps the handle of the underlying uv_process_t. -pub struct Process(*uvll::uv_process_t); +pub struct Process { + handle: *uvll::uv_process_t, + home: SchedHandle, -impl Watcher for Process {} + /// Task to wake up (may be null) for when the process exits + to_wake: Option, -impl Process { - /// Creates a new process, ready to spawn inside an event loop - pub fn new() -> Process { - let handle = unsafe { uvll::malloc_handle(uvll::UV_PROCESS) }; - assert!(handle.is_not_null()); - let mut ret: Process = NativeHandle::from_native_handle(handle); - ret.install_watcher_data(); - return ret; - } + /// Collected from the exit_cb + exit_status: Option, + term_signal: Option, +} +impl Process { /// Spawn a new process inside the specified event loop. 
/// - /// The `config` variable will be passed down to libuv, and the `exit_cb` - /// will be run only once, when the process exits. - /// /// Returns either the corresponding process object or an error which /// occurred. - pub fn spawn(&mut self, loop_: &Loop, config: ProcessConfig, - exit_cb: ExitCallback) - -> Result<~[Option<~UvPipeStream>], UvError> + pub fn spawn(loop_: &Loop, config: ProcessConfig) + -> Result<(~Process, ~[Option<~UvPipeStream>]), UvError> { let cwd = config.cwd.map(|s| s.to_c_str()); - - extern fn on_exit(p: *uvll::uv_process_t, - exit_status: libc::c_int, - term_signal: libc::c_int) { - let mut p: Process = NativeHandle::from_native_handle(p); - let err = match exit_status { - 0 => None, - _ => status_to_maybe_uv_error(-1) - }; - p.get_watcher_data().exit_cb.take_unwrap()(p, - exit_status as int, - term_signal as int, - err); - } - let io = config.io; let mut stdio = vec::with_capacity::(io.len()); let mut ret_io = vec::with_capacity(io.len()); @@ -73,7 +57,6 @@ impl Process { } } - let exit_cb = Cell::new(exit_cb); let ret_io = Cell::new(ret_io); do with_argv(config.program, config.args) |argv| { do with_env(config.env) |envp| { @@ -93,34 +76,47 @@ impl Process { gid: 0, }; + let handle = UvHandle::alloc(None::, uvll::UV_PROCESS); match unsafe { - uvll::uv_spawn(loop_.native_handle(), **self, options) + uvll::uv_spawn(loop_.native_handle(), handle, options) } { 0 => { - (*self).get_watcher_data().exit_cb = Some(exit_cb.take()); - Ok(ret_io.take()) + let process = ~Process { + handle: handle, + home: get_handle_to_current_scheduler!(), + to_wake: None, + exit_status: None, + term_signal: None, + }; + Ok((process.install(), ret_io.take())) + } + err => { + unsafe { uvll::free_handle(handle) } + Err(UvError(err)) } - err => Err(UvError(err)) } } } } +} - /// Sends a signal to this process. 
- /// - /// This is a wrapper around `uv_process_kill` - pub fn kill(&self, signum: int) -> Result<(), UvError> { - match unsafe { - uvll::uv_process_kill(self.native_handle(), signum as libc::c_int) - } { - 0 => Ok(()), - err => Err(UvError(err)) +extern fn on_exit(handle: *uvll::uv_process_t, + exit_status: libc::c_int, + term_signal: libc::c_int) { + let handle = handle as *uvll::uv_handle_t; + let p: &mut Process = unsafe { UvHandle::from_uv_handle(&handle) }; + + assert!(p.exit_status.is_none()); + assert!(p.term_signal.is_none()); + p.exit_status = Some(exit_status as int); + p.term_signal = Some(term_signal as int); + + match p.to_wake.take() { + Some(task) => { + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task); } - } - - /// Returns the process id of a spawned process - pub fn pid(&self) -> libc::pid_t { - unsafe { uvll::process_pid(**self) as libc::pid_t } + None => {} } } @@ -192,11 +188,59 @@ fn with_env(env: Option<&[(~str, ~str)]>, f: &fn(**libc::c_char) -> T) -> T { c_envp.as_imm_buf(|buf, _| f(buf)) } -impl NativeHandle<*uvll::uv_process_t> for Process { - fn from_native_handle(handle: *uvll::uv_process_t) -> Process { - Process(handle) +impl HomingIO for Process { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl UvHandle for Process { + fn uv_handle(&self) -> *uvll::uv_process_t { self.handle } +} + +impl RtioProcess for Process { + fn id(&self) -> libc::pid_t { + unsafe { uvll::process_pid(self.handle) as libc::pid_t } + } + + fn kill(&mut self, signal: int) -> Result<(), IoError> { + do self.home_for_io |self_| { + match unsafe { + uvll::process_kill(self_.handle, signal as libc::c_int) + } { + 0 => Ok(()), + err => Err(uv_error_to_io_error(UvError(err))) + } + } + } + + fn wait(&mut self) -> int { + // Make sure (on the home scheduler) that we have an exit status listed + do self.home_for_io |self_| { + match self_.exit_status { + Some(*) => {} + None => { + // If there's no exit code previously listed, then the + // process's exit callback has yet to be invoked. We just + // need to deschedule ourselves and wait to be reawoken. 
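The handshake described in the comment above, where the exit callback records a status and the waiter parks until it arrives, maps onto ordinary synchronization primitives. The following is a minimal sketch in modern Rust, not part of the patch, with all names hypothetical; a Mutex plus Condvar stands in for the scheduler's deschedule/reawaken machinery, and a spawned thread stands in for on_exit:

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;
    use std::time::Duration;

    // Hypothetical stand-in for the Process fields: the recorded exit status
    // plus a way to park the waiter until the exit callback has fired.
    struct ExitState {
        status: Mutex<Option<i32>>,
        exited: Condvar,
    }

    fn main() {
        let state = Arc::new(ExitState {
            status: Mutex::new(None),
            exited: Condvar::new(),
        });

        // Plays the role of on_exit(): record the status, then wake the waiter.
        let cb_state = Arc::clone(&state);
        let exit_cb = thread::spawn(move || {
            thread::sleep(Duration::from_millis(50));
            *cb_state.status.lock().unwrap() = Some(0);
            cb_state.exited.notify_one();
        });

        // Plays the role of wait(): if no status has been recorded yet, block
        // until the exit callback has run, then read the status.
        let mut status = state.status.lock().unwrap();
        while status.is_none() {
            status = state.exited.wait(status).unwrap();
        }
        println!("exit status: {}", status.unwrap());

        exit_cb.join().unwrap();
    }

Looping on the condition guards against spurious wakeups, which is the condvar analogue of re-asserting that exit_status is set after being reawoken.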
+ let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + assert!(self_.to_wake.is_none()); + self_.to_wake = Some(task); + } + assert!(self_.exit_status.is_some()); + } + } + } + + // FIXME(#10109): this is wrong + self.exit_status.unwrap() } - fn native_handle(&self) -> *uvll::uv_process_t { - match self { &Process(ptr) => ptr } +} + +impl Drop for Process { + fn drop(&mut self) { + do self.home_for_io |self_| { + assert!(self_.to_wake.is_none()); + self_.close_async_(); + } } } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index f89a6c5e5c57f..f4f2563f0b9e8 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -103,9 +103,9 @@ impl RtioTimer for TimerWatcher { extern fn timer_cb(handle: *uvll::uv_timer_t, _status: c_int) { let handle = handle as *uvll::uv_handle_t; - let foo: &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; + let timer : &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; - match foo.action.take_unwrap() { + match timer.action.take_unwrap() { WakeTask(task) => { let sched: ~Scheduler = Local::take(); sched.resume_blocked_task_immediately(task); @@ -113,7 +113,7 @@ extern fn timer_cb(handle: *uvll::uv_timer_t, _status: c_int) { SendOnce(chan) => chan.send(()), SendMany(chan) => { chan.send(()); - foo.action = Some(SendMany(chan)); + timer.action = Some(SendMany(chan)); } } } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 5e67e79c020f6..226507ff09a43 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -23,7 +23,6 @@ use std::rt::io::net::ip::{SocketAddr, IpAddr}; use std::rt::io::{standard_error, OtherIoError, SeekStyle, SeekSet, SeekCur, SeekEnd}; use std::rt::io::process::ProcessConfig; -use std::rt::BlockedTask; use std::rt::local::Local; use std::rt::rtio::*; use std::rt::sched::{Scheduler, SchedHandle}; @@ -772,54 +771,12 @@ impl IoFactory for UvIoFactory { fn spawn(&mut self, config: ProcessConfig) -> Result<(~RtioProcess, ~[Option<~RtioPipe>]), IoError> { - // Sadly, we must create the UvProcess before we actually call uv_spawn - // so that the exit_cb can close over it and notify it when the process - // has exited. - let mut ret = ~UvProcess { - process: Process::new(), - home: None, - exit_status: None, - term_signal: None, - exit_error: None, - descheduled: None, - }; - let ret_ptr = unsafe { - *cast::transmute::<&~UvProcess, &*mut UvProcess>(&ret) - }; - - // The purpose of this exit callback is to record the data about the - // exit and then wake up the task which may be waiting for the process - // to exit. This is all performed in the current io-loop, and the - // implementation of UvProcess ensures that reading these fields always - // occurs on the current io-loop. - let exit_cb: ExitCallback = |_, exit_status, term_signal, error| { - unsafe { - assert!((*ret_ptr).exit_status.is_none()); - (*ret_ptr).exit_status = Some(exit_status); - (*ret_ptr).term_signal = Some(term_signal); - (*ret_ptr).exit_error = error; - match (*ret_ptr).descheduled.take() { - Some(task) => { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task); - } - None => {} - } - } - }; - - match ret.process.spawn(self.uv_loop(), config, exit_cb) { - Ok(io) => { - // Only now do we actually get a handle to this scheduler. 
- ret.home = Some(get_handle_to_current_scheduler!()); - Ok((ret as ~RtioProcess, - io.move_iter().map(|p| p.map(|p| p as ~RtioPipe)).collect())) - } - Err(uverr) => { - // We still need to close the process handle we created, but - // that's taken care for us in the destructor of UvProcess - Err(uv_error_to_io_error(uverr)) + match Process::spawn(self.uv_loop(), config) { + Ok((p, io)) => { + Ok((p as ~RtioProcess, + io.move_iter().map(|i| i.map(|p| p as ~RtioPipe)).collect())) } + Err(e) => Err(uv_error_to_io_error(e)), } } @@ -1511,85 +1468,6 @@ impl RtioFileStream for UvFileStream { } } -pub struct UvProcess { - priv process: process::Process, - - // Sadly, this structure must be created before we return it, so in that - // brief interim the `home` is None. - priv home: Option, - - // All None until the process exits (exit_error may stay None) - priv exit_status: Option, - priv term_signal: Option, - priv exit_error: Option, - - // Used to store which task to wake up from the exit_cb - priv descheduled: Option, -} - -impl HomingIO for UvProcess { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.home.get_mut_ref() } -} - -impl Drop for UvProcess { - fn drop(&mut self) { - let close = |self_: &mut UvProcess| { - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task = Cell::new(task); - do self_.process.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task.take()); - } - } - }; - - // If home is none, then this process never actually successfully - // spawned, so there's no need to switch event loops - if self.home.is_none() { - close(self) - } else { - let _m = self.fire_homing_missile(); - close(self) - } - } -} - -impl RtioProcess for UvProcess { - fn id(&self) -> pid_t { - self.process.pid() - } - - fn kill(&mut self, signal: int) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - match self.process.kill(signal) { - Ok(()) => Ok(()), - Err(uverr) => Err(uv_error_to_io_error(uverr)) - } - } - - fn wait(&mut self) -> int { - // Make sure (on the home scheduler) that we have an exit status listed - let _m = self.fire_homing_missile(); - match self.exit_status { - Some(*) => {} - None => { - // If there's no exit code previously listed, then the - // process's exit callback has yet to be invoked. We just - // need to deschedule ourselves and wait to be reawoken. 
- let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - assert!(self.descheduled.is_none()); - self.descheduled = Some(task); - } - assert!(self.exit_status.is_some()); - } - } - - self.exit_status.unwrap() - } -} - pub struct UvUnixListener { priv inner: UvUnboundPipe } From 9286d5113d843e65fb13ff0cf142c1bfb10124f7 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 1 Nov 2013 11:13:22 -0700 Subject: [PATCH 06/27] Migrate uv signal handling away from ~fn() --- src/librustuv/lib.rs | 2 - src/librustuv/process.rs | 1 - src/librustuv/signal.rs | 95 +++++++++++++++++++++------------------- src/librustuv/timer.rs | 3 +- src/librustuv/uvio.rs | 42 ++---------------- 5 files changed, 56 insertions(+), 87 deletions(-) diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 66abca5924f2f..487c007658083 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -191,7 +191,6 @@ pub type FsCallback = ~fn(&mut FsRequest, Option); pub type AsyncCallback = ~fn(AsyncWatcher, Option); pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, SocketAddr, uint, Option); pub type UdpSendCallback = ~fn(UdpWatcher, Option); -pub type SignalCallback = ~fn(SignalWatcher, Signum); /// Callbacks used by StreamWatchers, set as custom data on the foreign handle. @@ -206,7 +205,6 @@ struct WatcherData { async_cb: Option, udp_recv_cb: Option, udp_send_cb: Option, - signal_cb: Option, } pub trait WatcherInterop { diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 96b08b3f88b6e..fd35f9e494e87 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -103,7 +103,6 @@ impl Process { extern fn on_exit(handle: *uvll::uv_process_t, exit_status: libc::c_int, term_signal: libc::c_int) { - let handle = handle as *uvll::uv_handle_t; let p: &mut Process = unsafe { UvHandle::from_uv_handle(&handle) }; assert!(p.exit_status.is_none()); diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index d5774b5aaab35..c195f48022735 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -8,65 +8,72 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cast; use std::libc::c_int; use std::rt::io::signal::Signum; +use std::rt::sched::{SchedHandle, Scheduler}; +use std::comm::{SharedChan, SendDeferred}; +use std::rt::local::Local; +use std::rt::rtio::RtioSignal; -use super::{Loop, NativeHandle, SignalCallback, UvError, Watcher}; +use super::{Loop, UvError, UvHandle}; use uvll; +use uvio::HomingIO; -pub struct SignalWatcher(*uvll::uv_signal_t); +pub struct SignalWatcher { + handle: *uvll::uv_signal_t, + home: SchedHandle, -impl Watcher for SignalWatcher { } + channel: SharedChan, + signal: Signum, +} impl SignalWatcher { - pub fn new(loop_: &mut Loop) -> SignalWatcher { - unsafe { - let handle = uvll::malloc_handle(uvll::UV_SIGNAL); - assert!(handle.is_not_null()); - assert!(0 == uvll::uv_signal_init(loop_.native_handle(), handle)); - let mut watcher: SignalWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher; - } - } + pub fn new(loop_: &mut Loop, signum: Signum, + channel: SharedChan) -> Result<~SignalWatcher, UvError> { + let handle = UvHandle::alloc(None::, uvll::UV_SIGNAL); + assert_eq!(unsafe { + uvll::signal_init(loop_.native_handle(), handle) + }, 0); - pub fn start(&mut self, signum: Signum, callback: SignalCallback) - -> Result<(), UvError> - { - return unsafe { - match uvll::uv_signal_start(self.native_handle(), signal_cb, - signum as c_int) { - 0 => { - let data = self.get_watcher_data(); - data.signal_cb = Some(callback); - Ok(()) - } - n => Err(UvError(n)), + match unsafe { uvll::signal_start(handle, signal_cb, signum as c_int) } { + 0 => { + let s = ~SignalWatcher { + handle: handle, + home: get_handle_to_current_scheduler!(), + channel: channel, + signal: signum, + }; + Ok(s.install()) + } + n => { + unsafe { uvll::free_handle(handle) } + Err(UvError(n)) } - }; - - extern fn signal_cb(handle: *uvll::uv_signal_t, signum: c_int) { - let mut watcher: SignalWatcher = NativeHandle::from_native_handle(handle); - let data = watcher.get_watcher_data(); - let cb = data.signal_cb.get_ref(); - (*cb)(watcher, unsafe { cast::transmute(signum as int) }); } - } - pub fn stop(&mut self) { - unsafe { - uvll::uv_signal_stop(self.native_handle()); - } } } -impl NativeHandle<*uvll::uv_signal_t> for SignalWatcher { - fn from_native_handle(handle: *uvll::uv_signal_t) -> SignalWatcher { - SignalWatcher(handle) - } +extern fn signal_cb(handle: *uvll::uv_signal_t, signum: c_int) { + let s: &mut SignalWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; + assert_eq!(signum as int, s.signal as int); + s.channel.send_deferred(s.signal); +} + +impl HomingIO for SignalWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} - fn native_handle(&self) -> *uvll::uv_signal_t { - match self { &SignalWatcher(ptr) => ptr } +impl UvHandle for SignalWatcher { + fn uv_handle(&self) -> *uvll::uv_signal_t { self.handle } +} + +impl RtioSignal for SignalWatcher {} + +impl Drop for SignalWatcher { + fn drop(&mut self) { + do self.home_for_io |self_| { + self_.close_async_(); + } } } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index f4f2563f0b9e8..956699c5c2e89 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -102,8 +102,7 @@ impl RtioTimer for TimerWatcher { } extern fn timer_cb(handle: *uvll::uv_timer_t, _status: c_int) { - let handle = handle as *uvll::uv_handle_t; - let timer : &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; + let timer: &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; match 
timer.action.take_unwrap() { WakeTask(task) => { diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 226507ff09a43..dc8793c285b80 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -13,8 +13,8 @@ use std::cast::transmute; use std::cast; use std::cell::Cell; use std::clone::Clone; -use std::comm::{SendDeferred, SharedChan, GenericChan}; -use std::libc::{c_int, c_uint, c_void, pid_t}; +use std::comm::{SharedChan, GenericChan}; +use std::libc::{c_int, c_uint, c_void}; use std::ptr; use std::str; use std::rt::io; @@ -841,11 +841,8 @@ impl IoFactory for UvIoFactory { fn signal(&mut self, signum: Signum, channel: SharedChan) -> Result<~RtioSignal, IoError> { - let watcher = SignalWatcher::new(self.uv_loop()); - let home = get_handle_to_current_scheduler!(); - let mut signal = ~UvSignal::new(watcher, home); - match signal.watcher.start(signum, |_, _| channel.send_deferred(signum)) { - Ok(()) => Ok(signal as ~RtioSignal), + match SignalWatcher::new(self.uv_loop(), signum, channel) { + Ok(s) => Ok(s as ~RtioSignal), Err(e) => Err(uv_error_to_io_error(e)), } } @@ -1591,37 +1588,6 @@ impl RtioUnixAcceptor for UvUnixAcceptor { } } -pub struct UvSignal { - watcher: signal::SignalWatcher, - home: SchedHandle, -} - -impl HomingIO for UvSignal { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl UvSignal { - fn new(w: signal::SignalWatcher, home: SchedHandle) -> UvSignal { - UvSignal { watcher: w, home: home } - } -} - -impl RtioSignal for UvSignal {} - -impl Drop for UvSignal { - fn drop(&mut self) { - let (_m, scheduler) = self.fire_homing_missile_sched(); - uvdebug!("closing UvSignal"); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self.watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } -} - // this function is full of lies unsafe fn local_io() -> &'static mut IoFactory { do Local::borrow |sched: &mut Scheduler| { From 28219fc679e6c2f747ad3e49eb746a383797ef9b Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 4 Nov 2013 12:45:05 -0800 Subject: [PATCH 07/27] Remove usage of ~fn() from uv async/idle --- src/librustuv/async.rs | 162 +++++++++++++++++++++++++++++------- src/librustuv/idle.rs | 114 ++++++++++++++++---------- src/librustuv/lib.rs | 39 +++++++-- src/librustuv/uvio.rs | 181 +++-------------------------------------- src/libstd/rt/basic.rs | 25 +++--- src/libstd/rt/rtio.rs | 10 ++- src/libstd/rt/sched.rs | 14 +++- 7 files changed, 279 insertions(+), 266 deletions(-) diff --git a/src/librustuv/async.rs b/src/librustuv/async.rs index 79e57db1bf591..27f6439a12d47 100644 --- a/src/librustuv/async.rs +++ b/src/librustuv/async.rs @@ -8,51 +8,155 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::cast; use std::libc::c_int; +use std::rt::rtio::{Callback, RemoteCallback}; +use std::unstable::sync::Exclusive; use uvll; -use super::{Watcher, Loop, NativeHandle, AsyncCallback, WatcherInterop}; -use super::status_to_maybe_uv_error; +use super::{Loop, UvHandle}; -pub struct AsyncWatcher(*uvll::uv_async_t); -impl Watcher for AsyncWatcher { } +// The entire point of async is to call into a loop from other threads so it +// does not need to home. +pub struct AsyncWatcher { + handle: *uvll::uv_async_t, + + // A flag to tell the callback to exit, set from the dtor. This is + // almost never contested - only in rare races with the dtor. 
+ exit_flag: Exclusive +} + +struct Payload { + callback: ~Callback, + exit_flag: Exclusive, +} impl AsyncWatcher { - pub fn new(loop_: &mut Loop, cb: AsyncCallback) -> AsyncWatcher { + pub fn new(loop_: &mut Loop, cb: ~Callback) -> AsyncWatcher { + let handle = UvHandle::alloc(None::, uvll::UV_ASYNC); + assert_eq!(unsafe { + uvll::async_init(loop_.native_handle(), handle, async_cb) + }, 0); + let flag = Exclusive::new(false); + let payload = ~Payload { callback: cb, exit_flag: flag.clone() }; unsafe { - let handle = uvll::malloc_handle(uvll::UV_ASYNC); - assert!(handle.is_not_null()); - let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - let data = watcher.get_watcher_data(); - data.async_cb = Some(cb); - assert_eq!(0, uvll::uv_async_init(loop_.native_handle(), handle, async_cb)); - return watcher; + let payload: *u8 = cast::transmute(payload); + uvll::set_data_for_uv_handle(handle, payload); } + return AsyncWatcher { handle: handle, exit_flag: flag, }; + } +} - extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) { - let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle); - let status = status_to_maybe_uv_error(status); - let data = watcher.get_watcher_data(); - let cb = data.async_cb.get_ref(); - (*cb)(watcher, status); - } +impl UvHandle for AsyncWatcher { + fn uv_handle(&self) -> *uvll::uv_async_t { self.handle } + unsafe fn from_uv_handle<'a>(h: &'a *T) -> &'a mut AsyncWatcher { + fail!("async watchers can't be built from their handles"); } +} + +extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) { + assert!(status == 0); + let payload: &mut Payload = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + + // The synchronization logic here is subtle. To review, + // the uv async handle type promises that, after it is + // triggered the remote callback is definitely called at + // least once. UvRemoteCallback needs to maintain those + // semantics while also shutting down cleanly from the + // dtor. In our case that means that, when the + // UvRemoteCallback dtor calls `async.send()`, here `f` is + // always called later. + + // In the dtor both the exit flag is set and the async + // callback fired under a lock. Here, before calling `f`, + // we take the lock and check the flag. Because we are + // checking the flag before calling `f`, and the flag is + // set under the same lock as the send, then if the flag + // is set then we're guaranteed to call `f` after the + // final send. + + // If the check was done after `f()` then there would be a + // period between that call and the check where the dtor + // could be called in the other thread, missing the final + // callback while still destroying the handle. 
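The ordering argument spelled out in the comment above (set the flag and fire the final send under one lock, while the callback checks the flag before running `f`) can be reproduced with ordinary locks. Below is a minimal illustrative sketch in modern Rust, not part of the patch: a plain Mutex stands in for Exclusive, a spawned thread stands in for the uv callback, and the actual uv_async_send / uv_close calls are left as comments:

    use std::sync::{Arc, Mutex};
    use std::thread;

    fn main() {
        // Stand-in for `exit_flag: Exclusive<bool>`.
        let exit_flag = Arc::new(Mutex::new(false));

        let flag = Arc::clone(&exit_flag);
        let callback_side = thread::spawn(move || {
            // async_cb: read the flag *before* running the user callback,
            // under the same lock the destructor holds when it sets the flag
            // and fires its final send.
            let should_exit = *flag.lock().unwrap();
            println!("callback ran, should_exit = {}", should_exit);
            if should_exit {
                // uv_close(handle, close_cb) would go here: the final send has
                // already happened, so no wakeup can be lost.
            }
        });

        // Destructor side: flip the flag and fire the final send atomically
        // with respect to the callback's check by doing both under the lock.
        {
            let mut guard = exit_flag.lock().unwrap();
            *guard = true;
            // uv_async_send(handle) would be called here, before the lock is
            // released.
        }

        callback_side.join().unwrap();
    }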
+ + let should_exit = unsafe { + payload.exit_flag.with_imm(|&should_exit| should_exit) + }; + + payload.callback.call(); + + if should_exit { + unsafe { uvll::close(handle, close_cb) } + } +} - pub fn send(&mut self) { +extern fn close_cb(handle: *uvll::uv_handle_t) { + // drop the payload + let _payload: ~Payload = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + // and then free the handle + unsafe { uvll::free_handle(handle) } +} + +impl RemoteCallback for AsyncWatcher { + fn fire(&mut self) { + unsafe { uvll::async_send(self.handle) } + } +} + +impl Drop for AsyncWatcher { + fn drop(&mut self) { unsafe { - let handle = self.native_handle(); - uvll::uv_async_send(handle); + do self.exit_flag.with |should_exit| { + // NB: These two things need to happen atomically. Otherwise + // the event handler could wake up due to a *previous* + // signal and see the exit flag, destroying the handle + // before the final send. + *should_exit = true; + uvll::async_send(self.handle) + } } } } -impl NativeHandle<*uvll::uv_async_t> for AsyncWatcher { - fn from_native_handle(handle: *uvll::uv_async_t) -> AsyncWatcher { - AsyncWatcher(handle) - } - fn native_handle(&self) -> *uvll::uv_async_t { - match self { &AsyncWatcher(ptr) => ptr } +#[cfg(test)] +mod test_remote { + use std::cell::Cell; + use std::rt::test::*; + use std::rt::thread::Thread; + use std::rt::tube::Tube; + use std::rt::rtio::EventLoop; + use std::rt::local::Local; + use std::rt::sched::Scheduler; + + #[test] + fn test_uv_remote() { + do run_in_mt_newsched_task { + let mut tube = Tube::new(); + let tube_clone = tube.clone(); + let remote_cell = Cell::new_empty(); + do Local::borrow |sched: &mut Scheduler| { + let tube_clone = tube_clone.clone(); + let tube_clone_cell = Cell::new(tube_clone); + let remote = do sched.event_loop.remote_callback { + // This could be called multiple times + if !tube_clone_cell.is_empty() { + tube_clone_cell.take().send(1); + } + }; + remote_cell.put_back(remote); + } + let thread = do Thread::start { + remote_cell.take().fire(); + }; + + assert!(tube.recv() == 1); + thread.join(); + } } } diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index 7c9b0ff461ccd..da3ddacef6bbb 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -8,70 +8,98 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
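The Payload reclaimed and freed in close_cb above, and the one-shot idle callback in the idle.rs hunk below, both rely on the same ownership round-trip: box the Rust callback, stash the raw pointer in the handle's user-data slot, and take the box back exactly once inside the extern callback so it is dropped there. A self-contained sketch of that round-trip in modern Rust, illustrative only; here the raw pointer is passed as a plain argument rather than through uv's handle->data field:

    // Box a Rust closure, hand it across as a raw pointer (the C user-data),
    // and reclaim it exactly once on the other side.
    type OnetimeCb = Box<dyn FnOnce()>;

    // Double boxing keeps the stored pointer thin, much as ~proc() was a thin
    // owned pointer in the patch.
    fn stash(cb: OnetimeCb) -> *mut OnetimeCb {
        Box::into_raw(Box::new(cb))
    }

    // Plays the role of the extern "C" onetime_cb: recover ownership, call the
    // closure once, and let the boxes drop to free the allocation.
    fn onetime_cb(data: *mut OnetimeCb) {
        // Assumed invariant: `data` came from stash() and is consumed exactly
        // once, so reconstructing the Box here is sound.
        let outer = unsafe { Box::from_raw(data) };
        let cb: OnetimeCb = *outer;
        cb();
    }

    fn main() {
        let data = stash(Box::new(|| println!("one-shot callback fired")));
        onetime_cb(data);
    }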
-use std::libc::c_int; +use std::cast; +use std::libc::{c_int, c_void}; use uvll; -use super::{Watcher, Loop, NativeHandle, IdleCallback, status_to_maybe_uv_error}; - -pub struct IdleWatcher(*uvll::uv_idle_t); -impl Watcher for IdleWatcher { } +use super::{Loop, UvHandle}; +use std::rt::rtio::{Callback, PausibleIdleCallback}; + +pub struct IdleWatcher { + handle: *uvll::uv_idle_t, + idle_flag: bool, + closed: bool, + callback: Option<~Callback>, +} impl IdleWatcher { - pub fn new(loop_: &mut Loop) -> IdleWatcher { + pub fn new(loop_: &mut Loop) -> ~IdleWatcher { + let handle = UvHandle::alloc(None::, uvll::UV_IDLE); + assert_eq!(unsafe { + uvll::idle_init(loop_.native_handle(), handle) + }, 0); + let me = ~IdleWatcher { + handle: handle, + idle_flag: false, + closed: false, + callback: None, + }; + return me.install(); + } + + pub fn onetime(loop_: &mut Loop, f: proc()) { + let handle = UvHandle::alloc(None::, uvll::UV_IDLE); unsafe { - let handle = uvll::malloc_handle(uvll::UV_IDLE); - assert!(handle.is_not_null()); - assert_eq!(uvll::uv_idle_init(loop_.native_handle(), handle), 0); - let mut watcher: IdleWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher + assert_eq!(uvll::idle_init(loop_.native_handle(), handle), 0); + let data: *c_void = cast::transmute(~f); + uvll::set_data_for_uv_handle(handle, data); + assert_eq!(uvll::idle_start(handle, onetime_cb), 0) } - } - pub fn start(&mut self, cb: IdleCallback) { - { - let data = self.get_watcher_data(); - data.idle_cb = Some(cb); + extern fn onetime_cb(handle: *uvll::uv_idle_t, status: c_int) { + assert_eq!(status, 0); + unsafe { + let data = uvll::get_data_for_uv_handle(handle); + let f: ~proc() = cast::transmute(data); + (*f)(); + uvll::idle_stop(handle); + uvll::close(handle, close_cb); + } } - unsafe { - assert_eq!(uvll::uv_idle_start(self.native_handle(), idle_cb), 0) + extern fn close_cb(handle: *uvll::uv_handle_t) { + unsafe { uvll::free_handle(handle) } } } +} - pub fn restart(&mut self) { - unsafe { - assert!(self.get_watcher_data().idle_cb.is_some()); - assert_eq!(uvll::uv_idle_start(self.native_handle(), idle_cb), 0) +impl PausibleIdleCallback for IdleWatcher { + fn start(&mut self, cb: ~Callback) { + assert!(self.callback.is_none()); + self.callback = Some(cb); + assert_eq!(unsafe { uvll::idle_start(self.handle, idle_cb) }, 0) + self.idle_flag = true; + } + fn pause(&mut self) { + if self.idle_flag == true { + assert_eq!(unsafe {uvll::idle_stop(self.handle) }, 0); + self.idle_flag = false; } } - - pub fn stop(&mut self) { - // NB: Not resetting the Rust idle_cb to None here because `stop` is - // likely called from *within* the idle callback, causing a use after - // free - - unsafe { - assert_eq!(uvll::uv_idle_stop(self.native_handle()), 0); + fn resume(&mut self) { + if self.idle_flag == false { + assert_eq!(unsafe { uvll::idle_start(self.handle, idle_cb) }, 0) + self.idle_flag = true; + } + } + fn close(&mut self) { + self.pause(); + if !self.closed { + self.closed = true; + self.close_async_(); } } } -impl NativeHandle<*uvll::uv_idle_t> for IdleWatcher { - fn from_native_handle(handle: *uvll::uv_idle_t) -> IdleWatcher { - IdleWatcher(handle) - } - fn native_handle(&self) -> *uvll::uv_idle_t { - match self { &IdleWatcher(ptr) => ptr } - } +impl UvHandle for IdleWatcher { + fn uv_handle(&self) -> *uvll::uv_idle_t { self.handle } } extern fn idle_cb(handle: *uvll::uv_idle_t, status: c_int) { - let mut idle_watcher: IdleWatcher = NativeHandle::from_native_handle(handle); - let 
data = idle_watcher.get_watcher_data(); - let cb: &IdleCallback = data.idle_cb.get_ref(); - let status = status_to_maybe_uv_error(status); - (*cb)(idle_watcher, status); + assert_eq!(status, 0); + let idle: &mut IdleWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; + assert!(idle.callback.is_some()); + idle.callback.get_mut_ref().call(); } #[cfg(test)] diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 487c007658083..f3d9bb5443bb8 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -55,7 +55,6 @@ use std::cast::transmute; use std::ptr::null; use std::unstable::finally::Finally; use std::rt::io::net::ip::SocketAddr; -use std::rt::io::signal::Signum; use std::rt::io::IoError; @@ -152,7 +151,39 @@ pub trait UvHandle { unsafe { uvll::free_handle(handle) } } - unsafe { uvll::close(self.uv_handle(), close_cb) } + unsafe { + uvll::set_data_for_uv_handle(self.uv_handle(), null::<()>()); + uvll::close(self.uv_handle(), close_cb) + } + } +} + +pub trait UvRequest { + fn uv_request(&self) -> *T; + + // FIXME(#8888) dummy self + fn alloc(_: Option, ty: uvll::uv_req_type) -> *T { + unsafe { + let handle = uvll::malloc_req(ty); + assert!(!handle.is_null()); + handle as *T + } + } + + unsafe fn from_uv_request<'a>(h: &'a *T) -> &'a mut Self { + cast::transmute(uvll::get_data_for_req(*h)) + } + + fn install(~self) -> ~Self { + unsafe { + let myptr = cast::transmute::<&~Self, &*u8>(&self); + uvll::set_data_for_req(self.uv_request(), *myptr); + } + self + } + + fn delete(&mut self) { + unsafe { uvll::free_req(self.uv_request() as *c_void) } } } @@ -185,7 +216,6 @@ impl NativeHandle<*uvll::uv_loop_t> for Loop { pub type AllocCallback = ~fn(uint) -> Buf; pub type ReadCallback = ~fn(StreamWatcher, int, Buf, Option); pub type NullCallback = ~fn(); -pub type IdleCallback = ~fn(IdleWatcher, Option); pub type ConnectionCallback = ~fn(StreamWatcher, Option); pub type FsCallback = ~fn(&mut FsRequest, Option); pub type AsyncCallback = ~fn(AsyncWatcher, Option); @@ -201,7 +231,6 @@ struct WatcherData { connect_cb: Option, close_cb: Option, alloc_cb: Option, - idle_cb: Option, async_cb: Option, udp_recv_cb: Option, udp_send_cb: Option, @@ -234,11 +263,9 @@ impl> WatcherInterop for W { connect_cb: None, close_cb: None, alloc_cb: None, - idle_cb: None, async_cb: None, udp_recv_cb: None, udp_send_cb: None, - signal_cb: None, }; let data = transmute::<~WatcherData, *c_void>(data); uvll::set_data_for_uv_handle(self.native_handle(), data); diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index dc8793c285b80..f584fa6a1485f 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -28,9 +28,12 @@ use std::rt::rtio::*; use std::rt::sched::{Scheduler, SchedHandle}; use std::rt::tube::Tube; use std::rt::task::Task; -use std::unstable::sync::Exclusive; -use std::libc::{lseek, off_t}; -use std::rt::io::{FileMode, FileAccess, FileStat}; +use std::path::{GenericPath, Path}; +use std::libc::{lseek, off_t, O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, + O_WRONLY, S_IRUSR, S_IWUSR, S_IRWXU}; +use std::rt::io::{FileMode, FileAccess, OpenOrCreate, Open, Create, + CreateOrTruncate, Append, Truncate, Read, Write, ReadWrite, + FileStat}; use std::rt::io::signal::Signum; use std::task; use ai = std::rt::io::net::addrinfo; @@ -199,27 +202,16 @@ impl EventLoop for UvEventLoop { self.uvio.uv_loop().run(); } - fn callback(&mut self, f: ~fn()) { - let mut idle_watcher = IdleWatcher::new(self.uvio.uv_loop()); - do idle_watcher.start |mut idle_watcher, status| { - assert!(status.is_none()); - 
idle_watcher.stop(); - idle_watcher.close(||()); - f(); - } + fn callback(&mut self, f: proc()) { + IdleWatcher::onetime(self.uvio.uv_loop(), f); } fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback { - let idle_watcher = IdleWatcher::new(self.uvio.uv_loop()); - ~UvPausibleIdleCallback { - watcher: idle_watcher, - idle_flag: false, - closed: false - } as ~PausibleIdleCallback + IdleWatcher::new(self.uvio.uv_loop()) as ~PausibleIdleCallback } - fn remote_callback(&mut self, f: ~fn()) -> ~RemoteCallback { - ~UvRemoteCallback::new(self.uvio.uv_loop(), f) as ~RemoteCallback + fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback { + ~AsyncWatcher::new(self.uvio.uv_loop(), f) as ~RemoteCallback } fn io<'a>(&'a mut self, f: &fn(&'a mut IoFactory)) { @@ -233,44 +225,6 @@ pub extern "C" fn new_loop() -> ~EventLoop { ~UvEventLoop::new() as ~EventLoop } -pub struct UvPausibleIdleCallback { - priv watcher: IdleWatcher, - priv idle_flag: bool, - priv closed: bool -} - -impl PausibleIdleCallback for UvPausibleIdleCallback { - #[inline] - fn start(&mut self, f: ~fn()) { - do self.watcher.start |_idle_watcher, _status| { - f(); - }; - self.idle_flag = true; - } - #[inline] - fn pause(&mut self) { - if self.idle_flag == true { - self.watcher.stop(); - self.idle_flag = false; - } - } - #[inline] - fn resume(&mut self) { - if self.idle_flag == false { - self.watcher.restart(); - self.idle_flag = true; - } - } - #[inline] - fn close(&mut self) { - self.pause(); - if !self.closed { - self.closed = true; - self.watcher.close(||{}); - } - } -} - #[test] fn test_callback_run_once() { do run_in_bare_thread { @@ -285,119 +239,6 @@ fn test_callback_run_once() { } } -// The entire point of async is to call into a loop from other threads so it does not need to home. -pub struct UvRemoteCallback { - // The uv async handle for triggering the callback - priv async: AsyncWatcher, - // A flag to tell the callback to exit, set from the dtor. This is - // almost never contested - only in rare races with the dtor. - priv exit_flag: Exclusive -} - -impl UvRemoteCallback { - pub fn new(loop_: &mut Loop, f: ~fn()) -> UvRemoteCallback { - let exit_flag = Exclusive::new(false); - let exit_flag_clone = exit_flag.clone(); - let async = do AsyncWatcher::new(loop_) |watcher, status| { - assert!(status.is_none()); - - // The synchronization logic here is subtle. To review, - // the uv async handle type promises that, after it is - // triggered the remote callback is definitely called at - // least once. UvRemoteCallback needs to maintain those - // semantics while also shutting down cleanly from the - // dtor. In our case that means that, when the - // UvRemoteCallback dtor calls `async.send()`, here `f` is - // always called later. - - // In the dtor both the exit flag is set and the async - // callback fired under a lock. Here, before calling `f`, - // we take the lock and check the flag. Because we are - // checking the flag before calling `f`, and the flag is - // set under the same lock as the send, then if the flag - // is set then we're guaranteed to call `f` after the - // final send. - - // If the check was done after `f()` then there would be a - // period between that call and the check where the dtor - // could be called in the other thread, missing the final - // callback while still destroying the handle. 
- - let should_exit = unsafe { - exit_flag_clone.with_imm(|&should_exit| should_exit) - }; - - f(); - - if should_exit { - watcher.close(||()); - } - - }; - UvRemoteCallback { - async: async, - exit_flag: exit_flag - } - } -} - -impl RemoteCallback for UvRemoteCallback { - fn fire(&mut self) { self.async.send() } -} - -impl Drop for UvRemoteCallback { - fn drop(&mut self) { - unsafe { - let this: &mut UvRemoteCallback = cast::transmute_mut(self); - do this.exit_flag.with |should_exit| { - // NB: These two things need to happen atomically. Otherwise - // the event handler could wake up due to a *previous* - // signal and see the exit flag, destroying the handle - // before the final send. - *should_exit = true; - this.async.send(); - } - } - } -} - -#[cfg(test)] -mod test_remote { - use std::cell::Cell; - use std::rt::test::*; - use std::rt::thread::Thread; - use std::rt::tube::Tube; - use std::rt::rtio::EventLoop; - use std::rt::local::Local; - use std::rt::sched::Scheduler; - - #[test] - fn test_uv_remote() { - do run_in_mt_newsched_task { - let mut tube = Tube::new(); - let tube_clone = tube.clone(); - let remote_cell = Cell::new_empty(); - do Local::borrow |sched: &mut Scheduler| { - let tube_clone = tube_clone.clone(); - let tube_clone_cell = Cell::new(tube_clone); - let remote = do sched.event_loop.remote_callback { - // This could be called multiple times - if !tube_clone_cell.is_empty() { - tube_clone_cell.take().send(1); - } - }; - remote_cell.put_back(remote); - } - let thread = do Thread::start { - remote_cell.take().fire(); - }; - - assert!(tube.recv() == 1); - thread.join(); - } - } -} - pub struct UvIoFactory(Loop); impl UvIoFactory { diff --git a/src/libstd/rt/basic.rs b/src/libstd/rt/basic.rs index 86d3f8a52bace..0c8d192d89ac1 100644 --- a/src/libstd/rt/basic.rs +++ b/src/libstd/rt/basic.rs @@ -15,7 +15,8 @@ use prelude::*; use cast; -use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausibleIdleCallback}; +use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausibleIdleCallback, + Callback}; use unstable::sync::Exclusive; use util; @@ -25,9 +26,9 @@ pub fn event_loop() -> ~EventLoop { } struct BasicLoop { - work: ~[~fn()], // pending work - idle: Option<*BasicPausible>, // only one is allowed - remotes: ~[(uint, ~fn())], + work: ~[proc()], // pending work + idle: Option<*mut BasicPausible>, // only one is allowed + remotes: ~[(uint, ~Callback)], next_remote: uint, messages: Exclusive<~[Message]> } @@ -86,8 +87,8 @@ impl BasicLoop { fn message(&mut self, message: Message) { match message { RunRemote(i) => { - match self.remotes.iter().find(|& &(id, _)| id == i) { - Some(&(_, ref f)) => (*f)(), + match self.remotes.mut_iter().find(|& &(id, _)| id == i) { + Some(&(_, ref mut f)) => f.call(), None => unreachable!() } } @@ -106,7 +107,7 @@ impl BasicLoop { match self.idle { Some(idle) => { if (*idle).active { - (*(*idle).work.get_ref())(); + (*idle).work.get_mut_ref().call(); } } None => {} @@ -144,7 +145,7 @@ impl EventLoop for BasicLoop { } } - fn callback(&mut self, f: ~fn()) { + fn callback(&mut self, f: proc()) { self.work.push(f); } @@ -153,13 +154,13 @@ impl EventLoop for BasicLoop { let callback = ~BasicPausible::new(self); rtassert!(self.idle.is_none()); unsafe { - let cb_ptr: &*BasicPausible = cast::transmute(&callback); + let cb_ptr: &*mut BasicPausible = cast::transmute(&callback); self.idle = Some(*cb_ptr); } return callback as ~PausibleIdleCallback; } - fn remote_callback(&mut self, f: ~fn()) -> ~RemoteCallback { + fn remote_callback(&mut self, f: ~Callback) -> 
~RemoteCallback { let id = self.next_remote; self.next_remote += 1; self.remotes.push((id, f)); @@ -203,7 +204,7 @@ impl Drop for BasicRemote { struct BasicPausible { eloop: *mut BasicLoop, - work: Option<~fn()>, + work: Option<~Callback>, active: bool, } @@ -218,7 +219,7 @@ impl BasicPausible { } impl PausibleIdleCallback for BasicPausible { - fn start(&mut self, f: ~fn()) { + fn start(&mut self, f: ~Callback) { rtassert!(!self.active && self.work.is_none()); self.active = true; self.work = Some(f); diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index d24de7cbfee51..8684537f4e4b5 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -24,11 +24,15 @@ use path::Path; use super::io::{SeekStyle}; use super::io::{FileMode, FileAccess, FileStat, FilePermission}; +pub trait Callback { + fn call(&mut self); +} + pub trait EventLoop { fn run(&mut self); - fn callback(&mut self, ~fn()); + fn callback(&mut self, proc()); fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback; - fn remote_callback(&mut self, ~fn()) -> ~RemoteCallback; + fn remote_callback(&mut self, ~Callback) -> ~RemoteCallback; /// The asynchronous I/O services. Not all event loops may provide one // FIXME(#9382) this is an awful interface @@ -222,7 +226,7 @@ pub trait RtioTTY { } pub trait PausibleIdleCallback { - fn start(&mut self, f: ~fn()); + fn start(&mut self, f: ~Callback); fn pause(&mut self); fn resume(&mut self); fn close(&mut self); diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index e71cd92589c33..fb4bd573a73e9 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -23,7 +23,7 @@ use super::message_queue::MessageQueue; use rt::kill::BlockedTask; use rt::local_ptr; use rt::local::Local; -use rt::rtio::{RemoteCallback, PausibleIdleCallback}; +use rt::rtio::{RemoteCallback, PausibleIdleCallback, Callback}; use borrow::{to_uint}; use cell::Cell; use rand::{XorShiftRng, Rng, Rand}; @@ -184,7 +184,7 @@ impl Scheduler { // Before starting our first task, make sure the idle callback // is active. As we do not start in the sleep state this is // important. - self.idle_callback.get_mut_ref().start(Scheduler::run_sched_once); + self.idle_callback.get_mut_ref().start(~SchedRunner as ~Callback); // Now, as far as all the scheduler state is concerned, we are // inside the "scheduler" context. So we can act like the @@ -767,7 +767,7 @@ impl Scheduler { } pub fn make_handle(&mut self) -> SchedHandle { - let remote = self.event_loop.remote_callback(Scheduler::run_sched_once); + let remote = self.event_loop.remote_callback(~SchedRunner as ~Callback); return SchedHandle { remote: remote, @@ -802,6 +802,14 @@ impl SchedHandle { } } +struct SchedRunner; + +impl Callback for SchedRunner { + fn call(&mut self) { + Scheduler::run_sched_once(); + } +} + struct CleanupJob { task: ~Task, f: UnsafeTaskReceiver From 18ce014e9d99a66690e9db02e799f57ab969f36d Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 4 Nov 2013 12:45:39 -0800 Subject: [PATCH 08/27] Remove usage of ~fn from the scheduler --- src/libstd/rt/sched.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index fb4bd573a73e9..f84e10fe98949 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -454,8 +454,7 @@ impl Scheduler { // * Task Routing Functions - Make sure tasks send up in the right // place. 
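The SchedRunner and Callback pair introduced above is the shape of the whole ~fn() migration: a boxed object with a single call method replaces the boxed closure, so the event loop can own it, store it, and invoke it repeatedly. A small modern-Rust sketch of that shape; the Callback trait and the SchedRunner name mirror the patch, while BasicLoop here is only a hypothetical stand-in for the real event loops:

    // One-method trait object in place of a boxed closure (~fn() in 2013 Rust).
    trait Callback {
        fn call(&mut self);
    }

    // Zero-sized callback, like SchedRunner: any state it needs lives
    // elsewhere and is looked up inside call().
    struct SchedRunner;

    impl Callback for SchedRunner {
        fn call(&mut self) {
            println!("run_sched_once would be invoked here");
        }
    }

    // The event loop just owns Box<dyn Callback> values and calls them.
    struct BasicLoop {
        work: Vec<Box<dyn Callback>>,
    }

    impl BasicLoop {
        fn run_once(&mut self) {
            for cb in self.work.iter_mut() {
                cb.call();
            }
        }
    }

    fn main() {
        let mut event_loop = BasicLoop {
            work: vec![Box::new(SchedRunner) as Box<dyn Callback>],
        };
        event_loop.run_once();
    }

Any state the callback needs lives in the implementing struct's fields, which is what allows SchedRunner itself to stay zero-sized.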
- fn process_task(mut ~self, mut task: ~Task, - schedule_fn: SchedulingFn) { + fn process_task(mut ~self, mut task: ~Task, schedule_fn: SchedulingFn) { rtdebug!("processing a task"); let home = task.take_unwrap_home(); @@ -779,7 +778,7 @@ impl Scheduler { // Supporting types -type SchedulingFn = ~fn(~Scheduler, ~Task); +type SchedulingFn = extern "Rust" fn (~Scheduler, ~Task); pub enum SchedMessage { Wake, From 6690bcb10178b08c56ad56b111e4dcb7e0e06b36 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 4 Nov 2013 14:03:32 -0800 Subject: [PATCH 09/27] Fixing rebase conflicts and such This cleans up the merging of removing ~fn() and removing C++ wrappers to a compile-able and progress-ready state --- src/librustuv/async.rs | 10 ++--- src/librustuv/file.rs | 81 ++++++++-------------------------------- src/librustuv/idle.rs | 16 ++++---- src/librustuv/lib.rs | 4 +- src/librustuv/process.rs | 45 +++++++++++----------- src/librustuv/signal.rs | 9 ++--- src/librustuv/timer.rs | 40 +++++++++----------- src/librustuv/uvio.rs | 9 +++-- src/librustuv/uvll.rs | 21 +++++++---- 9 files changed, 91 insertions(+), 144 deletions(-) diff --git a/src/librustuv/async.rs b/src/librustuv/async.rs index 27f6439a12d47..0b93e8fa49fcb 100644 --- a/src/librustuv/async.rs +++ b/src/librustuv/async.rs @@ -35,7 +35,7 @@ impl AsyncWatcher { pub fn new(loop_: &mut Loop, cb: ~Callback) -> AsyncWatcher { let handle = UvHandle::alloc(None::, uvll::UV_ASYNC); assert_eq!(unsafe { - uvll::async_init(loop_.native_handle(), handle, async_cb) + uvll::uv_async_init(loop_.native_handle(), handle, async_cb) }, 0); let flag = Exclusive::new(false); let payload = ~Payload { callback: cb, exit_flag: flag.clone() }; @@ -49,7 +49,7 @@ impl AsyncWatcher { impl UvHandle for AsyncWatcher { fn uv_handle(&self) -> *uvll::uv_async_t { self.handle } - unsafe fn from_uv_handle<'a>(h: &'a *T) -> &'a mut AsyncWatcher { + unsafe fn from_uv_handle<'a>(_: &'a *uvll::uv_async_t) -> &'a mut AsyncWatcher { fail!("async watchers can't be built from their handles"); } } @@ -89,7 +89,7 @@ extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) { payload.callback.call(); if should_exit { - unsafe { uvll::close(handle, close_cb) } + unsafe { uvll::uv_close(handle, close_cb) } } } @@ -104,7 +104,7 @@ extern fn close_cb(handle: *uvll::uv_handle_t) { impl RemoteCallback for AsyncWatcher { fn fire(&mut self) { - unsafe { uvll::async_send(self.handle) } + unsafe { uvll::uv_async_send(self.handle) } } } @@ -117,7 +117,7 @@ impl Drop for AsyncWatcher { // signal and see the exit flag, destroying the handle // before the final send. 
*should_exit = true; - uvll::async_send(self.handle) + uvll::uv_async_send(self.handle) } } } diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index e3fe6c95bafe7..0ff4543a116b0 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -13,7 +13,7 @@ use std::c_str; use std::c_str::CString; use std::cast::transmute; use std::libc; -use std::libc::{c_int, c_char, c_void}; +use std::libc::{c_int, c_char, c_void, c_uint}; use super::{Request, NativeHandle, Loop, FsCallback, Buf, status_to_maybe_uv_error, UvError}; @@ -147,25 +147,12 @@ impl FsRequest { self.sync_cleanup(result) } -<<<<<<< HEAD pub fn close(mut self, loop_: &Loop, fd: c_int, cb: FsCallback) { let complete_cb_ptr = self.req_boilerplate(Some(cb)); assert_eq!(unsafe { - uvll::fs_close(loop_.native_handle(), self.native_handle(), - fd, complete_cb_ptr) - }, 0); -======= - pub fn close(self, loop_: &Loop, fd: c_int, cb: FsCallback) { - let complete_cb_ptr = { - let mut me = self; - me.req_boilerplate(Some(cb)) - }; - let ret = unsafe { uvll::uv_fs_close(loop_.native_handle(), self.native_handle(), fd, complete_cb_ptr) - }; - assert_eq!(ret, 0); ->>>>>>> 1850d26... Remove lots of uv/C++ wrappers + }, 0); } pub fn close_sync(mut self, loop_: &Loop, fd: c_int) -> Result { @@ -177,21 +164,20 @@ impl FsRequest { self.sync_cleanup(result) } -<<<<<<< HEAD pub fn mkdir(mut self, loop_: &Loop, path: &CString, mode: c_int, cb: FsCallback) { let complete_cb_ptr = self.req_boilerplate(Some(cb)); assert_eq!(path.with_ref(|p| unsafe { - uvll::fs_mkdir(loop_.native_handle(), - self.native_handle(), p, mode, complete_cb_ptr) + uvll::uv_fs_mkdir(loop_.native_handle(), + self.native_handle(), p, mode, complete_cb_ptr) }), 0); } pub fn rmdir(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { let complete_cb_ptr = self.req_boilerplate(Some(cb)); assert_eq!(path.with_ref(|p| unsafe { - uvll::fs_rmdir(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) + uvll::uv_fs_rmdir(loop_.native_handle(), + self.native_handle(), p, complete_cb_ptr) }), 0); } @@ -199,11 +185,11 @@ impl FsRequest { cb: FsCallback) { let complete_cb_ptr = self.req_boilerplate(Some(cb)); assert_eq!(unsafe { - uvll::fs_rename(loop_.native_handle(), - self.native_handle(), - path.with_ref(|p| p), - to.with_ref(|p| p), - complete_cb_ptr) + uvll::uv_fs_rename(loop_.native_handle(), + self.native_handle(), + path.with_ref(|p| p), + to.with_ref(|p| p), + complete_cb_ptr) }, 0); } @@ -211,43 +197,17 @@ impl FsRequest { cb: FsCallback) { let complete_cb_ptr = self.req_boilerplate(Some(cb)); assert_eq!(path.with_ref(|p| unsafe { - uvll::fs_chmod(loop_.native_handle(), self.native_handle(), p, mode, - complete_cb_ptr) + uvll::uv_fs_chmod(loop_.native_handle(), self.native_handle(), p, + mode, complete_cb_ptr) }), 0); -======= - pub fn mkdir(self, loop_: &Loop, path: &CString, mode: int, cb: FsCallback) { - let complete_cb_ptr = { - let mut me = self; - me.req_boilerplate(Some(cb)) - }; - let ret = path.with_ref(|p| unsafe { - uvll::uv_fs_mkdir(loop_.native_handle(), - self.native_handle(), p, - mode as c_int, complete_cb_ptr) - }); - assert_eq!(ret, 0); - } - - pub fn rmdir(self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = { - let mut me = self; - me.req_boilerplate(Some(cb)) - }; - let ret = path.with_ref(|p| unsafe { - uvll::uv_fs_rmdir(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }); - assert_eq!(ret, 0); ->>>>>>> 1850d26... 
Remove lots of uv/C++ wrappers } pub fn readdir(mut self, loop_: &Loop, path: &CString, flags: c_int, cb: FsCallback) { -<<<<<<< HEAD let complete_cb_ptr = self.req_boilerplate(Some(cb)); assert_eq!(path.with_ref(|p| unsafe { - uvll::fs_readdir(loop_.native_handle(), - self.native_handle(), p, flags, complete_cb_ptr) + uvll::uv_fs_readdir(loop_.native_handle(), + self.native_handle(), p, flags, complete_cb_ptr) }), 0); } @@ -318,17 +278,6 @@ impl FsRequest { uvll::uv_fs_fdatasync(loop_.native_handle(), self.native_handle(), fd, complete_cb_ptr) }, 0); -======= - let complete_cb_ptr = { - let mut me = self; - me.req_boilerplate(Some(cb)) - }; - let ret = path.with_ref(|p| unsafe { - uvll::uv_fs_readdir(loop_.native_handle(), - self.native_handle(), p, flags, complete_cb_ptr) - }); - assert_eq!(ret, 0); ->>>>>>> 1850d26... Remove lots of uv/C++ wrappers } // accessors/utility funcs diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index da3ddacef6bbb..e3cc6ec90a1b1 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -26,7 +26,7 @@ impl IdleWatcher { pub fn new(loop_: &mut Loop) -> ~IdleWatcher { let handle = UvHandle::alloc(None::, uvll::UV_IDLE); assert_eq!(unsafe { - uvll::idle_init(loop_.native_handle(), handle) + uvll::uv_idle_init(loop_.native_handle(), handle) }, 0); let me = ~IdleWatcher { handle: handle, @@ -40,10 +40,10 @@ impl IdleWatcher { pub fn onetime(loop_: &mut Loop, f: proc()) { let handle = UvHandle::alloc(None::, uvll::UV_IDLE); unsafe { - assert_eq!(uvll::idle_init(loop_.native_handle(), handle), 0); + assert_eq!(uvll::uv_idle_init(loop_.native_handle(), handle), 0); let data: *c_void = cast::transmute(~f); uvll::set_data_for_uv_handle(handle, data); - assert_eq!(uvll::idle_start(handle, onetime_cb), 0) + assert_eq!(uvll::uv_idle_start(handle, onetime_cb), 0) } extern fn onetime_cb(handle: *uvll::uv_idle_t, status: c_int) { @@ -52,8 +52,8 @@ impl IdleWatcher { let data = uvll::get_data_for_uv_handle(handle); let f: ~proc() = cast::transmute(data); (*f)(); - uvll::idle_stop(handle); - uvll::close(handle, close_cb); + uvll::uv_idle_stop(handle); + uvll::uv_close(handle, close_cb); } } @@ -67,18 +67,18 @@ impl PausibleIdleCallback for IdleWatcher { fn start(&mut self, cb: ~Callback) { assert!(self.callback.is_none()); self.callback = Some(cb); - assert_eq!(unsafe { uvll::idle_start(self.handle, idle_cb) }, 0) + assert_eq!(unsafe { uvll::uv_idle_start(self.handle, idle_cb) }, 0) self.idle_flag = true; } fn pause(&mut self) { if self.idle_flag == true { - assert_eq!(unsafe {uvll::idle_stop(self.handle) }, 0); + assert_eq!(unsafe {uvll::uv_idle_stop(self.handle) }, 0); self.idle_flag = false; } } fn resume(&mut self) { if self.idle_flag == false { - assert_eq!(unsafe { uvll::idle_start(self.handle, idle_cb) }, 0) + assert_eq!(unsafe { uvll::uv_idle_start(self.handle, idle_cb) }, 0) self.idle_flag = true; } } diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index f3d9bb5443bb8..de8bed948c4df 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -50,7 +50,7 @@ use std::str::raw::from_c_str; use std::vec; use std::ptr; use std::str; -use std::libc::{c_void, c_int, size_t, malloc, free, c_char, c_uint}; +use std::libc::{c_void, c_int, size_t, malloc, free}; use std::cast::transmute; use std::ptr::null; use std::unstable::finally::Finally; @@ -153,7 +153,7 @@ pub trait UvHandle { unsafe { uvll::set_data_for_uv_handle(self.uv_handle(), null::<()>()); - uvll::close(self.uv_handle(), close_cb) + uvll::uv_close(self.uv_handle() as 
*uvll::uv_handle_t, close_cb) } } } diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index fd35f9e494e87..d143bc059e437 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -201,32 +201,30 @@ impl RtioProcess for Process { } fn kill(&mut self, signal: int) -> Result<(), IoError> { - do self.home_for_io |self_| { - match unsafe { - uvll::process_kill(self_.handle, signal as libc::c_int) - } { - 0 => Ok(()), - err => Err(uv_error_to_io_error(UvError(err))) - } + let _m = self.fire_missiles(); + match unsafe { + uvll::uv_process_kill(self.handle, signal as libc::c_int) + } { + 0 => Ok(()), + err => Err(uv_error_to_io_error(UvError(err))) } } fn wait(&mut self) -> int { // Make sure (on the home scheduler) that we have an exit status listed - do self.home_for_io |self_| { - match self_.exit_status { - Some(*) => {} - None => { - // If there's no exit code previously listed, then the - // process's exit callback has yet to be invoked. We just - // need to deschedule ourselves and wait to be reawoken. - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - assert!(self_.to_wake.is_none()); - self_.to_wake = Some(task); - } - assert!(self_.exit_status.is_some()); + let _m = self.fire_missiles(); + match self.exit_status { + Some(*) => {} + None => { + // If there's no exit code previously listed, then the + // process's exit callback has yet to be invoked. We just + // need to deschedule ourselves and wait to be reawoken. + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + assert!(self.to_wake.is_none()); + self.to_wake = Some(task); } + assert!(self.exit_status.is_some()); } } @@ -237,9 +235,8 @@ impl RtioProcess for Process { impl Drop for Process { fn drop(&mut self) { - do self.home_for_io |self_| { - assert!(self_.to_wake.is_none()); - self_.close_async_(); - } + let _m = self.fire_missiles(); + assert!(self.to_wake.is_none()); + self.close_async_(); } } diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index c195f48022735..d8ecc25db6d1d 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -32,10 +32,10 @@ impl SignalWatcher { channel: SharedChan) -> Result<~SignalWatcher, UvError> { let handle = UvHandle::alloc(None::, uvll::UV_SIGNAL); assert_eq!(unsafe { - uvll::signal_init(loop_.native_handle(), handle) + uvll::uv_signal_init(loop_.native_handle(), handle) }, 0); - match unsafe { uvll::signal_start(handle, signal_cb, signum as c_int) } { + match unsafe { uvll::uv_signal_start(handle, signal_cb, signum as c_int) } { 0 => { let s = ~SignalWatcher { handle: handle, @@ -72,8 +72,7 @@ impl RtioSignal for SignalWatcher {} impl Drop for SignalWatcher { fn drop(&mut self) { - do self.home_for_io |self_| { - self_.close_async_(); - } + let _m = self.fire_missiles(); + self.close_async_(); } } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 956699c5c2e89..5bf3a82e972bb 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -36,7 +36,7 @@ impl TimerWatcher { pub fn new(loop_: &mut Loop) -> ~TimerWatcher { let handle = UvHandle::alloc(None::, uvll::UV_TIMER); assert_eq!(unsafe { - uvll::timer_init(loop_.native_handle(), handle) + uvll::uv_timer_init(loop_.native_handle(), handle) }, 0); let me = ~TimerWatcher { handle: handle, @@ -48,12 +48,12 @@ impl TimerWatcher { fn start(&mut self, msecs: u64, period: u64) { assert_eq!(unsafe { - uvll::timer_start(self.handle, timer_cb, msecs, period) + 
uvll::uv_timer_start(self.handle, timer_cb, msecs, period) }, 0) } fn stop(&mut self) { - assert_eq!(unsafe { uvll::timer_stop(self.handle) }, 0) + assert_eq!(unsafe { uvll::uv_timer_stop(self.handle) }, 0) } } @@ -67,23 +67,21 @@ impl UvHandle for TimerWatcher { impl RtioTimer for TimerWatcher { fn sleep(&mut self, msecs: u64) { - do self.home_for_io_with_sched |self_, scheduler| { - do scheduler.deschedule_running_task_and_then |_sched, task| { - self_.action = Some(WakeTask(task)); - self_.start(msecs, 0); - } - self_.stop(); + let (_m, sched) = self.fire_missiles_sched(); + do sched.deschedule_running_task_and_then |_sched, task| { + self.action = Some(WakeTask(task)); + self.start(msecs, 0); } + self.stop(); } fn oneshot(&mut self, msecs: u64) -> PortOne<()> { let (port, chan) = oneshot(); let chan = Cell::new(chan); - do self.home_for_io |self_| { - self_.action = Some(SendOnce(chan.take())); - self_.start(msecs, 0); - } + let _m = self.fire_missiles(); + self.action = Some(SendOnce(chan.take())); + self.start(msecs, 0); return port; } @@ -92,10 +90,9 @@ impl RtioTimer for TimerWatcher { let (port, chan) = stream(); let chan = Cell::new(chan); - do self.home_for_io |self_| { - self_.action = Some(SendMany(chan.take())); - self_.start(msecs, msecs); - } + let _m = self.fire_missiles(); + self.action = Some(SendMany(chan.take())); + self.start(msecs, msecs); return port; } @@ -119,11 +116,10 @@ extern fn timer_cb(handle: *uvll::uv_timer_t, _status: c_int) { impl Drop for TimerWatcher { fn drop(&mut self) { - do self.home_for_io |self_| { - self_.action = None; - self_.stop(); - self_.close_async_(); - } + let _m = self.fire_missiles(); + self.action = None; + self.stop(); + self.close_async_(); } } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index f584fa6a1485f..e0ceb954e58cb 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -14,6 +14,7 @@ use std::cast; use std::cell::Cell; use std::clone::Clone; use std::comm::{SharedChan, GenericChan}; +use std::libc; use std::libc::{c_int, c_uint, c_void}; use std::ptr; use std::str; @@ -30,9 +31,9 @@ use std::rt::tube::Tube; use std::rt::task::Task; use std::path::{GenericPath, Path}; use std::libc::{lseek, off_t, O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, - O_WRONLY, S_IRUSR, S_IWUSR, S_IRWXU}; -use std::rt::io::{FileMode, FileAccess, OpenOrCreate, Open, Create, - CreateOrTruncate, Append, Truncate, Read, Write, ReadWrite, + O_WRONLY, S_IRUSR, S_IWUSR}; +use std::rt::io::{FileMode, FileAccess, Open, + Append, Truncate, Read, Write, ReadWrite, FileStat}; use std::rt::io::signal::Signum; use std::task; @@ -1224,7 +1225,7 @@ impl UvFileStream { do sched.deschedule_running_task_and_then |_, task| { let task = Cell::new(task); let req = file::FsRequest::new(); - do f(self_, req) |_, uverr| { + do f(self, req) |_, uverr| { let res = match uverr { None => Ok(()), Some(err) => Err(uv_error_to_io_error(err)) diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 3028546972fe6..a32f03732d664 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -724,6 +724,7 @@ extern { fn rust_set_stdio_container_fd(c: *uv_stdio_container_t, fd: c_int); fn rust_set_stdio_container_stream(c: *uv_stdio_container_t, stream: *uv_stream_t); + fn rust_uv_process_pid(p: *uv_process_t) -> c_int; } // generic uv functions @@ -809,21 +810,25 @@ externfn!(fn uv_fs_readdir(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, flags: c_int, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_req_cleanup(req: *uv_fs_t)) externfn!(fn 
uv_fs_fsync(handle: *uv_loop_t, req: *uv_fs_t, file: c_int, - cb: *u8) -> c_int) + cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_fdatasync(handle: *uv_loop_t, req: *uv_fs_t, file: c_int, - cb: *u8) -> c_int) + cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_ftruncate(handle: *uv_loop_t, req: *uv_fs_t, file: c_int, - offset: i64, cb: *u8) -> c_int) + offset: i64, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_readlink(handle: *uv_loop_t, req: *uv_fs_t, file: *c_char, - cb: *u8) -> c_int) + cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_symlink(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, - dst: *c_char, flags: c_int, cb: *u8) -> c_int) + dst: *c_char, flags: c_int, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_rename(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, + dst: *c_char, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_link(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, - dst: *c_char, cb: *u8) -> c_int) + dst: *c_char, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_chown(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, - uid: uv_uid_t, gid: uv_gid_t, cb: *u8) -> c_int) + uid: uv_uid_t, gid: uv_gid_t, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_chmod(handle: *uv_loop_t, req: *uv_fs_t, path: *c_char, + mode: c_int, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_lstat(handle: *uv_loop_t, req: *uv_fs_t, file: *c_char, - cb: *u8) -> c_int) + cb: uv_fs_cb) -> c_int) // getaddrinfo externfn!(fn uv_getaddrinfo(loop_: *uv_loop_t, req: *uv_getaddrinfo_t, From c1b5c4db8fdaec025f3ace3c69f046426d69d5db Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 4 Nov 2013 16:42:05 -0800 Subject: [PATCH 10/27] Start migrating stream I/O away from ~fn() --- src/librustuv/lib.rs | 6 +- src/librustuv/pipe.rs | 249 ++++++++++++++++++++++++++++++--------- src/librustuv/process.rs | 14 +-- src/librustuv/stream.rs | 216 +++++++++++++++++++++++++++++++++ src/librustuv/tty.rs | 102 ++++++++++------ src/librustuv/uvio.rs | 231 ++---------------------------------- src/libstd/rt/rtio.rs | 2 - 7 files changed, 501 insertions(+), 319 deletions(-) create mode 100644 src/librustuv/stream.rs diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index de8bed948c4df..1d6f2f0edb55e 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -66,8 +66,9 @@ pub use self::idle::IdleWatcher; pub use self::timer::TimerWatcher; pub use self::async::AsyncWatcher; pub use self::process::Process; -pub use self::pipe::Pipe; +pub use self::pipe::PipeWatcher; pub use self::signal::SignalWatcher; +pub use self::tty::TtyWatcher; mod macros; @@ -87,6 +88,7 @@ pub mod process; pub mod pipe; pub mod tty; pub mod signal; +pub mod stream; /// XXX: Loop(*handle) is buggy with destructors. Normal structs /// with dtors may not be destructured, but tuple structs can, @@ -218,7 +220,6 @@ pub type ReadCallback = ~fn(StreamWatcher, int, Buf, Option); pub type NullCallback = ~fn(); pub type ConnectionCallback = ~fn(StreamWatcher, Option); pub type FsCallback = ~fn(&mut FsRequest, Option); -pub type AsyncCallback = ~fn(AsyncWatcher, Option); pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, SocketAddr, uint, Option); pub type UdpSendCallback = ~fn(UdpWatcher, Option); @@ -231,7 +232,6 @@ struct WatcherData { connect_cb: Option, close_cb: Option, alloc_cb: Option, - async_cb: Option, udp_recv_cb: Option, udp_send_cb: Option, } diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index 0b65c55636d40..f1936635a1839 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -8,91 +8,234 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use std::libc; use std::c_str::CString; +use std::cast; +use std::libc; +use std::rt::BlockedTask; +use std::rt::io::IoError; +use std::rt::local::Local; +use std::rt::rtio::{RtioPipe, RtioUnixListener, RtioUnixAcceptor}; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::rt::tube::Tube; -use super::{Loop, UvError, Watcher, NativeHandle, status_to_maybe_uv_error}; -use super::ConnectionCallback; -use net; +use stream::StreamWatcher; +use super::{Loop, UvError, NativeHandle, uv_error_to_io_error, UvHandle}; +use uvio::HomingIO; use uvll; -pub struct Pipe(*uvll::uv_pipe_t); +pub struct PipeWatcher { + stream: StreamWatcher, + home: SchedHandle, +} + +pub struct PipeListener { + home: SchedHandle, + pipe: *uvll::uv_pipe_t, + priv closing_task: Option, + priv outgoing: Tube>, +} + +pub struct PipeAcceptor { + listener: ~PipeListener, + priv incoming: Tube>, +} -impl Watcher for Pipe {} +// PipeWatcher implementation and traits -impl Pipe { - pub fn new(loop_: &Loop, ipc: bool) -> Pipe { +impl PipeWatcher { + pub fn new(pipe: *uvll::uv_pipe_t) -> PipeWatcher { + PipeWatcher { + stream: StreamWatcher::new(pipe), + home: get_handle_to_current_scheduler!(), + } + } + + pub fn alloc(loop_: &Loop, ipc: bool) -> *uvll::uv_pipe_t { unsafe { let handle = uvll::malloc_handle(uvll::UV_NAMED_PIPE); - assert!(handle.is_not_null()); + assert!(!handle.is_null()); let ipc = ipc as libc::c_int; assert_eq!(uvll::uv_pipe_init(loop_.native_handle(), handle, ipc), 0); - let mut ret: Pipe = - NativeHandle::from_native_handle(handle); - ret.install_watcher_data(); - ret + handle } } - pub fn as_stream(&self) -> net::StreamWatcher { - net::StreamWatcher(**self as *uvll::uv_stream_t) + pub fn open(loop_: &Loop, file: libc::c_int) -> Result + { + let handle = PipeWatcher::alloc(loop_, false); + match unsafe { uvll::uv_pipe_open(handle, file) } { + 0 => Ok(PipeWatcher::new(handle)), + n => { + unsafe { uvll::uv_close(handle, pipe_close_cb) } + Err(UvError(n)) + } + } } - #[fixed_stack_segment] #[inline(never)] - pub fn open(&mut self, file: libc::c_int) -> Result<(), UvError> { - match unsafe { uvll::uv_pipe_open(self.native_handle(), file) } { - 0 => Ok(()), - n => Err(UvError(n)) + pub fn connect(loop_: &Loop, name: &CString) -> Result + { + struct Ctx { + task: Option, + result: Option>, } + let mut cx = Ctx { task: None, result: None }; + let req = unsafe { uvll::malloc_req(uvll::UV_CONNECT) }; + unsafe { uvll::set_data_for_req(req, &cx as *Ctx) } + + let sched: ~Scheduler = Local::take(); + do sched.deschedule_running_task_and_then |_, task| { + cx.task = Some(task); + unsafe { + uvll::uv_pipe_connect(req, + PipeWatcher::alloc(loop_, false), + name.with_ref(|p| p), + connect_cb) + } + } + assert!(cx.task.is_none()); + return cx.result.take().expect("pipe connect needs a result"); + + extern fn connect_cb(req: *uvll::uv_connect_t, status: libc::c_int) { + unsafe { + let cx: &mut Ctx = cast::transmute(uvll::get_data_for_req(req)); + let stream = uvll::get_stream_handle_from_connect_req(req); + cx.result = Some(match status { + 0 => Ok(PipeWatcher::new(stream)), + n => { + uvll::free_handle(stream); + Err(UvError(n)) + } + }); + uvll::free_req(req); + + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(cx.task.take_unwrap()); + } + } + } +} + +impl RtioPipe for PipeWatcher { + fn read(&mut self, buf: &mut [u8]) -> Result { + let _m = self.fire_missiles(); + 
self.stream.read(buf).map_err(uv_error_to_io_error) + } + + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + let _m = self.fire_missiles(); + self.stream.write(buf).map_err(uv_error_to_io_error) + } +} + +impl HomingIO for PipeWatcher { + fn home<'a>(&'a mut self) -> &'a mut SchedHandle { &mut self.home } +} + +impl Drop for PipeWatcher { + fn drop(&mut self) { + let _m = self.fire_missiles(); + self.stream.close(true); // close synchronously } +} + +extern fn pipe_close_cb(handle: *uvll::uv_handle_t) { + unsafe { uvll::free_handle(handle) } +} - #[fixed_stack_segment] #[inline(never)] - pub fn bind(&mut self, name: &CString) -> Result<(), UvError> { - do name.with_ref |name| { - match unsafe { uvll::uv_pipe_bind(self.native_handle(), name) } { - 0 => Ok(()), - n => Err(UvError(n)) +// PipeListener implementation and traits + +impl PipeListener { + pub fn bind(loop_: &Loop, name: &CString) -> Result<~PipeListener, UvError> { + let pipe = PipeWatcher::alloc(loop_, false); + match unsafe { uvll::uv_pipe_bind(pipe, name.with_ref(|p| p)) } { + 0 => { + let p = ~PipeListener { + home: get_handle_to_current_scheduler!(), + pipe: pipe, + closing_task: None, + outgoing: Tube::new(), + }; + Ok(p.install()) + } + n => { + unsafe { uvll::free_handle(pipe) } + Err(UvError(n)) } } } +} - #[fixed_stack_segment] #[inline(never)] - pub fn connect(&mut self, name: &CString, cb: ConnectionCallback) { - { - let data = self.get_watcher_data(); - assert!(data.connect_cb.is_none()); - data.connect_cb = Some(cb); +impl RtioUnixListener for PipeListener { + fn listen(mut ~self) -> Result<~RtioUnixAcceptor, IoError> { + // create the acceptor object from ourselves + let incoming = self.outgoing.clone(); + let mut acceptor = ~PipeAcceptor { + listener: self, + incoming: incoming, + }; + + let _m = acceptor.fire_missiles(); + // XXX: the 128 backlog should be configurable + match unsafe { uvll::uv_listen(acceptor.listener.pipe, 128, listen_cb) } { + 0 => Ok(acceptor as ~RtioUnixAcceptor), + n => Err(uv_error_to_io_error(UvError(n))), } + } +} - let connect = net::ConnectRequest::new(); - let name = do name.with_ref |p| { p }; +impl HomingIO for PipeListener { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} - unsafe { - uvll::uv_pipe_connect(connect.native_handle(), - self.native_handle(), - name, - connect_cb) +impl UvHandle for PipeListener { + fn uv_handle(&self) -> *uvll::uv_pipe_t { self.pipe } +} + +extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { + let msg = match status { + 0 => { + let loop_ = NativeHandle::from_native_handle(unsafe { + uvll::get_loop_for_uv_handle(server) + }); + let client = PipeWatcher::alloc(&loop_, false); + assert_eq!(unsafe { uvll::uv_accept(server, client) }, 0); + Ok(~PipeWatcher::new(client) as ~RtioPipe) } + n => Err(uv_error_to_io_error(UvError(n))) + }; + + let pipe: &mut PipeListener = unsafe { UvHandle::from_uv_handle(&server) }; + pipe.outgoing.send(msg); +} - extern "C" fn connect_cb(req: *uvll::uv_connect_t, status: libc::c_int) { - let connect_request: net::ConnectRequest = - NativeHandle::from_native_handle(req); - let mut stream_watcher = connect_request.stream(); - connect_request.delete(); +impl Drop for PipeListener { + fn drop(&mut self) { + let (_m, sched) = self.fire_missiles_sched(); - let cb = stream_watcher.get_watcher_data().connect_cb.take_unwrap(); - let status = status_to_maybe_uv_error(status); - cb(stream_watcher, status); + do sched.deschedule_running_task_and_then |_, task| { + self.closing_task = 
Some(task); + unsafe { uvll::uv_close(self.pipe, listener_close_cb) } } } +} + +extern fn listener_close_cb(handle: *uvll::uv_handle_t) { + let pipe: &mut PipeListener = unsafe { UvHandle::from_uv_handle(&handle) }; + unsafe { uvll::free_handle(handle) } + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(pipe.closing_task.take_unwrap()); } -impl NativeHandle<*uvll::uv_pipe_t> for Pipe { - fn from_native_handle(handle: *uvll::uv_pipe_t) -> Pipe { - Pipe(handle) - } - fn native_handle(&self) -> *uvll::uv_pipe_t { - match self { &Pipe(ptr) => ptr } +// PipeAcceptor implementation and traits + +impl RtioUnixAcceptor for PipeAcceptor { + fn accept(&mut self) -> Result<~RtioPipe, IoError> { + let _m = self.fire_missiles(); + self.incoming.recv() } } + +impl HomingIO for PipeAcceptor { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } +} diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index d143bc059e437..50964d7a84c63 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -21,8 +21,9 @@ use std::rt::sched::{Scheduler, SchedHandle}; use std::vec; use super::{Loop, NativeHandle, UvHandle, UvError, uv_error_to_io_error}; -use uvio::{HomingIO, UvPipeStream, UvUnboundPipe}; +use uvio::HomingIO; use uvll; +use pipe::PipeWatcher; pub struct Process { handle: *uvll::uv_process_t, @@ -42,7 +43,7 @@ impl Process { /// Returns either the corresponding process object or an error which /// occurred. pub fn spawn(loop_: &Loop, config: ProcessConfig) - -> Result<(~Process, ~[Option<~UvPipeStream>]), UvError> + -> Result<(~Process, ~[Option]), UvError> { let cwd = config.cwd.map(|s| s.to_c_str()); let io = config.io; @@ -121,7 +122,7 @@ extern fn on_exit(handle: *uvll::uv_process_t, unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t, io: &StdioContainer, - loop_: &Loop) -> Option<~UvPipeStream> { + loop_: &Loop) -> Option { match *io { Ignored => { uvll::set_stdio_container_flags(dst, uvll::STDIO_IGNORE); @@ -140,11 +141,10 @@ unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t, if writable { flags |= uvll::STDIO_WRITABLE_PIPE as libc::c_int; } - let pipe = UvUnboundPipe::new(loop_); - let handle = pipe.pipe.as_stream().native_handle(); + let pipe_handle = PipeWatcher::alloc(loop_, false); uvll::set_stdio_container_flags(dst, flags); - uvll::set_stdio_container_stream(dst, handle); - Some(~UvPipeStream::new(pipe)) + uvll::set_stdio_container_stream(dst, pipe_handle); + Some(PipeWatcher::new(pipe_handle)) } } } diff --git a/src/librustuv/stream.rs b/src/librustuv/stream.rs new file mode 100644 index 0000000000000..ad0deebd45711 --- /dev/null +++ b/src/librustuv/stream.rs @@ -0,0 +1,216 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cast; +use std::libc::{c_int, size_t, ssize_t, c_void}; +use std::ptr; +use std::rt::BlockedTask; +use std::rt::local::Local; +use std::rt::sched::Scheduler; + +use super::{UvError, Buf, slice_to_uv_buf}; +use uvll; + +// This is a helper structure which is intended to get embedded into other +// Watcher structures. 
This structure will retain a handle to the underlying +// uv_stream_t instance, and all I/O operations assume that it's already located +// on the appropriate scheduler. +pub struct StreamWatcher { + handle: *uvll::uv_stream_t, + + // Cache the last used uv_write_t so we don't have to allocate a new one on + // every call to uv_write(). Ideally this would be a stack-allocated + // structure, but currently we don't have mappings for all the structures + // defined in libuv, so we're foced to malloc this. + priv last_write_req: Option<*uvll::uv_write_t>, +} + +struct ReadContext { + buf: Option, + result: Option>, + task: Option, +} + +struct WriteContext { + result: Option>, + task: Option, +} + +impl StreamWatcher { + // Creates a new helper structure which should be then embedded into another + // watcher. This provides the generic read/write methods on streams. + // + // This structure will *not* close the stream when it is dropped. It is up + // to the enclosure structure to be sure to call the close method (which + // will block the task). Note that this is also required to prevent memory + // leaks. + // + // It should also be noted that the `data` field of the underlying uv handle + // will be manipulated on each of the methods called on this watcher. + // Wrappers should ensure to always reset the field to an appropriate value + // if they rely on the field to perform an action. + pub fn new(stream: *uvll::uv_stream_t) -> StreamWatcher { + StreamWatcher { + handle: stream, + last_write_req: None, + } + } + + pub fn read(&mut self, buf: &mut [u8]) -> Result { + // Send off the read request, but don't block until we're sure that the + // read request is queued. + match unsafe { + uvll::uv_read_start(self.handle, alloc_cb, read_cb) + } { + 0 => { + let mut rcx = ReadContext { + buf: Some(slice_to_uv_buf(buf)), + result: None, + task: None, + }; + unsafe { + uvll::set_data_for_uv_handle(self.handle, &rcx) + } + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_sched, task| { + rcx.task = Some(task); + } + rcx.result.take().expect("no result in read stream?") + } + n => Err(UvError(n)) + } + } + + pub fn write(&mut self, buf: &[u8]) -> Result<(), UvError> { + // Prepare the write request, either using a cached one or allocating a + // new one + let req = match self.last_write_req { + Some(req) => req, + None => unsafe { uvll::malloc_req(uvll::UV_WRITE) }, + }; + self.last_write_req = Some(req); + let mut wcx = WriteContext { result: None, task: None, }; + unsafe { uvll::set_data_for_req(req, &wcx as *WriteContext) } + + // Send off the request, but be careful to not block until we're sure + // that the write reqeust is queued. If the reqeust couldn't be queued, + // then we should return immediately with an error. + match unsafe { + uvll::uv_write(req, self.handle, [slice_to_uv_buf(buf)], write_cb) + } { + 0 => { + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_sched, task| { + wcx.task = Some(task); + } + assert!(wcx.task.is_none()); + wcx.result.take().expect("no result in write stream?") + } + n => Err(UvError(n)), + } + } + + // This will deallocate an internally used memory, along with closing the + // handle (and freeing it). + // + // The `synchronous` flag dictates whether this handle is closed + // synchronously (the task is blocked) or asynchronously (the task is not + // block, but the handle is still deallocated). 
+ pub fn close(&mut self, synchronous: bool) { + // clean up the cached write request if we have one + match self.last_write_req { + Some(req) => unsafe { uvll::free_req(req) }, + None => {} + } + + if synchronous { + let mut closing_task = None; + unsafe { + uvll::set_data_for_uv_handle(self.handle, &closing_task); + } + + // Wait for this stream to close because it possibly represents a remote + // connection which may have consequences if we close asynchronously. + let sched: ~Scheduler = Local::take(); + do sched.deschedule_running_task_and_then |_, task| { + closing_task = Some(task); + unsafe { uvll::uv_close(self.handle, close_cb) } + } + } else { + unsafe { + uvll::set_data_for_uv_handle(self.handle, ptr::null::()); + uvll::uv_close(self.handle, close_cb) + } + } + + extern fn close_cb(handle: *uvll::uv_handle_t) { + let data: *c_void = unsafe { uvll::get_data_for_uv_handle(handle) }; + unsafe { uvll::free_handle(handle) } + if data.is_null() { return } + + let closing_task: &mut Option = unsafe { + cast::transmute(data) + }; + let task = closing_task.take_unwrap(); + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task); + } + } +} + +// This allocation callback expects to be invoked once and only once. It will +// unwrap the buffer in the ReadContext stored in the stream and return it. This +// will fail if it is called more than once. +extern fn alloc_cb(stream: *uvll::uv_stream_t, _hint: size_t) -> Buf { + let rcx: &mut ReadContext = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(stream)) + }; + rcx.buf.take().expect("alloc_cb called more than once") +} + +// When a stream has read some data, we will always forcibly stop reading and +// return all the data read (even if it didn't fill the whole buffer). +extern fn read_cb(handle: *uvll::uv_stream_t, nread: ssize_t, _buf: Buf) { + let rcx: &mut ReadContext = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + // Stop reading so that no read callbacks are + // triggered before the user calls `read` again. + // XXX: Is there a performance impact to calling + // stop here? + unsafe { assert_eq!(uvll::uv_read_stop(handle), 0); } + + assert!(rcx.result.is_none()); + rcx.result = Some(match nread { + n if n < 0 => Err(UvError(n as c_int)), + n => Ok(n as uint), + }); + + let task = rcx.task.take().expect("read_cb needs a task"); + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(task); +} + +// Unlike reading, the WriteContext is stored in the uv_write_t request. Like +// reading, however, all this does is wake up the blocked task after squirreling +// away the error code as a result. +extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { + // Remember to not free the request because it is re-used between writes on + // the same stream. + unsafe { + let wcx: &mut WriteContext = cast::transmute(uvll::get_data_for_req(req)); + wcx.result = Some(match status { + 0 => Ok(()), + n => Err(UvError(n)), + }); + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(wcx.task.take_unwrap()); + } +} diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index ad5f5043737f2..316a817354db1 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -9,75 +9,105 @@ // except according to those terms. 
use std::libc; +use std::rt::io::IoError; +use std::rt::local::Local; +use std::rt::rtio::RtioTTY; +use std::rt::sched::{Scheduler, SchedHandle}; -use super::{Watcher, Loop, NativeHandle, UvError}; -use net; +use stream::StreamWatcher; +use super::{Loop, UvError, UvHandle, uv_error_to_io_error}; +use uvio::HomingIO; use uvll; -/// A process wraps the handle of the underlying uv_process_t. -pub struct TTY(*uvll::uv_tty_t); - -impl Watcher for TTY {} +pub struct TtyWatcher{ + tty: *uvll::uv_tty_t, + stream: StreamWatcher, + home: SchedHandle, + fd: libc::c_int, +} -impl TTY { - #[fixed_stack_segment] #[inline(never)] - pub fn new(loop_: &Loop, fd: libc::c_int, readable: bool) -> - Result +impl TtyWatcher { + pub fn new(loop_: &Loop, fd: libc::c_int, readable: bool) + -> Result { - let handle = unsafe { uvll::malloc_handle(uvll::UV_TTY) }; - assert!(handle.is_not_null()); + let handle = UvHandle::alloc(None::, uvll::UV_TTY); - let ret = unsafe { + match unsafe { uvll::uv_tty_init(loop_.native_handle(), handle, fd as libc::c_int, readable as libc::c_int) - }; - match ret { + } { 0 => { - let mut ret: TTY = NativeHandle::from_native_handle(handle); - ret.install_watcher_data(); - Ok(ret) + Ok(TtyWatcher { + tty: handle, + stream: StreamWatcher::new(handle), + home: get_handle_to_current_scheduler!(), + fd: fd, + }) } n => { - unsafe { uvll::free_handle(handle); } + unsafe { uvll::free_handle(handle) } Err(UvError(n)) } } } +} - pub fn as_stream(&self) -> net::StreamWatcher { - net::StreamWatcher(**self as *uvll::uv_stream_t) +impl RtioTTY for TtyWatcher { + fn read(&mut self, buf: &mut [u8]) -> Result { + let _m = self.fire_missiles(); + self.stream.read(buf).map_err(uv_error_to_io_error) } - #[fixed_stack_segment] #[inline(never)] - pub fn set_mode(&self, raw: bool) -> Result<(), UvError> { + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + let _m = self.fire_missiles(); + self.stream.write(buf).map_err(uv_error_to_io_error) + } + + fn set_raw(&mut self, raw: bool) -> Result<(), IoError> { let raw = raw as libc::c_int; - match unsafe { uvll::uv_tty_set_mode(self.native_handle(), raw) } { + let _m = self.fire_missiles(); + match unsafe { uvll::uv_tty_set_mode(self.tty, raw) } { 0 => Ok(()), - n => Err(UvError(n)) + n => Err(uv_error_to_io_error(UvError(n))) } } - #[fixed_stack_segment] #[inline(never)] #[allow(unused_mut)] - pub fn get_winsize(&self) -> Result<(int, int), UvError> { + #[allow(unused_mut)] + fn get_winsize(&mut self) -> Result<(int, int), IoError> { let mut width: libc::c_int = 0; let mut height: libc::c_int = 0; let widthptr: *libc::c_int = &width; let heightptr: *libc::c_int = &width; - match unsafe { uvll::uv_tty_get_winsize(self.native_handle(), + let _m = self.fire_missiles(); + match unsafe { uvll::uv_tty_get_winsize(self.tty, widthptr, heightptr) } { 0 => Ok((width as int, height as int)), - n => Err(UvError(n)) + n => Err(uv_error_to_io_error(UvError(n))) } } -} -impl NativeHandle<*uvll::uv_tty_t> for TTY { - fn from_native_handle(handle: *uvll::uv_tty_t) -> TTY { - TTY(handle) - } - fn native_handle(&self) -> *uvll::uv_tty_t { - match self { &TTY(ptr) => ptr } + fn isatty(&self) -> bool { + unsafe { uvll::uv_guess_handle(self.fd) == uvll::UV_TTY } } } +impl UvHandle for TtyWatcher { + fn uv_handle(&self) -> *uvll::uv_tty_t { self.tty } +} + +impl HomingIO for TtyWatcher { + fn home<'a>(&'a mut self) -> &'a mut SchedHandle { &mut self.home } +} + +impl Drop for TtyWatcher { + // TTY handles are used for the logger in a task, so this destructor is run + // when 
a task is destroyed. When a task is being destroyed, a local + // scheduler isn't available, so we can't do the normal "take the scheduler + // and resume once close is done". Instead close operations on a TTY are + // asynchronous. + fn drop(&mut self) { + let _m = self.fire_missiles(); + self.stream.close(false); + } +} diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index e0ceb954e58cb..592a23297cc3a 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -49,6 +49,7 @@ use super::*; use idle::IdleWatcher; use net::{UvIpv4SocketAddr, UvIpv6SocketAddr}; use addrinfo::{GetAddrInfoRequest, accum_addrinfo}; +use pipe::PipeListener; // XXX we should not be calling uvll functions in here. @@ -616,67 +617,38 @@ impl IoFactory for UvIoFactory { match Process::spawn(self.uv_loop(), config) { Ok((p, io)) => { Ok((p as ~RtioProcess, - io.move_iter().map(|i| i.map(|p| p as ~RtioPipe)).collect())) + io.move_iter().map(|i| i.map(|p| ~p as ~RtioPipe)).collect())) } Err(e) => Err(uv_error_to_io_error(e)), } } - fn unix_bind(&mut self, path: &CString) -> - Result<~RtioUnixListener, IoError> { - let mut pipe = UvUnboundPipe::new(self.uv_loop()); - match pipe.pipe.bind(path) { - Ok(()) => Ok(~UvUnixListener::new(pipe) as ~RtioUnixListener), + fn unix_bind(&mut self, path: &CString) -> Result<~RtioUnixListener, IoError> + { + match PipeListener::bind(self.uv_loop(), path) { + Ok(p) => Ok(p as ~RtioUnixListener), Err(e) => Err(uv_error_to_io_error(e)), } } fn unix_connect(&mut self, path: &CString) -> Result<~RtioPipe, IoError> { - let pipe = UvUnboundPipe::new(self.uv_loop()); - let mut rawpipe = pipe.pipe; - - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let pipe_cell = Cell::new(pipe); - let pipe_cell_ptr: *Cell = &pipe_cell; - - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do rawpipe.connect(path) |_stream, err| { - let res = match err { - None => { - let pipe = unsafe { (*pipe_cell_ptr).take() }; - Ok(~UvPipeStream::new(pipe) as ~RtioPipe) - } - Some(e) => Err(uv_error_to_io_error(e)), - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } + match PipeWatcher::connect(self.uv_loop(), path) { + Ok(p) => Ok(~p as ~RtioPipe), + Err(e) => Err(uv_error_to_io_error(e)), } - - assert!(!result_cell.is_empty()); - return result_cell.take(); } fn tty_open(&mut self, fd: c_int, readable: bool) -> Result<~RtioTTY, IoError> { - match tty::TTY::new(self.uv_loop(), fd, readable) { - Ok(tty) => Ok(~UvTTY { - home: get_handle_to_current_scheduler!(), - tty: tty, - fd: fd, - } as ~RtioTTY), + match TtyWatcher::new(self.uv_loop(), fd, readable) { + Ok(tty) => Ok(~tty as ~RtioTTY), Err(e) => Err(uv_error_to_io_error(e)) } } fn pipe_open(&mut self, fd: c_int) -> Result<~RtioPipe, IoError> { - let mut pipe = UvUnboundPipe::new(self.uv_loop()); - match pipe.pipe.open(fd) { - Ok(()) => Ok(~UvPipeStream::new(pipe) as ~RtioPipe), + match PipeWatcher::open(self.uv_loop(), fd) { + Ok(s) => Ok(~s as ~RtioPipe), Err(e) => Err(uv_error_to_io_error(e)) } } @@ -865,60 +837,6 @@ fn write_stream(mut watcher: StreamWatcher, result_cell.take() } -pub struct UvUnboundPipe { - pipe: Pipe, - priv home: SchedHandle, -} - -impl UvUnboundPipe { - /// Creates a new unbound pipe homed to the current scheduler, placed on the - /// specified event loop - pub fn new(loop_: &Loop) -> 
UvUnboundPipe { - UvUnboundPipe { - pipe: Pipe::new(loop_, false), - home: get_handle_to_current_scheduler!(), - } - } -} - -impl HomingIO for UvUnboundPipe { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl Drop for UvUnboundPipe { - fn drop(&mut self) { - let (_m, sched) = self.fire_homing_missile_sched(); - do sched.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self.pipe.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } -} - -pub struct UvPipeStream { - priv inner: UvUnboundPipe, -} - -impl UvPipeStream { - pub fn new(inner: UvUnboundPipe) -> UvPipeStream { - UvPipeStream { inner: inner } - } -} - -impl RtioPipe for UvPipeStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - let (_m, scheduler) = self.inner.fire_homing_missile_sched(); - read_stream(self.inner.pipe.as_stream(), scheduler, buf) - } - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - let (_m, scheduler) = self.inner.fire_homing_missile_sched(); - write_stream(self.inner.pipe.as_stream(), scheduler, buf) - } -} - pub struct UvTcpStream { priv watcher: TcpWatcher, priv home: SchedHandle, @@ -1307,129 +1225,6 @@ impl RtioFileStream for UvFileStream { } } -pub struct UvUnixListener { - priv inner: UvUnboundPipe -} - -impl HomingIO for UvUnixListener { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.inner.home() } -} - -impl UvUnixListener { - fn new(pipe: UvUnboundPipe) -> UvUnixListener { - UvUnixListener { inner: pipe } - } -} - -impl RtioUnixListener for UvUnixListener { - fn listen(mut ~self) -> Result<~RtioUnixAcceptor, IoError> { - let _m = self.fire_homing_missile(); - let acceptor = ~UvUnixAcceptor::new(*self); - let incoming = Cell::new(acceptor.incoming.clone()); - let mut stream = acceptor.listener.inner.pipe.as_stream(); - let res = do stream.listen |mut server, status| { - do incoming.with_mut_ref |incoming| { - let inc = match status { - Some(e) => Err(uv_error_to_io_error(e)), - None => { - let pipe = UvUnboundPipe::new(&server.event_loop()); - server.accept(pipe.pipe.as_stream()); - Ok(~UvPipeStream::new(pipe) as ~RtioPipe) - } - }; - incoming.send(inc); - } - }; - match res { - Ok(()) => Ok(acceptor as ~RtioUnixAcceptor), - Err(e) => Err(uv_error_to_io_error(e)), - } - } -} - -pub struct UvTTY { - tty: tty::TTY, - home: SchedHandle, - fd: c_int, -} - -impl HomingIO for UvTTY { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl Drop for UvTTY { - fn drop(&mut self) { - // TTY handles are used for the logger in a task, so this destructor is - // run when a task is destroyed. When a task is being destroyed, a local - // scheduler isn't available, so we can't do the normal "take the - // scheduler and resume once close is done". Instead close operations on - // a TTY are asynchronous. 
- self.tty.close_async(); - } -} - -impl RtioTTY for UvTTY { - fn read(&mut self, buf: &mut [u8]) -> Result { - let (_m, scheduler) = self.fire_homing_missile_sched(); - read_stream(self.tty.as_stream(), scheduler, buf) - } - - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - let (_m, scheduler) = self.fire_homing_missile_sched(); - write_stream(self.tty.as_stream(), scheduler, buf) - } - - fn set_raw(&mut self, raw: bool) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - match self.tty.set_mode(raw) { - Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) - } - } - - fn get_winsize(&mut self) -> Result<(int, int), IoError> { - let _m = self.fire_homing_missile(); - match self.tty.get_winsize() { - Ok(p) => Ok(p), Err(e) => Err(uv_error_to_io_error(e)) - } - } - - fn isatty(&self) -> bool { - unsafe { uvll::uv_guess_handle(self.fd) == uvll::UV_TTY } - } -} - -pub struct UvUnixAcceptor { - listener: UvUnixListener, - incoming: Tube>, -} - -impl HomingIO for UvUnixAcceptor { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } -} - -impl UvUnixAcceptor { - fn new(listener: UvUnixListener) -> UvUnixAcceptor { - UvUnixAcceptor { listener: listener, incoming: Tube::new() } - } -} - -impl RtioUnixAcceptor for UvUnixAcceptor { - fn accept(&mut self) -> Result<~RtioPipe, IoError> { - let _m = self.fire_homing_missile(); - self.incoming.recv() - } - - fn accept_simultaneously(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - accept_simultaneously(self.listener.inner.pipe.as_stream(), 1) - } - - fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - accept_simultaneously(self.listener.inner.pipe.as_stream(), 0) - } -} - // this function is full of lies unsafe fn local_io() -> &'static mut IoFactory { do Local::borrow |sched: &mut Scheduler| { diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index 8684537f4e4b5..f8b87abb9f677 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -213,8 +213,6 @@ pub trait RtioUnixListener { pub trait RtioUnixAcceptor { fn accept(&mut self) -> Result<~RtioPipe, IoError>; - fn accept_simultaneously(&mut self) -> Result<(), IoError>; - fn dont_accept_simultaneously(&mut self) -> Result<(), IoError>; } pub trait RtioTTY { From be896288a366cbd165e0eac9c08fef4a019ee99d Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 4 Nov 2013 21:08:25 -0800 Subject: [PATCH 11/27] Migrate uv file bindings away from ~fn() --- src/librustuv/file.rs | 687 +++++++++++++++++++++-------------------- src/librustuv/lib.rs | 4 +- src/librustuv/pipe.rs | 2 +- src/librustuv/timer.rs | 6 +- src/librustuv/uvio.rs | 381 +++-------------------- 5 files changed, 405 insertions(+), 675 deletions(-) diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index 0ff4543a116b0..1994c0a541998 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -8,406 +8,437 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::ptr::null; -use std::c_str; use std::c_str::CString; +use std::c_str; use std::cast::transmute; -use std::libc; +use std::cast; use std::libc::{c_int, c_char, c_void, c_uint}; - -use super::{Request, NativeHandle, Loop, FsCallback, Buf, - status_to_maybe_uv_error, UvError}; +use std::libc; +use std::rt::BlockedTask; +use std::rt::io; +use std::rt::io::{FileStat, IoError}; +use std::rt::rtio; +use std::rt::local::Local; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::vec; + +use super::{NativeHandle, Loop, UvError, uv_error_to_io_error}; +use uvio::HomingIO; use uvll; -use uvll::*; -pub struct FsRequest(*uvll::uv_fs_t); -impl Request for FsRequest {} +pub struct FsRequest { + req: *uvll::uv_fs_t, + priv fired: bool, +} -pub struct RequestData { - priv complete_cb: Option +pub struct FileWatcher { + priv loop_: Loop, + priv fd: c_int, + priv close: rtio::CloseBehavior, + priv home: SchedHandle, } impl FsRequest { - pub fn new() -> FsRequest { - let fs_req = unsafe { malloc_req(UV_FS) }; - assert!(fs_req.is_not_null()); - let fs_req: FsRequest = NativeHandle::from_native_handle(fs_req); - fs_req - } - - pub fn open(self, loop_: &Loop, path: &CString, flags: int, mode: int, - cb: FsCallback) { - let complete_cb_ptr = { - let mut me = self; - me.req_boilerplate(Some(cb)) - }; - let ret = path.with_ref(|p| unsafe { + pub fn open(loop_: &Loop, path: &CString, flags: int, mode: int) + -> Result + { + execute(|req, cb| unsafe { uvll::uv_fs_open(loop_.native_handle(), - self.native_handle(), p, flags as c_int, - mode as c_int, complete_cb_ptr) - }); - assert_eq!(ret, 0); + req, path.with_ref(|p| p), flags as c_int, + mode as c_int, cb) + }).map(|req| + FileWatcher::new(*loop_, req.get_result() as c_int, + rtio::CloseSynchronously) + ) + } + + pub fn unlink(loop_: &Loop, path: &CString) -> Result<(), UvError> { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_unlink(loop_.native_handle(), req, path.with_ref(|p| p), + cb) + }) + } + + pub fn lstat(loop_: &Loop, path: &CString) -> Result { + execute(|req, cb| unsafe { + uvll::uv_fs_lstat(loop_.native_handle(), req, path.with_ref(|p| p), + cb) + }).map(|req| req.mkstat()) + } + + pub fn stat(loop_: &Loop, path: &CString) -> Result { + execute(|req, cb| unsafe { + uvll::uv_fs_stat(loop_.native_handle(), req, path.with_ref(|p| p), + cb) + }).map(|req| req.mkstat()) + } + + pub fn write(loop_: &Loop, fd: c_int, buf: &[u8], offset: i64) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_write(loop_.native_handle(), req, + fd, vec::raw::to_ptr(buf) as *c_void, + buf.len() as c_uint, offset, cb) + }) + } + + pub fn read(loop_: &Loop, fd: c_int, buf: &mut [u8], offset: i64) + -> Result + { + do execute(|req, cb| unsafe { + uvll::uv_fs_read(loop_.native_handle(), req, + fd, vec::raw::to_ptr(buf) as *c_void, + buf.len() as c_uint, offset, cb) + }).map |req| { + req.get_result() as int + } } - pub fn open_sync(mut self, loop_: &Loop, path: &CString, - flags: int, mode: int) -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let result = path.with_ref(|p| unsafe { - uvll::uv_fs_open(loop_.native_handle(), - self.native_handle(), p, flags as c_int, - mode as c_int, complete_cb_ptr) - }); - self.sync_cleanup(result) - } - - pub fn unlink(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let ret = path.with_ref(|p| unsafe { - uvll::uv_fs_unlink(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }); - assert_eq!(ret, 0); - } - - 
pub fn unlink_sync(mut self, loop_: &Loop, path: &CString) - -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let result = path.with_ref(|p| unsafe { - uvll::uv_fs_unlink(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }); - self.sync_cleanup(result) - } - - pub fn lstat(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let ret = path.with_ref(|p| unsafe { - uvll::uv_fs_lstat(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }); - assert_eq!(ret, 0); - } - - pub fn stat(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let ret = path.with_ref(|p| unsafe { - uvll::uv_fs_stat(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }); - assert_eq!(ret, 0); - } - - pub fn write(mut self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let base_ptr = buf.base as *c_void; - let len = buf.len as uint; - let ret = unsafe { - uvll::uv_fs_write(loop_.native_handle(), self.native_handle(), - fd, base_ptr, - len as c_uint, offset, complete_cb_ptr) - }; - assert_eq!(ret, 0); - } - pub fn write_sync(mut self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64) - -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let base_ptr = buf.base as *c_void; - let len = buf.len as uint; - let result = unsafe { - uvll::uv_fs_write(loop_.native_handle(), self.native_handle(), - fd, base_ptr, - len as c_uint, offset, complete_cb_ptr) - }; - self.sync_cleanup(result) - } - - pub fn read(mut self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - let buf_ptr = buf.base as *c_void; - let len = buf.len as uint; - let ret = unsafe { - uvll::uv_fs_read(loop_.native_handle(), self.native_handle(), - fd, buf_ptr, - len as c_uint, offset, complete_cb_ptr) - }; - assert_eq!(ret, 0); - } - pub fn read_sync(mut self, loop_: &Loop, fd: c_int, buf: Buf, offset: i64) - -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let buf_ptr = buf.base as *c_void; - let len = buf.len as uint; - let result = unsafe { - uvll::uv_fs_read(loop_.native_handle(), self.native_handle(), - fd, buf_ptr, - len as c_uint, offset, complete_cb_ptr) - }; - self.sync_cleanup(result) - } - - pub fn close(mut self, loop_: &Loop, fd: c_int, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_close(loop_.native_handle(), self.native_handle(), - fd, complete_cb_ptr) - }, 0); - } - pub fn close_sync(mut self, loop_: &Loop, - fd: c_int) -> Result { - let complete_cb_ptr = self.req_boilerplate(None); - let result = unsafe { - uvll::uv_fs_close(loop_.native_handle(), self.native_handle(), - fd, complete_cb_ptr) - }; - self.sync_cleanup(result) + pub fn close(loop_: &Loop, fd: c_int, sync: bool) -> Result<(), UvError> { + if sync { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_close(loop_.native_handle(), req, fd, cb) + }) + } else { + unsafe { + let req = uvll::malloc_req(uvll::UV_FS); + uvll::uv_fs_close(loop_.native_handle(), req, fd, close_cb); + return Ok(()); + } + + extern fn close_cb(req: *uvll::uv_fs_t) { + unsafe { + uvll::uv_fs_req_cleanup(req); + uvll::free_req(req); + } + } + } } - pub fn mkdir(mut self, loop_: &Loop, path: &CString, mode: c_int, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); 
- assert_eq!(path.with_ref(|p| unsafe { - uvll::uv_fs_mkdir(loop_.native_handle(), - self.native_handle(), p, mode, complete_cb_ptr) - }), 0); + pub fn mkdir(loop_: &Loop, path: &CString, mode: c_int) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_mkdir(loop_.native_handle(), req, path.with_ref(|p| p), + mode, cb) + }) } - pub fn rmdir(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { - uvll::uv_fs_rmdir(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }), 0); + pub fn rmdir(loop_: &Loop, path: &CString) -> Result<(), UvError> { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_rmdir(loop_.native_handle(), req, path.with_ref(|p| p), + cb) + }) } - pub fn rename(mut self, loop_: &Loop, path: &CString, to: &CString, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { + pub fn rename(loop_: &Loop, path: &CString, to: &CString) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { uvll::uv_fs_rename(loop_.native_handle(), - self.native_handle(), + req, path.with_ref(|p| p), to.with_ref(|p| p), - complete_cb_ptr) - }, 0); + cb) + }) } - pub fn chmod(mut self, loop_: &Loop, path: &CString, mode: c_int, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { - uvll::uv_fs_chmod(loop_.native_handle(), self.native_handle(), p, - mode, complete_cb_ptr) - }), 0); + pub fn chmod(loop_: &Loop, path: &CString, mode: c_int) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_chmod(loop_.native_handle(), req, path.with_ref(|p| p), + mode, cb) + }) } - pub fn readdir(mut self, loop_: &Loop, path: &CString, - flags: c_int, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { + pub fn readdir(loop_: &Loop, path: &CString, flags: c_int) + -> Result<~[Path], UvError> + { + execute(|req, cb| unsafe { uvll::uv_fs_readdir(loop_.native_handle(), - self.native_handle(), p, flags, complete_cb_ptr) - }), 0); - } - - pub fn readlink(mut self, loop_: &Loop, path: &CString, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { - uvll::uv_fs_readlink(loop_.native_handle(), - self.native_handle(), p, complete_cb_ptr) - }), 0); + req, path.with_ref(|p| p), flags, cb) + }).map(|req| unsafe { + let mut paths = ~[]; + let path = CString::new(path.with_ref(|p| p), false); + let parent = Path::new(path); + do c_str::from_c_multistring(req.get_ptr() as *libc::c_char, + Some(req.get_result() as uint)) |rel| { + let p = rel.as_bytes(); + paths.push(parent.join(p.slice_to(rel.len()))); + }; + paths + }) + } + + pub fn readlink(loop_: &Loop, path: &CString) -> Result { + do execute(|req, cb| unsafe { + uvll::uv_fs_readlink(loop_.native_handle(), req, + path.with_ref(|p| p), cb) + }).map |req| { + Path::new(unsafe { + CString::new(req.get_ptr() as *libc::c_char, false) + }) + } } - pub fn chown(mut self, loop_: &Loop, path: &CString, uid: int, gid: int, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(path.with_ref(|p| unsafe { + pub fn chown(loop_: &Loop, path: &CString, uid: int, gid: int) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { uvll::uv_fs_chown(loop_.native_handle(), - self.native_handle(), p, + req, path.with_ref(|p| p), uid as uvll::uv_uid_t, gid as uvll::uv_gid_t, 
- complete_cb_ptr) - }), 0); + cb) + }) } - pub fn truncate(mut self, loop_: &Loop, file: c_int, offset: i64, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_ftruncate(loop_.native_handle(), - self.native_handle(), file, offset, - complete_cb_ptr) - }, 0); + pub fn truncate(loop_: &Loop, file: c_int, offset: i64) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_ftruncate(loop_.native_handle(), req, file, offset, cb) + }) } - pub fn link(mut self, loop_: &Loop, src: &CString, dst: &CString, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_link(loop_.native_handle(), self.native_handle(), + pub fn link(loop_: &Loop, src: &CString, dst: &CString) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_link(loop_.native_handle(), req, src.with_ref(|p| p), dst.with_ref(|p| p), - complete_cb_ptr) - }, 0); + cb) + }) } - pub fn symlink(mut self, loop_: &Loop, src: &CString, dst: &CString, - cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_symlink(loop_.native_handle(), self.native_handle(), + pub fn symlink(loop_: &Loop, src: &CString, dst: &CString) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_symlink(loop_.native_handle(), req, src.with_ref(|p| p), dst.with_ref(|p| p), - 0, - complete_cb_ptr) - }, 0); - } - - pub fn fsync(mut self, loop_: &Loop, fd: c_int, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_fsync(loop_.native_handle(), self.native_handle(), fd, - complete_cb_ptr) - }, 0); - } - - pub fn datasync(mut self, loop_: &Loop, fd: c_int, cb: FsCallback) { - let complete_cb_ptr = self.req_boilerplate(Some(cb)); - assert_eq!(unsafe { - uvll::uv_fs_fdatasync(loop_.native_handle(), self.native_handle(), fd, - complete_cb_ptr) - }, 0); - } - - // accessors/utility funcs - fn sync_cleanup(self, result: c_int) - -> Result { - self.cleanup_and_delete(); - match status_to_maybe_uv_error(result as i32) { - Some(err) => Err(err), - None => Ok(result) - } - } - fn req_boilerplate(&mut self, cb: Option) -> uvll::uv_fs_cb { - let result = match cb { - Some(_) => compl_cb, - None => 0 as uvll::uv_fs_cb - }; - self.install_req_data(cb); - result - } - pub fn install_req_data(&mut self, cb: Option) { - let fs_req = (self.native_handle()) as *uvll::uv_write_t; - let data = ~RequestData { - complete_cb: cb - }; - unsafe { - let data = transmute::<~RequestData, *c_void>(data); - uvll::set_data_for_req(fs_req, data); - } + 0, cb) + }) } - fn get_req_data<'r>(&'r mut self) -> &'r mut RequestData { - unsafe { - let data = uvll::get_data_for_req((self.native_handle())); - let data = transmute::<&*c_void, &mut ~RequestData>(&data); - &mut **data - } + pub fn fsync(loop_: &Loop, fd: c_int) -> Result<(), UvError> { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_fsync(loop_.native_handle(), req, fd, cb) + }) } - pub fn get_path(&self) -> *c_char { - unsafe { uvll::get_path_from_fs_req(self.native_handle()) } + pub fn datasync(loop_: &Loop, fd: c_int) -> Result<(), UvError> { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_fdatasync(loop_.native_handle(), req, fd, cb) + }) } pub fn get_result(&self) -> c_int { - unsafe { uvll::get_result_from_fs_req(self.native_handle()) } - } - - pub fn get_loop(&self) -> Loop { - unsafe { Loop{handle:uvll::get_loop_from_fs_req(self.native_handle())} } + unsafe { 
uvll::get_result_from_fs_req(self.req) } } - pub fn get_stat(&self) -> uv_stat_t { - let stat = uv_stat_t::new(); - unsafe { uvll::populate_stat(self.native_handle(), &stat); } + pub fn get_stat(&self) -> uvll::uv_stat_t { + let stat = uvll::uv_stat_t::new(); + unsafe { uvll::populate_stat(self.req, &stat); } stat } pub fn get_ptr(&self) -> *libc::c_void { - unsafe { - uvll::get_ptr_from_fs_req(self.native_handle()) - } + unsafe { uvll::get_ptr_from_fs_req(self.req) } } - pub fn each_path(&mut self, f: &fn(&CString)) { - let ptr = self.get_ptr(); - match self.get_result() { - n if (n <= 0) => {} - n => { - let n_len = n as uint; - // we pass in the len that uv tells us is there - // for the entries and we don't continue past that.. - // it appears that sometimes the multistring isn't - // correctly delimited and we stray into garbage memory? - // in any case, passing Some(n_len) fixes it and ensures - // good results - unsafe { - c_str::from_c_multistring(ptr as *libc::c_char, - Some(n_len), f); - } + pub fn mkstat(&self) -> FileStat { + let path = unsafe { uvll::get_path_from_fs_req(self.req) }; + let path = unsafe { Path::new(CString::new(path, false)) }; + let stat = self.get_stat(); + fn to_msec(stat: uvll::uv_timespec_t) -> u64 { + (stat.tv_sec * 1000 + stat.tv_nsec / 1000000) as u64 + } + let kind = match (stat.st_mode as c_int) & libc::S_IFMT { + libc::S_IFREG => io::TypeFile, + libc::S_IFDIR => io::TypeDirectory, + libc::S_IFIFO => io::TypeNamedPipe, + libc::S_IFBLK => io::TypeBlockSpecial, + libc::S_IFLNK => io::TypeSymlink, + _ => io::TypeUnknown, + }; + FileStat { + path: path, + size: stat.st_size as u64, + kind: kind, + perm: (stat.st_mode as io::FilePermission) & io::AllPermissions, + created: to_msec(stat.st_birthtim), + modified: to_msec(stat.st_mtim), + accessed: to_msec(stat.st_atim), + unstable: io::UnstableFileStat { + device: stat.st_dev as u64, + inode: stat.st_ino as u64, + rdev: stat.st_rdev as u64, + nlink: stat.st_nlink as u64, + uid: stat.st_uid as u64, + gid: stat.st_gid as u64, + blksize: stat.st_blksize as u64, + blocks: stat.st_blocks as u64, + flags: stat.st_flags as u64, + gen: stat.st_gen as u64, } } } +} - fn cleanup_and_delete(self) { +impl Drop for FsRequest { + fn drop(&mut self) { unsafe { - let data = uvll::get_data_for_req(self.native_handle()); - let _data = transmute::<*c_void, ~RequestData>(data); - uvll::set_data_for_req(self.native_handle(), null::<()>()); - uvll::uv_fs_req_cleanup(self.native_handle()); - free_req(self.native_handle() as *c_void) + if self.fired { + uvll::uv_fs_req_cleanup(self.req); + } + uvll::free_req(self.req); + } + } +} + +fn execute(f: &fn(*uvll::uv_fs_t, uvll::uv_fs_cb) -> c_int) + -> Result +{ + let mut req = FsRequest { + fired: false, + req: unsafe { uvll::malloc_req(uvll::UV_FS) } + }; + return match f(req.req, fs_cb) { + 0 => { + req.fired = true; + let mut slot = None; + unsafe { uvll::set_data_for_req(req.req, &slot) } + let sched: ~Scheduler = Local::take(); + do sched.deschedule_running_task_and_then |_, task| { + slot = Some(task); + } + match req.get_result() { + n if n < 0 => Err(UvError(n)), + _ => Ok(req), + } } + n => Err(UvError(n)) + + }; + + extern fn fs_cb(req: *uvll::uv_fs_t) { + let slot: &mut Option = unsafe { + cast::transmute(uvll::get_data_for_req(req)) + }; + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(slot.take_unwrap()); } } -impl NativeHandle<*uvll::uv_fs_t> for FsRequest { - fn from_native_handle(handle: *uvll:: uv_fs_t) -> FsRequest { - FsRequest(handle) 
+fn execute_nop(f: &fn(*uvll::uv_fs_t, uvll::uv_fs_cb) -> c_int) + -> Result<(), UvError> +{ + execute(f).map(|_| {}) +} + +impl HomingIO for FileWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl FileWatcher { + pub fn new(loop_: Loop, fd: c_int, close: rtio::CloseBehavior) -> FileWatcher { + FileWatcher { + loop_: loop_, + fd: fd, + close: close, + home: get_handle_to_current_scheduler!() + } } - fn native_handle(&self) -> *uvll::uv_fs_t { - match self { &FsRequest(ptr) => ptr } + + fn base_read(&mut self, buf: &mut [u8], offset: i64) -> Result { + let _m = self.fire_missiles(); + let r = FsRequest::read(&self.loop_, self.fd, buf, offset); + r.map_err(uv_error_to_io_error) + } + fn base_write(&mut self, buf: &[u8], offset: i64) -> Result<(), IoError> { + let _m = self.fire_missiles(); + let r = FsRequest::write(&self.loop_, self.fd, buf, offset); + r.map_err(uv_error_to_io_error) + } + fn seek_common(&mut self, pos: i64, whence: c_int) -> + Result{ + #[fixed_stack_segment]; #[inline(never)]; + unsafe { + match libc::lseek(self.fd, pos as libc::off_t, whence) { + -1 => { + Err(IoError { + kind: io::OtherIoError, + desc: "Failed to lseek.", + detail: None + }) + }, + n => Ok(n as u64) + } + } } } -fn sync_cleanup(result: int) - -> Result { - match status_to_maybe_uv_error(result as i32) { - Some(err) => Err(err), - None => Ok(result) +impl Drop for FileWatcher { + fn drop(&mut self) { + let _m = self.fire_missiles(); + match self.close { + rtio::DontClose => {} + rtio::CloseAsynchronously => { + FsRequest::close(&self.loop_, self.fd, false); + } + rtio::CloseSynchronously => { + FsRequest::close(&self.loop_, self.fd, true); + } + } } } -extern fn compl_cb(req: *uv_fs_t) { - let mut req: FsRequest = NativeHandle::from_native_handle(req); - // pull the user cb out of the req data - let cb = { - let data = req.get_req_data(); - assert!(data.complete_cb.is_some()); - // option dance, option dance. oooooh yeah. - data.complete_cb.take_unwrap() - }; - // in uv_fs_open calls, the result will be the fd in the - // case of success, otherwise it's -1 indicating an error - let result = req.get_result(); - let status = status_to_maybe_uv_error(result); - // we have a req and status, call the user cb.. - // only giving the user a ref to the FsRequest, as we - // have to clean it up, afterwards (and they aren't really - // reusable, anyways - cb(&mut req, status); - // clean up the req (and its data!) 
after calling the user cb - req.cleanup_and_delete(); +impl rtio::RtioFileStream for FileWatcher { + fn read(&mut self, buf: &mut [u8]) -> Result { + self.base_read(buf, -1) + } + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + self.base_write(buf, -1) + } + fn pread(&mut self, buf: &mut [u8], offset: u64) -> Result { + self.base_read(buf, offset as i64) + } + fn pwrite(&mut self, buf: &[u8], offset: u64) -> Result<(), IoError> { + self.base_write(buf, offset as i64) + } + fn seek(&mut self, pos: i64, whence: io::SeekStyle) -> Result { + use std::libc::{SEEK_SET, SEEK_CUR, SEEK_END}; + let whence = match whence { + io::SeekSet => SEEK_SET, + io::SeekCur => SEEK_CUR, + io::SeekEnd => SEEK_END + }; + self.seek_common(pos, whence) + } + fn tell(&self) -> Result { + use std::libc::SEEK_CUR; + // this is temporary + let self_ = unsafe { cast::transmute_mut(self) }; + self_.seek_common(0, SEEK_CUR) + } + fn fsync(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + FsRequest::fsync(&self.loop_, self.fd).map_err(uv_error_to_io_error) + } + fn datasync(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + FsRequest::datasync(&self.loop_, self.fd).map_err(uv_error_to_io_error) + } + fn truncate(&mut self, offset: i64) -> Result<(), IoError> { + let _m = self.fire_missiles(); + let r = FsRequest::truncate(&self.loop_, self.fd, offset); + r.map_err(uv_error_to_io_error) + } } #[cfg(test)] diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 1d6f2f0edb55e..eb2da05506d82 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -60,7 +60,7 @@ use std::rt::io::IoError; //#[cfg(test)] use unstable::run_in_bare_thread; -pub use self::file::{FsRequest}; +pub use self::file::{FsRequest, FileWatcher}; pub use self::net::{StreamWatcher, TcpWatcher, UdpWatcher}; pub use self::idle::IdleWatcher; pub use self::timer::TimerWatcher; @@ -219,7 +219,6 @@ pub type AllocCallback = ~fn(uint) -> Buf; pub type ReadCallback = ~fn(StreamWatcher, int, Buf, Option); pub type NullCallback = ~fn(); pub type ConnectionCallback = ~fn(StreamWatcher, Option); -pub type FsCallback = ~fn(&mut FsRequest, Option); pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, SocketAddr, uint, Option); pub type UdpSendCallback = ~fn(UdpWatcher, Option); @@ -263,7 +262,6 @@ impl> WatcherInterop for W { connect_cb: None, close_cb: None, alloc_cb: None, - async_cb: None, udp_recv_cb: None, udp_send_cb: None, }; diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index f1936635a1839..a857308a81b35 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -158,7 +158,7 @@ impl PipeListener { Ok(p.install()) } n => { - unsafe { uvll::free_handle(pipe) } + unsafe { uvll::uv_close(pipe, pipe_close_cb) } Err(UvError(n)) } } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 5bf3a82e972bb..1732e84be4e70 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -9,7 +9,7 @@ // except according to those terms. 
use std::cell::Cell; -use std::comm::{oneshot, stream, PortOne, ChanOne}; +use std::comm::{oneshot, stream, PortOne, ChanOne, SendDeferred}; use std::libc::c_int; use std::rt::BlockedTask; use std::rt::local::Local; @@ -106,9 +106,9 @@ extern fn timer_cb(handle: *uvll::uv_timer_t, _status: c_int) { let sched: ~Scheduler = Local::take(); sched.resume_blocked_task_immediately(task); } - SendOnce(chan) => chan.send(()), + SendOnce(chan) => chan.send_deferred(()), SendMany(chan) => { - chan.send(()); + chan.send_deferred(()); timer.action = Some(SendMany(chan)); } } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 592a23297cc3a..e06f8b5430eca 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -21,20 +21,18 @@ use std::str; use std::rt::io; use std::rt::io::IoError; use std::rt::io::net::ip::{SocketAddr, IpAddr}; -use std::rt::io::{standard_error, OtherIoError, SeekStyle, SeekSet, SeekCur, - SeekEnd}; +use std::rt::io::{standard_error, OtherIoError}; use std::rt::io::process::ProcessConfig; use std::rt::local::Local; use std::rt::rtio::*; use std::rt::sched::{Scheduler, SchedHandle}; use std::rt::tube::Tube; use std::rt::task::Task; -use std::path::{GenericPath, Path}; -use std::libc::{lseek, off_t, O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, - O_WRONLY, S_IRUSR, S_IWUSR}; -use std::rt::io::{FileMode, FileAccess, Open, - Append, Truncate, Read, Write, ReadWrite, - FileStat}; +use std::path::Path; +use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, + S_IRUSR, S_IWUSR}; +use std::rt::io::{FileMode, FileAccess, Open, Append, Truncate, Read, Write, + ReadWrite, FileStat}; use std::rt::io::signal::Signum; use std::task; use ai = std::rt::io::net::addrinfo; @@ -249,76 +247,6 @@ impl UvIoFactory { } } -/// Helper for a variety of simple uv_fs_* functions that have no ret val. This -/// function takes the loop that it will act on, and then invokes the specified -/// callback in a situation where the task wil be immediately blocked -/// afterwards. The `FsCallback` yielded must be invoked to reschedule the task -/// (once the result of the operation is known). 
-fn uv_fs_helper(loop_: &mut Loop, - retfn: extern "Rust" fn(&mut FsRequest) -> T, - cb: &fn(&mut FsRequest, &mut Loop, FsCallback)) - -> Result { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - let mut new_req = FsRequest::new(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do cb(&mut new_req, loop_) |req, err| { - let res = match err { - None => Ok(retfn(req)), - Some(err) => Err(uv_error_to_io_error(err)) - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - }; - } - } - assert!(!result_cell.is_empty()); - return result_cell.take(); -} - -fn unit(_: &mut FsRequest) {} - -fn fs_mkstat(f: &mut FsRequest) -> FileStat { - let path = unsafe { Path::new(CString::new(f.get_path(), false)) }; - let stat = f.get_stat(); - fn to_msec(stat: uvll::uv_timespec_t) -> u64 { - (stat.tv_sec * 1000 + stat.tv_nsec / 1000000) as u64 - } - let kind = match (stat.st_mode as c_int) & libc::S_IFMT { - libc::S_IFREG => io::TypeFile, - libc::S_IFDIR => io::TypeDirectory, - libc::S_IFIFO => io::TypeNamedPipe, - libc::S_IFBLK => io::TypeBlockSpecial, - libc::S_IFLNK => io::TypeSymlink, - _ => io::TypeUnknown, - }; - FileStat { - path: path, - size: stat.st_size as u64, - kind: kind, - perm: (stat.st_mode as io::FilePermission) & io::AllPermissions, - created: to_msec(stat.st_birthtim), - modified: to_msec(stat.st_mtim), - accessed: to_msec(stat.st_atim), - unstable: io::UnstableFileStat { - device: stat.st_dev as u64, - inode: stat.st_ino as u64, - rdev: stat.st_rdev as u64, - nlink: stat.st_nlink as u64, - uid: stat.st_uid as u64, - gid: stat.st_gid as u64, - blksize: stat.st_blksize as u64, - blocks: stat.st_blocks as u64, - flags: stat.st_flags as u64, - gen: stat.st_gen as u64, - } - } -} - impl IoFactory for UvIoFactory { // Connect to an address and return a new stream // NB: This blocks the task waiting on the connection. 
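
The hunks above drop the callback-and-Cell plumbing (uv_fs_helper and its FsCallback argument) in favour of FsRequest methods that block the calling task and hand back a Result. A rough standalone sketch of that shape, with a std channel standing in for the scheduler's deschedule_running_task_and_then / resume_blocked_task_immediately pair and an invented fake_uv_stat in place of the real uv binding (modern-dialect Rust, not part of the patch):

use std::sync::mpsc::channel;
use std::thread;

// Callback-style API, standing in for the old uv_fs_helper-era interface.
fn fake_uv_stat(path: String, cb: Box<dyn FnOnce(Result<u64, i32>) + Send>) {
    thread::spawn(move || {
        // Pretend libuv produced a file size for `path`.
        cb(Ok(path.len() as u64));
    });
}

// Blocking wrapper with the same shape as the new FsRequest::stat: start the
// operation, park until the callback reports in, then return a Result.
fn stat_blocking(path: &str) -> Result<u64, i32> {
    let (tx, rx) = channel();
    fake_uv_stat(path.to_string(), Box::new(move |res| { let _ = tx.send(res); }));
    rx.recv().expect("callback dropped without reporting a result")
}

fn main() {
    println!("{:?}", stat_blocking("/tmp"));
}
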
@@ -456,10 +384,10 @@ impl IoFactory for UvIoFactory { return result_cell.take(); } - fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior) -> ~RtioFileStream { + fn fs_from_raw_fd(&mut self, fd: c_int, + close: CloseBehavior) -> ~RtioFileStream { let loop_ = Loop {handle: self.uv_loop().native_handle()}; - let home = get_handle_to_current_scheduler!(); - ~UvFileStream::new(loop_, fd, close, home) as ~RtioFileStream + ~FileWatcher::new(loop_, fd, close) as ~RtioFileStream } fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess) @@ -477,138 +405,64 @@ impl IoFactory for UvIoFactory { io::ReadWrite => (flags | libc::O_RDWR | libc::O_CREAT, libc::S_IRUSR | libc::S_IWUSR), }; - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - let open_req = file::FsRequest::new(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do open_req.open(self.uv_loop(), path, flags as int, mode as int) - |req,err| { - if err.is_none() { - let loop_ = Loop {handle: req.get_loop().native_handle()}; - let home = get_handle_to_current_scheduler!(); - let fd = req.get_result() as c_int; - let fs = ~UvFileStream::new( - loop_, fd, CloseSynchronously, home) as ~RtioFileStream; - let res = Ok(fs); - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } else { - let res = Err(uv_error_to_io_error(err.unwrap())); - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - }; - }; - }; - assert!(!result_cell.is_empty()); - return result_cell.take(); + + match FsRequest::open(self.uv_loop(), path, flags as int, mode as int) { + Ok(fs) => Ok(~fs as ~RtioFileStream), + Err(e) => Err(uv_error_to_io_error(e)) + } } fn fs_unlink(&mut self, path: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.unlink(l, path, cb) - } + let r = FsRequest::unlink(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) } fn fs_lstat(&mut self, path: &CString) -> Result { - do uv_fs_helper(self.uv_loop(), fs_mkstat) |req, l, cb| { - req.lstat(l, path, cb) - } + let r = FsRequest::lstat(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) } fn fs_stat(&mut self, path: &CString) -> Result { - do uv_fs_helper(self.uv_loop(), fs_mkstat) |req, l, cb| { - req.stat(l, path, cb) - } + let r = FsRequest::stat(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) } fn fs_mkdir(&mut self, path: &CString, perm: io::FilePermission) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.mkdir(l, path, perm as c_int, cb) - } + let r = FsRequest::mkdir(self.uv_loop(), path, perm as c_int); + r.map_err(uv_error_to_io_error) } fn fs_rmdir(&mut self, path: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.rmdir(l, path, cb) - } + let r = FsRequest::rmdir(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) } fn fs_rename(&mut self, path: &CString, to: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.rename(l, path, to, cb) - } + let r = FsRequest::rename(self.uv_loop(), path, to); + r.map_err(uv_error_to_io_error) } fn fs_chmod(&mut self, path: &CString, perm: io::FilePermission) -> Result<(), IoError> { - do 
uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.chmod(l, path, perm as c_int, cb) - } + let r = FsRequest::chmod(self.uv_loop(), path, perm as c_int); + r.map_err(uv_error_to_io_error) } - fn fs_readdir(&mut self, path: &CString, flags: c_int) -> - Result<~[Path], IoError> { - use str::StrSlice; - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let path_cell = Cell::new(path); - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - let stat_req = file::FsRequest::new(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let path = path_cell.take(); - // Don't pick up the null byte - let slice = path.as_bytes().slice(0, path.len()); - let path_parent = Cell::new(Path::new(slice)); - do stat_req.readdir(self.uv_loop(), path, flags) |req,err| { - let parent = path_parent.take(); - let res = match err { - None => { - let mut paths = ~[]; - do req.each_path |rel_path| { - let p = rel_path.as_bytes(); - paths.push(parent.join(p.slice_to(rel_path.len()))); - } - Ok(paths) - }, - Some(e) => { - Err(uv_error_to_io_error(e)) - } - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - }; - }; - }; - assert!(!result_cell.is_empty()); - return result_cell.take(); + fn fs_readdir(&mut self, path: &CString, flags: c_int) + -> Result<~[Path], IoError> + { + let r = FsRequest::readdir(self.uv_loop(), path, flags); + r.map_err(uv_error_to_io_error) } fn fs_link(&mut self, src: &CString, dst: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.link(l, src, dst, cb) - } + let r = FsRequest::link(self.uv_loop(), src, dst); + r.map_err(uv_error_to_io_error) } fn fs_symlink(&mut self, src: &CString, dst: &CString) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.symlink(l, src, dst, cb) - } + let r = FsRequest::symlink(self.uv_loop(), src, dst); + r.map_err(uv_error_to_io_error) } fn fs_chown(&mut self, path: &CString, uid: int, gid: int) -> Result<(), IoError> { - do uv_fs_helper(self.uv_loop(), unit) |req, l, cb| { - req.chown(l, path, uid, gid, cb) - } + let r = FsRequest::chown(self.uv_loop(), path, uid, gid); + r.map_err(uv_error_to_io_error) } fn fs_readlink(&mut self, path: &CString) -> Result { - fn getlink(f: &mut FsRequest) -> Path { - Path::new(unsafe { CString::new(f.get_ptr() as *libc::c_char, false) }) - } - do uv_fs_helper(self.uv_loop(), getlink) |req, l, cb| { - req.readlink(l, path, cb) - } + let r = FsRequest::readlink(self.uv_loop(), path); + r.map_err(uv_error_to_io_error) } fn spawn(&mut self, config: ProcessConfig) @@ -1072,159 +926,6 @@ impl RtioUdpSocket for UvUdpSocket { } } -pub struct UvFileStream { - priv loop_: Loop, - priv fd: c_int, - priv close: CloseBehavior, - priv home: SchedHandle, -} - -impl HomingIO for UvFileStream { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl UvFileStream { - fn new(loop_: Loop, fd: c_int, close: CloseBehavior, - home: SchedHandle) -> UvFileStream { - UvFileStream { - loop_: loop_, - fd: fd, - close: close, - home: home, - } - } - fn base_read(&mut self, buf: &mut [u8], offset: i64) -> Result { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let buf_ptr: *&mut [u8] = &buf; - let (_m, scheduler) = self.fire_homing_missile_sched(); - do scheduler.deschedule_running_task_and_then 
|_, task| { - let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - let task_cell = Cell::new(task); - let read_req = file::FsRequest::new(); - do read_req.read(&self.loop_, self.fd, buf, offset) |req, uverr| { - let res = match uverr { - None => Ok(req.get_result() as int), - Some(err) => Err(uv_error_to_io_error(err)) - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - result_cell.take() - } - fn base_write(&mut self, buf: &[u8], offset: i64) -> Result<(), IoError> { - do self.nop_req |self_, req, cb| { - req.write(&self_.loop_, self_.fd, slice_to_uv_buf(buf), offset, cb) - } - } - fn seek_common(&mut self, pos: i64, whence: c_int) -> - Result{ - #[fixed_stack_segment]; #[inline(never)]; - unsafe { - match lseek(self.fd, pos as off_t, whence) { - -1 => { - Err(IoError { - kind: OtherIoError, - desc: "Failed to lseek.", - detail: None - }) - }, - n => Ok(n as u64) - } - } - } - fn nop_req(&mut self, f: &fn(&mut UvFileStream, file::FsRequest, FsCallback)) - -> Result<(), IoError> { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let (_m, sched) = self.fire_homing_missile_sched(); - do sched.deschedule_running_task_and_then |_, task| { - let task = Cell::new(task); - let req = file::FsRequest::new(); - do f(self, req) |_, uverr| { - let res = match uverr { - None => Ok(()), - Some(err) => Err(uv_error_to_io_error(err)) - }; - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task.take()); - } - } - result_cell.take() - } -} - -impl Drop for UvFileStream { - fn drop(&mut self) { - match self.close { - DontClose => {} - CloseAsynchronously => { - let close_req = file::FsRequest::new(); - do close_req.close(&self.loop_, self.fd) |_,_| {} - } - CloseSynchronously => { - let (_m, scheduler) = self.fire_homing_missile_sched(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let close_req = file::FsRequest::new(); - do close_req.close(&self.loop_, self.fd) |_,_| { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } - } -} - -impl RtioFileStream for UvFileStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - self.base_read(buf, -1) - } - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - self.base_write(buf, -1) - } - fn pread(&mut self, buf: &mut [u8], offset: u64) -> Result { - self.base_read(buf, offset as i64) - } - fn pwrite(&mut self, buf: &[u8], offset: u64) -> Result<(), IoError> { - self.base_write(buf, offset as i64) - } - fn seek(&mut self, pos: i64, whence: SeekStyle) -> Result { - use std::libc::{SEEK_SET, SEEK_CUR, SEEK_END}; - let whence = match whence { - SeekSet => SEEK_SET, - SeekCur => SEEK_CUR, - SeekEnd => SEEK_END - }; - self.seek_common(pos, whence) - } - fn tell(&self) -> Result { - use std::libc::SEEK_CUR; - // this is temporary - let self_ = unsafe { cast::transmute_mut(self) }; - self_.seek_common(0, SEEK_CUR) - } - fn fsync(&mut self) -> Result<(), IoError> { - do self.nop_req |self_, req, cb| { - req.fsync(&self_.loop_, self_.fd, cb) - } - } - fn datasync(&mut self) -> Result<(), IoError> { - do self.nop_req |self_, req, cb| { - req.datasync(&self_.loop_, self_.fd, cb) - } - } - fn truncate(&mut self, offset: i64) -> Result<(), IoError> { - do self.nop_req |self_, req, cb| { - req.truncate(&self_.loop_, 
self_.fd, offset, cb) - } - } -} - // this function is full of lies unsafe fn local_io() -> &'static mut IoFactory { do Local::borrow |sched: &mut Scheduler| { From 5842b606a7e11c671d92aac574389e359a8172f6 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 4 Nov 2013 22:52:33 -0800 Subject: [PATCH 12/27] Migrate uv getaddrinfo away from ~fn() --- src/librustuv/addrinfo.rs | 177 +++++++++++++++----------------------- src/librustuv/uvio.rs | 35 +------- 2 files changed, 70 insertions(+), 142 deletions(-) diff --git a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs index 77e70acca8d5e..36c4defdee929 100644 --- a/src/librustuv/addrinfo.rs +++ b/src/librustuv/addrinfo.rs @@ -8,41 +8,44 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cast::transmute; -use std::cell::Cell; -use std::libc::{c_int, c_void}; -use std::ptr::null; use ai = std::rt::io::net::addrinfo; +use std::cast; +use std::libc::c_int; +use std::ptr::null; +use std::rt::BlockedTask; +use std::rt::local::Local; +use std::rt::sched::Scheduler; -use uvll; -use uvll::UV_GETADDRINFO; -use super::{Loop, UvError, NativeHandle, status_to_maybe_uv_error}; use net; +use super::{Loop, UvError, NativeHandle}; +use uvll::UV_GETADDRINFO; +use uvll; -type GetAddrInfoCallback = ~fn(GetAddrInfoRequest, &net::UvAddrInfo, Option); +struct GetAddrInfoRequest { + handle: *uvll::uv_getaddrinfo_t, +} -pub struct GetAddrInfoRequest(*uvll::uv_getaddrinfo_t); +struct Addrinfo { + handle: *uvll::addrinfo, +} -pub struct RequestData { - priv getaddrinfo_cb: Option, +struct Ctx { + slot: Option, + status: c_int, + addrinfo: Option, } impl GetAddrInfoRequest { pub fn new() -> GetAddrInfoRequest { - let req = unsafe { uvll::malloc_req(UV_GETADDRINFO) }; - assert!(req.is_not_null()); - let mut req: GetAddrInfoRequest = NativeHandle::from_native_handle(req); - req.install_req_data(); - return req; + GetAddrInfoRequest { + handle: unsafe { uvll::malloc_req(uvll::UV_GETADDRINFO) }, + } } - pub fn getaddrinfo(&mut self, loop_: &Loop, node: Option<&str>, - service: Option<&str>, hints: Option, - cb: GetAddrInfoCallback) { - + pub fn run(loop_: &Loop, node: Option<&str>, service: Option<&str>, + hints: Option) -> Result<~[ai::Info], UvError> { assert!(node.is_some() || service.is_some()); - - let (c_node, c_node_ptr) = match node { + let (_c_node, c_node_ptr) = match node { Some(n) => { let c_node = n.to_c_str(); let c_node_ptr = c_node.with_ref(|r| r); @@ -51,7 +54,7 @@ impl GetAddrInfoRequest { None => (None, null()) }; - let (c_service, c_service_ptr) = match service { + let (_c_service, c_service_ptr) = match service { Some(s) => { let c_service = s.to_c_str(); let c_service_ptr = c_service.with_ref(|r| r); @@ -60,17 +63,6 @@ impl GetAddrInfoRequest { None => (None, null()) }; - let cb = Cell::new(cb); - let wrapper_cb: GetAddrInfoCallback = |req, addrinfo, err| { - // Capture some heap values that need to stay alive for the - // getaddrinfo call - let _ = &c_node; - let _ = &c_service; - - let cb = cb.take(); - cb(req, addrinfo, err) - }; - let hint = hints.map(|hint| { let mut flags = 0; do each_ai_flag |cval, aival| { @@ -78,19 +70,6 @@ impl GetAddrInfoRequest { flags |= cval as i32; } } - /* XXX: do we really want to support these? 
- let socktype = match hint.socktype { - Some(ai::Stream) => uvll::rust_SOCK_STREAM(), - Some(ai::Datagram) => uvll::rust_SOCK_DGRAM(), - Some(ai::Raw) => uvll::rust_SOCK_RAW(), - None => 0, - }; - let protocol = match hint.protocol { - Some(ai::UDP) => uvll::rust_IPPROTO_UDP(), - Some(ai::TCP) => uvll::rust_IPPROTO_TCP(), - _ => 0, - }; - */ let socktype = 0; let protocol = 0; @@ -106,66 +85,54 @@ impl GetAddrInfoRequest { } }); let hint_ptr = hint.as_ref().map_default(null(), |x| x as *uvll::addrinfo); + let req = GetAddrInfoRequest::new(); + + return match unsafe { + uvll::uv_getaddrinfo(loop_.native_handle(), req.handle, + getaddrinfo_cb, c_node_ptr, c_service_ptr, + hint_ptr) + } { + 0 => { + let mut cx = Ctx { slot: None, status: 0, addrinfo: None }; + unsafe { uvll::set_data_for_req(req.handle, &cx) } + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + cx.slot = Some(task); + } - self.get_req_data().getaddrinfo_cb = Some(wrapper_cb); - - unsafe { - assert!(0 == uvll::uv_getaddrinfo(loop_.native_handle(), - self.native_handle(), - getaddrinfo_cb, - c_node_ptr, - c_service_ptr, - hint_ptr)); - } - - extern "C" fn getaddrinfo_cb(req: *uvll::uv_getaddrinfo_t, - status: c_int, - res: *uvll::addrinfo) { - let mut req: GetAddrInfoRequest = NativeHandle::from_native_handle(req); - let err = status_to_maybe_uv_error(status); - let addrinfo = net::UvAddrInfo(res); - let data = req.get_req_data(); - (*data.getaddrinfo_cb.get_ref())(req, &addrinfo, err); - unsafe { - uvll::uv_freeaddrinfo(res); + match cx.status { + 0 => Ok(accum_addrinfo(cx.addrinfo.get_ref())), + n => Err(UvError(n)) + } } - } - } + n => Err(UvError(n)) + }; - fn get_loop(&self) -> Loop { - unsafe { - Loop { - handle: uvll::get_loop_from_fs_req(self.native_handle()) - } - } - } - fn install_req_data(&mut self) { - let req = self.native_handle() as *uvll::uv_getaddrinfo_t; - let data = ~RequestData { - getaddrinfo_cb: None - }; - unsafe { - let data = transmute::<~RequestData, *c_void>(data); - uvll::set_data_for_req(req, data); + extern fn getaddrinfo_cb(req: *uvll::uv_getaddrinfo_t, + status: c_int, + res: *uvll::addrinfo) { + let cx: &mut Ctx = unsafe { + cast::transmute(uvll::get_data_for_req(req)) + }; + cx.status = status; + cx.addrinfo = Some(Addrinfo { handle: res }); + + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(cx.slot.take_unwrap()); } } +} - fn get_req_data<'r>(&'r mut self) -> &'r mut RequestData { - unsafe { - let data = uvll::get_data_for_req(self.native_handle()); - let data = transmute::<&*c_void, &mut ~RequestData>(&data); - return &mut **data; - } +impl Drop for GetAddrInfoRequest { + fn drop(&mut self) { + unsafe { uvll::free_req(self.handle) } } +} - fn delete(self) { - unsafe { - let data = uvll::get_data_for_req(self.native_handle()); - let _data = transmute::<*c_void, ~RequestData>(data); - uvll::set_data_for_req(self.native_handle(), null::<()>()); - uvll::free_req(self.native_handle()); - } +impl Drop for Addrinfo { + fn drop(&mut self) { + unsafe { uvll::uv_freeaddrinfo(self.handle) } } } @@ -184,10 +151,9 @@ fn each_ai_flag(_f: &fn(c_int, ai::Flag)) { } // Traverse the addrinfo linked list, producing a vector of Rust socket addresses -pub fn accum_addrinfo(addr: &net::UvAddrInfo) -> ~[ai::Info] { +pub fn accum_addrinfo(addr: &Addrinfo) -> ~[ai::Info] { unsafe { - let &net::UvAddrInfo(addr) = addr; - let mut addr = addr; + let mut addr = addr.handle; let mut addrs = ~[]; loop { @@ -235,15 +201,6 @@ pub fn 
accum_addrinfo(addr: &net::UvAddrInfo) -> ~[ai::Info] { } } -impl NativeHandle<*uvll::uv_getaddrinfo_t> for GetAddrInfoRequest { - fn from_native_handle(handle: *uvll::uv_getaddrinfo_t) -> GetAddrInfoRequest { - GetAddrInfoRequest(handle) - } - fn native_handle(&self) -> *uvll::uv_getaddrinfo_t { - match self { &GetAddrInfoRequest(ptr) => ptr } - } -} - #[cfg(test)] mod test { use Loop; diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index e06f8b5430eca..1c6e59d9f2eac 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -46,7 +46,7 @@ use ai = std::rt::io::net::addrinfo; use super::*; use idle::IdleWatcher; use net::{UvIpv4SocketAddr, UvIpv6SocketAddr}; -use addrinfo::{GetAddrInfoRequest, accum_addrinfo}; +use addrinfo::GetAddrInfoRequest; use pipe::PipeListener; // XXX we should not be calling uvll functions in here. @@ -351,37 +351,8 @@ impl IoFactory for UvIoFactory { fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>, hint: Option) -> Result<~[ai::Info], IoError> { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let host_ptr: *Option<&str> = &host; - let servname_ptr: *Option<&str> = &servname; - let hint_ptr: *Option = &hint; - let addrinfo_req = GetAddrInfoRequest::new(); - let addrinfo_req_cell = Cell::new(addrinfo_req); - - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let mut addrinfo_req = addrinfo_req_cell.take(); - unsafe { - do addrinfo_req.getaddrinfo(self.uv_loop(), - *host_ptr, *servname_ptr, - *hint_ptr) |_, addrinfo, err| { - let res = match err { - None => Ok(accum_addrinfo(addrinfo)), - Some(err) => Err(uv_error_to_io_error(err)) - }; - (*result_cell_ptr).put_back(res); - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } - addrinfo_req.delete(); - assert!(!result_cell.is_empty()); - return result_cell.take(); + let r = GetAddrInfoRequest::run(self.uv_loop(), host, servname, hint); + r.map_err(uv_error_to_io_error) } fn fs_from_raw_fd(&mut self, fd: c_int, From 584b3593485ef144de7217de19ac8a98766c0532 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 5 Nov 2013 00:27:41 -0800 Subject: [PATCH 13/27] Migrate uv net bindings away from ~fn() --- src/librustuv/addrinfo.rs | 32 +- src/librustuv/lib.rs | 180 ++------- src/librustuv/net.rs | 802 ++++++++++++++++++++++++-------------- src/librustuv/pipe.rs | 23 +- src/librustuv/process.rs | 11 +- src/librustuv/stream.rs | 70 ++-- src/librustuv/timer.rs | 7 +- src/librustuv/uvio.rs | 557 +------------------------- src/librustuv/uvll.rs | 3 + 9 files changed, 618 insertions(+), 1067 deletions(-) diff --git a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs index 36c4defdee929..88818cf2b4d22 100644 --- a/src/librustuv/addrinfo.rs +++ b/src/librustuv/addrinfo.rs @@ -17,14 +17,9 @@ use std::rt::local::Local; use std::rt::sched::Scheduler; use net; -use super::{Loop, UvError, NativeHandle}; -use uvll::UV_GETADDRINFO; +use super::{Loop, UvError, NativeHandle, Request}; use uvll; -struct GetAddrInfoRequest { - handle: *uvll::uv_getaddrinfo_t, -} - struct Addrinfo { handle: *uvll::addrinfo, } @@ -35,13 +30,9 @@ struct Ctx { addrinfo: Option, } -impl GetAddrInfoRequest { - pub fn new() -> GetAddrInfoRequest { - GetAddrInfoRequest { - handle: unsafe { uvll::malloc_req(uvll::UV_GETADDRINFO) }, - } - } +pub struct GetAddrInfoRequest; +impl 
GetAddrInfoRequest { pub fn run(loop_: &Loop, node: Option<&str>, service: Option<&str>, hints: Option) -> Result<~[ai::Info], UvError> { assert!(node.is_some() || service.is_some()); @@ -85,7 +76,7 @@ impl GetAddrInfoRequest { } }); let hint_ptr = hint.as_ref().map_default(null(), |x| x as *uvll::addrinfo); - let req = GetAddrInfoRequest::new(); + let req = Request::new(uvll::UV_GETADDRINFO); return match unsafe { uvll::uv_getaddrinfo(loop_.native_handle(), req.handle, @@ -94,7 +85,8 @@ impl GetAddrInfoRequest { } { 0 => { let mut cx = Ctx { slot: None, status: 0, addrinfo: None }; - unsafe { uvll::set_data_for_req(req.handle, &cx) } + req.set_data(&cx); + req.defuse(); let scheduler: ~Scheduler = Local::take(); do scheduler.deschedule_running_task_and_then |_, task| { cx.slot = Some(task); @@ -112,9 +104,9 @@ impl GetAddrInfoRequest { extern fn getaddrinfo_cb(req: *uvll::uv_getaddrinfo_t, status: c_int, res: *uvll::addrinfo) { - let cx: &mut Ctx = unsafe { - cast::transmute(uvll::get_data_for_req(req)) - }; + let req = Request::wrap(req); + if status == uvll::ECANCELED { return } + let cx: &mut Ctx = unsafe { cast::transmute(req.get_data()) }; cx.status = status; cx.addrinfo = Some(Addrinfo { handle: res }); @@ -124,12 +116,6 @@ impl GetAddrInfoRequest { } } -impl Drop for GetAddrInfoRequest { - fn drop(&mut self) { - unsafe { uvll::free_req(self.handle) } - } -} - impl Drop for Addrinfo { fn drop(&mut self) { unsafe { uvll::uv_freeaddrinfo(self.handle) } diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index eb2da05506d82..5e79f6e13451f 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -54,19 +54,18 @@ use std::libc::{c_void, c_int, size_t, malloc, free}; use std::cast::transmute; use std::ptr::null; use std::unstable::finally::Finally; -use std::rt::io::net::ip::SocketAddr; use std::rt::io::IoError; //#[cfg(test)] use unstable::run_in_bare_thread; pub use self::file::{FsRequest, FileWatcher}; -pub use self::net::{StreamWatcher, TcpWatcher, UdpWatcher}; +pub use self::net::{TcpWatcher, TcpListener, TcpAcceptor, UdpWatcher}; pub use self::idle::IdleWatcher; pub use self::timer::TimerWatcher; pub use self::async::AsyncWatcher; pub use self::process::Process; -pub use self::pipe::PipeWatcher; +pub use self::pipe::{PipeWatcher, PipeListener, PipeAcceptor}; pub use self::signal::SignalWatcher; pub use self::tty::TtyWatcher; @@ -97,24 +96,6 @@ pub struct Loop { priv handle: *uvll::uv_loop_t } -pub struct Handle(*uvll::uv_handle_t); - -impl Watcher for Handle {} -impl NativeHandle<*uvll::uv_handle_t> for Handle { - fn from_native_handle(h: *uvll::uv_handle_t) -> Handle { Handle(h) } - fn native_handle(&self) -> *uvll::uv_handle_t { **self } -} - -/// The trait implemented by uv 'watchers' (handles). Watchers are -/// non-owning wrappers around the uv handles and are not completely -/// safe - there may be multiple instances for a single underlying -/// handle. Watchers are generally created, then `start`ed, `stop`ed -/// and `close`ed, but due to their complex life cycle may not be -/// entirely memory safe if used in unanticipated patterns. 
-pub trait Watcher { } - -pub trait Request { } - /// A type that wraps a native handle pub trait NativeHandle { fn from_native_handle(T) -> Self; @@ -160,32 +141,47 @@ pub trait UvHandle { } } -pub trait UvRequest { - fn uv_request(&self) -> *T; +pub struct Request { + handle: *uvll::uv_req_t, +} - // FIXME(#8888) dummy self - fn alloc(_: Option, ty: uvll::uv_req_type) -> *T { - unsafe { - let handle = uvll::malloc_req(ty); - assert!(!handle.is_null()); - handle as *T - } +impl Request { + pub fn new(ty: uvll::uv_req_type) -> Request { + Request::wrap(unsafe { uvll::malloc_req(ty) }) } - unsafe fn from_uv_request<'a>(h: &'a *T) -> &'a mut Self { - cast::transmute(uvll::get_data_for_req(*h)) + pub fn wrap(handle: *uvll::uv_req_t) -> Request { + Request { handle: handle } } - fn install(~self) -> ~Self { - unsafe { - let myptr = cast::transmute::<&~Self, &*u8>(&self); - uvll::set_data_for_req(self.uv_request(), *myptr); - } - self + pub fn set_data(&self, t: *T) { + unsafe { uvll::set_data_for_req(self.handle, t) } } - fn delete(&mut self) { - unsafe { uvll::free_req(self.uv_request() as *c_void) } + pub fn get_data(&self) -> *c_void { + unsafe { uvll::get_data_for_req(self.handle) } + } + + // This function should be used when the request handle has been given to an + // underlying uv function, and the uv function has succeeded. This means + // that uv will at some point invoke the callback, and in the meantime we + // can't deallocate the handle because libuv could be using it. + // + // This is still a problem in blocking situations due to linked failure. In + // the connection callback the handle should be re-wrapped with the `wrap` + // function to ensure its destruction. + pub fn defuse(mut self) { + self.handle = ptr::null(); + } +} + +impl Drop for Request { + fn drop(&mut self) { + unsafe { + if self.handle != ptr::null() { + uvll::free_req(self.handle) + } + } } } @@ -214,110 +210,6 @@ impl NativeHandle<*uvll::uv_loop_t> for Loop { } } -// XXX: The uv alloc callback also has a *uv_handle_t arg -pub type AllocCallback = ~fn(uint) -> Buf; -pub type ReadCallback = ~fn(StreamWatcher, int, Buf, Option); -pub type NullCallback = ~fn(); -pub type ConnectionCallback = ~fn(StreamWatcher, Option); -pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, SocketAddr, uint, Option); -pub type UdpSendCallback = ~fn(UdpWatcher, Option); - - -/// Callbacks used by StreamWatchers, set as custom data on the foreign handle. -/// XXX: Would be better not to have all watchers allocate room for all callback types. 
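
The Request wrapper introduced above frees its uv_req_t in Drop unless defuse() has nulled the handle out first, because once a uv_* call has accepted the request only the completion callback can safely reclaim it (by re-wrapping the raw pointer). A compact sketch of that own-or-defuse idea with a plain Box in place of malloc_req/free_req (modern-dialect Rust, not part of the patch, names invented for the example):

use std::ptr;

struct Req {
    ptr: *mut u64, // stands in for *uv_req_t
}

impl Req {
    fn new() -> Req {
        Req { ptr: Box::into_raw(Box::new(0u64)) }
    }
    // Ownership has been handed to "libuv": null the handle so Drop becomes a
    // no-op, and let the callback side free the allocation later.
    fn defuse(mut self) -> *mut u64 {
        let p = self.ptr;
        self.ptr = ptr::null_mut();
        p
    }
}

impl Drop for Req {
    fn drop(&mut self) {
        if !self.ptr.is_null() {
            // Rebuild the Box so the allocation is freed exactly once.
            unsafe { drop(Box::from_raw(self.ptr)); }
        }
    }
}

fn main() {
    let owned = Req::new();              // freed here by Drop
    drop(owned);
    let handed_off = Req::new();
    let raw = handed_off.defuse();       // Drop sees a null handle and skips the free
    unsafe { drop(Box::from_raw(raw)); } // the "callback" reclaims it instead
}
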
-struct WatcherData { - read_cb: Option, - write_cb: Option, - connect_cb: Option, - close_cb: Option, - alloc_cb: Option, - udp_recv_cb: Option, - udp_send_cb: Option, -} - -pub trait WatcherInterop { - fn event_loop(&self) -> Loop; - fn install_watcher_data(&mut self); - fn get_watcher_data<'r>(&'r mut self) -> &'r mut WatcherData; - fn drop_watcher_data(&mut self); - fn close(self, cb: NullCallback); - fn close_async(self); -} - -impl> WatcherInterop for W { - /// Get the uv event loop from a Watcher - fn event_loop(&self) -> Loop { - unsafe { - let handle = self.native_handle(); - let loop_ = uvll::get_loop_for_uv_handle(handle); - NativeHandle::from_native_handle(loop_) - } - } - - fn install_watcher_data(&mut self) { - unsafe { - let data = ~WatcherData { - read_cb: None, - write_cb: None, - connect_cb: None, - close_cb: None, - alloc_cb: None, - udp_recv_cb: None, - udp_send_cb: None, - }; - let data = transmute::<~WatcherData, *c_void>(data); - uvll::set_data_for_uv_handle(self.native_handle(), data); - } - } - - fn get_watcher_data<'r>(&'r mut self) -> &'r mut WatcherData { - unsafe { - let data = uvll::get_data_for_uv_handle(self.native_handle()); - let data = transmute::<&*c_void, &mut ~WatcherData>(&data); - return &mut **data; - } - } - - fn drop_watcher_data(&mut self) { - unsafe { - let data = uvll::get_data_for_uv_handle(self.native_handle()); - let _data = transmute::<*c_void, ~WatcherData>(data); - uvll::set_data_for_uv_handle(self.native_handle(), null::<()>()); - } - } - - fn close(mut self, cb: NullCallback) { - { - let data = self.get_watcher_data(); - assert!(data.close_cb.is_none()); - data.close_cb = Some(cb); - } - - unsafe { - uvll::uv_close(self.native_handle() as *uvll::uv_handle_t, close_cb); - } - - extern fn close_cb(handle: *uvll::uv_handle_t) { - let mut h: Handle = NativeHandle::from_native_handle(handle); - h.get_watcher_data().close_cb.take_unwrap()(); - h.drop_watcher_data(); - unsafe { uvll::free_handle(handle as *c_void) } - } - } - - fn close_async(self) { - unsafe { - uvll::uv_close(self.native_handle() as *uvll::uv_handle_t, close_cb); - } - - extern fn close_cb(handle: *uvll::uv_handle_t) { - let mut h: Handle = NativeHandle::from_native_handle(handle); - h.drop_watcher_data(); - unsafe { uvll::free_handle(handle as *c_void) } - } - } -} - // XXX: Need to define the error constants like EOF so they can be // compared to the UvError type diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index e9f3f2bba4c5e..ef64b1e5cc545 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -8,18 +8,32 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
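
With the WatcherData callback table gone, the blocking operations in these bindings keep their per-call state in small Ctx structs on the calling task's stack and give the C callback a raw pointer via set_data; the task staying blocked until the callback runs is what keeps that pointer valid. A standalone sketch of the idea, with a synchronous fake_uv_op standing in for the real uv call and the *mut c_void cast elided (modern-dialect Rust, not part of the patch, everything here invented for illustration):

struct Ctx { status: i32 }

// Stands in for a C callback that receives the user-data pointer back.
extern "C" fn done_cb(data: *mut Ctx, status: i32) {
    unsafe { (*data).status = status; }
}

// Stands in for a uv_* call that stashes the pointer and later invokes the
// callback; it fires synchronously here to keep the sketch self-contained.
fn fake_uv_op(data: *mut Ctx, cb: extern "C" fn(*mut Ctx, i32)) {
    cb(data, 0);
}

fn main() {
    let mut cx = Ctx { status: -1 };
    fake_uv_op(&mut cx as *mut Ctx, done_cb);
    // The callback has already run by the time we "resume", so the result
    // is sitting in the stack-allocated context.
    assert_eq!(cx.status, 0);
}
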
+use std::cast; use std::libc::{size_t, ssize_t, c_int, c_void, c_uint, c_char}; -use std::vec; +use std::ptr; +use std::rt::BlockedTask; +use std::rt::io::IoError; +use std::rt::io::net::ip::{Ipv4Addr, Ipv6Addr}; +use std::rt::local::Local; +use std::rt::io::net::ip::{SocketAddr, IpAddr}; +use std::rt::rtio; +use std::rt::sched::{Scheduler, SchedHandle}; +use std::rt::tube::Tube; use std::str; -use std::rt::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr}; +use std::vec; use uvll; use uvll::*; -use super::{AllocCallback, ConnectionCallback, ReadCallback, UdpReceiveCallback, - UdpSendCallback, Loop, Watcher, Request, UvError, Buf, NativeHandle, - status_to_maybe_uv_error, empty_buf}; +use super::{ + Loop, Request, UvError, Buf, NativeHandle, + status_to_io_result, + uv_error_to_io_error, UvHandle, slice_to_uv_buf}; +use uvio::HomingIO; +use stream::StreamWatcher; -pub struct UvAddrInfo(*uvll::addrinfo); +//////////////////////////////////////////////////////////////////////////////// +/// Generic functions related to dealing with sockaddr things +//////////////////////////////////////////////////////////////////////////////// pub enum UvSocketAddr { UvIpv4SocketAddr(*sockaddr_in), @@ -113,395 +127,585 @@ fn test_ip6_conversion() { assert_eq!(ip6, socket_addr_as_uv_socket_addr(ip6, uv_socket_addr_to_socket_addr)); } -// uv_stream_t is the parent class of uv_tcp_t, uv_pipe_t, uv_tty_t -// and uv_file_t -pub struct StreamWatcher(*uvll::uv_stream_t); -impl Watcher for StreamWatcher { } +enum SocketNameKind { + TcpPeer, + Tcp, + Udp +} -impl StreamWatcher { - pub fn read_start(&mut self, alloc: AllocCallback, cb: ReadCallback) { - unsafe { - match uvll::uv_read_start(self.native_handle(), alloc_cb, read_cb) { - 0 => { - let data = self.get_watcher_data(); - data.alloc_cb = Some(alloc); - data.read_cb = Some(cb); - } - n => { - cb(*self, 0, empty_buf(), Some(UvError(n))) - } - } - } +fn socket_name(sk: SocketNameKind, handle: *c_void) -> Result { + let getsockname = match sk { + TcpPeer => uvll::tcp_getpeername, + Tcp => uvll::tcp_getsockname, + Udp => uvll::udp_getsockname, + }; - extern fn alloc_cb(stream: *uvll::uv_stream_t, suggested_size: size_t) -> Buf { - let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream); - let alloc_cb = stream_watcher.get_watcher_data().alloc_cb.get_ref(); - return (*alloc_cb)(suggested_size as uint); - } + // Allocate a sockaddr_storage + // since we don't know if it's ipv4 or ipv6 + let r_addr = unsafe { uvll::malloc_sockaddr_storage() }; - extern fn read_cb(stream: *uvll::uv_stream_t, nread: ssize_t, buf: Buf) { - uvdebug!("buf addr: {}", buf.base); - uvdebug!("buf len: {}", buf.len); - let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream); - let cb = stream_watcher.get_watcher_data().read_cb.get_ref(); - let status = status_to_maybe_uv_error(nread as c_int); - (*cb)(stream_watcher, nread as int, buf, status); - } - } + let r = unsafe { + getsockname(handle, r_addr as *uvll::sockaddr_storage) + }; - pub fn read_stop(&mut self) { - // It would be nice to drop the alloc and read callbacks here, - // but read_stop may be called from inside one of them and we - // would end up freeing the in-use environment - let handle = self.native_handle(); - unsafe { assert_eq!(uvll::uv_read_stop(handle), 0); } + if r != 0 { + return Err(uv_error_to_io_error(UvError(r))); } - pub fn write(&mut self, buf: Buf, cb: ConnectionCallback) { - let req = WriteRequest::new(); - return unsafe { - match uvll::uv_write(req.native_handle(), 
self.native_handle(), - [buf], write_cb) { - 0 => { - let data = self.get_watcher_data(); - assert!(data.write_cb.is_none()); - data.write_cb = Some(cb); - } - n => { - req.delete(); - cb(*self, Some(UvError(n))) - } - } - }; - - extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { - let write_request: WriteRequest = NativeHandle::from_native_handle(req); - let mut stream_watcher = write_request.stream(); - write_request.delete(); - let cb = stream_watcher.get_watcher_data().write_cb.take_unwrap(); - let status = status_to_maybe_uv_error(status); - cb(stream_watcher, status); + let addr = unsafe { + if uvll::is_ip6_addr(r_addr as *uvll::sockaddr) { + uv_socket_addr_to_socket_addr(UvIpv6SocketAddr(r_addr as *uvll::sockaddr_in6)) + } else { + uv_socket_addr_to_socket_addr(UvIpv4SocketAddr(r_addr as *uvll::sockaddr_in)) } - } + }; + unsafe { uvll::free_sockaddr_storage(r_addr); } - pub fn listen(&mut self, cb: ConnectionCallback) -> Result<(), UvError> { - { - let data = self.get_watcher_data(); - assert!(data.connect_cb.is_none()); - data.connect_cb = Some(cb); - } + Ok(addr) - return unsafe { - static BACKLOG: c_int = 128; // XXX should be configurable - match uvll::uv_listen(self.native_handle(), BACKLOG, connection_cb) { - 0 => Ok(()), - n => Err(UvError(n)) - } - }; +} - extern fn connection_cb(handle: *uvll::uv_stream_t, status: c_int) { - uvdebug!("connection_cb"); - let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(handle); - let cb = stream_watcher.get_watcher_data().connect_cb.get_ref(); - let status = status_to_maybe_uv_error(status); - (*cb)(stream_watcher, status); - } - } +//////////////////////////////////////////////////////////////////////////////// +/// TCP implementation +//////////////////////////////////////////////////////////////////////////////// - pub fn accept(&mut self, stream: StreamWatcher) { - let self_handle = self.native_handle() as *c_void; - let stream_handle = stream.native_handle() as *c_void; - assert_eq!(0, unsafe { uvll::uv_accept(self_handle, stream_handle) } ); - } +pub struct TcpWatcher { + handle: *uvll::uv_tcp_t, + stream: StreamWatcher, + home: SchedHandle, } -impl NativeHandle<*uvll::uv_stream_t> for StreamWatcher { - fn from_native_handle(handle: *uvll::uv_stream_t) -> StreamWatcher { - StreamWatcher(handle) - } - fn native_handle(&self) -> *uvll::uv_stream_t { - match self { &StreamWatcher(ptr) => ptr } - } +pub struct TcpListener { + home: SchedHandle, + handle: *uvll::uv_pipe_t, + priv closing_task: Option, + priv outgoing: Tube>, +} + +pub struct TcpAcceptor { + listener: ~TcpListener, + priv incoming: Tube>, } -pub struct TcpWatcher(*uvll::uv_tcp_t); -impl Watcher for TcpWatcher { } +// TCP watchers (clients/streams) impl TcpWatcher { pub fn new(loop_: &Loop) -> TcpWatcher { - unsafe { - let handle = malloc_handle(UV_TCP); - assert!(handle.is_not_null()); - assert_eq!(0, uvll::uv_tcp_init(loop_.native_handle(), handle)); - let mut watcher: TcpWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher; + let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; + assert_eq!(unsafe { + uvll::uv_tcp_init(loop_.native_handle(), handle) + }, 0); + TcpWatcher { + home: get_handle_to_current_scheduler!(), + handle: handle, + stream: StreamWatcher::new(handle), } } - pub fn bind(&mut self, address: SocketAddr) -> Result<(), UvError> { - do socket_addr_as_uv_socket_addr(address) |addr| { - let result = unsafe { - match addr { - UvIpv4SocketAddr(addr) => 
uvll::tcp_bind(self.native_handle(), addr), - UvIpv6SocketAddr(addr) => uvll::tcp_bind6(self.native_handle(), addr), - } + pub fn connect(loop_: &mut Loop, address: SocketAddr) + -> Result + { + struct Ctx { status: c_int, task: Option } + + let tcp = TcpWatcher::new(loop_); + let ret = do socket_addr_as_uv_socket_addr(address) |addr| { + let req = Request::new(uvll::UV_CONNECT); + let result = match addr { + UvIpv4SocketAddr(addr) => unsafe { + uvll::tcp_connect(req.handle, tcp.handle, addr, + connect_cb) + }, + UvIpv6SocketAddr(addr) => unsafe { + uvll::tcp_connect6(req.handle, tcp.handle, addr, + connect_cb) + }, }; match result { - 0 => Ok(()), - _ => Err(UvError(result)), + 0 => { + req.defuse(); + let mut cx = Ctx { status: 0, task: None }; + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + cx.task = Some(task); + } + match cx.status { + 0 => Ok(()), + n => Err(UvError(n)), + } + } + n => Err(UvError(n)) } - } - } + }; - pub fn connect(&mut self, address: SocketAddr, cb: ConnectionCallback) { - unsafe { - assert!(self.get_watcher_data().connect_cb.is_none()); - self.get_watcher_data().connect_cb = Some(cb); - - let connect_handle = ConnectRequest::new().native_handle(); - uvdebug!("connect_t: {}", connect_handle); - do socket_addr_as_uv_socket_addr(address) |addr| { - let result = match addr { - UvIpv4SocketAddr(addr) => uvll::tcp_connect(connect_handle, - self.native_handle(), addr, connect_cb), - UvIpv6SocketAddr(addr) => uvll::tcp_connect6(connect_handle, - self.native_handle(), addr, connect_cb), - }; - assert_eq!(0, result); - } + return match ret { + Ok(()) => Ok(tcp), + Err(e) => Err(e), + }; - extern fn connect_cb(req: *uvll::uv_connect_t, status: c_int) { - uvdebug!("connect_t: {}", req); - let connect_request: ConnectRequest = NativeHandle::from_native_handle(req); - let mut stream_watcher = connect_request.stream(); - connect_request.delete(); - let cb = stream_watcher.get_watcher_data().connect_cb.take_unwrap(); - let status = status_to_maybe_uv_error(status); - cb(stream_watcher, status); - } + extern fn connect_cb(req: *uvll::uv_connect_t, status: c_int) { + let _req = Request::wrap(req); + if status == uvll::ECANCELED { return } + let cx: &mut Ctx = unsafe { + cast::transmute(uvll::get_data_for_req(req)) + }; + cx.status = status; + let scheduler: ~Scheduler = Local::take(); + scheduler.resume_blocked_task_immediately(cx.task.take_unwrap()); } } +} - pub fn as_stream(&self) -> StreamWatcher { - NativeHandle::from_native_handle(self.native_handle() as *uvll::uv_stream_t) +impl HomingIO for TcpWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl rtio::RtioSocket for TcpWatcher { + fn socket_name(&mut self) -> Result { + let _m = self.fire_missiles(); + socket_name(Tcp, self.handle) } } -impl NativeHandle<*uvll::uv_tcp_t> for TcpWatcher { - fn from_native_handle(handle: *uvll::uv_tcp_t) -> TcpWatcher { - TcpWatcher(handle) +impl rtio::RtioTcpStream for TcpWatcher { + fn read(&mut self, buf: &mut [u8]) -> Result { + let _m = self.fire_missiles(); + self.stream.read(buf).map_err(uv_error_to_io_error) } - fn native_handle(&self) -> *uvll::uv_tcp_t { - match self { &TcpWatcher(ptr) => ptr } + + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + let _m = self.fire_missiles(); + self.stream.write(buf).map_err(uv_error_to_io_error) } -} -pub struct UdpWatcher(*uvll::uv_udp_t); -impl Watcher for UdpWatcher { } + fn peer_name(&mut self) -> Result { + let _m = self.fire_missiles(); 
+ socket_name(TcpPeer, self.handle) + } -impl UdpWatcher { - pub fn new(loop_: &Loop) -> UdpWatcher { - unsafe { - let handle = malloc_handle(UV_UDP); - assert!(handle.is_not_null()); - assert_eq!(0, uvll::uv_udp_init(loop_.native_handle(), handle)); - let mut watcher: UdpWatcher = NativeHandle::from_native_handle(handle); - watcher.install_watcher_data(); - return watcher; - } + fn control_congestion(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_tcp_nodelay(self.handle, 0 as c_int) + }) } - pub fn bind(&mut self, address: SocketAddr) -> Result<(), UvError> { - do socket_addr_as_uv_socket_addr(address) |addr| { - let result = unsafe { - match addr { - UvIpv4SocketAddr(addr) => uvll::udp_bind(self.native_handle(), addr, 0u32), - UvIpv6SocketAddr(addr) => uvll::udp_bind6(self.native_handle(), addr, 0u32), - } - }; - match result { - 0 => Ok(()), - _ => Err(UvError(result)), - } - } + fn nodelay(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_tcp_nodelay(self.handle, 1 as c_int) + }) } - pub fn recv_start(&mut self, alloc: AllocCallback, cb: UdpReceiveCallback) { - { - let data = self.get_watcher_data(); - data.alloc_cb = Some(alloc); - data.udp_recv_cb = Some(cb); - } + fn keepalive(&mut self, delay_in_seconds: uint) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_tcp_keepalive(self.handle, 1 as c_int, + delay_in_seconds as c_uint) + }) + } - unsafe { uvll::uv_udp_recv_start(self.native_handle(), alloc_cb, recv_cb); } + fn letdie(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_tcp_keepalive(self.handle, 0 as c_int, 0 as c_uint) + }) + } +} - extern fn alloc_cb(handle: *uvll::uv_udp_t, suggested_size: size_t) -> Buf { - let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); - let alloc_cb = udp_watcher.get_watcher_data().alloc_cb.get_ref(); - return (*alloc_cb)(suggested_size as uint); - } +impl Drop for TcpWatcher { + fn drop(&mut self) { + let _m = self.fire_missiles(); + self.stream.close(true); + } +} - extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, - addr: *uvll::sockaddr, flags: c_uint) { - // When there's no data to read the recv callback can be a no-op. - // This can happen if read returns EAGAIN/EWOULDBLOCK. By ignoring - // this we just drop back to kqueue and wait for the next callback. 
- if nread == 0 { - return; +// TCP listeners (unbound servers) + +impl TcpListener { + pub fn bind(loop_: &mut Loop, address: SocketAddr) + -> Result<~TcpListener, UvError> + { + let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; + assert_eq!(unsafe { + uvll::uv_tcp_init(loop_.native_handle(), handle) + }, 0); + let l = ~TcpListener { + home: get_handle_to_current_scheduler!(), + handle: handle, + closing_task: None, + outgoing: Tube::new(), + }; + let res = socket_addr_as_uv_socket_addr(address, |addr| unsafe { + match addr { + UvIpv4SocketAddr(addr) => uvll::tcp_bind(l.handle, addr), + UvIpv6SocketAddr(addr) => uvll::tcp_bind6(l.handle, addr), } - - uvdebug!("buf addr: {}", buf.base); - uvdebug!("buf len: {}", buf.len); - let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); - let cb = udp_watcher.get_watcher_data().udp_recv_cb.get_ref(); - let status = status_to_maybe_uv_error(nread as c_int); - let addr = uv_socket_addr_to_socket_addr(sockaddr_to_UvSocketAddr(addr)); - (*cb)(udp_watcher, nread as int, buf, addr, flags as uint, status); + }); + match res { + 0 => Ok(l.install()), + n => Err(UvError(n)) } } +} + +impl HomingIO for TcpListener { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } +} + +impl UvHandle for TcpListener { + fn uv_handle(&self) -> *uvll::uv_tcp_t { self.handle } +} - pub fn recv_stop(&mut self) { - unsafe { uvll::uv_udp_recv_stop(self.native_handle()); } +impl rtio::RtioSocket for TcpListener { + fn socket_name(&mut self) -> Result { + let _m = self.fire_missiles(); + socket_name(Tcp, self.handle) } +} + +impl rtio::RtioTcpListener for TcpListener { + fn listen(mut ~self) -> Result<~rtio::RtioTcpAcceptor, IoError> { + // create the acceptor object from ourselves + let incoming = self.outgoing.clone(); + let mut acceptor = ~TcpAcceptor { + listener: self, + incoming: incoming, + }; - pub fn send(&mut self, buf: Buf, address: SocketAddr, cb: UdpSendCallback) { - { - let data = self.get_watcher_data(); - assert!(data.udp_send_cb.is_none()); - data.udp_send_cb = Some(cb); + let _m = acceptor.fire_missiles(); + // XXX: the 128 backlog should be configurable + match unsafe { uvll::uv_listen(acceptor.listener.handle, 128, listen_cb) } { + 0 => Ok(acceptor as ~rtio::RtioTcpAcceptor), + n => Err(uv_error_to_io_error(UvError(n))), } + } +} - let req = UdpSendRequest::new(); - do socket_addr_as_uv_socket_addr(address) |addr| { - let result = unsafe { - match addr { - UvIpv4SocketAddr(addr) => uvll::udp_send(req.native_handle(), - self.native_handle(), [buf], addr, send_cb), - UvIpv6SocketAddr(addr) => uvll::udp_send6(req.native_handle(), - self.native_handle(), [buf], addr, send_cb), - } - }; - assert_eq!(0, result); +extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) { + let msg = match status { + 0 => { + let loop_ = NativeHandle::from_native_handle(unsafe { + uvll::get_loop_for_uv_handle(server) + }); + let client = TcpWatcher::new(&loop_); + assert_eq!(unsafe { uvll::uv_accept(server, client.handle) }, 0); + Ok(~client as ~rtio::RtioTcpStream) } + uvll::ECANCELED => return, + n => Err(uv_error_to_io_error(UvError(n))) + }; - extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { - let send_request: UdpSendRequest = NativeHandle::from_native_handle(req); - let mut udp_watcher = send_request.handle(); - send_request.delete(); - let cb = udp_watcher.get_watcher_data().udp_send_cb.take_unwrap(); - let status = status_to_maybe_uv_error(status); - cb(udp_watcher, status); + let tcp: &mut TcpListener = 
unsafe { UvHandle::from_uv_handle(&server) }; + tcp.outgoing.send(msg); +} + +impl Drop for TcpListener { + fn drop(&mut self) { + let (_m, sched) = self.fire_missiles_sched(); + + do sched.deschedule_running_task_and_then |_, task| { + self.closing_task = Some(task); + unsafe { uvll::uv_close(self.handle, listener_close_cb) } } } } -impl NativeHandle<*uvll::uv_udp_t> for UdpWatcher { - fn from_native_handle(handle: *uvll::uv_udp_t) -> UdpWatcher { - UdpWatcher(handle) - } - fn native_handle(&self) -> *uvll::uv_udp_t { - match self { &UdpWatcher(ptr) => ptr } - } +extern fn listener_close_cb(handle: *uvll::uv_handle_t) { + let tcp: &mut TcpListener = unsafe { UvHandle::from_uv_handle(&handle) }; + unsafe { uvll::free_handle(handle) } + + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(tcp.closing_task.take_unwrap()); } -// uv_connect_t is a subclass of uv_req_t -pub struct ConnectRequest(*uvll::uv_connect_t); -impl Request for ConnectRequest { } +// TCP acceptors (bound servers) + +impl HomingIO for TcpAcceptor { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } +} -impl ConnectRequest { +impl rtio::RtioSocket for TcpAcceptor { + fn socket_name(&mut self) -> Result { + let _m = self.fire_missiles(); + socket_name(Tcp, self.listener.handle) + } +} - pub fn new() -> ConnectRequest { - let connect_handle = unsafe { malloc_req(UV_CONNECT) }; - assert!(connect_handle.is_not_null()); - ConnectRequest(connect_handle as *uvll::uv_connect_t) +impl rtio::RtioTcpAcceptor for TcpAcceptor { + fn accept(&mut self) -> Result<~rtio::RtioTcpStream, IoError> { + let _m = self.fire_missiles(); + self.incoming.recv() } - fn stream(&self) -> StreamWatcher { - unsafe { - let stream_handle = uvll::get_stream_handle_from_connect_req(self.native_handle()); - NativeHandle::from_native_handle(stream_handle) - } + fn accept_simultaneously(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_tcp_simultaneous_accepts(self.listener.handle, 1) + }) } - fn delete(self) { - unsafe { free_req(self.native_handle() as *c_void) } + fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_tcp_simultaneous_accepts(self.listener.handle, 0) + }) } } -impl NativeHandle<*uvll::uv_connect_t> for ConnectRequest { - fn from_native_handle(handle: *uvll:: uv_connect_t) -> ConnectRequest { - ConnectRequest(handle) +//////////////////////////////////////////////////////////////////////////////// +/// UDP implementation +//////////////////////////////////////////////////////////////////////////////// + +pub struct UdpWatcher { + handle: *uvll::uv_udp_t, + home: SchedHandle, +} + +impl UdpWatcher { + pub fn bind(loop_: &Loop, address: SocketAddr) + -> Result + { + let udp = UdpWatcher { + handle: unsafe { uvll::malloc_handle(uvll::UV_UDP) }, + home: get_handle_to_current_scheduler!(), + }; + assert_eq!(unsafe { + uvll::uv_udp_init(loop_.native_handle(), udp.handle) + }, 0); + let result = socket_addr_as_uv_socket_addr(address, |addr| unsafe { + match addr { + UvIpv4SocketAddr(addr) => uvll::udp_bind(udp.handle, addr, 0u32), + UvIpv6SocketAddr(addr) => uvll::udp_bind6(udp.handle, addr, 0u32), + } + }); + match result { + 0 => Ok(udp), + n => Err(UvError(n)), + } } - fn native_handle(&self) -> *uvll::uv_connect_t { - match self { &ConnectRequest(ptr) => ptr } +} + +impl HomingIO for UdpWatcher { + fn home<'r>(&'r mut self) -> &'r mut SchedHandle { 
&mut self.home } +} + +impl rtio::RtioSocket for UdpWatcher { + fn socket_name(&mut self) -> Result { + let _m = self.fire_missiles(); + socket_name(Udp, self.handle) } } -pub struct WriteRequest(*uvll::uv_write_t); +impl rtio::RtioUdpSocket for UdpWatcher { + fn recvfrom(&mut self, buf: &mut [u8]) + -> Result<(uint, SocketAddr), IoError> + { + struct Ctx { + task: Option, + buf: Option, + result: Option<(ssize_t, SocketAddr)>, + } + let _m = self.fire_missiles(); + + return match unsafe { + uvll::uv_udp_recv_start(self.handle, alloc_cb, recv_cb) + } { + 0 => { + let mut cx = Ctx { + task: None, + buf: Some(slice_to_uv_buf(buf)), + result: None, + }; + unsafe { uvll::set_data_for_uv_handle(self.handle, &cx) } + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + cx.task = Some(task); + } + match cx.result.take_unwrap() { + (n, _) if n < 0 => + Err(uv_error_to_io_error(UvError(n as c_int))), + (n, addr) => Ok((n as uint, addr)) + } + } + n => Err(uv_error_to_io_error(UvError(n))) + }; + + extern fn alloc_cb(handle: *uvll::uv_udp_t, + _suggested_size: size_t) -> Buf { + let cx: &mut Ctx = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + cx.buf.take().expect("alloc_cb called more than once") + } + + extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, _buf: Buf, + addr: *uvll::sockaddr, _flags: c_uint) { -impl Request for WriteRequest { } + // When there's no data to read the recv callback can be a no-op. + // This can happen if read returns EAGAIN/EWOULDBLOCK. By ignoring + // this we just drop back to kqueue and wait for the next callback. + if nread == 0 { return } + if nread == uvll::ECANCELED as ssize_t { return } -impl WriteRequest { - pub fn new() -> WriteRequest { - let write_handle = unsafe { malloc_req(UV_WRITE) }; - assert!(write_handle.is_not_null()); - WriteRequest(write_handle as *uvll::uv_write_t) + unsafe { + assert_eq!(uvll::uv_udp_recv_stop(handle), 0) + } + + let cx: &mut Ctx = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + let addr = sockaddr_to_UvSocketAddr(addr); + let addr = uv_socket_addr_to_socket_addr(addr); + cx.result = Some((nread, addr)); + + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(cx.task.take_unwrap()); + } } - pub fn stream(&self) -> StreamWatcher { - unsafe { - let stream_handle = uvll::get_stream_handle_from_write_req(self.native_handle()); - NativeHandle::from_native_handle(stream_handle) + fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> Result<(), IoError> { + struct Ctx { task: Option, result: c_int } + + let _m = self.fire_missiles(); + + let req = Request::new(uvll::UV_UDP_SEND); + let buf = slice_to_uv_buf(buf); + let result = socket_addr_as_uv_socket_addr(dst, |dst| unsafe { + match dst { + UvIpv4SocketAddr(dst) => + uvll::udp_send(req.handle, self.handle, [buf], dst, send_cb), + UvIpv6SocketAddr(dst) => + uvll::udp_send6(req.handle, self.handle, [buf], dst, send_cb), + } + }); + + return match result { + 0 => { + let mut cx = Ctx { task: None, result: 0 }; + req.set_data(&cx); + req.defuse(); + + let sched: ~Scheduler = Local::take(); + do sched.deschedule_running_task_and_then |_, task| { + cx.task = Some(task); + } + + match cx.result { + 0 => Ok(()), + n => Err(uv_error_to_io_error(UvError(n))) + } + } + n => Err(uv_error_to_io_error(UvError(n))) + }; + + extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { + let req = Request::wrap(req); + let cx: &mut Ctx = unsafe { 
cast::transmute(req.get_data()) }; + cx.result = status; + + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(cx.task.take_unwrap()); } } - pub fn delete(self) { - unsafe { free_req(self.native_handle() as *c_void) } + fn join_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + do multi.to_str().with_c_str |m_addr| { + uvll::uv_udp_set_membership(self.handle, + m_addr, ptr::null(), + uvll::UV_JOIN_GROUP) + } + }) } -} -impl NativeHandle<*uvll::uv_write_t> for WriteRequest { - fn from_native_handle(handle: *uvll:: uv_write_t) -> WriteRequest { - WriteRequest(handle) + fn leave_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + do multi.to_str().with_c_str |m_addr| { + uvll::uv_udp_set_membership(self.handle, + m_addr, ptr::null(), + uvll::UV_LEAVE_GROUP) + } + }) } - fn native_handle(&self) -> *uvll::uv_write_t { - match self { &WriteRequest(ptr) => ptr } + + fn loop_multicast_locally(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_udp_set_multicast_loop(self.handle, + 1 as c_int) + }) } -} -pub struct UdpSendRequest(*uvll::uv_udp_send_t); -impl Request for UdpSendRequest { } + fn dont_loop_multicast_locally(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_udp_set_multicast_loop(self.handle, + 0 as c_int) + }) + } -impl UdpSendRequest { - pub fn new() -> UdpSendRequest { - let send_handle = unsafe { malloc_req(UV_UDP_SEND) }; - assert!(send_handle.is_not_null()); - UdpSendRequest(send_handle as *uvll::uv_udp_send_t) + fn multicast_time_to_live(&mut self, ttl: int) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_udp_set_multicast_ttl(self.handle, + ttl as c_int) + }) } - pub fn handle(&self) -> UdpWatcher { - let send_request_handle = unsafe { - uvll::get_udp_handle_from_send_req(self.native_handle()) - }; - NativeHandle::from_native_handle(send_request_handle) + fn time_to_live(&mut self, ttl: int) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_udp_set_ttl(self.handle, ttl as c_int) + }) } - pub fn delete(self) { - unsafe { free_req(self.native_handle() as *c_void) } + fn hear_broadcasts(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_udp_set_broadcast(self.handle, + 1 as c_int) + }) } -} -impl NativeHandle<*uvll::uv_udp_send_t> for UdpSendRequest { - fn from_native_handle(handle: *uvll::uv_udp_send_t) -> UdpSendRequest { - UdpSendRequest(handle) + fn ignore_broadcasts(&mut self) -> Result<(), IoError> { + let _m = self.fire_missiles(); + status_to_io_result(unsafe { + uvll::uv_udp_set_broadcast(self.handle, + 0 as c_int) + }) } - fn native_handle(&self) -> *uvll::uv_udp_send_t { - match self { &UdpSendRequest(ptr) => ptr } +} + +impl Drop for UdpWatcher { + fn drop(&mut self) { + // Send ourselves home to close this handle (blocking while doing so). 
+ let (_m, sched) = self.fire_missiles_sched(); + let mut slot = None; + unsafe { + uvll::set_data_for_uv_handle(self.handle, &slot); + uvll::uv_close(self.handle, close_cb); + } + do sched.deschedule_running_task_and_then |_, task| { + slot = Some(task); + } + + extern fn close_cb(handle: *uvll::uv_handle_t) { + let slot: &mut Option = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(slot.take_unwrap()); + } } } +//////////////////////////////////////////////////////////////////////////////// +/// UV request support +//////////////////////////////////////////////////////////////////////////////// + #[cfg(test)] mod test { use super::*; diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index a857308a81b35..2a41dd9efe19b 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -19,7 +19,7 @@ use std::rt::sched::{Scheduler, SchedHandle}; use std::rt::tube::Tube; use stream::StreamWatcher; -use super::{Loop, UvError, NativeHandle, uv_error_to_io_error, UvHandle}; +use super::{Loop, UvError, NativeHandle, uv_error_to_io_error, UvHandle, Request}; use uvio::HomingIO; use uvll; @@ -79,23 +79,26 @@ impl PipeWatcher { result: Option>, } let mut cx = Ctx { task: None, result: None }; - let req = unsafe { uvll::malloc_req(uvll::UV_CONNECT) }; - unsafe { uvll::set_data_for_req(req, &cx as *Ctx) } + let req = Request::new(uvll::UV_CONNECT); + unsafe { + uvll::set_data_for_req(req.handle, &cx as *Ctx); + uvll::uv_pipe_connect(req.handle, + PipeWatcher::alloc(loop_, false), + name.with_ref(|p| p), + connect_cb) + } + req.defuse(); let sched: ~Scheduler = Local::take(); do sched.deschedule_running_task_and_then |_, task| { cx.task = Some(task); - unsafe { - uvll::uv_pipe_connect(req, - PipeWatcher::alloc(loop_, false), - name.with_ref(|p| p), - connect_cb) - } } assert!(cx.task.is_none()); return cx.result.take().expect("pipe connect needs a result"); extern fn connect_cb(req: *uvll::uv_connect_t, status: libc::c_int) { + let _req = Request::wrap(req); + if status == uvll::ECANCELED { return } unsafe { let cx: &mut Ctx = cast::transmute(uvll::get_data_for_req(req)); let stream = uvll::get_stream_handle_from_connect_req(req); @@ -106,7 +109,6 @@ impl PipeWatcher { Err(UvError(n)) } }); - uvll::free_req(req); let sched: ~Scheduler = Local::take(); sched.resume_blocked_task_immediately(cx.task.take_unwrap()); @@ -201,6 +203,7 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { assert_eq!(unsafe { uvll::uv_accept(server, client) }, 0); Ok(~PipeWatcher::new(client) as ~RtioPipe) } + uvll::ECANCELED => return, n => Err(uv_error_to_io_error(UvError(n))) }; diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 50964d7a84c63..7b44c350f13de 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cell::Cell; use std::libc::c_int; use std::libc; use std::ptr; @@ -58,8 +57,7 @@ impl Process { } } - let ret_io = Cell::new(ret_io); - do with_argv(config.program, config.args) |argv| { + let ret = do with_argv(config.program, config.args) |argv| { do with_env(config.env) |envp| { let options = uvll::uv_process_options_t { exit_cb: on_exit, @@ -89,7 +87,7 @@ impl Process { exit_status: None, term_signal: None, }; - Ok((process.install(), ret_io.take())) + Ok(process.install()) } err => { unsafe { uvll::free_handle(handle) } @@ -97,6 +95,11 @@ impl Process { } } } + }; + + match ret { + Ok(p) => Ok((p, ret_io)), + Err(e) => Err(e), } } } diff --git a/src/librustuv/stream.rs b/src/librustuv/stream.rs index ad0deebd45711..01bc02a50be90 100644 --- a/src/librustuv/stream.rs +++ b/src/librustuv/stream.rs @@ -15,7 +15,7 @@ use std::rt::BlockedTask; use std::rt::local::Local; use std::rt::sched::Scheduler; -use super::{UvError, Buf, slice_to_uv_buf}; +use super::{UvError, Buf, slice_to_uv_buf, Request}; use uvll; // This is a helper structure which is intended to get embedded into other @@ -29,17 +29,17 @@ pub struct StreamWatcher { // every call to uv_write(). Ideally this would be a stack-allocated // structure, but currently we don't have mappings for all the structures // defined in libuv, so we're foced to malloc this. - priv last_write_req: Option<*uvll::uv_write_t>, + priv last_write_req: Option, } struct ReadContext { buf: Option, - result: Option>, + result: ssize_t, task: Option, } struct WriteContext { - result: Option>, + result: c_int, task: Option, } @@ -72,7 +72,7 @@ impl StreamWatcher { 0 => { let mut rcx = ReadContext { buf: Some(slice_to_uv_buf(buf)), - result: None, + result: 0, task: None, }; unsafe { @@ -82,7 +82,10 @@ impl StreamWatcher { do scheduler.deschedule_running_task_and_then |_sched, task| { rcx.task = Some(task); } - rcx.result.take().expect("no result in read stream?") + match rcx.result { + n if n < 0 => Err(UvError(n as c_int)), + n => Ok(n as uint), + } } n => Err(UvError(n)) } @@ -91,27 +94,29 @@ impl StreamWatcher { pub fn write(&mut self, buf: &[u8]) -> Result<(), UvError> { // Prepare the write request, either using a cached one or allocating a // new one - let req = match self.last_write_req { - Some(req) => req, - None => unsafe { uvll::malloc_req(uvll::UV_WRITE) }, - }; - self.last_write_req = Some(req); - let mut wcx = WriteContext { result: None, task: None, }; - unsafe { uvll::set_data_for_req(req, &wcx as *WriteContext) } + if self.last_write_req.is_none() { + self.last_write_req = Some(Request::new(uvll::UV_WRITE)); + } + let req = self.last_write_req.get_ref(); // Send off the request, but be careful to not block until we're sure // that the write reqeust is queued. If the reqeust couldn't be queued, // then we should return immediately with an error. 
match unsafe { - uvll::uv_write(req, self.handle, [slice_to_uv_buf(buf)], write_cb) + uvll::uv_write(req.handle, self.handle, [slice_to_uv_buf(buf)], + write_cb) } { 0 => { + let mut wcx = WriteContext { result: 0, task: None, }; + req.set_data(&wcx); let scheduler: ~Scheduler = Local::take(); do scheduler.deschedule_running_task_and_then |_sched, task| { wcx.task = Some(task); } - assert!(wcx.task.is_none()); - wcx.result.take().expect("no result in write stream?") + match wcx.result { + 0 => Ok(()), + n => Err(UvError(n)), + } } n => Err(UvError(n)), } @@ -124,12 +129,6 @@ impl StreamWatcher { // synchronously (the task is blocked) or asynchronously (the task is not // block, but the handle is still deallocated). pub fn close(&mut self, synchronous: bool) { - // clean up the cached write request if we have one - match self.last_write_req { - Some(req) => unsafe { uvll::free_req(req) }, - None => {} - } - if synchronous { let mut closing_task = None; unsafe { @@ -186,31 +185,24 @@ extern fn read_cb(handle: *uvll::uv_stream_t, nread: ssize_t, _buf: Buf) { // XXX: Is there a performance impact to calling // stop here? unsafe { assert_eq!(uvll::uv_read_stop(handle), 0); } + rcx.result = nread; - assert!(rcx.result.is_none()); - rcx.result = Some(match nread { - n if n < 0 => Err(UvError(n as c_int)), - n => Ok(n as uint), - }); - - let task = rcx.task.take().expect("read_cb needs a task"); let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task); + scheduler.resume_blocked_task_immediately(rcx.task.take_unwrap()); } // Unlike reading, the WriteContext is stored in the uv_write_t request. Like // reading, however, all this does is wake up the blocked task after squirreling // away the error code as a result. extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { + if status == uvll::ECANCELED { return } // Remember to not free the request because it is re-used between writes on // the same stream. - unsafe { - let wcx: &mut WriteContext = cast::transmute(uvll::get_data_for_req(req)); - wcx.result = Some(match status { - 0 => Ok(()), - n => Err(UvError(n)), - }); - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(wcx.task.take_unwrap()); - } + let req = Request::wrap(req); + let wcx: &mut WriteContext = unsafe { cast::transmute(req.get_data()) }; + wcx.result = status; + + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(wcx.task.take_unwrap()); + req.defuse(); } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 1732e84be4e70..46731993bc7b6 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cell::Cell; use std::comm::{oneshot, stream, PortOne, ChanOne, SendDeferred}; use std::libc::c_int; use std::rt::BlockedTask; @@ -77,10 +76,9 @@ impl RtioTimer for TimerWatcher { fn oneshot(&mut self, msecs: u64) -> PortOne<()> { let (port, chan) = oneshot(); - let chan = Cell::new(chan); let _m = self.fire_missiles(); - self.action = Some(SendOnce(chan.take())); + self.action = Some(SendOnce(chan)); self.start(msecs, 0); return port; @@ -88,10 +86,9 @@ impl RtioTimer for TimerWatcher { fn period(&mut self, msecs: u64) -> Port<()> { let (port, chan) = stream(); - let chan = Cell::new(chan); let _m = self.fire_missiles(); - self.action = Some(SendMany(chan.take())); + self.action = Some(SendMany(chan)); self.start(msecs, msecs); return port; diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 1c6e59d9f2eac..d0a160ba8ce9e 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -11,22 +11,17 @@ use std::c_str::CString; use std::cast::transmute; use std::cast; -use std::cell::Cell; -use std::clone::Clone; use std::comm::{SharedChan, GenericChan}; use std::libc; -use std::libc::{c_int, c_uint, c_void}; -use std::ptr; +use std::libc::c_int; use std::str; use std::rt::io; use std::rt::io::IoError; -use std::rt::io::net::ip::{SocketAddr, IpAddr}; -use std::rt::io::{standard_error, OtherIoError}; +use std::rt::io::net::ip::SocketAddr; use std::rt::io::process::ProcessConfig; use std::rt::local::Local; use std::rt::rtio::*; use std::rt::sched::{Scheduler, SchedHandle}; -use std::rt::tube::Tube; use std::rt::task::Task; use std::path::Path; use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, @@ -45,9 +40,7 @@ use ai = std::rt::io::net::addrinfo; use super::*; use idle::IdleWatcher; -use net::{UvIpv4SocketAddr, UvIpv6SocketAddr}; use addrinfo::GetAddrInfoRequest; -use pipe::PipeListener; // XXX we should not be calling uvll functions in here. @@ -137,47 +130,6 @@ impl Drop for HomingMissile { } } -enum SocketNameKind { - TcpPeer, - Tcp, - Udp -} - -fn socket_name>(sk: SocketNameKind, - handle: U) -> Result { - let getsockname = match sk { - TcpPeer => uvll::tcp_getpeername, - Tcp => uvll::tcp_getsockname, - Udp => uvll::udp_getsockname, - }; - - // Allocate a sockaddr_storage - // since we don't know if it's ipv4 or ipv6 - let r_addr = unsafe { uvll::malloc_sockaddr_storage() }; - - let r = unsafe { - getsockname(handle.native_handle() as *c_void, r_addr as *uvll::sockaddr_storage) - }; - - if r != 0 { - let status = status_to_maybe_uv_error(r); - return Err(uv_error_to_io_error(status.unwrap())); - } - - let addr = unsafe { - if uvll::is_ip6_addr(r_addr as *uvll::sockaddr) { - net::uv_socket_addr_to_socket_addr(UvIpv6SocketAddr(r_addr as *uvll::sockaddr_in6)) - } else { - net::uv_socket_addr_to_socket_addr(UvIpv4SocketAddr(r_addr as *uvll::sockaddr_in)) - } - }; - - unsafe { uvll::free_sockaddr_storage(r_addr); } - - Ok(addr) - -} - // Obviously an Event Loop is always home. pub struct UvEventLoop { priv uvio: UvIoFactory @@ -251,97 +203,26 @@ impl IoFactory for UvIoFactory { // Connect to an address and return a new stream // NB: This blocks the task waiting on the connection. // It would probably be better to return a future - fn tcp_connect(&mut self, addr: SocketAddr) -> Result<~RtioTcpStream, IoError> { - // Create a cell in the task to hold the result. We will fill - // the cell before resuming the task. 
- let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - - // Block this task and take ownership, switch to scheduler context - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - - let mut tcp = TcpWatcher::new(self.uv_loop()); - let task_cell = Cell::new(task); - - // Wait for a connection - do tcp.connect(addr) |stream, status| { - match status { - None => { - let tcp = NativeHandle::from_native_handle(stream.native_handle()); - let home = get_handle_to_current_scheduler!(); - let res = Ok(~UvTcpStream { watcher: tcp, home: home } - as ~RtioTcpStream); - - // Store the stream in the task's stack - unsafe { (*result_cell_ptr).put_back(res); } - - // Context switch - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - Some(_) => { - let task_cell = Cell::new(task_cell.take()); - do stream.close { - let res = Err(uv_error_to_io_error(status.unwrap())); - unsafe { (*result_cell_ptr).put_back(res); } - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } - } - } + fn tcp_connect(&mut self, addr: SocketAddr) + -> Result<~RtioTcpStream, IoError> + { + match TcpWatcher::connect(self.uv_loop(), addr) { + Ok(t) => Ok(~t as ~RtioTcpStream), + Err(e) => Err(uv_error_to_io_error(e)), } - - assert!(!result_cell.is_empty()); - return result_cell.take(); } fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~RtioTcpListener, IoError> { - let mut watcher = TcpWatcher::new(self.uv_loop()); - match watcher.bind(addr) { - Ok(_) => { - let home = get_handle_to_current_scheduler!(); - Ok(~UvTcpListener::new(watcher, home) as ~RtioTcpListener) - } - Err(uverr) => { - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do watcher.as_stream().close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - Err(uv_error_to_io_error(uverr)) - } - } + match TcpListener::bind(self.uv_loop(), addr) { + Ok(t) => Ok(t as ~RtioTcpListener), + Err(e) => Err(uv_error_to_io_error(e)), } } fn udp_bind(&mut self, addr: SocketAddr) -> Result<~RtioUdpSocket, IoError> { - let mut watcher = UdpWatcher::new(self.uv_loop()); - match watcher.bind(addr) { - Ok(_) => { - let home = get_handle_to_current_scheduler!(); - Ok(~UvUdpSocket { watcher: watcher, home: home } as ~RtioUdpSocket) - } - Err(uverr) => { - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - Err(uv_error_to_io_error(uverr)) - } - } + match UdpWatcher::bind(self.uv_loop(), addr) { + Ok(u) => Ok(~u as ~RtioUdpSocket), + Err(e) => Err(uv_error_to_io_error(e)), } } @@ -487,416 +368,6 @@ impl IoFactory for UvIoFactory { } } -pub struct UvTcpListener { - priv watcher : TcpWatcher, - priv home: SchedHandle, -} - -impl HomingIO for UvTcpListener { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl UvTcpListener { - fn new(watcher: TcpWatcher, home: SchedHandle) -> UvTcpListener { - UvTcpListener { watcher: watcher, home: home } - } -} - -impl Drop for 
UvTcpListener { - fn drop(&mut self) { - let (_m, sched) = self.fire_homing_missile_sched(); - do sched.deschedule_running_task_and_then |_, task| { - let task = Cell::new(task); - do self.watcher.as_stream().close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task.take()); - } - } - } -} - -impl RtioSocket for UvTcpListener { - fn socket_name(&mut self) -> Result { - let _m = self.fire_homing_missile(); - socket_name(Tcp, self.watcher) - } -} - -impl RtioTcpListener for UvTcpListener { - fn listen(mut ~self) -> Result<~RtioTcpAcceptor, IoError> { - let _m = self.fire_homing_missile(); - let acceptor = ~UvTcpAcceptor::new(*self); - let incoming = Cell::new(acceptor.incoming.clone()); - let mut stream = acceptor.listener.watcher.as_stream(); - let res = do stream.listen |mut server, status| { - do incoming.with_mut_ref |incoming| { - let inc = match status { - Some(_) => Err(standard_error(OtherIoError)), - None => { - let inc = TcpWatcher::new(&server.event_loop()); - // first accept call in the callback guarenteed to succeed - server.accept(inc.as_stream()); - let home = get_handle_to_current_scheduler!(); - Ok(~UvTcpStream { watcher: inc, home: home } - as ~RtioTcpStream) - } - }; - incoming.send(inc); - } - }; - match res { - Ok(()) => Ok(acceptor as ~RtioTcpAcceptor), - Err(e) => Err(uv_error_to_io_error(e)), - } - } -} - -pub struct UvTcpAcceptor { - priv listener: UvTcpListener, - priv incoming: Tube>, -} - -impl HomingIO for UvTcpAcceptor { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } -} - -impl UvTcpAcceptor { - fn new(listener: UvTcpListener) -> UvTcpAcceptor { - UvTcpAcceptor { listener: listener, incoming: Tube::new() } - } -} - -impl RtioSocket for UvTcpAcceptor { - fn socket_name(&mut self) -> Result { - let _m = self.fire_homing_missile(); - socket_name(Tcp, self.listener.watcher) - } -} - -fn accept_simultaneously(stream: StreamWatcher, a: int) -> Result<(), IoError> { - let r = unsafe { - uvll::uv_tcp_simultaneous_accepts(stream.native_handle(), a as c_int) - }; - status_to_io_result(r) -} - -impl RtioTcpAcceptor for UvTcpAcceptor { - fn accept(&mut self) -> Result<~RtioTcpStream, IoError> { - let _m = self.fire_homing_missile(); - self.incoming.recv() - } - - fn accept_simultaneously(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - accept_simultaneously(self.listener.watcher.as_stream(), 1) - } - - fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - accept_simultaneously(self.listener.watcher.as_stream(), 0) - } -} - -fn read_stream(mut watcher: StreamWatcher, - scheduler: ~Scheduler, - buf: &mut [u8]) -> Result { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - - let uv_buf = slice_to_uv_buf(buf); - do scheduler.deschedule_running_task_and_then |_sched, task| { - let task_cell = Cell::new(task); - // XXX: We shouldn't reallocate these callbacks every - // call to read - let alloc: AllocCallback = |_| uv_buf; - do watcher.read_start(alloc) |mut watcher, nread, _buf, status| { - - // Stop reading so that no read callbacks are - // triggered before the user calls `read` again. - // XXX: Is there a performance impact to calling - // stop here? 
- watcher.read_stop(); - - let result = if status.is_none() { - assert!(nread >= 0); - Ok(nread as uint) - } else { - Err(uv_error_to_io_error(status.unwrap())) - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - result_cell.take() -} - -fn write_stream(mut watcher: StreamWatcher, - scheduler: ~Scheduler, - buf: &[u8]) -> Result<(), IoError> { - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let buf_ptr: *&[u8] = &buf; - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - do watcher.write(buf) |_watcher, status| { - let result = if status.is_none() { - Ok(()) - } else { - Err(uv_error_to_io_error(status.unwrap())) - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - result_cell.take() -} - -pub struct UvTcpStream { - priv watcher: TcpWatcher, - priv home: SchedHandle, -} - -impl HomingIO for UvTcpStream { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl Drop for UvTcpStream { - fn drop(&mut self) { - let (_m, sched) = self.fire_homing_missile_sched(); - do sched.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self.watcher.as_stream().close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } -} - -impl RtioSocket for UvTcpStream { - fn socket_name(&mut self) -> Result { - let _m = self.fire_homing_missile(); - socket_name(Tcp, self.watcher) - } -} - -impl RtioTcpStream for UvTcpStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - let (_m, scheduler) = self.fire_homing_missile_sched(); - read_stream(self.watcher.as_stream(), scheduler, buf) - } - - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - let (_m, scheduler) = self.fire_homing_missile_sched(); - write_stream(self.watcher.as_stream(), scheduler, buf) - } - - fn peer_name(&mut self) -> Result { - let _m = self.fire_homing_missile(); - socket_name(TcpPeer, self.watcher) - } - - fn control_congestion(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_tcp_nodelay(self.watcher.native_handle(), 0 as c_int) - }) - } - - fn nodelay(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_tcp_nodelay(self.watcher.native_handle(), 1 as c_int) - }) - } - - fn keepalive(&mut self, delay_in_seconds: uint) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_tcp_keepalive(self.watcher.native_handle(), 1 as c_int, - delay_in_seconds as c_uint) - }) - } - - fn letdie(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_tcp_keepalive(self.watcher.native_handle(), - 0 as c_int, 0 as c_uint) - }) - } -} - -pub struct UvUdpSocket { - priv watcher: UdpWatcher, - priv home: SchedHandle, -} - -impl HomingIO for UvUdpSocket { - fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } -} - -impl Drop for UvUdpSocket { - fn drop(&mut self) { - let (_m, scheduler) = self.fire_homing_missile_sched(); - do 
scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - do self.watcher.close { - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - } -} - -impl RtioSocket for UvUdpSocket { - fn socket_name(&mut self) -> Result { - let _m = self.fire_homing_missile(); - socket_name(Udp, self.watcher) - } -} - -impl RtioUdpSocket for UvUdpSocket { - fn recvfrom(&mut self, buf: &mut [u8]) -> Result<(uint, SocketAddr), IoError> { - let (_m, scheduler) = self.fire_homing_missile_sched(); - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - - let buf_ptr: *&mut [u8] = &buf; - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; - do self.watcher.recv_start(alloc) |mut watcher, nread, _buf, addr, flags, status| { - let _ = flags; // /XXX add handling for partials? - - watcher.recv_stop(); - - let result = match status { - None => { - assert!(nread >= 0); - Ok((nread as uint, addr)) - } - Some(err) => Err(uv_error_to_io_error(err)), - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - result_cell.take() - } - - fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> Result<(), IoError> { - let (_m, scheduler) = self.fire_homing_missile_sched(); - let result_cell = Cell::new_empty(); - let result_cell_ptr: *Cell> = &result_cell; - let buf_ptr: *&[u8] = &buf; - do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell::new(task); - let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - do self.watcher.send(buf, dst) |_watcher, status| { - - let result = match status { - None => Ok(()), - Some(err) => Err(uv_error_to_io_error(err)), - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - result_cell.take() - } - - fn join_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - do multi.to_str().with_c_str |m_addr| { - uvll::uv_udp_set_membership(self.watcher.native_handle(), - m_addr, ptr::null(), - uvll::UV_JOIN_GROUP) - } - }) - } - - fn leave_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - do multi.to_str().with_c_str |m_addr| { - uvll::uv_udp_set_membership(self.watcher.native_handle(), - m_addr, ptr::null(), - uvll::UV_LEAVE_GROUP) - } - }) - } - - fn loop_multicast_locally(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_udp_set_multicast_loop(self.watcher.native_handle(), - 1 as c_int) - }) - } - - fn dont_loop_multicast_locally(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_udp_set_multicast_loop(self.watcher.native_handle(), - 0 as c_int) - }) - } - - fn multicast_time_to_live(&mut self, ttl: int) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_udp_set_multicast_ttl(self.watcher.native_handle(), - ttl as c_int) - }) - } - - fn time_to_live(&mut self, ttl: int) -> Result<(), IoError> 
{ - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_udp_set_ttl(self.watcher.native_handle(), ttl as c_int) - }) - } - - fn hear_broadcasts(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_udp_set_broadcast(self.watcher.native_handle(), - 1 as c_int) - }) - } - - fn ignore_broadcasts(&mut self) -> Result<(), IoError> { - let _m = self.fire_homing_missile(); - status_to_io_result(unsafe { - uvll::uv_udp_set_broadcast(self.watcher.native_handle(), - 0 as c_int) - }) - } -} - // this function is full of lies unsafe fn local_io() -> &'static mut IoFactory { do Local::borrow |sched: &mut Scheduler| { diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index a32f03732d664..42e0f58d87d41 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -53,6 +53,7 @@ pub mod errors { pub static ENOTCONN: c_int = -4054; pub static EPIPE: c_int = -4048; pub static ECONNABORTED: c_int = -4080; + pub static ECANCELED: c_int = -4082; } #[cfg(not(windows))] pub mod errors { @@ -65,6 +66,7 @@ pub mod errors { pub static ENOTCONN: c_int = -libc::ENOTCONN; pub static EPIPE: c_int = -libc::EPIPE; pub static ECONNABORTED: c_int = -libc::ECONNABORTED; + pub static ECANCELED : c_int = -libc::ECANCELED; } pub static PROCESS_SETUID: c_int = 1 << 0; @@ -127,6 +129,7 @@ pub struct uv_stdio_container_t { } pub type uv_handle_t = c_void; +pub type uv_req_t = c_void; pub type uv_loop_t = c_void; pub type uv_idle_t = c_void; pub type uv_tcp_t = c_void; From aa78c3d6f6b25a0e54f815cd8db765000763e48a Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 5 Nov 2013 11:29:45 -0800 Subject: [PATCH 14/27] Clean up the remaining chunks of uv --- src/librustuv/addrinfo.rs | 4 +-- src/librustuv/async.rs | 2 +- src/librustuv/file.rs | 42 ++++++++++++++++---------------- src/librustuv/idle.rs | 4 +-- src/librustuv/lib.rs | 45 ++++++++++++---------------------- src/librustuv/net.rs | 51 ++++++++++++++++++--------------------- src/librustuv/pipe.rs | 6 ++--- src/librustuv/process.rs | 4 +-- src/librustuv/signal.rs | 3 ++- src/librustuv/stream.rs | 2 +- src/librustuv/timer.rs | 4 +-- src/librustuv/tty.rs | 2 +- src/librustuv/uvio.rs | 13 ++++------ src/libstd/logging.rs | 16 ++++++------ src/libstd/rt/logging.rs | 12 ++++----- src/libstd/rt/macros.rs | 7 +++--- src/libstd/rt/task.rs | 9 ++++--- 17 files changed, 105 insertions(+), 121 deletions(-) diff --git a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs index 88818cf2b4d22..965e97893b640 100644 --- a/src/librustuv/addrinfo.rs +++ b/src/librustuv/addrinfo.rs @@ -17,7 +17,7 @@ use std::rt::local::Local; use std::rt::sched::Scheduler; use net; -use super::{Loop, UvError, NativeHandle, Request}; +use super::{Loop, UvError, Request}; use uvll; struct Addrinfo { @@ -79,7 +79,7 @@ impl GetAddrInfoRequest { let req = Request::new(uvll::UV_GETADDRINFO); return match unsafe { - uvll::uv_getaddrinfo(loop_.native_handle(), req.handle, + uvll::uv_getaddrinfo(loop_.handle, req.handle, getaddrinfo_cb, c_node_ptr, c_service_ptr, hint_ptr) } { diff --git a/src/librustuv/async.rs b/src/librustuv/async.rs index 0b93e8fa49fcb..f4c7f633ee264 100644 --- a/src/librustuv/async.rs +++ b/src/librustuv/async.rs @@ -35,7 +35,7 @@ impl AsyncWatcher { pub fn new(loop_: &mut Loop, cb: ~Callback) -> AsyncWatcher { let handle = UvHandle::alloc(None::, uvll::UV_ASYNC); assert_eq!(unsafe { - uvll::uv_async_init(loop_.native_handle(), handle, async_cb) + uvll::uv_async_init(loop_.handle, handle, 
async_cb) }, 0); let flag = Exclusive::new(false); let payload = ~Payload { callback: cb, exit_flag: flag.clone() }; diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index 1994c0a541998..e042b7744be7a 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -22,7 +22,7 @@ use std::rt::local::Local; use std::rt::sched::{Scheduler, SchedHandle}; use std::vec; -use super::{NativeHandle, Loop, UvError, uv_error_to_io_error}; +use super::{Loop, UvError, uv_error_to_io_error}; use uvio::HomingIO; use uvll; @@ -43,7 +43,7 @@ impl FsRequest { -> Result { execute(|req, cb| unsafe { - uvll::uv_fs_open(loop_.native_handle(), + uvll::uv_fs_open(loop_.handle, req, path.with_ref(|p| p), flags as c_int, mode as c_int, cb) }).map(|req| @@ -54,21 +54,21 @@ impl FsRequest { pub fn unlink(loop_: &Loop, path: &CString) -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_unlink(loop_.native_handle(), req, path.with_ref(|p| p), + uvll::uv_fs_unlink(loop_.handle, req, path.with_ref(|p| p), cb) }) } pub fn lstat(loop_: &Loop, path: &CString) -> Result { execute(|req, cb| unsafe { - uvll::uv_fs_lstat(loop_.native_handle(), req, path.with_ref(|p| p), + uvll::uv_fs_lstat(loop_.handle, req, path.with_ref(|p| p), cb) }).map(|req| req.mkstat()) } pub fn stat(loop_: &Loop, path: &CString) -> Result { execute(|req, cb| unsafe { - uvll::uv_fs_stat(loop_.native_handle(), req, path.with_ref(|p| p), + uvll::uv_fs_stat(loop_.handle, req, path.with_ref(|p| p), cb) }).map(|req| req.mkstat()) } @@ -77,7 +77,7 @@ impl FsRequest { -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_write(loop_.native_handle(), req, + uvll::uv_fs_write(loop_.handle, req, fd, vec::raw::to_ptr(buf) as *c_void, buf.len() as c_uint, offset, cb) }) @@ -87,7 +87,7 @@ impl FsRequest { -> Result { do execute(|req, cb| unsafe { - uvll::uv_fs_read(loop_.native_handle(), req, + uvll::uv_fs_read(loop_.handle, req, fd, vec::raw::to_ptr(buf) as *c_void, buf.len() as c_uint, offset, cb) }).map |req| { @@ -98,12 +98,12 @@ impl FsRequest { pub fn close(loop_: &Loop, fd: c_int, sync: bool) -> Result<(), UvError> { if sync { execute_nop(|req, cb| unsafe { - uvll::uv_fs_close(loop_.native_handle(), req, fd, cb) + uvll::uv_fs_close(loop_.handle, req, fd, cb) }) } else { unsafe { let req = uvll::malloc_req(uvll::UV_FS); - uvll::uv_fs_close(loop_.native_handle(), req, fd, close_cb); + uvll::uv_fs_close(loop_.handle, req, fd, close_cb); return Ok(()); } @@ -120,14 +120,14 @@ impl FsRequest { -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_mkdir(loop_.native_handle(), req, path.with_ref(|p| p), + uvll::uv_fs_mkdir(loop_.handle, req, path.with_ref(|p| p), mode, cb) }) } pub fn rmdir(loop_: &Loop, path: &CString) -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_rmdir(loop_.native_handle(), req, path.with_ref(|p| p), + uvll::uv_fs_rmdir(loop_.handle, req, path.with_ref(|p| p), cb) }) } @@ -136,7 +136,7 @@ impl FsRequest { -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_rename(loop_.native_handle(), + uvll::uv_fs_rename(loop_.handle, req, path.with_ref(|p| p), to.with_ref(|p| p), @@ -148,7 +148,7 @@ impl FsRequest { -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_chmod(loop_.native_handle(), req, path.with_ref(|p| p), + uvll::uv_fs_chmod(loop_.handle, req, path.with_ref(|p| p), mode, cb) }) } @@ -157,7 +157,7 @@ impl FsRequest { -> Result<~[Path], UvError> { execute(|req, cb| unsafe { - uvll::uv_fs_readdir(loop_.native_handle(), + 
uvll::uv_fs_readdir(loop_.handle, req, path.with_ref(|p| p), flags, cb) }).map(|req| unsafe { let mut paths = ~[]; @@ -174,7 +174,7 @@ impl FsRequest { pub fn readlink(loop_: &Loop, path: &CString) -> Result { do execute(|req, cb| unsafe { - uvll::uv_fs_readlink(loop_.native_handle(), req, + uvll::uv_fs_readlink(loop_.handle, req, path.with_ref(|p| p), cb) }).map |req| { Path::new(unsafe { @@ -187,7 +187,7 @@ impl FsRequest { -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_chown(loop_.native_handle(), + uvll::uv_fs_chown(loop_.handle, req, path.with_ref(|p| p), uid as uvll::uv_uid_t, gid as uvll::uv_gid_t, @@ -199,7 +199,7 @@ impl FsRequest { -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_ftruncate(loop_.native_handle(), req, file, offset, cb) + uvll::uv_fs_ftruncate(loop_.handle, req, file, offset, cb) }) } @@ -207,7 +207,7 @@ impl FsRequest { -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_link(loop_.native_handle(), req, + uvll::uv_fs_link(loop_.handle, req, src.with_ref(|p| p), dst.with_ref(|p| p), cb) @@ -218,7 +218,7 @@ impl FsRequest { -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_symlink(loop_.native_handle(), req, + uvll::uv_fs_symlink(loop_.handle, req, src.with_ref(|p| p), dst.with_ref(|p| p), 0, cb) @@ -227,13 +227,13 @@ impl FsRequest { pub fn fsync(loop_: &Loop, fd: c_int) -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_fsync(loop_.native_handle(), req, fd, cb) + uvll::uv_fs_fsync(loop_.handle, req, fd, cb) }) } pub fn datasync(loop_: &Loop, fd: c_int) -> Result<(), UvError> { execute_nop(|req, cb| unsafe { - uvll::uv_fs_fdatasync(loop_.native_handle(), req, fd, cb) + uvll::uv_fs_fdatasync(loop_.handle, req, fd, cb) }) } diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index e3cc6ec90a1b1..f4072c7c6813b 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -26,7 +26,7 @@ impl IdleWatcher { pub fn new(loop_: &mut Loop) -> ~IdleWatcher { let handle = UvHandle::alloc(None::, uvll::UV_IDLE); assert_eq!(unsafe { - uvll::uv_idle_init(loop_.native_handle(), handle) + uvll::uv_idle_init(loop_.handle, handle) }, 0); let me = ~IdleWatcher { handle: handle, @@ -40,7 +40,7 @@ impl IdleWatcher { pub fn onetime(loop_: &mut Loop, f: proc()) { let handle = UvHandle::alloc(None::, uvll::UV_IDLE); unsafe { - assert_eq!(uvll::uv_idle_init(loop_.native_handle(), handle), 0); + assert_eq!(uvll::uv_idle_init(loop_.handle, handle), 0); let data: *c_void = cast::transmute(~f); uvll::set_data_for_uv_handle(handle, data); assert_eq!(uvll::uv_idle_start(handle, onetime_cb), 0) diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 5e79f6e13451f..1afc9b1d0ea67 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -59,14 +59,14 @@ use std::rt::io::IoError; //#[cfg(test)] use unstable::run_in_bare_thread; +pub use self::async::AsyncWatcher; pub use self::file::{FsRequest, FileWatcher}; -pub use self::net::{TcpWatcher, TcpListener, TcpAcceptor, UdpWatcher}; pub use self::idle::IdleWatcher; -pub use self::timer::TimerWatcher; -pub use self::async::AsyncWatcher; -pub use self::process::Process; +pub use self::net::{TcpWatcher, TcpListener, TcpAcceptor, UdpWatcher}; pub use self::pipe::{PipeWatcher, PipeListener, PipeAcceptor}; +pub use self::process::Process; pub use self::signal::SignalWatcher; +pub use self::timer::TimerWatcher; pub use self::tty::TtyWatcher; mod macros; @@ -89,19 +89,6 @@ pub mod tty; pub mod signal; pub mod stream; -/// XXX: 
Loop(*handle) is buggy with destructors. Normal structs -/// with dtors may not be destructured, but tuple structs can, -/// but the results are not correct. -pub struct Loop { - priv handle: *uvll::uv_loop_t -} - -/// A type that wraps a native handle -pub trait NativeHandle { - fn from_native_handle(T) -> Self; - fn native_handle(&self) -> T; -} - /// A type that wraps a uv handle pub trait UvHandle { fn uv_handle(&self) -> *T; @@ -185,28 +172,28 @@ impl Drop for Request { } } +/// XXX: Loop(*handle) is buggy with destructors. Normal structs +/// with dtors may not be destructured, but tuple structs can, +/// but the results are not correct. +pub struct Loop { + priv handle: *uvll::uv_loop_t +} + impl Loop { pub fn new() -> Loop { let handle = unsafe { uvll::loop_new() }; assert!(handle.is_not_null()); - NativeHandle::from_native_handle(handle) + Loop::wrap(handle) } + pub fn wrap(handle: *uvll::uv_loop_t) -> Loop { Loop { handle: handle } } + pub fn run(&mut self) { - unsafe { uvll::uv_run(self.native_handle(), uvll::RUN_DEFAULT) }; + unsafe { uvll::uv_run(self.handle, uvll::RUN_DEFAULT) }; } pub fn close(&mut self) { - unsafe { uvll::uv_loop_delete(self.native_handle()) }; - } -} - -impl NativeHandle<*uvll::uv_loop_t> for Loop { - fn from_native_handle(handle: *uvll::uv_loop_t) -> Loop { - Loop { handle: handle } - } - fn native_handle(&self) -> *uvll::uv_loop_t { - self.handle + unsafe { uvll::uv_loop_delete(self.handle) }; } } diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index ef64b1e5cc545..01847a19304f6 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -13,40 +13,38 @@ use std::libc::{size_t, ssize_t, c_int, c_void, c_uint, c_char}; use std::ptr; use std::rt::BlockedTask; use std::rt::io::IoError; -use std::rt::io::net::ip::{Ipv4Addr, Ipv6Addr}; +use std::rt::io::net::ip::{Ipv4Addr, Ipv6Addr, SocketAddr, IpAddr}; use std::rt::local::Local; -use std::rt::io::net::ip::{SocketAddr, IpAddr}; use std::rt::rtio; use std::rt::sched::{Scheduler, SchedHandle}; use std::rt::tube::Tube; use std::str; use std::vec; -use uvll; -use uvll::*; -use super::{ - Loop, Request, UvError, Buf, NativeHandle, - status_to_io_result, +use stream::StreamWatcher; +use super::{Loop, Request, UvError, Buf, status_to_io_result, uv_error_to_io_error, UvHandle, slice_to_uv_buf}; use uvio::HomingIO; -use stream::StreamWatcher; +use uvll; //////////////////////////////////////////////////////////////////////////////// /// Generic functions related to dealing with sockaddr things //////////////////////////////////////////////////////////////////////////////// pub enum UvSocketAddr { - UvIpv4SocketAddr(*sockaddr_in), - UvIpv6SocketAddr(*sockaddr_in6), + UvIpv4SocketAddr(*uvll::sockaddr_in), + UvIpv6SocketAddr(*uvll::sockaddr_in6), } pub fn sockaddr_to_UvSocketAddr(addr: *uvll::sockaddr) -> UvSocketAddr { unsafe { - assert!((is_ip4_addr(addr) || is_ip6_addr(addr))); - assert!(!(is_ip4_addr(addr) && is_ip6_addr(addr))); + assert!((uvll::is_ip4_addr(addr) || uvll::is_ip6_addr(addr))); + assert!(!(uvll::is_ip4_addr(addr) && uvll::is_ip6_addr(addr))); match addr { - _ if is_ip4_addr(addr) => UvIpv4SocketAddr(addr as *uvll::sockaddr_in), - _ if is_ip6_addr(addr) => UvIpv6SocketAddr(addr as *uvll::sockaddr_in6), + _ if uvll::is_ip4_addr(addr) => + UvIpv4SocketAddr(addr as *uvll::sockaddr_in), + _ if uvll::is_ip6_addr(addr) => + UvIpv6SocketAddr(addr as *uvll::sockaddr_in6), _ => fail!(), } } @@ -54,16 +52,16 @@ pub fn sockaddr_to_UvSocketAddr(addr: *uvll::sockaddr) -> UvSocketAddr { fn 
socket_addr_as_uv_socket_addr(addr: SocketAddr, f: &fn(UvSocketAddr) -> T) -> T { let malloc = match addr.ip { - Ipv4Addr(*) => malloc_ip4_addr, - Ipv6Addr(*) => malloc_ip6_addr, + Ipv4Addr(*) => uvll::malloc_ip4_addr, + Ipv6Addr(*) => uvll::malloc_ip6_addr, }; let wrap = match addr.ip { Ipv4Addr(*) => UvIpv4SocketAddr, Ipv6Addr(*) => UvIpv6SocketAddr, }; let free = match addr.ip { - Ipv4Addr(*) => free_ip4_addr, - Ipv6Addr(*) => free_ip6_addr, + Ipv4Addr(*) => uvll::free_ip4_addr, + Ipv6Addr(*) => uvll::free_ip6_addr, }; let addr = unsafe { malloc(addr.ip.to_str(), addr.port as int) }; @@ -194,7 +192,7 @@ impl TcpWatcher { pub fn new(loop_: &Loop) -> TcpWatcher { let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; assert_eq!(unsafe { - uvll::uv_tcp_init(loop_.native_handle(), handle) + uvll::uv_tcp_init(loop_.handle, handle) }, 0); TcpWatcher { home: get_handle_to_current_scheduler!(), @@ -223,8 +221,9 @@ impl TcpWatcher { }; match result { 0 => { - req.defuse(); let mut cx = Ctx { status: 0, task: None }; + req.set_data(&cx); + req.defuse(); let scheduler: ~Scheduler = Local::take(); do scheduler.deschedule_running_task_and_then |_, task| { cx.task = Some(task); @@ -244,11 +243,9 @@ impl TcpWatcher { }; extern fn connect_cb(req: *uvll::uv_connect_t, status: c_int) { - let _req = Request::wrap(req); + let req = Request::wrap(req); if status == uvll::ECANCELED { return } - let cx: &mut Ctx = unsafe { - cast::transmute(uvll::get_data_for_req(req)) - }; + let cx: &mut Ctx = unsafe { cast::transmute(req.get_data()) }; cx.status = status; let scheduler: ~Scheduler = Local::take(); scheduler.resume_blocked_task_immediately(cx.task.take_unwrap()); @@ -328,7 +325,7 @@ impl TcpListener { { let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; assert_eq!(unsafe { - uvll::uv_tcp_init(loop_.native_handle(), handle) + uvll::uv_tcp_init(loop_.handle, handle) }, 0); let l = ~TcpListener { home: get_handle_to_current_scheduler!(), @@ -385,7 +382,7 @@ impl rtio::RtioTcpListener for TcpListener { extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) { let msg = match status { 0 => { - let loop_ = NativeHandle::from_native_handle(unsafe { + let loop_ = Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(server) }); let client = TcpWatcher::new(&loop_); @@ -471,7 +468,7 @@ impl UdpWatcher { home: get_handle_to_current_scheduler!(), }; assert_eq!(unsafe { - uvll::uv_udp_init(loop_.native_handle(), udp.handle) + uvll::uv_udp_init(loop_.handle, udp.handle) }, 0); let result = socket_addr_as_uv_socket_addr(address, |addr| unsafe { match addr { diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index 2a41dd9efe19b..e1cb8464114c2 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -19,7 +19,7 @@ use std::rt::sched::{Scheduler, SchedHandle}; use std::rt::tube::Tube; use stream::StreamWatcher; -use super::{Loop, UvError, NativeHandle, uv_error_to_io_error, UvHandle, Request}; +use super::{Loop, UvError, UvHandle, Request, uv_error_to_io_error}; use uvio::HomingIO; use uvll; @@ -55,7 +55,7 @@ impl PipeWatcher { let handle = uvll::malloc_handle(uvll::UV_NAMED_PIPE); assert!(!handle.is_null()); let ipc = ipc as libc::c_int; - assert_eq!(uvll::uv_pipe_init(loop_.native_handle(), handle, ipc), 0); + assert_eq!(uvll::uv_pipe_init(loop_.handle, handle, ipc), 0); handle } } @@ -196,7 +196,7 @@ impl UvHandle for PipeListener { extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { let msg = match status { 0 => { - let loop_ = NativeHandle::from_native_handle(unsafe { + 
let loop_ = Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(server) }); let client = PipeWatcher::alloc(&loop_, false); diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 7b44c350f13de..20af8e212216a 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -19,7 +19,7 @@ use std::rt::rtio::RtioProcess; use std::rt::sched::{Scheduler, SchedHandle}; use std::vec; -use super::{Loop, NativeHandle, UvHandle, UvError, uv_error_to_io_error}; +use super::{Loop, UvHandle, UvError, uv_error_to_io_error}; use uvio::HomingIO; use uvll; use pipe::PipeWatcher; @@ -77,7 +77,7 @@ impl Process { let handle = UvHandle::alloc(None::, uvll::UV_PROCESS); match unsafe { - uvll::uv_spawn(loop_.native_handle(), handle, options) + uvll::uv_spawn(loop_.handle, handle, options) } { 0 => { let process = ~Process { diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index d8ecc25db6d1d..3c5efe63f96df 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -32,7 +32,8 @@ impl SignalWatcher { channel: SharedChan) -> Result<~SignalWatcher, UvError> { let handle = UvHandle::alloc(None::, uvll::UV_SIGNAL); assert_eq!(unsafe { - uvll::uv_signal_init(loop_.native_handle(), handle) + uvll::uv_signal_init(loop_.handle, handle) + }, 0); match unsafe { uvll::uv_signal_start(handle, signal_cb, signum as c_int) } { diff --git a/src/librustuv/stream.rs b/src/librustuv/stream.rs index 01bc02a50be90..4958ca4838e7f 100644 --- a/src/librustuv/stream.rs +++ b/src/librustuv/stream.rs @@ -201,8 +201,8 @@ extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { let req = Request::wrap(req); let wcx: &mut WriteContext = unsafe { cast::transmute(req.get_data()) }; wcx.result = status; + req.defuse(); let sched: ~Scheduler = Local::take(); sched.resume_blocked_task_immediately(wcx.task.take_unwrap()); - req.defuse(); } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 46731993bc7b6..18b05073e8306 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -16,7 +16,7 @@ use std::rt::rtio::RtioTimer; use std::rt::sched::{Scheduler, SchedHandle}; use uvll; -use super::{Loop, NativeHandle, UvHandle}; +use super::{Loop, UvHandle}; use uvio::HomingIO; pub struct TimerWatcher { @@ -35,7 +35,7 @@ impl TimerWatcher { pub fn new(loop_: &mut Loop) -> ~TimerWatcher { let handle = UvHandle::alloc(None::, uvll::UV_TIMER); assert_eq!(unsafe { - uvll::uv_timer_init(loop_.native_handle(), handle) + uvll::uv_timer_init(loop_.handle, handle) }, 0); let me = ~TimerWatcher { handle: handle, diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index 316a817354db1..9d84f785f25da 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -33,7 +33,7 @@ impl TtyWatcher { let handle = UvHandle::alloc(None::, uvll::UV_TTY); match unsafe { - uvll::uv_tty_init(loop_.native_handle(), handle, fd as libc::c_int, + uvll::uv_tty_init(loop_.handle, handle, fd as libc::c_int, readable as libc::c_int) } { 0 => { diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index d0a160ba8ce9e..740943c4de5d8 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -12,18 +12,18 @@ use std::c_str::CString; use std::cast::transmute; use std::cast; use std::comm::{SharedChan, GenericChan}; -use std::libc; use std::libc::c_int; -use std::str; -use std::rt::io; +use std::libc; +use std::path::Path; use std::rt::io::IoError; use std::rt::io::net::ip::SocketAddr; use std::rt::io::process::ProcessConfig; +use std::rt::io; use std::rt::local::Local; use std::rt::rtio::*; use 
std::rt::sched::{Scheduler, SchedHandle}; use std::rt::task::Task; -use std::path::Path; +use std::str; use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, S_IRUSR, S_IWUSR}; use std::rt::io::{FileMode, FileAccess, Open, Append, Truncate, Read, Write, @@ -39,11 +39,8 @@ use ai = std::rt::io::net::addrinfo; #[cfg(test)] use std::rt::comm::oneshot; use super::*; -use idle::IdleWatcher; use addrinfo::GetAddrInfoRequest; -// XXX we should not be calling uvll functions in here. - pub trait HomingIO { fn home<'r>(&'r mut self) -> &'r mut SchedHandle; @@ -238,7 +235,7 @@ impl IoFactory for UvIoFactory { fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior) -> ~RtioFileStream { - let loop_ = Loop {handle: self.uv_loop().native_handle()}; + let loop_ = Loop::wrap(self.uv_loop().handle); ~FileWatcher::new(loop_, fd, close) as ~RtioFileStream } diff --git a/src/libstd/logging.rs b/src/libstd/logging.rs index 35a3ca3cff05d..1c464110ce051 100644 --- a/src/libstd/logging.rs +++ b/src/libstd/logging.rs @@ -107,14 +107,16 @@ pub fn log(_level: u32, args: &fmt::Arguments) { let optional_task: Option<*mut Task> = Local::try_unsafe_borrow(); match optional_task { Some(local) => { - // Use the available logger - (*local).logger.log(args); - } - None => { - // There is no logger anywhere, just write to stderr - let mut logger = StdErrLogger::new(); - logger.log(args); + match (*local).logger { + // Use the available logger if we have one + Some(ref mut logger) => return logger.log(args), + None => {} + } } + None => {} } + // There is no logger anywhere, just write to stderr + let mut logger = StdErrLogger::new(); + logger.log(args); } } diff --git a/src/libstd/rt/logging.rs b/src/libstd/rt/logging.rs index cb66d6f6199ae..c37195a7b1553 100644 --- a/src/libstd/rt/logging.rs +++ b/src/libstd/rt/logging.rs @@ -172,20 +172,18 @@ pub trait Logger { /// This logger emits output to the stderr of the process, and contains a lazily /// initialized event-loop driven handle to the stream. pub struct StdErrLogger { - priv handle: Option>, + priv handle: LineBufferedWriter, } impl StdErrLogger { - pub fn new() -> StdErrLogger { StdErrLogger { handle: None } } + pub fn new() -> StdErrLogger { + StdErrLogger { handle: LineBufferedWriter::new(io::stderr()) } + } } impl Logger for StdErrLogger { fn log(&mut self, args: &fmt::Arguments) { - // First time logging? Get a handle to the stderr of this process. - if self.handle.is_none() { - self.handle = Some(LineBufferedWriter::new(io::stderr())); - } - fmt::writeln(self.handle.get_mut_ref() as &mut io::Writer, args); + fmt::writeln(&mut self.handle as &mut io::Writer, args); } } diff --git a/src/libstd/rt/macros.rs b/src/libstd/rt/macros.rs index c6ff3427c15f6..2c89bfd8c764f 100644 --- a/src/libstd/rt/macros.rs +++ b/src/libstd/rt/macros.rs @@ -34,7 +34,7 @@ macro_rules! rtassert ( ( $arg:expr ) => ( { if ::rt::util::ENFORCE_SANITY { if !$arg { - rtabort!("assertion failed: {}", stringify!($arg)); + rtabort!(" assertion failed: {}", stringify!($arg)); } } } ) @@ -42,7 +42,8 @@ macro_rules! rtassert ( macro_rules! 
rtabort ( - ($($msg:tt)*) => ( { - ::rt::util::abort(format!($($msg)*)); + ($msg:expr $($arg:tt)*) => ( { + ::rt::util::abort(format!(concat!(file!(), ":", line!(), " ", $msg) + $($arg)*)); } ) ) diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index cf7c291d189e4..7e374fc602138 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -50,7 +50,7 @@ pub struct Task { heap: LocalHeap, priv gc: GarbageCollector, storage: LocalStorage, - logger: StdErrLogger, + logger: Option, unwinder: Unwinder, taskgroup: Option, death: Death, @@ -180,7 +180,7 @@ impl Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(None), - logger: StdErrLogger::new(), + logger: None, unwinder: Unwinder { unwinding: false, cause: None }, taskgroup: None, death: Death::new(), @@ -215,7 +215,7 @@ impl Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(None), - logger: StdErrLogger::new(), + logger: None, unwinder: Unwinder { unwinding: false, cause: None }, taskgroup: None, death: Death::new(), @@ -238,7 +238,7 @@ impl Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(None), - logger: StdErrLogger::new(), + logger: None, unwinder: Unwinder { unwinding: false, cause: None }, taskgroup: None, // FIXME(#7544) make watching optional @@ -320,6 +320,7 @@ impl Task { } None => {} } + self.logger.take(); } } From 497d63f0bcc6436ae5a5f824946caca8f6d6fb53 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 5 Nov 2013 15:30:42 -0800 Subject: [PATCH 15/27] Don't overflow in a converting stat times to u64 Closes #10297 --- src/librustuv/file.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index e042b7744be7a..5cbf2d0e2b74b 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -256,7 +256,9 @@ impl FsRequest { let path = unsafe { Path::new(CString::new(path, false)) }; let stat = self.get_stat(); fn to_msec(stat: uvll::uv_timespec_t) -> u64 { - (stat.tv_sec * 1000 + stat.tv_nsec / 1000000) as u64 + // Be sure to cast to u64 first to prevent overflowing if the tv_sec + // field is a 32-bit integer. + (stat.tv_sec as u64) * 1000 + (stat.tv_nsec as u64) / 1000000 } let kind = match (stat.st_mode as c_int) & libc::S_IFMT { libc::S_IFREG => io::TypeFile, From f9abd998d6a5368c54162deb0bf187e94e31dc27 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 5 Nov 2013 15:48:27 -0800 Subject: [PATCH 16/27] Add bindings to uv's utime function This exposes the ability to change the modification and access times on a file. 
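The cast-first ordering in the to_msec change above matters because, on targets where tv_sec is a 32-bit integer, multiplying by 1000 in 32-bit arithmetic wraps before the result is ever widened to u64. A minimal standalone sketch of the difference, in current Rust syntax with illustrative timestamp values rather than the real uv_timespec_t fields (function names here are mine, not the patch's):

    fn to_msec_wrong(tv_sec: i32, tv_nsec: i32) -> u64 {
        // Multiplies in 32-bit arithmetic first, so large tv_sec values wrap.
        // wrapping_mul stands in for the silent wraparound the 2013 code had;
        // a plain `*` would panic in a modern debug build.
        (tv_sec.wrapping_mul(1000) + tv_nsec / 1_000_000) as u64
    }

    fn to_msec_right(tv_sec: i32, tv_nsec: i32) -> u64 {
        // Widen each field to u64 before the arithmetic, as the patched to_msec does.
        (tv_sec as u64) * 1000 + (tv_nsec as u64) / 1_000_000
    }

    fn main() {
        let (sec, nsec) = (1_383_696_000, 500_000_000); // roughly Nov 2013, illustrative only
        assert_ne!(to_msec_wrong(sec, nsec), to_msec_right(sec, nsec));
        println!("wrapped: {}", to_msec_wrong(sec, nsec));
        println!("correct: {}", to_msec_right(sec, nsec));
    }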
Closes #10266 --- src/librustuv/file.rs | 10 ++++++++++ src/librustuv/uvio.rs | 6 ++++++ src/librustuv/uvll.rs | 5 ++++- src/libstd/rt/io/fs.rs | 44 +++++++++++++++++++++++++++++++++++++++-- src/libstd/rt/io/mod.rs | 5 +++-- src/libstd/rt/rtio.rs | 2 ++ 6 files changed, 67 insertions(+), 5 deletions(-) diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index 5cbf2d0e2b74b..45f4125d79202 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -237,6 +237,16 @@ impl FsRequest { }) } + pub fn utime(loop_: &Loop, path: &CString, atime: u64, mtime: u64) + -> Result<(), UvError> + { + execute_nop(|req, cb| unsafe { + uvll::uv_fs_utime(loop_.handle, req, path.with_ref(|p| p), + atime as libc::c_double, mtime as libc::c_double, + cb) + }) + } + pub fn get_result(&self) -> c_int { unsafe { uvll::get_result_from_fs_req(self.req) } } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 740943c4de5d8..2aac43072dd01 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -313,6 +313,12 @@ impl IoFactory for UvIoFactory { let r = FsRequest::readlink(self.uv_loop(), path); r.map_err(uv_error_to_io_error) } + fn fs_utime(&mut self, path: &CString, atime: u64, mtime: u64) + -> Result<(), IoError> + { + let r = FsRequest::utime(self.uv_loop(), path, atime, mtime); + r.map_err(uv_error_to_io_error) + } fn spawn(&mut self, config: ProcessConfig) -> Result<(~RtioProcess, ~[Option<~RtioPipe>]), IoError> diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 42e0f58d87d41..09a1f8f37bdfa 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -29,7 +29,7 @@ #[allow(non_camel_case_types)]; // C types -use std::libc::{size_t, c_int, c_uint, c_void, c_char, uintptr_t}; +use std::libc::{size_t, c_int, c_uint, c_void, c_char, uintptr_t, c_double}; use std::libc::ssize_t; use std::libc::{malloc, free}; use std::libc; @@ -824,6 +824,9 @@ externfn!(fn uv_fs_symlink(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, dst: *c_char, flags: c_int, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_rename(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, dst: *c_char, cb: uv_fs_cb) -> c_int) +externfn!(fn uv_fs_utime(handle: *uv_loop_t, req: *uv_fs_t, path: *c_char, + atime: c_double, mtime: c_double, + cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_link(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, dst: *c_char, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_chown(handle: *uv_loop_t, req: *uv_fs_t, src: *c_char, diff --git a/src/libstd/rt/io/fs.rs b/src/libstd/rt/io/fs.rs index 22d7ea55f3b45..f9e622b1f1e96 100644 --- a/src/libstd/rt/io/fs.rs +++ b/src/libstd/rt/io/fs.rs @@ -587,6 +587,21 @@ pub fn rmdir_recursive(path: &Path) { rmdir(path); } +/// Changes the timestamps for a file's last modification and access time. +/// The file at the path specified will have its last access time set to +/// `atime` and its modification time set to `mtime`. +/// +/// # Errors +/// +/// This function will raise on the `io_error` condition if an error +/// happens. 
+// FIXME(#10301) these arguments should not be u64 +pub fn change_file_times(path: &Path, atime: u64, mtime: u64) { + do io_raise |io| { + io.fs_utime(&path.to_c_str(), atime, mtime) + }; +} + impl Reader for File { fn read(&mut self, buf: &mut [u8]) -> Option { match self.fd.read(buf) { @@ -704,8 +719,8 @@ mod test { use rt::io; use str; use super::{File, rmdir, mkdir, readdir, rmdir_recursive, mkdir_recursive, - copy, unlink, stat, symlink, link, readlink, chmod, chown, - lstat}; + copy, unlink, stat, symlink, link, readlink, chmod, + lstat, change_file_times}; fn tmpdir() -> Path { use os; @@ -1244,4 +1259,29 @@ mod test { rmdir_recursive(&tmpdir); } + + #[test] + fn utime() { + let tmpdir = tmpdir(); + let path = tmpdir.join("a"); + File::create(&path); + + change_file_times(&path, 100, 200); + assert_eq!(path.stat().accessed, 100); + assert_eq!(path.stat().modified, 200); + + rmdir_recursive(&tmpdir); + } + + #[test] + fn utime_noexist() { + let tmpdir = tmpdir(); + + match io::result(|| change_file_times(&tmpdir.join("a"), 100, 200)) { + Ok(*) => fail!(), + Err(*) => {} + } + + rmdir_recursive(&tmpdir); + } } diff --git a/src/libstd/rt/io/mod.rs b/src/libstd/rt/io/mod.rs index f01ce5012eb25..e8ab4670233e3 100644 --- a/src/libstd/rt/io/mod.rs +++ b/src/libstd/rt/io/mod.rs @@ -1142,8 +1142,9 @@ pub struct FileStat { /// The file permissions currently on the file perm: FilePermission, - // XXX: These time fields are pretty useless without an actual time - // representation, what are the milliseconds relative to? + // FIXME(#10301): These time fields are pretty useless without an actual + // time representation, what are the milliseconds relative + // to? /// The time that the file was created at, in platform-dependent /// milliseconds diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index f8b87abb9f677..96ba512345614 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -125,6 +125,8 @@ pub trait IoFactory { fn fs_readlink(&mut self, path: &CString) -> Result; fn fs_symlink(&mut self, src: &CString, dst: &CString) -> Result<(), IoError>; fn fs_link(&mut self, src: &CString, dst: &CString) -> Result<(), IoError>; + fn fs_utime(&mut self, src: &CString, atime: u64, mtime: u64) -> + Result<(), IoError>; // misc fn timer_init(&mut self) -> Result<~RtioTimer, IoError>; From 1bdaea827ed957ce404fffee27923e9606584ce0 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 5 Nov 2013 19:14:17 -0800 Subject: [PATCH 17/27] Migrate all streams to synchronous closing --- src/librustuv/net.rs | 2 +- src/librustuv/pipe.rs | 2 +- src/librustuv/stream.rs | 35 +++++++++++------------------------ src/librustuv/tty.rs | 7 +------ 4 files changed, 14 insertions(+), 32 deletions(-) diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index 01847a19304f6..28c2c4df12a0e 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -313,7 +313,7 @@ impl rtio::RtioTcpStream for TcpWatcher { impl Drop for TcpWatcher { fn drop(&mut self) { let _m = self.fire_missiles(); - self.stream.close(true); + self.stream.close(); } } diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index e1cb8464114c2..f79043797aedb 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -136,7 +136,7 @@ impl HomingIO for PipeWatcher { impl Drop for PipeWatcher { fn drop(&mut self) { let _m = self.fire_missiles(); - self.stream.close(true); // close synchronously + self.stream.close(); } } diff --git a/src/librustuv/stream.rs b/src/librustuv/stream.rs index 4958ca4838e7f..745cb5a6fa090 100644 
--- a/src/librustuv/stream.rs +++ b/src/librustuv/stream.rs @@ -10,7 +10,6 @@ use std::cast; use std::libc::{c_int, size_t, ssize_t, c_void}; -use std::ptr; use std::rt::BlockedTask; use std::rt::local::Local; use std::rt::sched::Scheduler; @@ -124,35 +123,23 @@ impl StreamWatcher { // This will deallocate an internally used memory, along with closing the // handle (and freeing it). - // - // The `synchronous` flag dictates whether this handle is closed - // synchronously (the task is blocked) or asynchronously (the task is not - // block, but the handle is still deallocated). - pub fn close(&mut self, synchronous: bool) { - if synchronous { - let mut closing_task = None; - unsafe { - uvll::set_data_for_uv_handle(self.handle, &closing_task); - } + pub fn close(&mut self) { + let mut closing_task = None; + unsafe { + uvll::set_data_for_uv_handle(self.handle, &closing_task); + } - // Wait for this stream to close because it possibly represents a remote - // connection which may have consequences if we close asynchronously. - let sched: ~Scheduler = Local::take(); - do sched.deschedule_running_task_and_then |_, task| { - closing_task = Some(task); - unsafe { uvll::uv_close(self.handle, close_cb) } - } - } else { - unsafe { - uvll::set_data_for_uv_handle(self.handle, ptr::null::()); - uvll::uv_close(self.handle, close_cb) - } + // Wait for this stream to close because it possibly represents a remote + // connection which may have consequences if we close asynchronously. + let sched: ~Scheduler = Local::take(); + do sched.deschedule_running_task_and_then |_, task| { + closing_task = Some(task); + unsafe { uvll::uv_close(self.handle, close_cb) } } extern fn close_cb(handle: *uvll::uv_handle_t) { let data: *c_void = unsafe { uvll::get_data_for_uv_handle(handle) }; unsafe { uvll::free_handle(handle) } - if data.is_null() { return } let closing_task: &mut Option = unsafe { cast::transmute(data) diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index 9d84f785f25da..b1bc378e61766 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -101,13 +101,8 @@ impl HomingIO for TtyWatcher { } impl Drop for TtyWatcher { - // TTY handles are used for the logger in a task, so this destructor is run - // when a task is destroyed. When a task is being destroyed, a local - // scheduler isn't available, so we can't do the normal "take the scheduler - // and resume once close is done". Instead close operations on a TTY are - // asynchronous. 
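The rewritten close() above always blocks the calling task until libuv has run the close callback, where the old code could also tear the handle down asynchronously. A rough standalone analogy of that blocking shape, using a plain thread and channel in place of the scheduler, uv_close, and close_cb (current Rust syntax, not the librustuv API):

    use std::sync::mpsc::channel;
    use std::thread;

    fn main() {
        let (done_tx, done_rx) = channel::<()>();

        // Stand-in for the event loop: it eventually runs close_cb once the
        // handle has been torn down and freed.
        let event_loop = thread::spawn(move || {
            // ... handle teardown would happen here ...
            done_tx.send(()).unwrap(); // close_cb: wake whoever requested the close
        });

        // close(): after requesting the close, block until close_cb has fired,
        // mirroring the deschedule-then-resume dance in stream.rs above.
        done_rx.recv().unwrap();
        event_loop.join().unwrap();
    }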
fn drop(&mut self) { let _m = self.fire_missiles(); - self.stream.close(false); + self.stream.close(); } } From 0df8b0057c8beaf108dfb34e53829b74fd2a7abc Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 5 Nov 2013 19:16:48 -0800 Subject: [PATCH 18/27] Work around bugs in 32-bit enum FFI cc #10308 --- src/librustuv/tty.rs | 2 +- src/librustuv/uvll.rs | 7 ++++++- src/rt/rust_uv.cpp | 5 +++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index b1bc378e61766..e224806cec1e6 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -88,7 +88,7 @@ impl RtioTTY for TtyWatcher { } fn isatty(&self) -> bool { - unsafe { uvll::uv_guess_handle(self.fd) == uvll::UV_TTY } + unsafe { uvll::guess_handle(self.fd) == uvll::UV_TTY as libc::c_int } } } diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 09a1f8f37bdfa..d009201e8409a 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -671,6 +671,11 @@ pub unsafe fn populate_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t) { rust_uv_populate_uv_stat(req_in, stat_out) } +pub unsafe fn guess_handle(handle: c_int) -> c_int { + #[fixed_stack_segment]; #[inline(never)]; + + rust_uv_guess_handle(handle) +} // uv_support is the result of compiling rust_uv.cpp @@ -728,6 +733,7 @@ extern { fn rust_set_stdio_container_stream(c: *uv_stdio_container_t, stream: *uv_stream_t); fn rust_uv_process_pid(p: *uv_process_t) -> c_int; + fn rust_uv_guess_handle(fd: c_int) -> c_int; } // generic uv functions @@ -861,7 +867,6 @@ externfn!(fn uv_tty_init(l: *uv_loop_t, tty: *uv_tty_t, fd: c_int, externfn!(fn uv_tty_set_mode(tty: *uv_tty_t, mode: c_int) -> c_int) externfn!(fn uv_tty_get_winsize(tty: *uv_tty_t, width: *c_int, height: *c_int) -> c_int) -externfn!(fn uv_guess_handle(fd: c_int) -> uv_handle_type) // signals externfn!(fn uv_signal_init(loop_: *uv_loop_t, handle: *uv_signal_t) -> c_int) diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index 2745c6062e6e4..6f619431ad711 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -334,3 +334,8 @@ extern "C" int rust_uv_process_pid(uv_process_t* p) { return p->pid; } + +extern "C" int +rust_uv_guess_handle(int fd) { + return uv_guess_handle(fd); +} From d08aadcc9aff233165a063df73c6436e0969f79c Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Wed, 6 Nov 2013 11:03:11 -0800 Subject: [PATCH 19/27] Update all uv tests to pass again --- src/librustuv/addrinfo.rs | 31 +- src/librustuv/async.rs | 76 ++-- src/librustuv/file.rs | 367 ++++++------------- src/librustuv/lib.rs | 94 +++-- src/librustuv/net.rs | 739 ++++++++++++++++++++++++-------------- src/librustuv/timer.rs | 110 ++---- src/librustuv/uvio.rs | 630 -------------------------------- 7 files changed, 706 insertions(+), 1341 deletions(-) diff --git a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs index 965e97893b640..d5bfd729eb56a 100644 --- a/src/librustuv/addrinfo.rs +++ b/src/librustuv/addrinfo.rs @@ -189,28 +189,27 @@ pub fn accum_addrinfo(addr: &Addrinfo) -> ~[ai::Info] { #[cfg(test)] mod test { - use Loop; use std::rt::io::net::ip::{SocketAddr, Ipv4Addr}; use super::*; + use super::super::run_uv_loop; #[test] fn getaddrinfo_test() { - let mut loop_ = Loop::new(); - let mut req = GetAddrInfoRequest::new(); - do req.getaddrinfo(&loop_, Some("localhost"), None, None) |_, addrinfo, _| { - let sockaddrs = accum_addrinfo(addrinfo); - let mut found_local = false; - let local_addr = &SocketAddr { - ip: Ipv4Addr(127, 0, 0, 1), - port: 0 - }; - for addr in sockaddrs.iter() { - 
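The isatty change above works around the 32-bit enum FFI bug by having the C shim return a plain int and casting the expected variant to c_int at the comparison site, rather than declaring an enum return type across the FFI boundary. A self-contained sketch of the same comparison pattern in current Rust, with a local stub standing in for rust_uv_guess_handle and purely illustrative discriminant values:

    #[repr(C)]
    enum HandleType { Unknown = 0, Tty = 14 } // illustrative values, not libuv's real table

    // Stand-in for the C shim rust_uv_guess_handle(fd), which returns int rather
    // than the enum type so no enum ABI assumptions cross the FFI boundary.
    fn guess_handle_stub(fd: i32) -> i32 {
        if fd <= 2 { HandleType::Tty as i32 } else { HandleType::Unknown as i32 }
    }

    fn isatty(fd: i32) -> bool {
        // Compare int against the enum cast to int, never an enum return value.
        guess_handle_stub(fd) == HandleType::Tty as i32
    }

    fn main() {
        println!("fd 0 looks like a tty: {}", isatty(0));
        println!("fd 42 looks like a tty: {}", isatty(42));
    }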
found_local = found_local || addr.address == *local_addr; + do run_uv_loop |l| { + match GetAddrInfoRequest::run(l, Some("localhost"), None, None) { + Ok(infos) => { + let mut found_local = false; + let local_addr = &SocketAddr { + ip: Ipv4Addr(127, 0, 0, 1), + port: 0 + }; + for addr in infos.iter() { + found_local = found_local || addr.address == *local_addr; + } + assert!(found_local); + } + Err(e) => fail!("{:?}", e), } - assert!(found_local); } - loop_.run(); - loop_.close(); - req.delete(); } } diff --git a/src/librustuv/async.rs b/src/librustuv/async.rs index f4c7f633ee264..334e154a397f4 100644 --- a/src/librustuv/async.rs +++ b/src/librustuv/async.rs @@ -126,62 +126,56 @@ impl Drop for AsyncWatcher { #[cfg(test)] mod test_remote { use std::cell::Cell; - use std::rt::test::*; + use std::rt::rtio::Callback; use std::rt::thread::Thread; use std::rt::tube::Tube; - use std::rt::rtio::EventLoop; - use std::rt::local::Local; - use std::rt::sched::Scheduler; + use super::*; + use super::super::run_uv_loop; + + // Make sure that we can fire watchers in remote threads #[test] fn test_uv_remote() { - do run_in_mt_newsched_task { - let mut tube = Tube::new(); - let tube_clone = tube.clone(); - let remote_cell = Cell::new_empty(); - do Local::borrow |sched: &mut Scheduler| { - let tube_clone = tube_clone.clone(); - let tube_clone_cell = Cell::new(tube_clone); - let remote = do sched.event_loop.remote_callback { - // This could be called multiple times - if !tube_clone_cell.is_empty() { - tube_clone_cell.take().send(1); - } - }; - remote_cell.put_back(remote); + struct MyCallback(Option>); + impl Callback for MyCallback { + fn call(&mut self) { + // this can get called more than once, but we only want to send + // once + if self.is_some() { + self.take_unwrap().send(1); + } } + } + + do run_uv_loop |l| { + let mut tube = Tube::new(); + let cb = ~MyCallback(Some(tube.clone())); + let watcher = Cell::new(AsyncWatcher::new(l, cb as ~Callback)); + let thread = do Thread::start { - remote_cell.take().fire(); + watcher.take().fire(); }; - assert!(tube.recv() == 1); + assert_eq!(tube.recv(), 1); thread.join(); } } -} - -#[cfg(test)] -mod test { - - use super::*; - use Loop; - use std::unstable::run_in_bare_thread; - use std::rt::thread::Thread; - use std::cell::Cell; #[test] fn smoke_test() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let watcher = AsyncWatcher::new(&mut loop_, |w, _| w.close(||()) ); - let watcher_cell = Cell::new(watcher); - let thread = do Thread::start { - let mut watcher = watcher_cell.take(); - watcher.send(); - }; - loop_.run(); - loop_.close(); - thread.join(); + static mut hits: uint = 0; + + struct MyCallback; + impl Callback for MyCallback { + fn call(&mut self) { + unsafe { hits += 1; } + } + } + + do run_uv_loop |l| { + let mut watcher = AsyncWatcher::new(l, ~MyCallback as ~Callback); + watcher.fire(); } + assert!(unsafe { hits > 0 }); } } diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index 45f4125d79202..3b4760e0ff4e1 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -455,297 +455,136 @@ impl rtio::RtioFileStream for FileWatcher { #[cfg(test)] mod test { - use super::*; - //use std::rt::test::*; - use std::libc::{STDOUT_FILENO, c_int}; - use std::vec; - use std::str; - use std::unstable::run_in_bare_thread; - use super::super::{Loop, Buf, slice_to_uv_buf}; + use std::libc::c_int; use std::libc::{O_CREAT, O_RDWR, O_RDONLY, S_IWUSR, S_IRUSR}; - - #[test] - fn file_test_full_simple() { - do run_in_bare_thread { - let mut loop_ = 
Loop::new(); - let create_flags = O_RDWR | O_CREAT; - let read_flags = O_RDONLY; - // 0644 BZZT! WRONG! 0600! See below. - let mode = S_IWUSR |S_IRUSR; - // these aren't defined in std::libc :( - //map_mode(S_IRGRP) | - //map_mode(S_IROTH); - let path_str = "./tmp/file_full_simple.txt"; - let write_val = "hello".as_bytes().to_owned(); - let write_buf = slice_to_uv_buf(write_val); - let write_buf_ptr: *Buf = &write_buf; - let read_buf_len = 1028; - let read_mem = vec::from_elem(read_buf_len, 0u8); - let read_buf = slice_to_uv_buf(read_mem); - let read_buf_ptr: *Buf = &read_buf; - let open_req = FsRequest::new(); - do open_req.open(&loop_, &path_str.to_c_str(), create_flags as int, - mode as int) |req, uverr| { - assert!(uverr.is_none()); - let fd = req.get_result(); - let buf = unsafe { *write_buf_ptr }; - let write_req = FsRequest::new(); - do write_req.write(&req.get_loop(), fd, buf, -1) |req, uverr| { - let close_req = FsRequest::new(); - do close_req.close(&req.get_loop(), fd) |req, _| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let open_req = FsRequest::new(); - do open_req.open(&loop_, &path_str.to_c_str(), - read_flags as int,0) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let fd = req.get_result(); - let read_buf = unsafe { *read_buf_ptr }; - let read_req = FsRequest::new(); - do read_req.read(&loop_, fd, read_buf, 0) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - // we know nread >=0 because uverr is none.. - let nread = req.get_result() as uint; - // nread == 0 would be EOF - if nread > 0 { - let read_str = unsafe { - let read_buf = *read_buf_ptr; - str::from_utf8( - vec::from_buf( - read_buf.base, nread)) - }; - assert!(read_str == ~"hello"); - let close_req = FsRequest::new(); - do close_req.close(&loop_, fd) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = &req.get_loop(); - let unlink_req = FsRequest::new(); - do unlink_req.unlink(loop_, - &path_str.to_c_str()) - |_,uverr| { - assert!(uverr.is_none()); - }; - }; - }; - }; - }; - }; - }; - }; - loop_.run(); - loop_.close(); - } - } + use std::rt::io; + use std::str; + use std::vec; + use super::*; + use super::super::{run_uv_loop}; #[test] fn file_test_full_simple_sync() { - do run_in_bare_thread { - // setup - let mut loop_ = Loop::new(); - let create_flags = O_RDWR | - O_CREAT; + do run_uv_loop |l| { + let create_flags = O_RDWR | O_CREAT; let read_flags = O_RDONLY; - // 0644 - let mode = S_IWUSR | - S_IRUSR; - //S_IRGRP | - //S_IROTH; + let mode = S_IWUSR | S_IRUSR; let path_str = "./tmp/file_full_simple_sync.txt"; - let write_val = "hello".as_bytes().to_owned(); - let write_buf = slice_to_uv_buf(write_val); - // open/create - let open_req = FsRequest::new(); - let result = open_req.open_sync(&loop_, &path_str.to_c_str(), - create_flags as int, mode as int); - assert!(result.is_ok()); - let fd = result.unwrap(); - // write - let write_req = FsRequest::new(); - let result = write_req.write_sync(&loop_, fd, write_buf, -1); - assert!(result.is_ok()); - // close - let close_req = FsRequest::new(); - let result = close_req.close_sync(&loop_, fd); - assert!(result.is_ok()); - // re-open - let open_req = FsRequest::new(); - let result = open_req.open_sync(&loop_, &path_str.to_c_str(), - read_flags as int,0); - assert!(result.is_ok()); - let len = 1028; - let fd = result.unwrap(); - // read - let read_mem: ~[u8] = vec::from_elem(len, 0u8); - let buf = slice_to_uv_buf(read_mem); - let read_req = FsRequest::new(); - let result = 
read_req.read_sync(&loop_, fd, buf, 0); - assert!(result.is_ok()); - let nread = result.unwrap(); - // nread == 0 would be EOF.. we know it's >= zero because otherwise - // the above assert would fail - if nread > 0 { - let read_str = str::from_utf8( - read_mem.slice(0, nread as uint)); - assert!(read_str == ~"hello"); + + { + // open/create + let result = FsRequest::open(l, &path_str.to_c_str(), + create_flags as int, mode as int); + assert!(result.is_ok()); + let result = result.unwrap(); + let fd = result.fd; + + // write + let result = FsRequest::write(l, fd, "hello".as_bytes(), -1); + assert!(result.is_ok()); + // close - let close_req = FsRequest::new(); - let result = close_req.close_sync(&loop_, fd); + let result = FsRequest::close(l, fd, true); assert!(result.is_ok()); + } + + { + // re-open + let result = FsRequest::open(l, &path_str.to_c_str(), + read_flags as int, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + let fd = result.fd; + + // read + let mut read_mem = vec::from_elem(1000, 0u8); + let result = FsRequest::read(l, fd, read_mem, 0); + assert!(result.is_ok()); + + let nread = result.unwrap(); + assert!(nread > 0); + let read_str = str::from_utf8(read_mem.slice(0, nread as uint)); + assert_eq!(read_str, ~"hello"); + + // close + let result = FsRequest::close(l, fd, true); + assert!(result.is_ok()); + // unlink - let unlink_req = FsRequest::new(); - let result = unlink_req.unlink_sync(&loop_, &path_str.to_c_str()); + let result = FsRequest::unlink(l, &path_str.to_c_str()); assert!(result.is_ok()); - } else { fail!("nread was 0.. wudn't expectin' that."); } - loop_.close(); + } } } - fn naive_print(loop_: &Loop, input: &str) { - let write_val = input.as_bytes(); - let write_buf = slice_to_uv_buf(write_val); - let write_req = FsRequest::new(); - write_req.write_sync(loop_, STDOUT_FILENO, write_buf, -1); - } - #[test] - fn file_test_write_to_stdout() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - naive_print(&loop_, "zanzibar!\n"); - loop_.run(); - loop_.close(); - }; - } - #[test] - fn file_test_stat_simple() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let path = "./tmp/file_test_stat_simple.txt"; - let create_flags = O_RDWR | - O_CREAT; - let mode = S_IWUSR | - S_IRUSR; - let write_val = "hello".as_bytes().to_owned(); - let write_buf = slice_to_uv_buf(write_val); - let write_buf_ptr: *Buf = &write_buf; - let open_req = FsRequest::new(); - do open_req.open(&loop_, &path.to_c_str(), create_flags as int, - mode as int) |req, uverr| { - assert!(uverr.is_none()); - let fd = req.get_result(); - let buf = unsafe { *write_buf_ptr }; - let write_req = FsRequest::new(); - do write_req.write(&req.get_loop(), fd, buf, 0) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat_req = FsRequest::new(); - do stat_req.stat(&loop_, &path.to_c_str()) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat = req.get_stat(); - let sz: uint = stat.st_size as uint; - assert!(sz > 0); - let close_req = FsRequest::new(); - do close_req.close(&loop_, fd) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let unlink_req = FsRequest::new(); - do unlink_req.unlink(&loop_, - &path.to_c_str()) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat_req = FsRequest::new(); - do stat_req.stat(&loop_, - &path.to_c_str()) |_, uverr| { - // should cause an error because the - // file doesn't exist anymore - assert!(uverr.is_some()); - }; - }; - }; - }; - }; 
- }; - loop_.run(); - loop_.close(); + fn file_test_stat() { + do run_uv_loop |l| { + let path = &"./tmp/file_test_stat_simple".to_c_str(); + let create_flags = (O_RDWR | O_CREAT) as int; + let mode = (S_IWUSR | S_IRUSR) as int; + + let result = FsRequest::open(l, path, create_flags, mode); + assert!(result.is_ok()); + let file = result.unwrap(); + + let result = FsRequest::write(l, file.fd, "hello".as_bytes(), 0); + assert!(result.is_ok()); + + let result = FsRequest::stat(l, path); + assert!(result.is_ok()); + assert_eq!(result.unwrap().size, 5); + + fn free(_: T) {} + free(file); + + let result = FsRequest::unlink(l, path); + assert!(result.is_ok()); } } #[test] fn file_test_mk_rm_dir() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let path = "./tmp/mk_rm_dir"; - let mode = S_IWUSR | - S_IRUSR; - let mkdir_req = FsRequest::new(); - do mkdir_req.mkdir(&loop_, &path.to_c_str(), - mode as c_int) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat_req = FsRequest::new(); - do stat_req.stat(&loop_, &path.to_c_str()) |req, uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat = req.get_stat(); - naive_print(&loop_, format!("{:?}", stat)); - assert!(stat.is_dir()); - let rmdir_req = FsRequest::new(); - do rmdir_req.rmdir(&loop_, &path.to_c_str()) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let stat_req = FsRequest::new(); - do stat_req.stat(&loop_, &path.to_c_str()) |_req, uverr| { - assert!(uverr.is_some()); - } - } - } - } - loop_.run(); - loop_.close(); + do run_uv_loop |l| { + let path = &"./tmp/mk_rm_dir".to_c_str(); + let mode = S_IWUSR | S_IRUSR; + + let result = FsRequest::mkdir(l, path, mode); + assert!(result.is_ok()); + + let result = FsRequest::stat(l, path); + assert!(result.is_ok()); + assert!(result.unwrap().kind == io::TypeDirectory); + + let result = FsRequest::rmdir(l, path); + assert!(result.is_ok()); + + let result = FsRequest::stat(l, path); + assert!(result.is_err()); } } + #[test] fn file_test_mkdir_chokes_on_double_create() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let path = "./tmp/double_create_dir"; - let mode = S_IWUSR | - S_IRUSR; - let mkdir_req = FsRequest::new(); - do mkdir_req.mkdir(&loop_, &path.to_c_str(), mode as c_int) |req,uverr| { - assert!(uverr.is_none()); - let loop_ = req.get_loop(); - let mkdir_req = FsRequest::new(); - do mkdir_req.mkdir(&loop_, &path.to_c_str(), - mode as c_int) |req,uverr| { - assert!(uverr.is_some()); - let loop_ = req.get_loop(); - let _stat = req.get_stat(); - let rmdir_req = FsRequest::new(); - do rmdir_req.rmdir(&loop_, &path.to_c_str()) |req,uverr| { - assert!(uverr.is_none()); - let _loop = req.get_loop(); - } - } - } - loop_.run(); - loop_.close(); + do run_uv_loop |l| { + let path = &"./tmp/double_create_dir".to_c_str(); + let mode = S_IWUSR | S_IRUSR; + + let result = FsRequest::mkdir(l, path, mode as c_int); + assert!(result.is_ok()); + let result = FsRequest::mkdir(l, path, mode as c_int); + assert!(result.is_err()); + let result = FsRequest::rmdir(l, path); + assert!(result.is_ok()); } } + #[test] fn file_test_rmdir_chokes_on_nonexistant_path() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let path = "./tmp/never_existed_dir"; - let rmdir_req = FsRequest::new(); - do rmdir_req.rmdir(&loop_, &path.to_c_str()) |_req, uverr| { - assert!(uverr.is_some()); - } - loop_.run(); - loop_.close(); + do run_uv_loop |l| { + let path = &"./tmp/never_existed_dir".to_c_str(); + let result = 
FsRequest::rmdir(l, path); + assert!(result.is_err()); } } } diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 1afc9b1d0ea67..5bedba08fb0ee 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -50,15 +50,13 @@ use std::str::raw::from_c_str; use std::vec; use std::ptr; use std::str; -use std::libc::{c_void, c_int, size_t, malloc, free}; +use std::libc::{c_void, c_int, malloc, free}; use std::cast::transmute; use std::ptr::null; use std::unstable::finally::Finally; use std::rt::io::IoError; -//#[cfg(test)] use unstable::run_in_bare_thread; - pub use self::async::AsyncWatcher; pub use self::file::{FsRequest, FileWatcher}; pub use self::idle::IdleWatcher; @@ -302,62 +300,58 @@ pub fn slice_to_uv_buf(v: &[u8]) -> Buf { uvll::uv_buf_t { base: data, len: v.len() as uvll::uv_buf_len_t } } -// XXX: Do these conversions without copying - -/// Transmute an owned vector to a Buf -pub fn vec_to_uv_buf(v: ~[u8]) -> Buf { - #[fixed_stack_segment]; #[inline(never)]; - - unsafe { - let data = malloc(v.len() as size_t) as *u8; - assert!(data.is_not_null()); - do v.as_imm_buf |b, l| { - let data = data as *mut u8; - ptr::copy_memory(data, b, l) +fn run_uv_loop(f: proc(&mut Loop)) { + use std::rt::local::Local; + use std::rt::test::run_in_uv_task; + use std::rt::sched::Scheduler; + use std::cell::Cell; + + let f = Cell::new(f); + do run_in_uv_task { + let mut io = None; + do Local::borrow |sched: &mut Scheduler| { + sched.event_loop.io(|i| unsafe { + let (_vtable, uvio): (uint, &'static mut uvio::UvIoFactory) = + cast::transmute(i); + io = Some(uvio); + }); } - uvll::uv_buf_t { base: data, len: v.len() as uvll::uv_buf_len_t } + f.take()(io.unwrap().uv_loop()); } } -/// Transmute a Buf that was once a ~[u8] back to ~[u8] -pub fn vec_from_uv_buf(buf: Buf) -> Option<~[u8]> { - #[fixed_stack_segment]; #[inline(never)]; +#[cfg(test)] +mod test { + use std::cast::transmute; + use std::ptr; + use std::unstable::run_in_bare_thread; - if !(buf.len == 0 && buf.base.is_null()) { - let v = unsafe { vec::from_buf(buf.base, buf.len as uint) }; - unsafe { free(buf.base as *c_void) }; - return Some(v); - } else { - // No buffer - uvdebug!("No buffer!"); - return None; - } -} -/* -#[test] -fn test_slice_to_uv_buf() { - let slice = [0, .. 20]; - let buf = slice_to_uv_buf(slice); + use super::{slice_to_uv_buf, Loop}; - assert!(buf.len == 20); + #[test] + fn test_slice_to_uv_buf() { + let slice = [0, .. 
20]; + let buf = slice_to_uv_buf(slice); - unsafe { - let base = transmute::<*u8, *mut u8>(buf.base); - (*base) = 1; - (*ptr::mut_offset(base, 1)) = 2; - } + assert_eq!(buf.len, 20); - assert!(slice[0] == 1); - assert!(slice[1] == 2); -} + unsafe { + let base = transmute::<*u8, *mut u8>(buf.base); + (*base) = 1; + (*ptr::mut_offset(base, 1)) = 2; + } + assert!(slice[0] == 1); + assert!(slice[1] == 2); + } -#[test] -fn loop_smoke_test() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - loop_.run(); - loop_.close(); + + #[test] + fn loop_smoke_test() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + loop_.run(); + loop_.close(); + } } } -*/ diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index 28c2c4df12a0e..9fd771b973950 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -705,350 +705,559 @@ impl Drop for UdpWatcher { #[cfg(test)] mod test { - use super::*; - use std::util::ignore; use std::cell::Cell; - use std::vec; - use std::unstable::run_in_bare_thread; - use std::rt::thread::Thread; + use std::comm::oneshot; use std::rt::test::*; - use super::super::{Loop, AllocCallback}; - use super::super::{vec_from_uv_buf, vec_to_uv_buf, slice_to_uv_buf}; + use std::rt::rtio::{RtioTcpStream, RtioTcpListener, RtioTcpAcceptor, + RtioUdpSocket}; + use std::task; + + use super::*; + use super::super::{Loop, run_uv_loop}; #[test] fn connect_close_ip4() { - do run_in_bare_thread() { - let mut loop_ = Loop::new(); - let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; - // Connect to a port where nobody is listening - let addr = next_test_ip4(); - do tcp_watcher.connect(addr) |stream_watcher, status| { - uvdebug!("tcp_watcher.connect!"); - assert!(status.is_some()); - assert_eq!(status.unwrap().name(), ~"ECONNREFUSED"); - stream_watcher.close(||()); + do run_uv_loop |l| { + match TcpWatcher::connect(l, next_test_ip4()) { + Ok(*) => fail!(), + Err(e) => assert_eq!(e.name(), ~"ECONNREFUSED"), } - loop_.run(); - loop_.close(); } } #[test] fn connect_close_ip6() { - do run_in_bare_thread() { - let mut loop_ = Loop::new(); - let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; - // Connect to a port where nobody is listening - let addr = next_test_ip6(); - do tcp_watcher.connect(addr) |stream_watcher, status| { - uvdebug!("tcp_watcher.connect!"); - assert!(status.is_some()); - assert_eq!(status.unwrap().name(), ~"ECONNREFUSED"); - stream_watcher.close(||()); + do run_uv_loop |l| { + match TcpWatcher::connect(l, next_test_ip6()) { + Ok(*) => fail!(), + Err(e) => assert_eq!(e.name(), ~"ECONNREFUSED"), } - loop_.run(); - loop_.close(); } } #[test] fn udp_bind_close_ip4() { - do run_in_bare_thread() { - let mut loop_ = Loop::new(); - let mut udp_watcher = { UdpWatcher::new(&mut loop_) }; - let addr = next_test_ip4(); - udp_watcher.bind(addr); - udp_watcher.close(||()); - loop_.run(); - loop_.close(); + do run_uv_loop |l| { + match UdpWatcher::bind(l, next_test_ip4()) { + Ok(*) => {} + Err(*) => fail!() + } } } #[test] fn udp_bind_close_ip6() { - do run_in_bare_thread() { - let mut loop_ = Loop::new(); - let mut udp_watcher = { UdpWatcher::new(&mut loop_) }; - let addr = next_test_ip6(); - udp_watcher.bind(addr); - udp_watcher.close(||()); - loop_.run(); - loop_.close(); + do run_uv_loop |l| { + match UdpWatcher::bind(l, next_test_ip6()) { + Ok(*) => {} + Err(*) => fail!() + } } } #[test] fn listen_ip4() { - do run_in_bare_thread() { - static MAX: int = 10; - let mut loop_ = Loop::new(); - let mut server_tcp_watcher = { TcpWatcher::new(&mut loop_) }; + do run_uv_loop 
|l| { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); let addr = next_test_ip4(); - server_tcp_watcher.bind(addr); - let loop_ = loop_; - uvdebug!("listening"); - let mut stream = server_tcp_watcher.as_stream(); - let res = do stream.listen |mut server_stream_watcher, status| { - uvdebug!("listened!"); - assert!(status.is_none()); - let mut loop_ = loop_; - let client_tcp_watcher = TcpWatcher::new(&mut loop_); - let mut client_tcp_watcher = client_tcp_watcher.as_stream(); - server_stream_watcher.accept(client_tcp_watcher); - let count_cell = Cell::new(0); - let server_stream_watcher = server_stream_watcher; - uvdebug!("starting read"); - let alloc: AllocCallback = |size| { - vec_to_uv_buf(vec::from_elem(size, 0u8)) + + let handle = l.handle; + do spawn { + let w = match TcpListener::bind(&mut Loop::wrap(handle), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + let mut w = match w.listen() { + Ok(w) => w, Err(e) => fail!("{:?}", e), }; - do client_tcp_watcher.read_start(alloc) |stream_watcher, nread, buf, status| { - - uvdebug!("i'm reading!"); - let buf = vec_from_uv_buf(buf); - let mut count = count_cell.take(); - if status.is_none() { - uvdebug!("got {} bytes", nread); - let buf = buf.unwrap(); - for byte in buf.slice(0, nread as uint).iter() { - assert!(*byte == count as u8); - uvdebug!("{}", *byte as uint); - count += 1; + chan.take().send(()); + match w.accept() { + Ok(mut stream) => { + let mut buf = [0u8, ..10]; + match stream.read(buf) { + Ok(10) => {} e => fail!("{:?}", e), } - } else { - assert_eq!(count, MAX); - do stream_watcher.close { - server_stream_watcher.close(||()); + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); } } - count_cell.put_back(count); + Err(e) => fail!("{:?}", e) } - }; + } - assert!(res.is_ok()); - - let client_thread = do Thread::start { - uvdebug!("starting client thread"); - let mut loop_ = Loop::new(); - let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; - do tcp_watcher.connect(addr) |mut stream_watcher, status| { - uvdebug!("connecting"); - assert!(status.is_none()); - let msg = ~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9]; - let buf = slice_to_uv_buf(msg); - let msg_cell = Cell::new(msg); - do stream_watcher.write(buf) |stream_watcher, status| { - uvdebug!("writing"); - assert!(status.is_none()); - let msg_cell = Cell::new(msg_cell.take()); - stream_watcher.close(||ignore(msg_cell.take())); - } - } - loop_.run(); - loop_.close(); + port.recv(); + let mut w = match TcpWatcher::connect(&mut Loop::wrap(handle), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) }; - - let mut loop_ = loop_; - loop_.run(); - loop_.close(); - client_thread.join(); - }; + match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) + } + } } #[test] fn listen_ip6() { - do run_in_bare_thread() { - static MAX: int = 10; - let mut loop_ = Loop::new(); - let mut server_tcp_watcher = { TcpWatcher::new(&mut loop_) }; + do run_uv_loop |l| { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); let addr = next_test_ip6(); - server_tcp_watcher.bind(addr); - let loop_ = loop_; - uvdebug!("listening"); - let mut stream = server_tcp_watcher.as_stream(); - let res = do stream.listen |mut server_stream_watcher, status| { - uvdebug!("listened!"); - assert!(status.is_none()); - let mut loop_ = loop_; - let client_tcp_watcher = TcpWatcher::new(&mut loop_); - let mut client_tcp_watcher = client_tcp_watcher.as_stream(); - server_stream_watcher.accept(client_tcp_watcher); - let count_cell = Cell::new(0); - let server_stream_watcher = 
server_stream_watcher; - uvdebug!("starting read"); - let alloc: AllocCallback = |size| { - vec_to_uv_buf(vec::from_elem(size, 0u8)) + + let handle = l.handle; + do spawn { + let w = match TcpListener::bind(&mut Loop::wrap(handle), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + let mut w = match w.listen() { + Ok(w) => w, Err(e) => fail!("{:?}", e), }; - do client_tcp_watcher.read_start(alloc) - |stream_watcher, nread, buf, status| { - - uvdebug!("i'm reading!"); - let buf = vec_from_uv_buf(buf); - let mut count = count_cell.take(); - if status.is_none() { - uvdebug!("got {} bytes", nread); - let buf = buf.unwrap(); - let r = buf.slice(0, nread as uint); - for byte in r.iter() { - assert!(*byte == count as u8); - uvdebug!("{}", *byte as uint); - count += 1; + chan.take().send(()); + match w.accept() { + Ok(mut stream) => { + let mut buf = [0u8, ..10]; + match stream.read(buf) { + Ok(10) => {} e => fail!("{:?}", e), } - } else { - assert_eq!(count, MAX); - do stream_watcher.close { - server_stream_watcher.close(||()); + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); } } - count_cell.put_back(count); + Err(e) => fail!("{:?}", e) } + } + + port.recv(); + let mut w = match TcpWatcher::connect(&mut Loop::wrap(handle), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) }; - assert!(res.is_ok()); - - let client_thread = do Thread::start { - uvdebug!("starting client thread"); - let mut loop_ = Loop::new(); - let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; - do tcp_watcher.connect(addr) |mut stream_watcher, status| { - uvdebug!("connecting"); - assert!(status.is_none()); - let msg = ~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9]; - let buf = slice_to_uv_buf(msg); - let msg_cell = Cell::new(msg); - do stream_watcher.write(buf) |stream_watcher, status| { - uvdebug!("writing"); - assert!(status.is_none()); - let msg_cell = Cell::new(msg_cell.take()); - stream_watcher.close(||ignore(msg_cell.take())); + match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) + } + } + } + + #[test] + fn udp_recv_ip4() { + do run_uv_loop |l| { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let client = next_test_ip4(); + let server = next_test_ip4(); + + let handle = l.handle; + do spawn { + match UdpWatcher::bind(&mut Loop::wrap(handle), server) { + Ok(mut w) => { + chan.take().send(()); + let mut buf = [0u8, ..10]; + match w.recvfrom(buf) { + Ok((10, addr)) => assert_eq!(addr, client), + e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); + } } + Err(e) => fail!("{:?}", e) } - loop_.run(); - loop_.close(); + } + + port.recv(); + let mut w = match UdpWatcher::bind(&mut Loop::wrap(handle), client) { + Ok(w) => w, Err(e) => fail!("{:?}", e) }; + match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) + } + } + } + + #[test] + fn udp_recv_ip6() { + do run_uv_loop |l| { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let client = next_test_ip6(); + let server = next_test_ip6(); + + let handle = l.handle; + do spawn { + match UdpWatcher::bind(&mut Loop::wrap(handle), server) { + Ok(mut w) => { + chan.take().send(()); + let mut buf = [0u8, ..10]; + match w.recvfrom(buf) { + Ok((10, addr)) => assert_eq!(addr, client), + e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); + } + } + Err(e) => fail!("{:?}", e) + } + } - let mut loop_ = loop_; - loop_.run(); - loop_.close(); - client_thread.join(); + port.recv(); + let mut w = match 
UdpWatcher::bind(&mut Loop::wrap(handle), client) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) + } } } #[test] - fn udp_recv_ip4() { - do run_in_bare_thread() { - static MAX: int = 10; - let mut loop_ = Loop::new(); + fn test_read_read_read() { + do run_uv_loop |l| { + let addr = next_test_ip4(); + static MAX: uint = 500000; + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + let handle = l.handle; + do spawntask { + let l = &mut Loop::wrap(handle); + let listener = TcpListener::bind(l, addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + chan.take().send(()); + let mut stream = acceptor.accept().unwrap(); + let buf = [1, .. 2048]; + let mut total_bytes_written = 0; + while total_bytes_written < MAX { + stream.write(buf); + total_bytes_written += buf.len(); + } + } + + do spawntask { + let l = &mut Loop::wrap(handle); + port.take().recv(); + let mut stream = TcpWatcher::connect(l, addr).unwrap(); + let mut buf = [0, .. 2048]; + let mut total_bytes_read = 0; + while total_bytes_read < MAX { + let nread = stream.read(buf).unwrap(); + uvdebug!("read {} bytes", nread); + total_bytes_read += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); + } + } + uvdebug!("read {} bytes total", total_bytes_read); + } + } + } + + #[test] + #[ignore(cfg(windows))] // FIXME(#10102) the server never sees the second send + fn test_udp_twice() { + do run_uv_loop |l| { let server_addr = next_test_ip4(); let client_addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + let handle = l.handle; + do spawntask { + let l = &mut Loop::wrap(handle); + let mut client = UdpWatcher::bind(l, client_addr).unwrap(); + port.take().recv(); + assert!(client.sendto([1], server_addr).is_ok()); + assert!(client.sendto([2], server_addr).is_ok()); + } - let mut server = UdpWatcher::new(&loop_); - assert!(server.bind(server_addr).is_ok()); + do spawntask { + let l = &mut Loop::wrap(handle); + let mut server = UdpWatcher::bind(l, server_addr).unwrap(); + chan.take().send(()); + let mut buf1 = [0]; + let mut buf2 = [0]; + let (nread1, src1) = server.recvfrom(buf1).unwrap(); + let (nread2, src2) = server.recvfrom(buf2).unwrap(); + assert_eq!(nread1, 1); + assert_eq!(nread2, 1); + assert_eq!(src1, client_addr); + assert_eq!(src2, client_addr); + assert_eq!(buf1[0], 1); + assert_eq!(buf2[0], 2); + } + } + } - uvdebug!("starting read"); - let alloc: AllocCallback = |size| { - vec_to_uv_buf(vec::from_elem(size, 0u8)) - }; + #[test] + fn test_udp_many_read() { + do run_uv_loop |l| { + let server_out_addr = next_test_ip4(); + let server_in_addr = next_test_ip4(); + let client_out_addr = next_test_ip4(); + let client_in_addr = next_test_ip4(); + static MAX: uint = 500_000; + + let (p1, c1) = oneshot(); + let (p2, c2) = oneshot(); + + let first = Cell::new((p1, c2)); + let second = Cell::new((p2, c1)); + + let handle = l.handle; + do spawntask { + let l = &mut Loop::wrap(handle); + let mut server_out = UdpWatcher::bind(l, server_out_addr).unwrap(); + let mut server_in = UdpWatcher::bind(l, server_in_addr).unwrap(); + let (port, chan) = first.take(); + chan.send(()); + port.recv(); + let msg = [1, .. 
2048]; + let mut total_bytes_sent = 0; + let mut buf = [1]; + while buf[0] == 1 { + // send more data + assert!(server_out.sendto(msg, client_in_addr).is_ok()); + total_bytes_sent += msg.len(); + // check if the client has received enough + let res = server_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(nread, 1); + assert_eq!(src, client_out_addr); + } + assert!(total_bytes_sent >= MAX); + } - do server.recv_start(alloc) |mut server, nread, buf, src, flags, status| { - server.recv_stop(); - uvdebug!("i'm reading!"); - assert!(status.is_none()); - assert_eq!(flags, 0); - assert_eq!(src, client_addr); - - let buf = vec_from_uv_buf(buf); - let mut count = 0; - uvdebug!("got {} bytes", nread); - - let buf = buf.unwrap(); - for &byte in buf.slice(0, nread as uint).iter() { - assert!(byte == count as u8); - uvdebug!("{}", byte as uint); - count += 1; + do spawntask { + let l = &mut Loop::wrap(handle); + let mut client_out = UdpWatcher::bind(l, client_out_addr).unwrap(); + let mut client_in = UdpWatcher::bind(l, client_in_addr).unwrap(); + let (port, chan) = second.take(); + port.recv(); + chan.send(()); + let mut total_bytes_recv = 0; + let mut buf = [0, .. 2048]; + while total_bytes_recv < MAX { + // ask for more + assert!(client_out.sendto([1], server_in_addr).is_ok()); + // wait for data + let res = client_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(src, server_out_addr); + total_bytes_recv += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); + } } - assert_eq!(count, MAX); + // tell the server we're done + assert!(client_out.sendto([0], server_in_addr).is_ok()); + } + } + } + + #[test] + fn test_read_and_block() { + do run_uv_loop |l| { + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + let handle = l.handle; + do spawntask { + let l = &mut Loop::wrap(handle); + let listener = TcpListener::bind(l, addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + let (port2, chan2) = stream(); + chan.take().send(port2); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 2048]; + + let expected = 32; + let mut current = 0; + let mut reads = 0; + + while current < expected { + let nread = stream.read(buf).unwrap(); + for i in range(0u, nread) { + let val = buf[i] as uint; + assert_eq!(val, current % 8); + current += 1; + } + reads += 1; + + chan2.send(()); + } + + // Make sure we had multiple reads + assert!(reads > 1); + } + + do spawntask { + let l = &mut Loop::wrap(handle); + let port2 = port.take().recv(); + let mut stream = TcpWatcher::connect(l, addr).unwrap(); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + port2.recv(); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + port2.recv(); + } + } + } - server.close(||{}); + #[test] + fn test_simple_tcp_server_and_client_on_diff_threads() { + let addr = next_test_ip4(); + + do task::spawn_sched(task::SingleThreaded) { + do run_uv_loop |l| { + let listener = TcpListener::bind(l, addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 
2048]; + let nread = stream.read(buf).unwrap(); + assert_eq!(nread, 8); + for i in range(0u, nread) { + assert_eq!(buf[i], i as u8); + } } + } - let thread = do Thread::start { - let mut loop_ = Loop::new(); - let mut client = UdpWatcher::new(&loop_); - assert!(client.bind(client_addr).is_ok()); - let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let buf = slice_to_uv_buf(msg); - do client.send(buf, server_addr) |client, status| { - uvdebug!("writing"); - assert!(status.is_none()); - client.close(||{}); + do task::spawn_sched(task::SingleThreaded) { + do run_uv_loop |l| { + let mut stream = TcpWatcher::connect(l, addr); + while stream.is_err() { + stream = TcpWatcher::connect(l, addr); } + stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]); + } + } + } - loop_.run(); - loop_.close(); - }; + // On one thread, create a udp socket. Then send that socket to another + // thread and destroy the socket on the remote thread. This should make sure + // that homing kicks in for the socket to go back home to the original + // thread, close itself, and then come back to the last thread. + #[test] + fn test_homing_closes_correctly() { + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do task::spawn_sched(task::SingleThreaded) { + let chan = Cell::new(chan.take()); + do run_uv_loop |l| { + let listener = UdpWatcher::bind(l, next_test_ip4()).unwrap(); + chan.take().send(listener); + } + } - loop_.run(); - loop_.close(); - thread.join(); + do task::spawn_sched(task::SingleThreaded) { + let port = Cell::new(port.take()); + do run_uv_loop |_l| { + port.take().recv(); + } } } + // This is a bit of a crufty old test, but it has its uses. #[test] - fn udp_recv_ip6() { - do run_in_bare_thread() { - static MAX: int = 10; - let mut loop_ = Loop::new(); - let server_addr = next_test_ip6(); - let client_addr = next_test_ip6(); - - let mut server = UdpWatcher::new(&loop_); - assert!(server.bind(server_addr).is_ok()); - - uvdebug!("starting read"); - let alloc: AllocCallback = |size| { - vec_to_uv_buf(vec::from_elem(size, 0u8)) + fn test_simple_homed_udp_io_bind_then_move_task_then_home_and_close() { + use std::cast; + use std::rt::local::Local; + use std::rt::rtio::{EventLoop, IoFactory}; + use std::rt::sched::Scheduler; + use std::rt::sched::{Shutdown, TaskFromFriend}; + use std::rt::sleeper_list::SleeperList; + use std::rt::task::Task; + use std::rt::task::UnwindResult; + use std::rt::thread::Thread; + use std::rt::work_queue::WorkQueue; + use std::unstable::run_in_bare_thread; + use uvio::UvEventLoop; + + do run_in_bare_thread { + let sleepers = SleeperList::new(); + let work_queue1 = WorkQueue::new(); + let work_queue2 = WorkQueue::new(); + let queues = ~[work_queue1.clone(), work_queue2.clone()]; + + let loop1 = ~UvEventLoop::new() as ~EventLoop; + let mut sched1 = ~Scheduler::new(loop1, work_queue1, queues.clone(), + sleepers.clone()); + let loop2 = ~UvEventLoop::new() as ~EventLoop; + let mut sched2 = ~Scheduler::new(loop2, work_queue2, queues.clone(), + sleepers.clone()); + + let handle1 = Cell::new(sched1.make_handle()); + let handle2 = Cell::new(sched2.make_handle()); + let tasksFriendHandle = Cell::new(sched2.make_handle()); + + let on_exit: ~fn(UnwindResult) = |exit_status| { + handle1.take().send(Shutdown); + handle2.take().send(Shutdown); + assert!(exit_status.is_success()); }; - do server.recv_start(alloc) |mut server, nread, buf, src, flags, status| { - server.recv_stop(); - uvdebug!("i'm reading!"); - assert!(status.is_none()); - assert_eq!(flags, 0); - 
assert_eq!(src, client_addr); - - let buf = vec_from_uv_buf(buf); - let mut count = 0; - uvdebug!("got {} bytes", nread); - - let buf = buf.unwrap(); - for &byte in buf.slice(0, nread as uint).iter() { - assert!(byte == count as u8); - uvdebug!("{}", byte as uint); - count += 1; + unsafe fn local_io() -> &'static mut IoFactory { + do Local::borrow |sched: &mut Scheduler| { + let mut io = None; + sched.event_loop.io(|i| io = Some(i)); + cast::transmute(io.unwrap()) } - assert_eq!(count, MAX); - - server.close(||{}); } - let thread = do Thread::start { - let mut loop_ = Loop::new(); - let mut client = UdpWatcher::new(&loop_); - assert!(client.bind(client_addr).is_ok()); - let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let buf = slice_to_uv_buf(msg); - do client.send(buf, server_addr) |client, status| { - uvdebug!("writing"); - assert!(status.is_none()); - client.close(||{}); + let test_function: ~fn() = || { + let io = unsafe { local_io() }; + let addr = next_test_ip4(); + let maybe_socket = io.udp_bind(addr); + // this socket is bound to this event loop + assert!(maybe_socket.is_ok()); + + // block self on sched1 + do task::unkillable { // FIXME(#8674) + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + // unblock task + do task.wake().map |task| { + // send self to sched2 + tasksFriendHandle.take().send(TaskFromFriend(task)); + }; + // sched1 should now sleep since it has nothing else to do + } } + // sched2 will wake up and get the task as we do nothing else, + // the function ends and the socket goes out of scope sched2 + // will start to run the destructor the destructor will first + // block the task, set it's home as sched1, then enqueue it + // sched2 will dequeue the task, see that it has a home, and + // send it to sched1 sched1 will wake up, exec the close + // function on the correct loop, and then we're done + }; + + let mut main_task = ~Task::new_root(&mut sched1.stack_pool, None, + test_function); + main_task.death.on_exit = Some(on_exit); + let main_task = Cell::new(main_task); + + let null_task = Cell::new(~do Task::new_root(&mut sched2.stack_pool, + None) || {}); - loop_.run(); - loop_.close(); + let sched1 = Cell::new(sched1); + let sched2 = Cell::new(sched2); + + let thread1 = do Thread::start { + sched1.take().bootstrap(main_task.take()); + }; + let thread2 = do Thread::start { + sched2.take().bootstrap(null_task.take()); }; - loop_.run(); - loop_.close(); - thread.join(); + thread1.join(); + thread2.join(); } } + } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 18b05073e8306..bf24ec405c2f9 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -123,92 +123,52 @@ impl Drop for TimerWatcher { #[cfg(test)] mod test { use super::*; - use Loop; - use std::unstable::run_in_bare_thread; + use std::rt::rtio::RtioTimer; + use super::super::run_uv_loop; #[test] - fn smoke_test() { - do run_in_bare_thread { - let mut count = 0; - let count_ptr: *mut int = &mut count; - let mut loop_ = Loop::new(); - let mut timer = TimerWatcher::new(&mut loop_); - do timer.start(10, 0) |timer, status| { - assert!(status.is_none()); - unsafe { *count_ptr += 1 }; - timer.close(||()); - } - loop_.run(); - loop_.close(); - assert!(count == 1); + fn oneshot() { + do run_uv_loop |l| { + let mut timer = TimerWatcher::new(l); + let port = timer.oneshot(1); + port.recv(); + let port = timer.oneshot(1); + port.recv(); } } #[test] - fn start_twice() { - do run_in_bare_thread { - let mut count = 0; - let count_ptr: *mut int 
= &mut count; - let mut loop_ = Loop::new(); - let mut timer = TimerWatcher::new(&mut loop_); - do timer.start(10, 0) |timer, status| { - let mut timer = timer; - assert!(status.is_none()); - unsafe { *count_ptr += 1 }; - do timer.start(10, 0) |timer, status| { - assert!(status.is_none()); - unsafe { *count_ptr += 1 }; - timer.close(||()); - } - } - loop_.run(); - loop_.close(); - assert!(count == 2); + fn override() { + do run_uv_loop |l| { + let mut timer = TimerWatcher::new(l); + let oport = timer.oneshot(1); + let pport = timer.period(1); + timer.sleep(1); + assert_eq!(oport.try_recv(), None); + assert_eq!(pport.try_recv(), None); + timer.oneshot(1).recv(); } } #[test] - fn repeat_stop() { - do run_in_bare_thread { - let mut count = 0; - let count_ptr: *mut int = &mut count; - let mut loop_ = Loop::new(); - let mut timer = TimerWatcher::new(&mut loop_); - do timer.start(1, 2) |timer, status| { - assert!(status.is_none()); - unsafe { - *count_ptr += 1; - - if *count_ptr == 10 { - - // Stop the timer and do something else - let mut timer = timer; - timer.stop(); - // Freeze timer so it can be captured - let timer = timer; - - let mut loop_ = timer.event_loop(); - let mut timer2 = TimerWatcher::new(&mut loop_); - do timer2.start(10, 0) |timer2, _| { - - *count_ptr += 1; - - timer2.close(||()); - - // Restart the original timer - let mut timer = timer; - do timer.start(1, 0) |timer, _| { - *count_ptr += 1; - timer.close(||()); - } - } - } - }; - } - loop_.run(); - loop_.close(); - assert!(count == 12); + fn period() { + do run_uv_loop |l| { + let mut timer = TimerWatcher::new(l); + let port = timer.period(1); + port.recv(); + port.recv(); + let port = timer.period(1); + port.recv(); + port.recv(); } } + #[test] + fn sleep() { + do run_uv_loop |l| { + let mut timer = TimerWatcher::new(l); + timer.sleep(1); + timer.sleep(1); + } + } } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 2aac43072dd01..e9d8aab2e8b66 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -9,8 +9,6 @@ // except according to those terms. 
use std::c_str::CString; -use std::cast::transmute; -use std::cast; use std::comm::{SharedChan, GenericChan}; use std::libc::c_int; use std::libc; @@ -23,7 +21,6 @@ use std::rt::local::Local; use std::rt::rtio::*; use std::rt::sched::{Scheduler, SchedHandle}; use std::rt::task::Task; -use std::str; use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, S_IRUSR, S_IWUSR}; use std::rt::io::{FileMode, FileAccess, Open, Append, Truncate, Read, Write, @@ -33,10 +30,6 @@ use std::task; use ai = std::rt::io::net::addrinfo; #[cfg(test)] use std::unstable::run_in_bare_thread; -#[cfg(test)] use std::rt::test::{spawntask, - next_test_ip4, - run_in_mt_newsched_task}; -#[cfg(test)] use std::rt::comm::oneshot; use super::*; use addrinfo::GetAddrInfoRequest; @@ -370,626 +363,3 @@ impl IoFactory for UvIoFactory { } } } - -// this function is full of lies -unsafe fn local_io() -> &'static mut IoFactory { - do Local::borrow |sched: &mut Scheduler| { - let mut io = None; - sched.event_loop.io(|i| io = Some(i)); - cast::transmute(io.unwrap()) - } -} - -#[test] -fn test_simple_io_no_connect() { - do run_in_mt_newsched_task { - unsafe { - let io = local_io(); - let addr = next_test_ip4(); - let maybe_chan = io.tcp_connect(addr); - assert!(maybe_chan.is_err()); - } - } -} - -#[test] -fn test_simple_udp_io_bind_only() { - do run_in_mt_newsched_task { - unsafe { - let io = local_io(); - let addr = next_test_ip4(); - let maybe_socket = io.udp_bind(addr); - assert!(maybe_socket.is_ok()); - } - } -} - -#[test] -fn test_simple_homed_udp_io_bind_then_move_task_then_home_and_close() { - use std::rt::sleeper_list::SleeperList; - use std::rt::work_queue::WorkQueue; - use std::rt::thread::Thread; - use std::rt::task::Task; - use std::rt::sched::{Shutdown, TaskFromFriend}; - use std::rt::task::UnwindResult; - do run_in_bare_thread { - let sleepers = SleeperList::new(); - let work_queue1 = WorkQueue::new(); - let work_queue2 = WorkQueue::new(); - let queues = ~[work_queue1.clone(), work_queue2.clone()]; - - let loop1 = ~UvEventLoop::new() as ~EventLoop; - let mut sched1 = ~Scheduler::new(loop1, work_queue1, queues.clone(), - sleepers.clone()); - let loop2 = ~UvEventLoop::new() as ~EventLoop; - let mut sched2 = ~Scheduler::new(loop2, work_queue2, queues.clone(), - sleepers.clone()); - - let handle1 = Cell::new(sched1.make_handle()); - let handle2 = Cell::new(sched2.make_handle()); - let tasksFriendHandle = Cell::new(sched2.make_handle()); - - let on_exit: ~fn(UnwindResult) = |exit_status| { - handle1.take().send(Shutdown); - handle2.take().send(Shutdown); - assert!(exit_status.is_success()); - }; - - let test_function: ~fn() = || { - let io = unsafe { local_io() }; - let addr = next_test_ip4(); - let maybe_socket = io.udp_bind(addr); - // this socket is bound to this event loop - assert!(maybe_socket.is_ok()); - - // block self on sched1 - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - // unblock task - do task.wake().map |task| { - // send self to sched2 - tasksFriendHandle.take().send(TaskFromFriend(task)); - }; - // sched1 should now sleep since it has nothing else to do - } - } - // sched2 will wake up and get the task - // as we do nothing else, the function ends and the socket goes out of scope - // sched2 will start to run the destructor - // the destructor will first block the task, set it's home as sched1, then enqueue it - // sched2 will dequeue the task, see that it has a home, and send it to sched1 - 
// sched1 will wake up, exec the close function on the correct loop, and then we're done - }; - - let mut main_task = ~Task::new_root(&mut sched1.stack_pool, None, test_function); - main_task.death.on_exit = Some(on_exit); - let main_task = Cell::new(main_task); - - let null_task = Cell::new(~do Task::new_root(&mut sched2.stack_pool, None) || {}); - - let sched1 = Cell::new(sched1); - let sched2 = Cell::new(sched2); - - let thread1 = do Thread::start { - sched1.take().bootstrap(main_task.take()); - }; - let thread2 = do Thread::start { - sched2.take().bootstrap(null_task.take()); - }; - - thread1.join(); - thread2.join(); - } -} - -#[test] -fn test_simple_homed_udp_io_bind_then_move_handle_then_home_and_close() { - use std::rt::sleeper_list::SleeperList; - use std::rt::work_queue::WorkQueue; - use std::rt::thread::Thread; - use std::rt::task::Task; - use std::rt::comm::oneshot; - use std::rt::sched::Shutdown; - use std::rt::task::UnwindResult; - do run_in_bare_thread { - let sleepers = SleeperList::new(); - let work_queue1 = WorkQueue::new(); - let work_queue2 = WorkQueue::new(); - let queues = ~[work_queue1.clone(), work_queue2.clone()]; - - let loop1 = ~UvEventLoop::new() as ~EventLoop; - let mut sched1 = ~Scheduler::new(loop1, work_queue1, queues.clone(), - sleepers.clone()); - let loop2 = ~UvEventLoop::new() as ~EventLoop; - let mut sched2 = ~Scheduler::new(loop2, work_queue2, queues.clone(), - sleepers.clone()); - - let handle1 = Cell::new(sched1.make_handle()); - let handle2 = Cell::new(sched2.make_handle()); - - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - let body1: ~fn() = || { - let io = unsafe { local_io() }; - let addr = next_test_ip4(); - let socket = io.udp_bind(addr); - assert!(socket.is_ok()); - chan.take().send(socket); - }; - - let body2: ~fn() = || { - let socket = port.take().recv(); - assert!(socket.is_ok()); - /* The socket goes out of scope and the destructor is called. - * The destructor: - * - sends itself back to sched1 - * - frees the socket - * - resets the home of the task to whatever it was previously - */ - }; - - let on_exit: ~fn(UnwindResult) = |exit| { - handle1.take().send(Shutdown); - handle2.take().send(Shutdown); - assert!(exit.is_success()); - }; - - let task1 = Cell::new(~Task::new_root(&mut sched1.stack_pool, None, body1)); - - let mut task2 = ~Task::new_root(&mut sched2.stack_pool, None, body2); - task2.death.on_exit = Some(on_exit); - let task2 = Cell::new(task2); - - let sched1 = Cell::new(sched1); - let sched2 = Cell::new(sched2); - - let thread1 = do Thread::start { - sched1.take().bootstrap(task1.take()); - }; - let thread2 = do Thread::start { - sched2.take().bootstrap(task2.take()); - }; - - thread1.join(); - thread2.join(); - } -} - -#[test] -fn test_simple_tcp_server_and_client() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - // Start the server first so it's listening when we connect - do spawntask { - unsafe { - let io = local_io(); - let listener = io.tcp_bind(addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - chan.take().send(()); - let mut stream = acceptor.accept().unwrap(); - let mut buf = [0, .. 
2048]; - let nread = stream.read(buf).unwrap(); - assert_eq!(nread, 8); - for i in range(0u, nread) { - uvdebug!("{}", buf[i]); - assert_eq!(buf[i], i as u8); - } - } - } - - do spawntask { - unsafe { - port.take().recv(); - let io = local_io(); - let mut stream = io.tcp_connect(addr).unwrap(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - } - } - } -} - -#[test] -fn test_simple_tcp_server_and_client_on_diff_threads() { - use std::rt::sleeper_list::SleeperList; - use std::rt::work_queue::WorkQueue; - use std::rt::thread::Thread; - use std::rt::task::Task; - use std::rt::sched::{Shutdown}; - use std::rt::task::UnwindResult; - do run_in_bare_thread { - let sleepers = SleeperList::new(); - - let server_addr = next_test_ip4(); - let client_addr = server_addr.clone(); - - let server_work_queue = WorkQueue::new(); - let client_work_queue = WorkQueue::new(); - let queues = ~[server_work_queue.clone(), client_work_queue.clone()]; - - let sloop = ~UvEventLoop::new() as ~EventLoop; - let mut server_sched = ~Scheduler::new(sloop, server_work_queue, - queues.clone(), sleepers.clone()); - let cloop = ~UvEventLoop::new() as ~EventLoop; - let mut client_sched = ~Scheduler::new(cloop, client_work_queue, - queues.clone(), sleepers.clone()); - - let server_handle = Cell::new(server_sched.make_handle()); - let client_handle = Cell::new(client_sched.make_handle()); - - let server_on_exit: ~fn(UnwindResult) = |exit_status| { - server_handle.take().send(Shutdown); - assert!(exit_status.is_success()); - }; - - let client_on_exit: ~fn(UnwindResult) = |exit_status| { - client_handle.take().send(Shutdown); - assert!(exit_status.is_success()); - }; - - let server_fn: ~fn() = || { - let io = unsafe { local_io() }; - let listener = io.tcp_bind(server_addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - let mut stream = acceptor.accept().unwrap(); - let mut buf = [0, .. 2048]; - let nread = stream.read(buf).unwrap(); - assert_eq!(nread, 8); - for i in range(0u, nread) { - assert_eq!(buf[i], i as u8); - } - }; - - let client_fn: ~fn() = || { - let io = unsafe { local_io() }; - let mut stream = io.tcp_connect(client_addr); - while stream.is_err() { - stream = io.tcp_connect(client_addr); - } - stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]); - }; - - let mut server_task = ~Task::new_root(&mut server_sched.stack_pool, None, server_fn); - server_task.death.on_exit = Some(server_on_exit); - let server_task = Cell::new(server_task); - - let mut client_task = ~Task::new_root(&mut client_sched.stack_pool, None, client_fn); - client_task.death.on_exit = Some(client_on_exit); - let client_task = Cell::new(client_task); - - let server_sched = Cell::new(server_sched); - let client_sched = Cell::new(client_sched); - - let server_thread = do Thread::start { - server_sched.take().bootstrap(server_task.take()); - }; - let client_thread = do Thread::start { - client_sched.take().bootstrap(client_task.take()); - }; - - server_thread.join(); - client_thread.join(); - } -} - -#[test] -fn test_simple_udp_server_and_client() { - do run_in_mt_newsched_task { - let server_addr = next_test_ip4(); - let client_addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - do spawntask { - unsafe { - let io = local_io(); - let mut server_socket = io.udp_bind(server_addr).unwrap(); - chan.take().send(()); - let mut buf = [0, .. 
2048]; - let (nread,src) = server_socket.recvfrom(buf).unwrap(); - assert_eq!(nread, 8); - for i in range(0u, nread) { - uvdebug!("{}", buf[i]); - assert_eq!(buf[i], i as u8); - } - assert_eq!(src, client_addr); - } - } - - do spawntask { - unsafe { - let io = local_io(); - let mut client_socket = io.udp_bind(client_addr).unwrap(); - port.take().recv(); - client_socket.sendto([0, 1, 2, 3, 4, 5, 6, 7], server_addr); - } - } - } -} - -#[test] #[ignore(reason = "busted")] -fn test_read_and_block() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - do spawntask { - let io = unsafe { local_io() }; - let listener = io.tcp_bind(addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - chan.take().send(()); - let mut stream = acceptor.accept().unwrap(); - let mut buf = [0, .. 2048]; - - let expected = 32; - let mut current = 0; - let mut reads = 0; - - while current < expected { - let nread = stream.read(buf).unwrap(); - for i in range(0u, nread) { - let val = buf[i] as uint; - assert_eq!(val, current % 8); - current += 1; - } - reads += 1; - - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - // Yield to the other task in hopes that it - // will trigger a read callback while we are - // not ready for it - do scheduler.deschedule_running_task_and_then |sched, task| { - let task = Cell::new(task); - sched.enqueue_blocked_task(task.take()); - } - } - } - - // Make sure we had multiple reads - assert!(reads > 1); - } - - do spawntask { - unsafe { - port.take().recv(); - let io = local_io(); - let mut stream = io.tcp_connect(addr).unwrap(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - } - } - - } -} - -#[test] -fn test_read_read_read() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - static MAX: uint = 500000; - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - do spawntask { - unsafe { - let io = local_io(); - let listener = io.tcp_bind(addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - chan.take().send(()); - let mut stream = acceptor.accept().unwrap(); - let buf = [1, .. 2048]; - let mut total_bytes_written = 0; - while total_bytes_written < MAX { - stream.write(buf); - total_bytes_written += buf.len(); - } - } - } - - do spawntask { - unsafe { - port.take().recv(); - let io = local_io(); - let mut stream = io.tcp_connect(addr).unwrap(); - let mut buf = [0, .. 
2048]; - let mut total_bytes_read = 0; - while total_bytes_read < MAX { - let nread = stream.read(buf).unwrap(); - uvdebug!("read {} bytes", nread); - total_bytes_read += nread; - for i in range(0u, nread) { - assert_eq!(buf[i], 1); - } - } - uvdebug!("read {} bytes total", total_bytes_read); - } - } - } -} - -#[test] -#[ignore(cfg(windows))] // FIXME(#10102) the server never sees the second send -fn test_udp_twice() { - do run_in_mt_newsched_task { - let server_addr = next_test_ip4(); - let client_addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - do spawntask { - unsafe { - let io = local_io(); - let mut client = io.udp_bind(client_addr).unwrap(); - port.take().recv(); - assert!(client.sendto([1], server_addr).is_ok()); - assert!(client.sendto([2], server_addr).is_ok()); - } - } - - do spawntask { - unsafe { - let io = local_io(); - let mut server = io.udp_bind(server_addr).unwrap(); - chan.take().send(()); - let mut buf1 = [0]; - let mut buf2 = [0]; - let (nread1, src1) = server.recvfrom(buf1).unwrap(); - let (nread2, src2) = server.recvfrom(buf2).unwrap(); - assert_eq!(nread1, 1); - assert_eq!(nread2, 1); - assert_eq!(src1, client_addr); - assert_eq!(src2, client_addr); - assert_eq!(buf1[0], 1); - assert_eq!(buf2[0], 2); - } - } - } -} - -#[test] -fn test_udp_many_read() { - do run_in_mt_newsched_task { - let server_out_addr = next_test_ip4(); - let server_in_addr = next_test_ip4(); - let client_out_addr = next_test_ip4(); - let client_in_addr = next_test_ip4(); - static MAX: uint = 500_000; - - let (p1, c1) = oneshot(); - let (p2, c2) = oneshot(); - - let first = Cell::new((p1, c2)); - let second = Cell::new((p2, c1)); - - do spawntask { - unsafe { - let io = local_io(); - let mut server_out = io.udp_bind(server_out_addr).unwrap(); - let mut server_in = io.udp_bind(server_in_addr).unwrap(); - let (port, chan) = first.take(); - chan.send(()); - port.recv(); - let msg = [1, .. 2048]; - let mut total_bytes_sent = 0; - let mut buf = [1]; - while buf[0] == 1 { - // send more data - assert!(server_out.sendto(msg, client_in_addr).is_ok()); - total_bytes_sent += msg.len(); - // check if the client has received enough - let res = server_in.recvfrom(buf); - assert!(res.is_ok()); - let (nread, src) = res.unwrap(); - assert_eq!(nread, 1); - assert_eq!(src, client_out_addr); - } - assert!(total_bytes_sent >= MAX); - } - } - - do spawntask { - unsafe { - let io = local_io(); - let mut client_out = io.udp_bind(client_out_addr).unwrap(); - let mut client_in = io.udp_bind(client_in_addr).unwrap(); - let (port, chan) = second.take(); - port.recv(); - chan.send(()); - let mut total_bytes_recv = 0; - let mut buf = [0, .. 
2048]; - while total_bytes_recv < MAX { - // ask for more - assert!(client_out.sendto([1], server_in_addr).is_ok()); - // wait for data - let res = client_in.recvfrom(buf); - assert!(res.is_ok()); - let (nread, src) = res.unwrap(); - assert_eq!(src, server_out_addr); - total_bytes_recv += nread; - for i in range(0u, nread) { - assert_eq!(buf[i], 1); - } - } - // tell the server we're done - assert!(client_out.sendto([0], server_in_addr).is_ok()); - } - } - } -} - -#[test] -fn test_timer_sleep_simple() { - do run_in_mt_newsched_task { - unsafe { - let io = local_io(); - let timer = io.timer_init(); - do timer.map |mut t| { t.sleep(1) }; - } - } -} - -fn file_test_uvio_full_simple_impl() { - use std::rt::io::{Open, ReadWrite, Read}; - unsafe { - let io = local_io(); - let write_val = "hello uvio!"; - let path = "./tmp/file_test_uvio_full.txt"; - { - let create_fm = Open; - let create_fa = ReadWrite; - let mut fd = io.fs_open(&path.to_c_str(), create_fm, create_fa).unwrap(); - let write_buf = write_val.as_bytes(); - fd.write(write_buf); - } - { - let ro_fm = Open; - let ro_fa = Read; - let mut fd = io.fs_open(&path.to_c_str(), ro_fm, ro_fa).unwrap(); - let mut read_vec = [0, .. 1028]; - let nread = fd.read(read_vec).unwrap(); - let read_val = str::from_utf8(read_vec.slice(0, nread as uint)); - assert!(read_val == write_val.to_owned()); - } - io.fs_unlink(&path.to_c_str()); - } -} - -#[test] -fn file_test_uvio_full_simple() { - do run_in_mt_newsched_task { - file_test_uvio_full_simple_impl(); - } -} - -fn uvio_naive_print(input: &str) { - unsafe { - use std::libc::{STDOUT_FILENO}; - let io = local_io(); - { - let mut fd = io.fs_from_raw_fd(STDOUT_FILENO, DontClose); - let write_buf = input.as_bytes(); - fd.write(write_buf); - } - } -} - -#[test] -fn file_test_uvio_write_to_stdout() { - do run_in_mt_newsched_task { - uvio_naive_print("jubilation\n"); - } -} From b545751597a8cdeee4554338318f0ed6339634fd Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Wed, 6 Nov 2013 11:38:53 -0800 Subject: [PATCH 20/27] Rework the idle callback to have a safer interface It turns out that the uv implementation would cause use-after-free if the idle callback was used after the call to `close`, and additionally nothing would ever really work that well if `start()` were called twice. To change this, the `start` and `close` methods were removed in favor of specifying the callback at creation, and allowing destruction to take care of closing the watcher. 
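
As a rough sketch of the new usage (mirroring the tests updated below; `MyCallback` is the test helper implementing `Callback`, `Tube` is the test channel it sends on, and `l` is the `&mut Loop` available to the test):

    let mut tube = Tube::new();
    let cb = ~MyCallback(tube.clone(), 1) as ~Callback;
    let mut idle = IdleWatcher::new(l, cb); // callback is supplied at creation
    idle.resume();                          // start delivering idle callbacks
    tube.recv();                            // wait for the callback to fire
    idle.pause();                           // stop delivering callbacks
    // dropping `idle` pauses the watcher and closes the handle asynchronously

With `start` and `close` gone, there is no call that can be made at the wrong time; the watcher's lifetime alone determines when the underlying uv handle is closed.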
--- src/librustuv/idle.rs | 129 +++++++++++++++++++++-------------------- src/librustuv/uvio.rs | 4 +- src/libstd/rt/basic.rs | 21 ++----- src/libstd/rt/rtio.rs | 4 +- src/libstd/rt/sched.rs | 7 ++- 5 files changed, 79 insertions(+), 86 deletions(-) diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index f4072c7c6813b..b3527ce9fb421 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -19,11 +19,11 @@ pub struct IdleWatcher { handle: *uvll::uv_idle_t, idle_flag: bool, closed: bool, - callback: Option<~Callback>, + callback: ~Callback, } impl IdleWatcher { - pub fn new(loop_: &mut Loop) -> ~IdleWatcher { + pub fn new(loop_: &mut Loop, cb: ~Callback) -> ~IdleWatcher { let handle = UvHandle::alloc(None::, uvll::UV_IDLE); assert_eq!(unsafe { uvll::uv_idle_init(loop_.handle, handle) @@ -32,7 +32,7 @@ impl IdleWatcher { handle: handle, idle_flag: false, closed: false, - callback: None, + callback: cb, }; return me.install(); } @@ -64,12 +64,6 @@ impl IdleWatcher { } impl PausibleIdleCallback for IdleWatcher { - fn start(&mut self, cb: ~Callback) { - assert!(self.callback.is_none()); - self.callback = Some(cb); - assert_eq!(unsafe { uvll::uv_idle_start(self.handle, idle_cb) }, 0) - self.idle_flag = true; - } fn pause(&mut self) { if self.idle_flag == true { assert_eq!(unsafe {uvll::uv_idle_stop(self.handle) }, 0); @@ -82,13 +76,6 @@ impl PausibleIdleCallback for IdleWatcher { self.idle_flag = true; } } - fn close(&mut self) { - self.pause(); - if !self.closed { - self.closed = true; - self.close_async_(); - } - } } impl UvHandle for IdleWatcher { @@ -96,70 +83,86 @@ impl UvHandle for IdleWatcher { } extern fn idle_cb(handle: *uvll::uv_idle_t, status: c_int) { + if status == uvll::ECANCELED { return } assert_eq!(status, 0); let idle: &mut IdleWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; - assert!(idle.callback.is_some()); - idle.callback.get_mut_ref().call(); + idle.callback.call(); +} + +impl Drop for IdleWatcher { + fn drop(&mut self) { + self.pause(); + self.close_async_(); + } } #[cfg(test)] mod test { - - use Loop; use super::*; - use std::unstable::run_in_bare_thread; + use std::rt::tube::Tube; + use std::rt::rtio::{Callback, PausibleIdleCallback}; + use super::super::run_uv_loop; + + struct MyCallback(Tube, int); + impl Callback for MyCallback { + fn call(&mut self) { + match *self { + MyCallback(ref mut tube, val) => tube.send(val) + } + } + } #[test] - #[ignore(reason = "valgrind - loop destroyed before watcher?")] - fn idle_new_then_close() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let idle_watcher = { IdleWatcher::new(&mut loop_) }; - idle_watcher.close(||()); + fn not_used() { + do run_uv_loop |l| { + let cb = ~MyCallback(Tube::new(), 1); + let _idle = IdleWatcher::new(l, cb as ~Callback); } } #[test] - fn idle_smoke_test() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; - let mut count = 10; - let count_ptr: *mut int = &mut count; - do idle_watcher.start |idle_watcher, status| { - let mut idle_watcher = idle_watcher; - assert!(status.is_none()); - if unsafe { *count_ptr == 10 } { - idle_watcher.stop(); - idle_watcher.close(||()); - } else { - unsafe { *count_ptr = *count_ptr + 1; } - } - } - loop_.run(); - loop_.close(); - assert_eq!(count, 10); + fn smoke_test() { + do run_uv_loop |l| { + let mut tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle = IdleWatcher::new(l, cb as ~Callback); + idle.resume(); + tube.recv(); } } #[test] - fn 
idle_start_stop_start() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; - do idle_watcher.start |idle_watcher, status| { - let mut idle_watcher = idle_watcher; - assert!(status.is_none()); - idle_watcher.stop(); - do idle_watcher.start |idle_watcher, status| { - assert!(status.is_none()); - let mut idle_watcher = idle_watcher; - idle_watcher.stop(); - idle_watcher.close(||()); - } - } - loop_.run(); - loop_.close(); + fn fun_combinations_of_methods() { + do run_uv_loop |l| { + let mut tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle = IdleWatcher::new(l, cb as ~Callback); + idle.resume(); + tube.recv(); + idle.pause(); + idle.resume(); + idle.resume(); + tube.recv(); + idle.pause(); + idle.pause(); + idle.resume(); + tube.recv(); + } + } + + #[test] + fn pause_pauses() { + do run_uv_loop |l| { + let mut tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle1 = IdleWatcher::new(l, cb as ~Callback); + let cb = ~MyCallback(tube.clone(), 2); + let mut idle2 = IdleWatcher::new(l, cb as ~Callback); + idle2.resume(); + assert_eq!(tube.recv(), 2); + idle2.pause(); + idle1.resume(); + assert_eq!(tube.recv(), 1); } } } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index e9d8aab2e8b66..6ae2c174e18b4 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -148,8 +148,8 @@ impl EventLoop for UvEventLoop { IdleWatcher::onetime(self.uvio.uv_loop(), f); } - fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback { - IdleWatcher::new(self.uvio.uv_loop()) as ~PausibleIdleCallback + fn pausible_idle_callback(&mut self, cb: ~Callback) -> ~PausibleIdleCallback { + IdleWatcher::new(self.uvio.uv_loop(), cb) as ~PausibleIdleCallback } fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback { diff --git a/src/libstd/rt/basic.rs b/src/libstd/rt/basic.rs index 0c8d192d89ac1..322c58bc2b807 100644 --- a/src/libstd/rt/basic.rs +++ b/src/libstd/rt/basic.rs @@ -107,7 +107,7 @@ impl BasicLoop { match self.idle { Some(idle) => { if (*idle).active { - (*idle).work.get_mut_ref().call(); + (*idle).work.call(); } } None => {} @@ -150,8 +150,8 @@ impl EventLoop for BasicLoop { } // XXX: Seems like a really weird requirement to have an event loop provide. 
- fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback { - let callback = ~BasicPausible::new(self); + fn pausible_idle_callback(&mut self, cb: ~Callback) -> ~PausibleIdleCallback { + let callback = ~BasicPausible::new(self, cb); rtassert!(self.idle.is_none()); unsafe { let cb_ptr: &*mut BasicPausible = cast::transmute(&callback); @@ -204,36 +204,27 @@ impl Drop for BasicRemote { struct BasicPausible { eloop: *mut BasicLoop, - work: Option<~Callback>, + work: ~Callback, active: bool, } impl BasicPausible { - fn new(eloop: &mut BasicLoop) -> BasicPausible { + fn new(eloop: &mut BasicLoop, cb: ~Callback) -> BasicPausible { BasicPausible { active: false, - work: None, + work: cb, eloop: eloop, } } } impl PausibleIdleCallback for BasicPausible { - fn start(&mut self, f: ~Callback) { - rtassert!(!self.active && self.work.is_none()); - self.active = true; - self.work = Some(f); - } fn pause(&mut self) { self.active = false; } fn resume(&mut self) { self.active = true; } - fn close(&mut self) { - self.active = false; - self.work = None; - } } impl Drop for BasicPausible { diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index 96ba512345614..1e12da8645ce7 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -31,7 +31,7 @@ pub trait Callback { pub trait EventLoop { fn run(&mut self); fn callback(&mut self, proc()); - fn pausible_idle_callback(&mut self) -> ~PausibleIdleCallback; + fn pausible_idle_callback(&mut self, ~Callback) -> ~PausibleIdleCallback; fn remote_callback(&mut self, ~Callback) -> ~RemoteCallback; /// The asynchronous I/O services. Not all event loops may provide one @@ -226,10 +226,8 @@ pub trait RtioTTY { } pub trait PausibleIdleCallback { - fn start(&mut self, f: ~Callback); fn pause(&mut self); fn resume(&mut self); - fn close(&mut self); } pub trait RtioSignal {} diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index f84e10fe98949..c2e665f490307 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -169,7 +169,8 @@ impl Scheduler { pub fn bootstrap(mut ~self, task: ~Task) { // Build an Idle callback. - self.idle_callback = Some(self.event_loop.pausible_idle_callback()); + let cb = ~SchedRunner as ~Callback; + self.idle_callback = Some(self.event_loop.pausible_idle_callback(cb)); // Initialize the TLS key. local_ptr::init_tls_key(); @@ -184,7 +185,7 @@ impl Scheduler { // Before starting our first task, make sure the idle callback // is active. As we do not start in the sleep state this is // important. - self.idle_callback.get_mut_ref().start(~SchedRunner as ~Callback); + self.idle_callback.get_mut_ref().resume(); // Now, as far as all the scheduler state is concerned, we are // inside the "scheduler" context. So we can act like the @@ -202,7 +203,7 @@ impl Scheduler { // Close the idle callback. let mut sched: ~Scheduler = Local::take(); - sched.idle_callback.get_mut_ref().close(); + sched.idle_callback.take(); // Make one go through the loop to run the close callback. 
sched.run(); From 5e6bbc6bfa82f3ad0a014df24b40cbc042f24035 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Tue, 5 Nov 2013 23:29:11 -0800 Subject: [PATCH 21/27] Assorted test fixes and merge conflicts --- src/librustuv/file.rs | 78 ++++++++++++------------------ src/librustuv/net.rs | 57 +++++++++++----------- src/librustuv/pipe.rs | 102 ++++++++++++++++++++------------------- src/librustuv/process.rs | 12 ++--- src/librustuv/signal.rs | 2 +- src/librustuv/timer.rs | 8 +-- src/librustuv/tty.rs | 10 ++-- src/libstd/rt/io/fs.rs | 9 ++-- 8 files changed, 134 insertions(+), 144 deletions(-) diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index 3b4760e0ff4e1..ac89ef38e8ecb 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -95,27 +95,6 @@ impl FsRequest { } } - pub fn close(loop_: &Loop, fd: c_int, sync: bool) -> Result<(), UvError> { - if sync { - execute_nop(|req, cb| unsafe { - uvll::uv_fs_close(loop_.handle, req, fd, cb) - }) - } else { - unsafe { - let req = uvll::malloc_req(uvll::UV_FS); - uvll::uv_fs_close(loop_.handle, req, fd, close_cb); - return Ok(()); - } - - extern fn close_cb(req: *uvll::uv_fs_t) { - unsafe { - uvll::uv_fs_req_cleanup(req); - uvll::free_req(req); - } - } - } - } - pub fn mkdir(loop_: &Loop, path: &CString, mode: c_int) -> Result<(), UvError> { @@ -240,10 +219,12 @@ impl FsRequest { pub fn utime(loop_: &Loop, path: &CString, atime: u64, mtime: u64) -> Result<(), UvError> { + // libuv takes seconds + let atime = atime as libc::c_double / 1000.0; + let mtime = mtime as libc::c_double / 1000.0; execute_nop(|req, cb| unsafe { uvll::uv_fs_utime(loop_.handle, req, path.with_ref(|p| p), - atime as libc::c_double, mtime as libc::c_double, - cb) + atime, mtime, cb) }) } @@ -368,12 +349,12 @@ impl FileWatcher { } fn base_read(&mut self, buf: &mut [u8], offset: i64) -> Result { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); let r = FsRequest::read(&self.loop_, self.fd, buf, offset); r.map_err(uv_error_to_io_error) } fn base_write(&mut self, buf: &[u8], offset: i64) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); let r = FsRequest::write(&self.loop_, self.fd, buf, offset); r.map_err(uv_error_to_io_error) } @@ -397,14 +378,26 @@ impl FileWatcher { impl Drop for FileWatcher { fn drop(&mut self) { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); match self.close { rtio::DontClose => {} rtio::CloseAsynchronously => { - FsRequest::close(&self.loop_, self.fd, false); + unsafe { + let req = uvll::malloc_req(uvll::UV_FS); + uvll::uv_fs_close(self.loop_.handle, req, self.fd, close_cb); + } + + extern fn close_cb(req: *uvll::uv_fs_t) { + unsafe { + uvll::uv_fs_req_cleanup(req); + uvll::free_req(req); + } + } } rtio::CloseSynchronously => { - FsRequest::close(&self.loop_, self.fd, true); + execute_nop(|req, cb| unsafe { + uvll::uv_fs_close(self.loop_.handle, req, self.fd, cb) + }); } } } @@ -439,15 +432,15 @@ impl rtio::RtioFileStream for FileWatcher { self_.seek_common(0, SEEK_CUR) } fn fsync(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); FsRequest::fsync(&self.loop_, self.fd).map_err(uv_error_to_io_error) } fn datasync(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); FsRequest::datasync(&self.loop_, self.fd).map_err(uv_error_to_io_error) } fn truncate(&mut self, offset: i64) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = 
self.fire_homing_missile(); let r = FsRequest::truncate(&self.loop_, self.fd, offset); r.map_err(uv_error_to_io_error) } @@ -482,10 +475,6 @@ mod test { // write let result = FsRequest::write(l, fd, "hello".as_bytes(), -1); assert!(result.is_ok()); - - // close - let result = FsRequest::close(l, fd, true); - assert!(result.is_ok()); } { @@ -505,15 +494,10 @@ mod test { assert!(nread > 0); let read_str = str::from_utf8(read_mem.slice(0, nread as uint)); assert_eq!(read_str, ~"hello"); - - // close - let result = FsRequest::close(l, fd, true); - assert!(result.is_ok()); - - // unlink - let result = FsRequest::unlink(l, &path_str.to_c_str()); - assert!(result.is_ok()); } + // unlink + let result = FsRequest::unlink(l, &path_str.to_c_str()); + assert!(result.is_ok()); } } @@ -570,12 +554,14 @@ mod test { let path = &"./tmp/double_create_dir".to_c_str(); let mode = S_IWUSR | S_IRUSR; + let result = FsRequest::stat(l, path); + assert!(result.is_err(), "{:?}", result); let result = FsRequest::mkdir(l, path, mode as c_int); - assert!(result.is_ok()); + assert!(result.is_ok(), "{:?}", result); let result = FsRequest::mkdir(l, path, mode as c_int); - assert!(result.is_err()); + assert!(result.is_err(), "{:?}", result); let result = FsRequest::rmdir(l, path); - assert!(result.is_ok()); + assert!(result.is_ok(), "{:?}", result); } } diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index 9fd771b973950..5d228cd78486b 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -259,43 +259,43 @@ impl HomingIO for TcpWatcher { impl rtio::RtioSocket for TcpWatcher { fn socket_name(&mut self) -> Result { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); socket_name(Tcp, self.handle) } } impl rtio::RtioTcpStream for TcpWatcher { fn read(&mut self, buf: &mut [u8]) -> Result { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.stream.read(buf).map_err(uv_error_to_io_error) } fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.stream.write(buf).map_err(uv_error_to_io_error) } fn peer_name(&mut self) -> Result { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); socket_name(TcpPeer, self.handle) } fn control_congestion(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_nodelay(self.handle, 0 as c_int) }) } fn nodelay(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_nodelay(self.handle, 1 as c_int) }) } fn keepalive(&mut self, delay_in_seconds: uint) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_keepalive(self.handle, 1 as c_int, delay_in_seconds as c_uint) @@ -303,7 +303,7 @@ impl rtio::RtioTcpStream for TcpWatcher { } fn letdie(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_keepalive(self.handle, 0 as c_int, 0 as c_uint) }) @@ -312,7 +312,7 @@ impl rtio::RtioTcpStream for TcpWatcher { impl Drop for TcpWatcher { fn drop(&mut self) { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.stream.close(); } } @@ -356,7 +356,7 @@ impl UvHandle for TcpListener { impl rtio::RtioSocket for TcpListener { fn socket_name(&mut self) -> Result { - let _m 
= self.fire_missiles(); + let _m = self.fire_homing_missile(); socket_name(Tcp, self.handle) } } @@ -370,7 +370,7 @@ impl rtio::RtioTcpListener for TcpListener { incoming: incoming, }; - let _m = acceptor.fire_missiles(); + let _m = acceptor.fire_homing_missile(); // XXX: the 128 backlog should be configurable match unsafe { uvll::uv_listen(acceptor.listener.handle, 128, listen_cb) } { 0 => Ok(acceptor as ~rtio::RtioTcpAcceptor), @@ -399,7 +399,7 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) { impl Drop for TcpListener { fn drop(&mut self) { - let (_m, sched) = self.fire_missiles_sched(); + let (_m, sched) = self.fire_homing_missile_sched(); do sched.deschedule_running_task_and_then |_, task| { self.closing_task = Some(task); @@ -424,26 +424,26 @@ impl HomingIO for TcpAcceptor { impl rtio::RtioSocket for TcpAcceptor { fn socket_name(&mut self) -> Result { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); socket_name(Tcp, self.listener.handle) } } impl rtio::RtioTcpAcceptor for TcpAcceptor { fn accept(&mut self) -> Result<~rtio::RtioTcpStream, IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.incoming.recv() } fn accept_simultaneously(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_simultaneous_accepts(self.listener.handle, 1) }) } fn dont_accept_simultaneously(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_tcp_simultaneous_accepts(self.listener.handle, 0) }) @@ -489,7 +489,7 @@ impl HomingIO for UdpWatcher { impl rtio::RtioSocket for UdpWatcher { fn socket_name(&mut self) -> Result { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); socket_name(Udp, self.handle) } } @@ -503,7 +503,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { buf: Option, result: Option<(ssize_t, SocketAddr)>, } - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); return match unsafe { uvll::uv_udp_recv_start(self.handle, alloc_cb, recv_cb) @@ -564,7 +564,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> Result<(), IoError> { struct Ctx { task: Option, result: c_int } - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); let req = Request::new(uvll::UV_UDP_SEND); let buf = slice_to_uv_buf(buf); @@ -607,7 +607,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { } fn join_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { do multi.to_str().with_c_str |m_addr| { uvll::uv_udp_set_membership(self.handle, @@ -618,7 +618,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { } fn leave_multicast(&mut self, multi: IpAddr) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { do multi.to_str().with_c_str |m_addr| { uvll::uv_udp_set_membership(self.handle, @@ -629,7 +629,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { } fn loop_multicast_locally(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_multicast_loop(self.handle, 1 as c_int) @@ -637,7 +637,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { } fn dont_loop_multicast_locally(&mut self) -> Result<(), IoError> { - let _m = 
self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_multicast_loop(self.handle, 0 as c_int) @@ -645,7 +645,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { } fn multicast_time_to_live(&mut self, ttl: int) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_multicast_ttl(self.handle, ttl as c_int) @@ -653,14 +653,14 @@ impl rtio::RtioUdpSocket for UdpWatcher { } fn time_to_live(&mut self, ttl: int) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_ttl(self.handle, ttl as c_int) }) } fn hear_broadcasts(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_broadcast(self.handle, 1 as c_int) @@ -668,7 +668,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { } fn ignore_broadcasts(&mut self) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); status_to_io_result(unsafe { uvll::uv_udp_set_broadcast(self.handle, 0 as c_int) @@ -679,7 +679,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { impl Drop for UdpWatcher { fn drop(&mut self) { // Send ourselves home to close this handle (blocking while doing so). - let (_m, sched) = self.fire_missiles_sched(); + let (_m, sched) = self.fire_homing_missile_sched(); let mut slot = None; unsafe { uvll::set_data_for_uv_handle(self.handle, &slot); @@ -693,6 +693,7 @@ impl Drop for UdpWatcher { let slot: &mut Option = unsafe { cast::transmute(uvll::get_data_for_uv_handle(handle)) }; + unsafe { uvll::free_handle(handle) } let sched: ~Scheduler = Local::take(); sched.resume_blocked_task_immediately(slot.take_unwrap()); } diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index f79043797aedb..1f28e043dfb49 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -26,6 +26,7 @@ use uvll; pub struct PipeWatcher { stream: StreamWatcher, home: SchedHandle, + priv defused: bool, } pub struct PipeListener { @@ -43,47 +44,43 @@ pub struct PipeAcceptor { // PipeWatcher implementation and traits impl PipeWatcher { - pub fn new(pipe: *uvll::uv_pipe_t) -> PipeWatcher { - PipeWatcher { - stream: StreamWatcher::new(pipe), - home: get_handle_to_current_scheduler!(), - } - } - - pub fn alloc(loop_: &Loop, ipc: bool) -> *uvll::uv_pipe_t { - unsafe { + // Creates an uninitialized pipe watcher. The underlying uv pipe is ready to + // get bound to some other source (this is normally a helper method paired + // with another call). 
+ pub fn new(loop_: &Loop, ipc: bool) -> PipeWatcher { + let handle = unsafe { let handle = uvll::malloc_handle(uvll::UV_NAMED_PIPE); assert!(!handle.is_null()); let ipc = ipc as libc::c_int; assert_eq!(uvll::uv_pipe_init(loop_.handle, handle, ipc), 0); handle + }; + PipeWatcher { + stream: StreamWatcher::new(handle), + home: get_handle_to_current_scheduler!(), + defused: false, } } pub fn open(loop_: &Loop, file: libc::c_int) -> Result { - let handle = PipeWatcher::alloc(loop_, false); - match unsafe { uvll::uv_pipe_open(handle, file) } { - 0 => Ok(PipeWatcher::new(handle)), - n => { - unsafe { uvll::uv_close(handle, pipe_close_cb) } - Err(UvError(n)) - } + let pipe = PipeWatcher::new(loop_, false); + match unsafe { uvll::uv_pipe_open(pipe.handle(), file) } { + 0 => Ok(pipe), + n => Err(UvError(n)) } } pub fn connect(loop_: &Loop, name: &CString) -> Result { - struct Ctx { - task: Option, - result: Option>, - } - let mut cx = Ctx { task: None, result: None }; + struct Ctx { task: Option, result: libc::c_int, } + let mut cx = Ctx { task: None, result: 0 }; let req = Request::new(uvll::UV_CONNECT); + let pipe = PipeWatcher::new(loop_, false); unsafe { uvll::set_data_for_req(req.handle, &cx as *Ctx); uvll::uv_pipe_connect(req.handle, - PipeWatcher::alloc(loop_, false), + pipe.handle(), name.with_ref(|p| p), connect_cb) } @@ -93,38 +90,41 @@ impl PipeWatcher { do sched.deschedule_running_task_and_then |_, task| { cx.task = Some(task); } - assert!(cx.task.is_none()); - return cx.result.take().expect("pipe connect needs a result"); + return match cx.result { + 0 => Ok(pipe), + n => Err(UvError(n)) + }; extern fn connect_cb(req: *uvll::uv_connect_t, status: libc::c_int) { let _req = Request::wrap(req); if status == uvll::ECANCELED { return } unsafe { let cx: &mut Ctx = cast::transmute(uvll::get_data_for_req(req)); - let stream = uvll::get_stream_handle_from_connect_req(req); - cx.result = Some(match status { - 0 => Ok(PipeWatcher::new(stream)), - n => { - uvll::free_handle(stream); - Err(UvError(n)) - } - }); - + cx.result = status; let sched: ~Scheduler = Local::take(); sched.resume_blocked_task_immediately(cx.task.take_unwrap()); } } } + + pub fn handle(&self) -> *uvll::uv_pipe_t { self.stream.handle } + + // Unwraps the underlying uv pipe. 
This cancels destruction of the pipe and + // allows the pipe to get moved elsewhere + fn unwrap(mut self) -> *uvll::uv_pipe_t { + self.defused = true; + return self.stream.handle; + } } impl RtioPipe for PipeWatcher { fn read(&mut self, buf: &mut [u8]) -> Result { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.stream.read(buf).map_err(uv_error_to_io_error) } fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.stream.write(buf).map_err(uv_error_to_io_error) } } @@ -135,8 +135,10 @@ impl HomingIO for PipeWatcher { impl Drop for PipeWatcher { fn drop(&mut self) { - let _m = self.fire_missiles(); - self.stream.close(); + if !self.defused { + let _m = self.fire_homing_missile(); + self.stream.close(); + } } } @@ -148,21 +150,21 @@ extern fn pipe_close_cb(handle: *uvll::uv_handle_t) { impl PipeListener { pub fn bind(loop_: &Loop, name: &CString) -> Result<~PipeListener, UvError> { - let pipe = PipeWatcher::alloc(loop_, false); - match unsafe { uvll::uv_pipe_bind(pipe, name.with_ref(|p| p)) } { + let pipe = PipeWatcher::new(loop_, false); + match unsafe { uvll::uv_pipe_bind(pipe.handle(), name.with_ref(|p| p)) } { 0 => { + // If successful, unwrap the PipeWatcher because we control how + // we close the pipe differently. We can't rely on + // StreamWatcher's default close method. let p = ~PipeListener { home: get_handle_to_current_scheduler!(), - pipe: pipe, + pipe: pipe.unwrap(), closing_task: None, outgoing: Tube::new(), }; Ok(p.install()) } - n => { - unsafe { uvll::uv_close(pipe, pipe_close_cb) } - Err(UvError(n)) - } + n => Err(UvError(n)) } } } @@ -176,7 +178,7 @@ impl RtioUnixListener for PipeListener { incoming: incoming, }; - let _m = acceptor.fire_missiles(); + let _m = acceptor.fire_homing_missile(); // XXX: the 128 backlog should be configurable match unsafe { uvll::uv_listen(acceptor.listener.pipe, 128, listen_cb) } { 0 => Ok(acceptor as ~RtioUnixAcceptor), @@ -199,9 +201,9 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { let loop_ = Loop::wrap(unsafe { uvll::get_loop_for_uv_handle(server) }); - let client = PipeWatcher::alloc(&loop_, false); - assert_eq!(unsafe { uvll::uv_accept(server, client) }, 0); - Ok(~PipeWatcher::new(client) as ~RtioPipe) + let client = PipeWatcher::new(&loop_, false); + assert_eq!(unsafe { uvll::uv_accept(server, client.handle()) }, 0); + Ok(~client as ~RtioPipe) } uvll::ECANCELED => return, n => Err(uv_error_to_io_error(UvError(n))) @@ -213,7 +215,7 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { impl Drop for PipeListener { fn drop(&mut self) { - let (_m, sched) = self.fire_missiles_sched(); + let (_m, sched) = self.fire_homing_missile_sched(); do sched.deschedule_running_task_and_then |_, task| { self.closing_task = Some(task); @@ -234,7 +236,7 @@ extern fn listener_close_cb(handle: *uvll::uv_handle_t) { impl RtioUnixAcceptor for PipeAcceptor { fn accept(&mut self) -> Result<~RtioPipe, IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.incoming.recv() } } diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 20af8e212216a..15d5ae1c33ca5 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -144,10 +144,10 @@ unsafe fn set_stdio(dst: *uvll::uv_stdio_container_t, if writable { flags |= uvll::STDIO_WRITABLE_PIPE as libc::c_int; } - let pipe_handle = PipeWatcher::alloc(loop_, false); + let pipe = PipeWatcher::new(loop_, false); 
uvll::set_stdio_container_flags(dst, flags); - uvll::set_stdio_container_stream(dst, pipe_handle); - Some(PipeWatcher::new(pipe_handle)) + uvll::set_stdio_container_stream(dst, pipe.handle()); + Some(pipe) } } } @@ -204,7 +204,7 @@ impl RtioProcess for Process { } fn kill(&mut self, signal: int) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); match unsafe { uvll::uv_process_kill(self.handle, signal as libc::c_int) } { @@ -215,7 +215,7 @@ impl RtioProcess for Process { fn wait(&mut self) -> int { // Make sure (on the home scheduler) that we have an exit status listed - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); match self.exit_status { Some(*) => {} None => { @@ -238,7 +238,7 @@ impl RtioProcess for Process { impl Drop for Process { fn drop(&mut self) { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); assert!(self.to_wake.is_none()); self.close_async_(); } diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index 3c5efe63f96df..b7a37473fb944 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -73,7 +73,7 @@ impl RtioSignal for SignalWatcher {} impl Drop for SignalWatcher { fn drop(&mut self) { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.close_async_(); } } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index bf24ec405c2f9..df35a4892e978 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -66,7 +66,7 @@ impl UvHandle for TimerWatcher { impl RtioTimer for TimerWatcher { fn sleep(&mut self, msecs: u64) { - let (_m, sched) = self.fire_missiles_sched(); + let (_m, sched) = self.fire_homing_missile_sched(); do sched.deschedule_running_task_and_then |_sched, task| { self.action = Some(WakeTask(task)); self.start(msecs, 0); @@ -77,7 +77,7 @@ impl RtioTimer for TimerWatcher { fn oneshot(&mut self, msecs: u64) -> PortOne<()> { let (port, chan) = oneshot(); - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.action = Some(SendOnce(chan)); self.start(msecs, 0); @@ -87,7 +87,7 @@ impl RtioTimer for TimerWatcher { fn period(&mut self, msecs: u64) -> Port<()> { let (port, chan) = stream(); - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.action = Some(SendMany(chan)); self.start(msecs, msecs); @@ -113,7 +113,7 @@ extern fn timer_cb(handle: *uvll::uv_timer_t, _status: c_int) { impl Drop for TimerWatcher { fn drop(&mut self) { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.action = None; self.stop(); self.close_async_(); diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index e224806cec1e6..c072ab5156121 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -54,18 +54,18 @@ impl TtyWatcher { impl RtioTTY for TtyWatcher { fn read(&mut self, buf: &mut [u8]) -> Result { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.stream.read(buf).map_err(uv_error_to_io_error) } fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.stream.write(buf).map_err(uv_error_to_io_error) } fn set_raw(&mut self, raw: bool) -> Result<(), IoError> { let raw = raw as libc::c_int; - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); match unsafe { uvll::uv_tty_set_mode(self.tty, raw) } { 0 => Ok(()), n => Err(uv_error_to_io_error(UvError(n))) @@ -79,7 +79,7 @@ impl RtioTTY for TtyWatcher { let widthptr: *libc::c_int = &width; 
let heightptr: *libc::c_int = &width; - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); match unsafe { uvll::uv_tty_get_winsize(self.tty, widthptr, heightptr) } { 0 => Ok((width as int, height as int)), @@ -102,7 +102,7 @@ impl HomingIO for TtyWatcher { impl Drop for TtyWatcher { fn drop(&mut self) { - let _m = self.fire_missiles(); + let _m = self.fire_homing_missile(); self.stream.close(); } } diff --git a/src/libstd/rt/io/fs.rs b/src/libstd/rt/io/fs.rs index f9e622b1f1e96..06c07308cf634 100644 --- a/src/libstd/rt/io/fs.rs +++ b/src/libstd/rt/io/fs.rs @@ -589,7 +589,8 @@ pub fn rmdir_recursive(path: &Path) { /// Changes the timestamps for a file's last modification and access time. /// The file at the path specified will have its last access time set to -/// `atime` and its modification time set to `mtime`. +/// `atime` and its modification time set to `mtime`. The times specified should +/// be in milliseconds. /// /// # Errors /// @@ -1266,9 +1267,9 @@ mod test { let path = tmpdir.join("a"); File::create(&path); - change_file_times(&path, 100, 200); - assert_eq!(path.stat().accessed, 100); - assert_eq!(path.stat().modified, 200); + change_file_times(&path, 1000, 2000); + assert_eq!(path.stat().accessed, 1000); + assert_eq!(path.stat().modified, 2000); rmdir_recursive(&tmpdir); } From df4c0b8e4349d50f317553de5a47d0cd56cdc227 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 7 Nov 2013 15:13:06 -0800 Subject: [PATCH 22/27] Make the uv bindings resilient to linked failure In the ideal world, uv I/O could be canceled safely at any time. In reality, however, we are unable to do this. Right now linked failure is fairly flaky as implemented in the runtime, making it very difficult to test whether the linked failure mechanisms inside of the uv bindings are ready for this kind of interaction. Right now, all constructors will execute in a task::unkillable block, and all homing I/O operations will prevent linked failure in the duration of the homing operation. What this means is that tasks which perform I/O are still susceptible to linked failure, but the I/O operations themselves will never get interrupted. Instead, the linked failure will be received at the edge of the I/O operation. --- src/librustuv/addrinfo.rs | 44 +- src/librustuv/async.rs | 43 +- src/librustuv/file.rs | 176 ++++---- src/librustuv/idle.rs | 71 ++- src/librustuv/lib.rs | 118 +++-- src/librustuv/net.rs | 889 +++++++++++++++++++------------------- src/librustuv/pipe.rs | 114 +++-- src/librustuv/process.rs | 9 +- src/librustuv/stream.rs | 82 ++-- src/librustuv/timer.rs | 61 ++- src/librustuv/tty.rs | 2 +- src/librustuv/uvio.rs | 65 +-- 12 files changed, 846 insertions(+), 828 deletions(-) diff --git a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs index d5bfd729eb56a..56f6eda53575c 100644 --- a/src/librustuv/addrinfo.rs +++ b/src/librustuv/addrinfo.rs @@ -9,7 +9,6 @@ // except according to those terms. 
use ai = std::rt::io::net::addrinfo; -use std::cast; use std::libc::c_int; use std::ptr::null; use std::rt::BlockedTask; @@ -17,7 +16,7 @@ use std::rt::local::Local; use std::rt::sched::Scheduler; use net; -use super::{Loop, UvError, Request}; +use super::{Loop, UvError, Request, wait_until_woken_after}; use uvll; struct Addrinfo { @@ -76,7 +75,7 @@ impl GetAddrInfoRequest { } }); let hint_ptr = hint.as_ref().map_default(null(), |x| x as *uvll::addrinfo); - let req = Request::new(uvll::UV_GETADDRINFO); + let mut req = Request::new(uvll::UV_GETADDRINFO); return match unsafe { uvll::uv_getaddrinfo(loop_.handle, req.handle, @@ -84,12 +83,11 @@ impl GetAddrInfoRequest { hint_ptr) } { 0 => { + req.defuse(); // uv callback now owns this request let mut cx = Ctx { slot: None, status: 0, addrinfo: None }; - req.set_data(&cx); - req.defuse(); - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - cx.slot = Some(task); + + do wait_until_woken_after(&mut cx.slot) { + req.set_data(&cx); } match cx.status { @@ -105,8 +103,8 @@ impl GetAddrInfoRequest { status: c_int, res: *uvll::addrinfo) { let req = Request::wrap(req); - if status == uvll::ECANCELED { return } - let cx: &mut Ctx = unsafe { cast::transmute(req.get_data()) }; + assert!(status != uvll::ECANCELED); + let cx: &mut Ctx = unsafe { req.get_data() }; cx.status = status; cx.addrinfo = Some(Addrinfo { handle: res }); @@ -191,25 +189,23 @@ pub fn accum_addrinfo(addr: &Addrinfo) -> ~[ai::Info] { mod test { use std::rt::io::net::ip::{SocketAddr, Ipv4Addr}; use super::*; - use super::super::run_uv_loop; + use super::super::local_loop; #[test] fn getaddrinfo_test() { - do run_uv_loop |l| { - match GetAddrInfoRequest::run(l, Some("localhost"), None, None) { - Ok(infos) => { - let mut found_local = false; - let local_addr = &SocketAddr { - ip: Ipv4Addr(127, 0, 0, 1), - port: 0 - }; - for addr in infos.iter() { - found_local = found_local || addr.address == *local_addr; - } - assert!(found_local); + match GetAddrInfoRequest::run(local_loop(), Some("localhost"), None, None) { + Ok(infos) => { + let mut found_local = false; + let local_addr = &SocketAddr { + ip: Ipv4Addr(127, 0, 0, 1), + port: 0 + }; + for addr in infos.iter() { + found_local = found_local || addr.address == *local_addr; } - Err(e) => fail!("{:?}", e), + assert!(found_local); } + Err(e) => fail!("{:?}", e), } } } diff --git a/src/librustuv/async.rs b/src/librustuv/async.rs index 334e154a397f4..04e7bce5bd181 100644 --- a/src/librustuv/async.rs +++ b/src/librustuv/async.rs @@ -131,11 +131,12 @@ mod test_remote { use std::rt::tube::Tube; use super::*; - use super::super::run_uv_loop; + use super::super::local_loop; - // Make sure that we can fire watchers in remote threads + // Make sure that we can fire watchers in remote threads and that they + // actually trigger what they say they will. 
#[test] - fn test_uv_remote() { + fn smoke_test() { struct MyCallback(Option>); impl Callback for MyCallback { fn call(&mut self) { @@ -147,35 +148,15 @@ mod test_remote { } } - do run_uv_loop |l| { - let mut tube = Tube::new(); - let cb = ~MyCallback(Some(tube.clone())); - let watcher = Cell::new(AsyncWatcher::new(l, cb as ~Callback)); - - let thread = do Thread::start { - watcher.take().fire(); - }; + let mut tube = Tube::new(); + let cb = ~MyCallback(Some(tube.clone())); + let watcher = Cell::new(AsyncWatcher::new(local_loop(), cb as ~Callback)); - assert_eq!(tube.recv(), 1); - thread.join(); - } - } - - #[test] - fn smoke_test() { - static mut hits: uint = 0; + let thread = do Thread::start { + watcher.take().fire(); + }; - struct MyCallback; - impl Callback for MyCallback { - fn call(&mut self) { - unsafe { hits += 1; } - } - } - - do run_uv_loop |l| { - let mut watcher = AsyncWatcher::new(l, ~MyCallback as ~Callback); - watcher.fire(); - } - assert!(unsafe { hits > 0 }); + assert_eq!(tube.recv(), 1); + thread.join(); } } diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index ac89ef38e8ecb..bdb1429f5b625 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -15,14 +15,14 @@ use std::cast; use std::libc::{c_int, c_char, c_void, c_uint}; use std::libc; use std::rt::BlockedTask; -use std::rt::io; use std::rt::io::{FileStat, IoError}; -use std::rt::rtio; +use std::rt::io; use std::rt::local::Local; +use std::rt::rtio; use std::rt::sched::{Scheduler, SchedHandle}; use std::vec; -use super::{Loop, UvError, uv_error_to_io_error}; +use super::{Loop, UvError, uv_error_to_io_error, wait_until_woken_after}; use uvio::HomingIO; use uvll; @@ -305,10 +305,8 @@ fn execute(f: &fn(*uvll::uv_fs_t, uvll::uv_fs_cb) -> c_int) 0 => { req.fired = true; let mut slot = None; - unsafe { uvll::set_data_for_req(req.req, &slot) } - let sched: ~Scheduler = Local::take(); - do sched.deschedule_running_task_and_then |_, task| { - slot = Some(task); + do wait_until_woken_after(&mut slot) { + unsafe { uvll::set_data_for_req(req.req, &slot) } } match req.get_result() { n if n < 0 => Err(UvError(n)), @@ -454,123 +452,113 @@ mod test { use std::str; use std::vec; use super::*; - use super::super::{run_uv_loop}; + use l = super::super::local_loop; #[test] fn file_test_full_simple_sync() { - do run_uv_loop |l| { - let create_flags = O_RDWR | O_CREAT; - let read_flags = O_RDONLY; - let mode = S_IWUSR | S_IRUSR; - let path_str = "./tmp/file_full_simple_sync.txt"; - - { - // open/create - let result = FsRequest::open(l, &path_str.to_c_str(), - create_flags as int, mode as int); - assert!(result.is_ok()); - let result = result.unwrap(); - let fd = result.fd; - - // write - let result = FsRequest::write(l, fd, "hello".as_bytes(), -1); - assert!(result.is_ok()); - } + let create_flags = O_RDWR | O_CREAT; + let read_flags = O_RDONLY; + let mode = S_IWUSR | S_IRUSR; + let path_str = "./tmp/file_full_simple_sync.txt"; + + { + // open/create + let result = FsRequest::open(l(), &path_str.to_c_str(), + create_flags as int, mode as int); + assert!(result.is_ok()); + let result = result.unwrap(); + let fd = result.fd; - { - // re-open - let result = FsRequest::open(l, &path_str.to_c_str(), - read_flags as int, 0); - assert!(result.is_ok()); - let result = result.unwrap(); - let fd = result.fd; - - // read - let mut read_mem = vec::from_elem(1000, 0u8); - let result = FsRequest::read(l, fd, read_mem, 0); - assert!(result.is_ok()); - - let nread = result.unwrap(); - assert!(nread > 0); - let read_str = 
str::from_utf8(read_mem.slice(0, nread as uint)); - assert_eq!(read_str, ~"hello"); - } - // unlink - let result = FsRequest::unlink(l, &path_str.to_c_str()); + // write + let result = FsRequest::write(l(), fd, "hello".as_bytes(), -1); assert!(result.is_ok()); } + + { + // re-open + let result = FsRequest::open(l(), &path_str.to_c_str(), + read_flags as int, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + let fd = result.fd; + + // read + let mut read_mem = vec::from_elem(1000, 0u8); + let result = FsRequest::read(l(), fd, read_mem, 0); + assert!(result.is_ok()); + + let nread = result.unwrap(); + assert!(nread > 0); + let read_str = str::from_utf8(read_mem.slice(0, nread as uint)); + assert_eq!(read_str, ~"hello"); + } + // unlink + let result = FsRequest::unlink(l(), &path_str.to_c_str()); + assert!(result.is_ok()); } #[test] fn file_test_stat() { - do run_uv_loop |l| { - let path = &"./tmp/file_test_stat_simple".to_c_str(); - let create_flags = (O_RDWR | O_CREAT) as int; - let mode = (S_IWUSR | S_IRUSR) as int; + let path = &"./tmp/file_test_stat_simple".to_c_str(); + let create_flags = (O_RDWR | O_CREAT) as int; + let mode = (S_IWUSR | S_IRUSR) as int; - let result = FsRequest::open(l, path, create_flags, mode); - assert!(result.is_ok()); - let file = result.unwrap(); + let result = FsRequest::open(l(), path, create_flags, mode); + assert!(result.is_ok()); + let file = result.unwrap(); - let result = FsRequest::write(l, file.fd, "hello".as_bytes(), 0); - assert!(result.is_ok()); + let result = FsRequest::write(l(), file.fd, "hello".as_bytes(), 0); + assert!(result.is_ok()); - let result = FsRequest::stat(l, path); - assert!(result.is_ok()); - assert_eq!(result.unwrap().size, 5); + let result = FsRequest::stat(l(), path); + assert!(result.is_ok()); + assert_eq!(result.unwrap().size, 5); - fn free(_: T) {} - free(file); + fn free(_: T) {} + free(file); - let result = FsRequest::unlink(l, path); - assert!(result.is_ok()); - } + let result = FsRequest::unlink(l(), path); + assert!(result.is_ok()); } #[test] fn file_test_mk_rm_dir() { - do run_uv_loop |l| { - let path = &"./tmp/mk_rm_dir".to_c_str(); - let mode = S_IWUSR | S_IRUSR; + let path = &"./tmp/mk_rm_dir".to_c_str(); + let mode = S_IWUSR | S_IRUSR; - let result = FsRequest::mkdir(l, path, mode); - assert!(result.is_ok()); + let result = FsRequest::mkdir(l(), path, mode); + assert!(result.is_ok()); - let result = FsRequest::stat(l, path); - assert!(result.is_ok()); - assert!(result.unwrap().kind == io::TypeDirectory); + let result = FsRequest::stat(l(), path); + assert!(result.is_ok()); + assert!(result.unwrap().kind == io::TypeDirectory); - let result = FsRequest::rmdir(l, path); - assert!(result.is_ok()); + let result = FsRequest::rmdir(l(), path); + assert!(result.is_ok()); - let result = FsRequest::stat(l, path); - assert!(result.is_err()); - } + let result = FsRequest::stat(l(), path); + assert!(result.is_err()); } #[test] fn file_test_mkdir_chokes_on_double_create() { - do run_uv_loop |l| { - let path = &"./tmp/double_create_dir".to_c_str(); - let mode = S_IWUSR | S_IRUSR; - - let result = FsRequest::stat(l, path); - assert!(result.is_err(), "{:?}", result); - let result = FsRequest::mkdir(l, path, mode as c_int); - assert!(result.is_ok(), "{:?}", result); - let result = FsRequest::mkdir(l, path, mode as c_int); - assert!(result.is_err(), "{:?}", result); - let result = FsRequest::rmdir(l, path); - assert!(result.is_ok(), "{:?}", result); - } + let path = &"./tmp/double_create_dir".to_c_str(); + let mode = 
S_IWUSR | S_IRUSR; + + let result = FsRequest::stat(l(), path); + assert!(result.is_err(), "{:?}", result); + let result = FsRequest::mkdir(l(), path, mode as c_int); + assert!(result.is_ok(), "{:?}", result); + let result = FsRequest::mkdir(l(), path, mode as c_int); + assert!(result.is_err(), "{:?}", result); + let result = FsRequest::rmdir(l(), path); + assert!(result.is_ok(), "{:?}", result); } #[test] fn file_test_rmdir_chokes_on_nonexistant_path() { - do run_uv_loop |l| { - let path = &"./tmp/never_existed_dir".to_c_str(); - let result = FsRequest::rmdir(l, path); - assert!(result.is_err()); - } + let path = &"./tmp/never_existed_dir".to_c_str(); + let result = FsRequest::rmdir(l(), path); + assert!(result.is_err()); } } diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index b3527ce9fb421..83fc53dce1cd7 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -83,7 +83,6 @@ impl UvHandle for IdleWatcher { } extern fn idle_cb(handle: *uvll::uv_idle_t, status: c_int) { - if status == uvll::ECANCELED { return } assert_eq!(status, 0); let idle: &mut IdleWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; idle.callback.call(); @@ -101,7 +100,7 @@ mod test { use super::*; use std::rt::tube::Tube; use std::rt::rtio::{Callback, PausibleIdleCallback}; - use super::super::run_uv_loop; + use super::super::local_loop; struct MyCallback(Tube, int); impl Callback for MyCallback { @@ -114,55 +113,47 @@ mod test { #[test] fn not_used() { - do run_uv_loop |l| { - let cb = ~MyCallback(Tube::new(), 1); - let _idle = IdleWatcher::new(l, cb as ~Callback); - } + let cb = ~MyCallback(Tube::new(), 1); + let _idle = IdleWatcher::new(local_loop(), cb as ~Callback); } #[test] fn smoke_test() { - do run_uv_loop |l| { - let mut tube = Tube::new(); - let cb = ~MyCallback(tube.clone(), 1); - let mut idle = IdleWatcher::new(l, cb as ~Callback); - idle.resume(); - tube.recv(); - } + let mut tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback); + idle.resume(); + tube.recv(); } #[test] fn fun_combinations_of_methods() { - do run_uv_loop |l| { - let mut tube = Tube::new(); - let cb = ~MyCallback(tube.clone(), 1); - let mut idle = IdleWatcher::new(l, cb as ~Callback); - idle.resume(); - tube.recv(); - idle.pause(); - idle.resume(); - idle.resume(); - tube.recv(); - idle.pause(); - idle.pause(); - idle.resume(); - tube.recv(); - } + let mut tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback); + idle.resume(); + tube.recv(); + idle.pause(); + idle.resume(); + idle.resume(); + tube.recv(); + idle.pause(); + idle.pause(); + idle.resume(); + tube.recv(); } #[test] fn pause_pauses() { - do run_uv_loop |l| { - let mut tube = Tube::new(); - let cb = ~MyCallback(tube.clone(), 1); - let mut idle1 = IdleWatcher::new(l, cb as ~Callback); - let cb = ~MyCallback(tube.clone(), 2); - let mut idle2 = IdleWatcher::new(l, cb as ~Callback); - idle2.resume(); - assert_eq!(tube.recv(), 2); - idle2.pause(); - idle1.resume(); - assert_eq!(tube.recv(), 1); - } + let mut tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle1 = IdleWatcher::new(local_loop(), cb as ~Callback); + let cb = ~MyCallback(tube.clone(), 2); + let mut idle2 = IdleWatcher::new(local_loop(), cb as ~Callback); + idle2.resume(); + assert_eq!(tube.recv(), 2); + idle2.pause(); + idle1.resume(); + assert_eq!(tube.recv(), 1); } } diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs 
index 5bedba08fb0ee..4da5ad4275f79 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -45,15 +45,19 @@ via `close` and `delete` methods. #[feature(macro_rules, globs)]; +use std::cast::transmute; use std::cast; -use std::str::raw::from_c_str; -use std::vec; +use std::libc::{c_int, malloc, free}; +use std::ptr::null; use std::ptr; +use std::rt::BlockedTask; +use std::rt::local::Local; +use std::rt::sched::Scheduler; +use std::str::raw::from_c_str; use std::str; -use std::libc::{c_void, c_int, malloc, free}; -use std::cast::transmute; -use std::ptr::null; +use std::task; use std::unstable::finally::Finally; +use std::vec; use std::rt::io::IoError; @@ -124,27 +128,90 @@ pub trait UvHandle { uvll::uv_close(self.uv_handle() as *uvll::uv_handle_t, close_cb) } } + + fn close(&mut self) { + let mut slot = None; + + unsafe { + uvll::uv_close(self.uv_handle() as *uvll::uv_handle_t, close_cb); + uvll::set_data_for_uv_handle(self.uv_handle(), ptr::null::<()>()); + + do wait_until_woken_after(&mut slot) { + uvll::set_data_for_uv_handle(self.uv_handle(), &slot); + } + } + + extern fn close_cb(handle: *uvll::uv_handle_t) { + unsafe { + let data = uvll::get_data_for_uv_handle(handle); + uvll::free_handle(handle); + if data == ptr::null() { return } + let slot: &mut Option = cast::transmute(data); + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(slot.take_unwrap()); + } + } + } +} + +pub struct ForbidUnwind { + msg: &'static str, + failing_before: bool, +} + +impl ForbidUnwind { + fn new(s: &'static str) -> ForbidUnwind { + ForbidUnwind { + msg: s, failing_before: task::failing(), + } + } +} + +impl Drop for ForbidUnwind { + fn drop(&mut self) { + assert!(self.failing_before == task::failing(), + "failing sadface {}", self.msg); + } +} + +fn wait_until_woken_after(slot: *mut Option, f: &fn()) { + let _f = ForbidUnwind::new("wait_until_woken_after"); + unsafe { + assert!((*slot).is_none()); + let sched: ~Scheduler = Local::take(); + do sched.deschedule_running_task_and_then |_, task| { + f(); + *slot = Some(task); + } + } } pub struct Request { handle: *uvll::uv_req_t, + priv defused: bool, } impl Request { pub fn new(ty: uvll::uv_req_type) -> Request { - Request::wrap(unsafe { uvll::malloc_req(ty) }) + unsafe { + let handle = uvll::malloc_req(ty); + uvll::set_data_for_req(handle, null::<()>()); + Request::wrap(handle) + } } pub fn wrap(handle: *uvll::uv_req_t) -> Request { - Request { handle: handle } + Request { handle: handle, defused: false } } pub fn set_data(&self, t: *T) { unsafe { uvll::set_data_for_req(self.handle, t) } } - pub fn get_data(&self) -> *c_void { - unsafe { uvll::get_data_for_req(self.handle) } + pub unsafe fn get_data(&self) -> &'static mut T { + let data = uvll::get_data_for_req(self.handle); + assert!(data != null()); + cast::transmute(data) } // This function should be used when the request handle has been given to an @@ -155,17 +222,15 @@ impl Request { // This is still a problem in blocking situations due to linked failure. In // the connection callback the handle should be re-wrapped with the `wrap` // function to ensure its destruction. 
- pub fn defuse(mut self) { - self.handle = ptr::null(); + pub fn defuse(&mut self) { + self.defused = true; } } impl Drop for Request { fn drop(&mut self) { - unsafe { - if self.handle != ptr::null() { - uvll::free_req(self.handle) - } + if !self.defused { + unsafe { uvll::free_req(self.handle) } } } } @@ -300,23 +365,18 @@ pub fn slice_to_uv_buf(v: &[u8]) -> Buf { uvll::uv_buf_t { base: data, len: v.len() as uvll::uv_buf_len_t } } -fn run_uv_loop(f: proc(&mut Loop)) { - use std::rt::local::Local; - use std::rt::test::run_in_uv_task; - use std::rt::sched::Scheduler; - use std::cell::Cell; - - let f = Cell::new(f); - do run_in_uv_task { - let mut io = None; - do Local::borrow |sched: &mut Scheduler| { - sched.event_loop.io(|i| unsafe { +#[cfg(test)] +fn local_loop() -> &'static mut Loop { + unsafe { + cast::transmute(do Local::borrow |sched: &mut Scheduler| { + let mut io = None; + do sched.event_loop.io |i| { let (_vtable, uvio): (uint, &'static mut uvio::UvIoFactory) = cast::transmute(i); io = Some(uvio); - }); - } - f.take()(io.unwrap().uv_loop()); + } + io.unwrap() + }.uv_loop()) } } diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index 5d228cd78486b..bf5f6c88527e2 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -19,11 +19,13 @@ use std::rt::rtio; use std::rt::sched::{Scheduler, SchedHandle}; use std::rt::tube::Tube; use std::str; +use std::task; use std::vec; use stream::StreamWatcher; use super::{Loop, Request, UvError, Buf, status_to_io_result, - uv_error_to_io_error, UvHandle, slice_to_uv_buf}; + uv_error_to_io_error, UvHandle, slice_to_uv_buf, + wait_until_woken_after}; use uvio::HomingIO; use uvll; @@ -206,46 +208,46 @@ impl TcpWatcher { { struct Ctx { status: c_int, task: Option } - let tcp = TcpWatcher::new(loop_); - let ret = do socket_addr_as_uv_socket_addr(address) |addr| { - let req = Request::new(uvll::UV_CONNECT); - let result = match addr { - UvIpv4SocketAddr(addr) => unsafe { - uvll::tcp_connect(req.handle, tcp.handle, addr, - connect_cb) - }, - UvIpv6SocketAddr(addr) => unsafe { - uvll::tcp_connect6(req.handle, tcp.handle, addr, - connect_cb) - }, - }; - match result { - 0 => { - let mut cx = Ctx { status: 0, task: None }; - req.set_data(&cx); - req.defuse(); - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - cx.task = Some(task); - } - match cx.status { - 0 => Ok(()), - n => Err(UvError(n)), + return do task::unkillable { + let tcp = TcpWatcher::new(loop_); + let ret = do socket_addr_as_uv_socket_addr(address) |addr| { + let mut req = Request::new(uvll::UV_CONNECT); + let result = match addr { + UvIpv4SocketAddr(addr) => unsafe { + uvll::tcp_connect(req.handle, tcp.handle, addr, + connect_cb) + }, + UvIpv6SocketAddr(addr) => unsafe { + uvll::tcp_connect6(req.handle, tcp.handle, addr, + connect_cb) + }, + }; + match result { + 0 => { + req.defuse(); // uv callback now owns this request + let mut cx = Ctx { status: 0, task: None }; + do wait_until_woken_after(&mut cx.task) { + req.set_data(&cx); + } + match cx.status { + 0 => Ok(()), + n => Err(UvError(n)), + } } + n => Err(UvError(n)) } - n => Err(UvError(n)) - } - }; + }; - return match ret { - Ok(()) => Ok(tcp), - Err(e) => Err(e), + match ret { + Ok(()) => Ok(tcp), + Err(e) => Err(e), + } }; extern fn connect_cb(req: *uvll::uv_connect_t, status: c_int) { let req = Request::wrap(req); - if status == uvll::ECANCELED { return } - let cx: &mut Ctx = unsafe { cast::transmute(req.get_data()) }; + assert!(status != uvll::ECANCELED); + let cx: 
&mut Ctx = unsafe { req.get_data() }; cx.status = status; let scheduler: ~Scheduler = Local::take(); scheduler.resume_blocked_task_immediately(cx.task.take_unwrap()); @@ -310,10 +312,14 @@ impl rtio::RtioTcpStream for TcpWatcher { } } +impl UvHandle for TcpWatcher { + fn uv_handle(&self) -> *uvll::uv_tcp_t { self.stream.handle } +} + impl Drop for TcpWatcher { fn drop(&mut self) { let _m = self.fire_homing_missile(); - self.stream.close(); + self.close(); } } @@ -323,25 +329,27 @@ impl TcpListener { pub fn bind(loop_: &mut Loop, address: SocketAddr) -> Result<~TcpListener, UvError> { - let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; - assert_eq!(unsafe { - uvll::uv_tcp_init(loop_.handle, handle) - }, 0); - let l = ~TcpListener { - home: get_handle_to_current_scheduler!(), - handle: handle, - closing_task: None, - outgoing: Tube::new(), - }; - let res = socket_addr_as_uv_socket_addr(address, |addr| unsafe { - match addr { - UvIpv4SocketAddr(addr) => uvll::tcp_bind(l.handle, addr), - UvIpv6SocketAddr(addr) => uvll::tcp_bind6(l.handle, addr), + do task::unkillable { + let handle = unsafe { uvll::malloc_handle(uvll::UV_TCP) }; + assert_eq!(unsafe { + uvll::uv_tcp_init(loop_.handle, handle) + }, 0); + let l = ~TcpListener { + home: get_handle_to_current_scheduler!(), + handle: handle, + closing_task: None, + outgoing: Tube::new(), + }; + let res = socket_addr_as_uv_socket_addr(address, |addr| unsafe { + match addr { + UvIpv4SocketAddr(addr) => uvll::tcp_bind(l.handle, addr), + UvIpv6SocketAddr(addr) => uvll::tcp_bind6(l.handle, addr), + } + }); + match res { + 0 => Ok(l.install()), + n => Err(UvError(n)) } - }); - match res { - 0 => Ok(l.install()), - n => Err(UvError(n)) } } } @@ -380,6 +388,7 @@ impl rtio::RtioTcpListener for TcpListener { } extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) { + assert!(status != uvll::ECANCELED); let msg = match status { 0 => { let loop_ = Loop::wrap(unsafe { @@ -389,7 +398,6 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) { assert_eq!(unsafe { uvll::uv_accept(server, client.handle) }, 0); Ok(~client as ~rtio::RtioTcpStream) } - uvll::ECANCELED => return, n => Err(uv_error_to_io_error(UvError(n))) }; @@ -399,12 +407,8 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: c_int) { impl Drop for TcpListener { fn drop(&mut self) { - let (_m, sched) = self.fire_homing_missile_sched(); - - do sched.deschedule_running_task_and_then |_, task| { - self.closing_task = Some(task); - unsafe { uvll::uv_close(self.handle, listener_close_cb) } - } + let _m = self.fire_homing_missile(); + self.close(); } } @@ -463,26 +467,34 @@ impl UdpWatcher { pub fn bind(loop_: &Loop, address: SocketAddr) -> Result { - let udp = UdpWatcher { - handle: unsafe { uvll::malloc_handle(uvll::UV_UDP) }, - home: get_handle_to_current_scheduler!(), - }; - assert_eq!(unsafe { - uvll::uv_udp_init(loop_.handle, udp.handle) - }, 0); - let result = socket_addr_as_uv_socket_addr(address, |addr| unsafe { - match addr { - UvIpv4SocketAddr(addr) => uvll::udp_bind(udp.handle, addr, 0u32), - UvIpv6SocketAddr(addr) => uvll::udp_bind6(udp.handle, addr, 0u32), + do task::unkillable { + let udp = UdpWatcher { + handle: unsafe { uvll::malloc_handle(uvll::UV_UDP) }, + home: get_handle_to_current_scheduler!(), + }; + assert_eq!(unsafe { + uvll::uv_udp_init(loop_.handle, udp.handle) + }, 0); + let result = socket_addr_as_uv_socket_addr(address, |addr| unsafe { + match addr { + UvIpv4SocketAddr(addr) => + uvll::udp_bind(udp.handle, addr, 0u32), + UvIpv6SocketAddr(addr) 
=> + uvll::udp_bind6(udp.handle, addr, 0u32), + } + }); + match result { + 0 => Ok(udp), + n => Err(UvError(n)), } - }); - match result { - 0 => Ok(udp), - n => Err(UvError(n)), } } } +impl UvHandle for UdpWatcher { + fn uv_handle(&self) -> *uvll::uv_udp_t { self.handle } +} + impl HomingIO for UdpWatcher { fn home<'r>(&'r mut self) -> &'r mut SchedHandle { &mut self.home } } @@ -505,7 +517,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { } let _m = self.fire_homing_missile(); - return match unsafe { + let a = match unsafe { uvll::uv_udp_recv_start(self.handle, alloc_cb, recv_cb) } { 0 => { @@ -514,10 +526,8 @@ impl rtio::RtioUdpSocket for UdpWatcher { buf: Some(slice_to_uv_buf(buf)), result: None, }; - unsafe { uvll::set_data_for_uv_handle(self.handle, &cx) } - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - cx.task = Some(task); + do wait_until_woken_after(&mut cx.task) { + unsafe { uvll::set_data_for_uv_handle(self.handle, &cx) } } match cx.result.take_unwrap() { (n, _) if n < 0 => @@ -527,23 +537,30 @@ impl rtio::RtioUdpSocket for UdpWatcher { } n => Err(uv_error_to_io_error(UvError(n))) }; + return a; extern fn alloc_cb(handle: *uvll::uv_udp_t, _suggested_size: size_t) -> Buf { let cx: &mut Ctx = unsafe { cast::transmute(uvll::get_data_for_uv_handle(handle)) }; - cx.buf.take().expect("alloc_cb called more than once") + cx.buf.take().expect("recv alloc_cb called more than once") } - extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, _buf: Buf, + extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, addr: *uvll::sockaddr, _flags: c_uint) { + assert!(nread != uvll::ECANCELED as ssize_t); + let cx: &mut Ctx = unsafe { + cast::transmute(uvll::get_data_for_uv_handle(handle)) + }; // When there's no data to read the recv callback can be a no-op. // This can happen if read returns EAGAIN/EWOULDBLOCK. By ignoring // this we just drop back to kqueue and wait for the next callback. - if nread == 0 { return } - if nread == uvll::ECANCELED as ssize_t { return } + if nread == 0 { + cx.buf = Some(buf); + return + } unsafe { assert_eq!(uvll::uv_udp_recv_stop(handle), 0) @@ -566,7 +583,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { let _m = self.fire_homing_missile(); - let req = Request::new(uvll::UV_UDP_SEND); + let mut req = Request::new(uvll::UV_UDP_SEND); let buf = slice_to_uv_buf(buf); let result = socket_addr_as_uv_socket_addr(dst, |dst| unsafe { match dst { @@ -579,15 +596,11 @@ impl rtio::RtioUdpSocket for UdpWatcher { return match result { 0 => { + req.defuse(); // uv callback now owns this request let mut cx = Ctx { task: None, result: 0 }; - req.set_data(&cx); - req.defuse(); - - let sched: ~Scheduler = Local::take(); - do sched.deschedule_running_task_and_then |_, task| { - cx.task = Some(task); + do wait_until_woken_after(&mut cx.task) { + req.set_data(&cx); } - match cx.result { 0 => Ok(()), n => Err(uv_error_to_io_error(UvError(n))) @@ -598,7 +611,8 @@ impl rtio::RtioUdpSocket for UdpWatcher { extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { let req = Request::wrap(req); - let cx: &mut Ctx = unsafe { cast::transmute(req.get_data()) }; + assert!(status != uvll::ECANCELED); + let cx: &mut Ctx = unsafe { req.get_data() }; cx.result = status; let sched: ~Scheduler = Local::take(); @@ -679,24 +693,8 @@ impl rtio::RtioUdpSocket for UdpWatcher { impl Drop for UdpWatcher { fn drop(&mut self) { // Send ourselves home to close this handle (blocking while doing so). 
- let (_m, sched) = self.fire_homing_missile_sched(); - let mut slot = None; - unsafe { - uvll::set_data_for_uv_handle(self.handle, &slot); - uvll::uv_close(self.handle, close_cb); - } - do sched.deschedule_running_task_and_then |_, task| { - slot = Some(task); - } - - extern fn close_cb(handle: *uvll::uv_handle_t) { - let slot: &mut Option = unsafe { - cast::transmute(uvll::get_data_for_uv_handle(handle)) - }; - unsafe { uvll::free_handle(handle) } - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(slot.take_unwrap()); - } + let _m = self.fire_homing_missile(); + self.close(); } } @@ -714,397 +712,357 @@ mod test { use std::task; use super::*; - use super::super::{Loop, run_uv_loop}; + use super::super::local_loop; #[test] fn connect_close_ip4() { - do run_uv_loop |l| { - match TcpWatcher::connect(l, next_test_ip4()) { - Ok(*) => fail!(), - Err(e) => assert_eq!(e.name(), ~"ECONNREFUSED"), - } + match TcpWatcher::connect(local_loop(), next_test_ip4()) { + Ok(*) => fail!(), + Err(e) => assert_eq!(e.name(), ~"ECONNREFUSED"), } } #[test] fn connect_close_ip6() { - do run_uv_loop |l| { - match TcpWatcher::connect(l, next_test_ip6()) { - Ok(*) => fail!(), - Err(e) => assert_eq!(e.name(), ~"ECONNREFUSED"), - } + match TcpWatcher::connect(local_loop(), next_test_ip6()) { + Ok(*) => fail!(), + Err(e) => assert_eq!(e.name(), ~"ECONNREFUSED"), } } #[test] fn udp_bind_close_ip4() { - do run_uv_loop |l| { - match UdpWatcher::bind(l, next_test_ip4()) { - Ok(*) => {} - Err(*) => fail!() - } + match UdpWatcher::bind(local_loop(), next_test_ip4()) { + Ok(*) => {} + Err(*) => fail!() } } #[test] fn udp_bind_close_ip6() { - do run_uv_loop |l| { - match UdpWatcher::bind(l, next_test_ip6()) { - Ok(*) => {} - Err(*) => fail!() - } + match UdpWatcher::bind(local_loop(), next_test_ip6()) { + Ok(*) => {} + Err(*) => fail!() } } #[test] fn listen_ip4() { - do run_uv_loop |l| { - let (port, chan) = oneshot(); - let chan = Cell::new(chan); - let addr = next_test_ip4(); - - let handle = l.handle; - do spawn { - let w = match TcpListener::bind(&mut Loop::wrap(handle), addr) { - Ok(w) => w, Err(e) => fail!("{:?}", e) - }; - let mut w = match w.listen() { - Ok(w) => w, Err(e) => fail!("{:?}", e), - }; - chan.take().send(()); - match w.accept() { - Ok(mut stream) => { - let mut buf = [0u8, ..10]; - match stream.read(buf) { - Ok(10) => {} e => fail!("{:?}", e), - } - for i in range(0, 10u8) { - assert_eq!(buf[i], i + 1); - } - } - Err(e) => fail!("{:?}", e) - } - } + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); - port.recv(); - let mut w = match TcpWatcher::connect(&mut Loop::wrap(handle), addr) { + do spawn { + let w = match TcpListener::bind(local_loop(), addr) { Ok(w) => w, Err(e) => fail!("{:?}", e) }; - match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { - Ok(()) => {}, Err(e) => fail!("{:?}", e) + let mut w = match w.listen() { + Ok(w) => w, Err(e) => fail!("{:?}", e), + }; + chan.take().send(()); + match w.accept() { + Ok(mut stream) => { + let mut buf = [0u8, ..10]; + match stream.read(buf) { + Ok(10) => {} e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); + } + } + Err(e) => fail!("{:?}", e) } } + + port.recv(); + let mut w = match TcpWatcher::connect(local_loop(), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) + } } #[test] fn listen_ip6() { - do run_uv_loop |l| { - let (port, chan) = oneshot(); - let chan = 
Cell::new(chan); - let addr = next_test_ip6(); - - let handle = l.handle; - do spawn { - let w = match TcpListener::bind(&mut Loop::wrap(handle), addr) { - Ok(w) => w, Err(e) => fail!("{:?}", e) - }; - let mut w = match w.listen() { - Ok(w) => w, Err(e) => fail!("{:?}", e), - }; - chan.take().send(()); - match w.accept() { - Ok(mut stream) => { - let mut buf = [0u8, ..10]; - match stream.read(buf) { - Ok(10) => {} e => fail!("{:?}", e), - } - for i in range(0, 10u8) { - assert_eq!(buf[i], i + 1); - } - } - Err(e) => fail!("{:?}", e) - } - } + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip6(); - port.recv(); - let mut w = match TcpWatcher::connect(&mut Loop::wrap(handle), addr) { + do spawn { + let w = match TcpListener::bind(local_loop(), addr) { Ok(w) => w, Err(e) => fail!("{:?}", e) }; - match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { - Ok(()) => {}, Err(e) => fail!("{:?}", e) + let mut w = match w.listen() { + Ok(w) => w, Err(e) => fail!("{:?}", e), + }; + chan.take().send(()); + match w.accept() { + Ok(mut stream) => { + let mut buf = [0u8, ..10]; + match stream.read(buf) { + Ok(10) => {} e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); + } + } + Err(e) => fail!("{:?}", e) } } + + port.recv(); + let mut w = match TcpWatcher::connect(local_loop(), addr) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + match w.write([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) + } } #[test] fn udp_recv_ip4() { - do run_uv_loop |l| { - let (port, chan) = oneshot(); - let chan = Cell::new(chan); - let client = next_test_ip4(); - let server = next_test_ip4(); - - let handle = l.handle; - do spawn { - match UdpWatcher::bind(&mut Loop::wrap(handle), server) { - Ok(mut w) => { - chan.take().send(()); - let mut buf = [0u8, ..10]; - match w.recvfrom(buf) { - Ok((10, addr)) => assert_eq!(addr, client), - e => fail!("{:?}", e), - } - for i in range(0, 10u8) { - assert_eq!(buf[i], i + 1); - } + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let client = next_test_ip4(); + let server = next_test_ip4(); + + do spawn { + match UdpWatcher::bind(local_loop(), server) { + Ok(mut w) => { + chan.take().send(()); + let mut buf = [0u8, ..10]; + match w.recvfrom(buf) { + Ok((10, addr)) => assert_eq!(addr, client), + e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); } - Err(e) => fail!("{:?}", e) } + Err(e) => fail!("{:?}", e) } + } - port.recv(); - let mut w = match UdpWatcher::bind(&mut Loop::wrap(handle), client) { - Ok(w) => w, Err(e) => fail!("{:?}", e) - }; - match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { - Ok(()) => {}, Err(e) => fail!("{:?}", e) - } + port.recv(); + let mut w = match UdpWatcher::bind(local_loop(), client) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) } } #[test] fn udp_recv_ip6() { - do run_uv_loop |l| { - let (port, chan) = oneshot(); - let chan = Cell::new(chan); - let client = next_test_ip6(); - let server = next_test_ip6(); - - let handle = l.handle; - do spawn { - match UdpWatcher::bind(&mut Loop::wrap(handle), server) { - Ok(mut w) => { - chan.take().send(()); - let mut buf = [0u8, ..10]; - match w.recvfrom(buf) { - Ok((10, addr)) => assert_eq!(addr, client), - e => fail!("{:?}", e), - } - for i in range(0, 10u8) { - assert_eq!(buf[i], i + 1); - } + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let client = 
next_test_ip6(); + let server = next_test_ip6(); + + do spawn { + match UdpWatcher::bind(local_loop(), server) { + Ok(mut w) => { + chan.take().send(()); + let mut buf = [0u8, ..10]; + match w.recvfrom(buf) { + Ok((10, addr)) => assert_eq!(addr, client), + e => fail!("{:?}", e), + } + for i in range(0, 10u8) { + assert_eq!(buf[i], i + 1); } - Err(e) => fail!("{:?}", e) } + Err(e) => fail!("{:?}", e) } + } - port.recv(); - let mut w = match UdpWatcher::bind(&mut Loop::wrap(handle), client) { - Ok(w) => w, Err(e) => fail!("{:?}", e) - }; - match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { - Ok(()) => {}, Err(e) => fail!("{:?}", e) - } + port.recv(); + let mut w = match UdpWatcher::bind(local_loop(), client) { + Ok(w) => w, Err(e) => fail!("{:?}", e) + }; + match w.sendto([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], server) { + Ok(()) => {}, Err(e) => fail!("{:?}", e) } } #[test] fn test_read_read_read() { - do run_uv_loop |l| { - let addr = next_test_ip4(); - static MAX: uint = 500000; - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - let handle = l.handle; - do spawntask { - let l = &mut Loop::wrap(handle); - let listener = TcpListener::bind(l, addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - chan.take().send(()); - let mut stream = acceptor.accept().unwrap(); - let buf = [1, .. 2048]; - let mut total_bytes_written = 0; - while total_bytes_written < MAX { - stream.write(buf); - total_bytes_written += buf.len(); - } + use std::rt::rtio::*; + let addr = next_test_ip4(); + static MAX: uint = 5000; + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do spawn { + let listener = TcpListener::bind(local_loop(), addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + chan.take().send(()); + let mut stream = acceptor.accept().unwrap(); + let buf = [1, .. 2048]; + let mut total_bytes_written = 0; + while total_bytes_written < MAX { + assert!(stream.write(buf).is_ok()); + uvdebug!("wrote bytes"); + total_bytes_written += buf.len(); } + } - do spawntask { - let l = &mut Loop::wrap(handle); - port.take().recv(); - let mut stream = TcpWatcher::connect(l, addr).unwrap(); - let mut buf = [0, .. 2048]; - let mut total_bytes_read = 0; - while total_bytes_read < MAX { - let nread = stream.read(buf).unwrap(); - uvdebug!("read {} bytes", nread); - total_bytes_read += nread; - for i in range(0u, nread) { - assert_eq!(buf[i], 1); - } + do spawn { + port.take().recv(); + let mut stream = TcpWatcher::connect(local_loop(), addr).unwrap(); + let mut buf = [0, .. 
2048]; + let mut total_bytes_read = 0; + while total_bytes_read < MAX { + let nread = stream.read(buf).unwrap(); + total_bytes_read += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); } - uvdebug!("read {} bytes total", total_bytes_read); } + uvdebug!("read {} bytes total", total_bytes_read); } } #[test] - #[ignore(cfg(windows))] // FIXME(#10102) the server never sees the second send fn test_udp_twice() { - do run_uv_loop |l| { - let server_addr = next_test_ip4(); - let client_addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - let handle = l.handle; - do spawntask { - let l = &mut Loop::wrap(handle); - let mut client = UdpWatcher::bind(l, client_addr).unwrap(); - port.take().recv(); - assert!(client.sendto([1], server_addr).is_ok()); - assert!(client.sendto([2], server_addr).is_ok()); - } + let server_addr = next_test_ip4(); + let client_addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); - do spawntask { - let l = &mut Loop::wrap(handle); - let mut server = UdpWatcher::bind(l, server_addr).unwrap(); - chan.take().send(()); - let mut buf1 = [0]; - let mut buf2 = [0]; - let (nread1, src1) = server.recvfrom(buf1).unwrap(); - let (nread2, src2) = server.recvfrom(buf2).unwrap(); - assert_eq!(nread1, 1); - assert_eq!(nread2, 1); - assert_eq!(src1, client_addr); - assert_eq!(src2, client_addr); - assert_eq!(buf1[0], 1); - assert_eq!(buf2[0], 2); - } + do spawn { + let mut client = UdpWatcher::bind(local_loop(), client_addr).unwrap(); + port.take().recv(); + assert!(client.sendto([1], server_addr).is_ok()); + assert!(client.sendto([2], server_addr).is_ok()); } + + let mut server = UdpWatcher::bind(local_loop(), server_addr).unwrap(); + chan.take().send(()); + let mut buf1 = [0]; + let mut buf2 = [0]; + let (nread1, src1) = server.recvfrom(buf1).unwrap(); + let (nread2, src2) = server.recvfrom(buf2).unwrap(); + assert_eq!(nread1, 1); + assert_eq!(nread2, 1); + assert_eq!(src1, client_addr); + assert_eq!(src2, client_addr); + assert_eq!(buf1[0], 1); + assert_eq!(buf2[0], 2); } #[test] fn test_udp_many_read() { - do run_uv_loop |l| { - let server_out_addr = next_test_ip4(); - let server_in_addr = next_test_ip4(); - let client_out_addr = next_test_ip4(); - let client_in_addr = next_test_ip4(); - static MAX: uint = 500_000; - - let (p1, c1) = oneshot(); - let (p2, c2) = oneshot(); - - let first = Cell::new((p1, c2)); - let second = Cell::new((p2, c1)); - - let handle = l.handle; - do spawntask { - let l = &mut Loop::wrap(handle); - let mut server_out = UdpWatcher::bind(l, server_out_addr).unwrap(); - let mut server_in = UdpWatcher::bind(l, server_in_addr).unwrap(); - let (port, chan) = first.take(); - chan.send(()); - port.recv(); - let msg = [1, .. 
2048]; - let mut total_bytes_sent = 0; - let mut buf = [1]; - while buf[0] == 1 { - // send more data - assert!(server_out.sendto(msg, client_in_addr).is_ok()); - total_bytes_sent += msg.len(); - // check if the client has received enough - let res = server_in.recvfrom(buf); - assert!(res.is_ok()); - let (nread, src) = res.unwrap(); - assert_eq!(nread, 1); - assert_eq!(src, client_out_addr); - } - assert!(total_bytes_sent >= MAX); + let server_out_addr = next_test_ip4(); + let server_in_addr = next_test_ip4(); + let client_out_addr = next_test_ip4(); + let client_in_addr = next_test_ip4(); + static MAX: uint = 500_000; + + let (p1, c1) = oneshot(); + let (p2, c2) = oneshot(); + + let first = Cell::new((p1, c2)); + let second = Cell::new((p2, c1)); + + do spawn { + let l = local_loop(); + let mut server_out = UdpWatcher::bind(l, server_out_addr).unwrap(); + let mut server_in = UdpWatcher::bind(l, server_in_addr).unwrap(); + let (port, chan) = first.take(); + chan.send(()); + port.recv(); + let msg = [1, .. 2048]; + let mut total_bytes_sent = 0; + let mut buf = [1]; + while buf[0] == 1 { + // send more data + assert!(server_out.sendto(msg, client_in_addr).is_ok()); + total_bytes_sent += msg.len(); + // check if the client has received enough + let res = server_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(nread, 1); + assert_eq!(src, client_out_addr); } + assert!(total_bytes_sent >= MAX); + } - do spawntask { - let l = &mut Loop::wrap(handle); - let mut client_out = UdpWatcher::bind(l, client_out_addr).unwrap(); - let mut client_in = UdpWatcher::bind(l, client_in_addr).unwrap(); - let (port, chan) = second.take(); - port.recv(); - chan.send(()); - let mut total_bytes_recv = 0; - let mut buf = [0, .. 2048]; - while total_bytes_recv < MAX { - // ask for more - assert!(client_out.sendto([1], server_in_addr).is_ok()); - // wait for data - let res = client_in.recvfrom(buf); - assert!(res.is_ok()); - let (nread, src) = res.unwrap(); - assert_eq!(src, server_out_addr); - total_bytes_recv += nread; - for i in range(0u, nread) { - assert_eq!(buf[i], 1); - } + do spawn { + let l = local_loop(); + let mut client_out = UdpWatcher::bind(l, client_out_addr).unwrap(); + let mut client_in = UdpWatcher::bind(l, client_in_addr).unwrap(); + let (port, chan) = second.take(); + port.recv(); + chan.send(()); + let mut total_bytes_recv = 0; + let mut buf = [0, .. 2048]; + while total_bytes_recv < MAX { + // ask for more + assert!(client_out.sendto([1], server_in_addr).is_ok()); + // wait for data + let res = client_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(src, server_out_addr); + total_bytes_recv += nread; + for i in range(0u, nread) { + assert_eq!(buf[i], 1); } - // tell the server we're done - assert!(client_out.sendto([0], server_in_addr).is_ok()); } + // tell the server we're done + assert!(client_out.sendto([0], server_in_addr).is_ok()); } } #[test] fn test_read_and_block() { - do run_uv_loop |l| { - let addr = next_test_ip4(); - let (port, chan) = oneshot(); - let port = Cell::new(port); - let chan = Cell::new(chan); - - let handle = l.handle; - do spawntask { - let l = &mut Loop::wrap(handle); - let listener = TcpListener::bind(l, addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - let (port2, chan2) = stream(); - chan.take().send(port2); - let mut stream = acceptor.accept().unwrap(); - let mut buf = [0, .. 
2048]; - - let expected = 32; - let mut current = 0; - let mut reads = 0; - - while current < expected { - let nread = stream.read(buf).unwrap(); - for i in range(0u, nread) { - let val = buf[i] as uint; - assert_eq!(val, current % 8); - current += 1; - } - reads += 1; + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + let port = Cell::new(port); + let chan = Cell::new(chan); + + do spawn { + let listener = TcpListener::bind(local_loop(), addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + let (port2, chan2) = stream(); + chan.take().send(port2); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 2048]; + + let expected = 32; + let mut current = 0; + let mut reads = 0; - chan2.send(()); + while current < expected { + let nread = stream.read(buf).unwrap(); + for i in range(0u, nread) { + let val = buf[i] as uint; + assert_eq!(val, current % 8); + current += 1; } + reads += 1; - // Make sure we had multiple reads - assert!(reads > 1); + chan2.send(()); } - do spawntask { - let l = &mut Loop::wrap(handle); - let port2 = port.take().recv(); - let mut stream = TcpWatcher::connect(l, addr).unwrap(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - port2.recv(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - port2.recv(); - } + // Make sure we had multiple reads + assert!(reads > 1); + } + + do spawn { + let port2 = port.take().recv(); + let mut stream = TcpWatcher::connect(local_loop(), addr).unwrap(); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + port2.recv(); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + stream.write([0, 1, 2, 3, 4, 5, 6, 7]); + port2.recv(); } } @@ -1113,27 +1071,23 @@ mod test { let addr = next_test_ip4(); do task::spawn_sched(task::SingleThreaded) { - do run_uv_loop |l| { - let listener = TcpListener::bind(l, addr).unwrap(); - let mut acceptor = listener.listen().unwrap(); - let mut stream = acceptor.accept().unwrap(); - let mut buf = [0, .. 2048]; - let nread = stream.read(buf).unwrap(); - assert_eq!(nread, 8); - for i in range(0u, nread) { - assert_eq!(buf[i], i as u8); - } + let listener = TcpListener::bind(local_loop(), addr).unwrap(); + let mut acceptor = listener.listen().unwrap(); + let mut stream = acceptor.accept().unwrap(); + let mut buf = [0, .. 
2048]; + let nread = stream.read(buf).unwrap(); + assert_eq!(nread, 8); + for i in range(0u, nread) { + assert_eq!(buf[i], i as u8); } } do task::spawn_sched(task::SingleThreaded) { - do run_uv_loop |l| { - let mut stream = TcpWatcher::connect(l, addr); - while stream.is_err() { - stream = TcpWatcher::connect(l, addr); - } - stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]); + let mut stream = TcpWatcher::connect(local_loop(), addr); + while stream.is_err() { + stream = TcpWatcher::connect(local_loop(), addr); } + stream.unwrap().write([0, 1, 2, 3, 4, 5, 6, 7]); } } @@ -1149,17 +1103,13 @@ mod test { do task::spawn_sched(task::SingleThreaded) { let chan = Cell::new(chan.take()); - do run_uv_loop |l| { - let listener = UdpWatcher::bind(l, next_test_ip4()).unwrap(); - chan.take().send(listener); - } + let listener = UdpWatcher::bind(local_loop(), next_test_ip4()).unwrap(); + chan.take().send(listener); } do task::spawn_sched(task::SingleThreaded) { let port = Cell::new(port.take()); - do run_uv_loop |_l| { - port.take().recv(); - } + port.take().recv(); } } @@ -1261,4 +1211,69 @@ mod test { } } + #[should_fail] + #[test] + #[ignore(reason = "linked failure")] + fn linked_failure1() { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); + + do spawn { + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let mut w = w.listen().unwrap(); + chan.take().send(()); + w.accept(); + } + + port.recv(); + fail!(); + } + + #[should_fail] + #[test] + #[ignore(reason = "linked failure")] + fn linked_failure2() { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); + + do spawn { + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let mut w = w.listen().unwrap(); + chan.take().send(()); + let mut buf = [0]; + w.accept().unwrap().read(buf); + } + + port.recv(); + let _w = TcpWatcher::connect(local_loop(), addr).unwrap(); + + fail!(); + } + + #[should_fail] + #[test] + #[ignore(reason = "linked failure")] + fn linked_failure3() { + let (port, chan) = stream(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); + + do spawn { + let chan = chan.take(); + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let mut w = w.listen().unwrap(); + chan.send(()); + let mut conn = w.accept().unwrap(); + chan.send(()); + let buf = [0, ..65536]; + conn.write(buf); + } + + port.recv(); + let _w = TcpWatcher::connect(local_loop(), addr).unwrap(); + port.recv(); + fail!(); + } } diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index 1f28e043dfb49..89a86a2ff7dce 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -9,7 +9,6 @@ // except according to those terms. 
use std::c_str::CString; -use std::cast; use std::libc; use std::rt::BlockedTask; use std::rt::io::IoError; @@ -17,9 +16,11 @@ use std::rt::local::Local; use std::rt::rtio::{RtioPipe, RtioUnixListener, RtioUnixAcceptor}; use std::rt::sched::{Scheduler, SchedHandle}; use std::rt::tube::Tube; +use std::task; use stream::StreamWatcher; -use super::{Loop, UvError, UvHandle, Request, uv_error_to_io_error}; +use super::{Loop, UvError, UvHandle, Request, uv_error_to_io_error, + wait_until_woken_after}; use uvio::HomingIO; use uvll; @@ -32,7 +33,6 @@ pub struct PipeWatcher { pub struct PipeListener { home: SchedHandle, pipe: *uvll::uv_pipe_t, - priv closing_task: Option, priv outgoing: Tube>, } @@ -74,36 +74,35 @@ impl PipeWatcher { pub fn connect(loop_: &Loop, name: &CString) -> Result { struct Ctx { task: Option, result: libc::c_int, } - let mut cx = Ctx { task: None, result: 0 }; - let req = Request::new(uvll::UV_CONNECT); - let pipe = PipeWatcher::new(loop_, false); - unsafe { - uvll::set_data_for_req(req.handle, &cx as *Ctx); - uvll::uv_pipe_connect(req.handle, - pipe.handle(), - name.with_ref(|p| p), - connect_cb) - } - req.defuse(); + return do task::unkillable { + let mut cx = Ctx { task: None, result: 0 }; + let mut req = Request::new(uvll::UV_CONNECT); + let pipe = PipeWatcher::new(loop_, false); + + do wait_until_woken_after(&mut cx.task) { + unsafe { + uvll::uv_pipe_connect(req.handle, + pipe.handle(), + name.with_ref(|p| p), + connect_cb) + } + req.set_data(&cx); + req.defuse(); // uv callback now owns this request + } + match cx.result { + 0 => Ok(pipe), + n => Err(UvError(n)) + } - let sched: ~Scheduler = Local::take(); - do sched.deschedule_running_task_and_then |_, task| { - cx.task = Some(task); - } - return match cx.result { - 0 => Ok(pipe), - n => Err(UvError(n)) }; - extern fn connect_cb(req: *uvll::uv_connect_t, status: libc::c_int) { - let _req = Request::wrap(req); - if status == uvll::ECANCELED { return } - unsafe { - let cx: &mut Ctx = cast::transmute(uvll::get_data_for_req(req)); - cx.result = status; - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(cx.task.take_unwrap()); - } + extern fn connect_cb(req: *uvll::uv_connect_t, status: libc::c_int) {; + let req = Request::wrap(req); + assert!(status != uvll::ECANCELED); + let cx: &mut Ctx = unsafe { req.get_data() }; + cx.result = status; + let sched: ~Scheduler = Local::take(); + sched.resume_blocked_task_immediately(cx.task.take_unwrap()); } } @@ -133,11 +132,15 @@ impl HomingIO for PipeWatcher { fn home<'a>(&'a mut self) -> &'a mut SchedHandle { &mut self.home } } +impl UvHandle for PipeWatcher { + fn uv_handle(&self) -> *uvll::uv_pipe_t { self.stream.handle } +} + impl Drop for PipeWatcher { fn drop(&mut self) { if !self.defused { let _m = self.fire_homing_missile(); - self.stream.close(); + self.close(); } } } @@ -150,21 +153,24 @@ extern fn pipe_close_cb(handle: *uvll::uv_handle_t) { impl PipeListener { pub fn bind(loop_: &Loop, name: &CString) -> Result<~PipeListener, UvError> { - let pipe = PipeWatcher::new(loop_, false); - match unsafe { uvll::uv_pipe_bind(pipe.handle(), name.with_ref(|p| p)) } { - 0 => { - // If successful, unwrap the PipeWatcher because we control how - // we close the pipe differently. We can't rely on - // StreamWatcher's default close method. 
- let p = ~PipeListener { - home: get_handle_to_current_scheduler!(), - pipe: pipe.unwrap(), - closing_task: None, - outgoing: Tube::new(), - }; - Ok(p.install()) + do task::unkillable { + let pipe = PipeWatcher::new(loop_, false); + match unsafe { + uvll::uv_pipe_bind(pipe.handle(), name.with_ref(|p| p)) + } { + 0 => { + // If successful, unwrap the PipeWatcher because we control how + // we close the pipe differently. We can't rely on + // StreamWatcher's default close method. + let p = ~PipeListener { + home: get_handle_to_current_scheduler!(), + pipe: pipe.unwrap(), + outgoing: Tube::new(), + }; + Ok(p.install()) + } + n => Err(UvError(n)) } - n => Err(UvError(n)) } } } @@ -196,6 +202,7 @@ impl UvHandle for PipeListener { } extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { + assert!(status != uvll::ECANCELED); let msg = match status { 0 => { let loop_ = Loop::wrap(unsafe { @@ -205,7 +212,6 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { assert_eq!(unsafe { uvll::uv_accept(server, client.handle()) }, 0); Ok(~client as ~RtioPipe) } - uvll::ECANCELED => return, n => Err(uv_error_to_io_error(UvError(n))) }; @@ -215,23 +221,11 @@ extern fn listen_cb(server: *uvll::uv_stream_t, status: libc::c_int) { impl Drop for PipeListener { fn drop(&mut self) { - let (_m, sched) = self.fire_homing_missile_sched(); - - do sched.deschedule_running_task_and_then |_, task| { - self.closing_task = Some(task); - unsafe { uvll::uv_close(self.pipe, listener_close_cb) } - } + let _m = self.fire_homing_missile(); + self.close(); } } -extern fn listener_close_cb(handle: *uvll::uv_handle_t) { - let pipe: &mut PipeListener = unsafe { UvHandle::from_uv_handle(&handle) }; - unsafe { uvll::free_handle(handle) } - - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(pipe.closing_task.take_unwrap()); -} - // PipeAcceptor implementation and traits impl RtioUnixAcceptor for PipeAcceptor { diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 15d5ae1c33ca5..17a7510aa19b7 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -19,7 +19,8 @@ use std::rt::rtio::RtioProcess; use std::rt::sched::{Scheduler, SchedHandle}; use std::vec; -use super::{Loop, UvHandle, UvError, uv_error_to_io_error}; +use super::{Loop, UvHandle, UvError, uv_error_to_io_error, + wait_until_woken_after}; use uvio::HomingIO; use uvll; use pipe::PipeWatcher; @@ -222,11 +223,7 @@ impl RtioProcess for Process { // If there's no exit code previously listed, then the // process's exit callback has yet to be invoked. We just // need to deschedule ourselves and wait to be reawoken. - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - assert!(self.to_wake.is_none()); - self.to_wake = Some(task); - } + wait_until_woken_after(&mut self.to_wake, || {}); assert!(self.exit_status.is_some()); } } diff --git a/src/librustuv/stream.rs b/src/librustuv/stream.rs index 745cb5a6fa090..b9ccacf4df707 100644 --- a/src/librustuv/stream.rs +++ b/src/librustuv/stream.rs @@ -9,12 +9,14 @@ // except according to those terms. 
use std::cast; -use std::libc::{c_int, size_t, ssize_t, c_void}; +use std::libc::{c_int, size_t, ssize_t}; +use std::ptr; use std::rt::BlockedTask; use std::rt::local::Local; use std::rt::sched::Scheduler; -use super::{UvError, Buf, slice_to_uv_buf, Request}; +use super::{UvError, Buf, slice_to_uv_buf, Request, wait_until_woken_after, + ForbidUnwind}; use uvll; // This is a helper structure which is intended to get embedded into other @@ -63,6 +65,10 @@ impl StreamWatcher { } pub fn read(&mut self, buf: &mut [u8]) -> Result { + // This read operation needs to get canceled on an unwind via libuv's + // uv_read_stop function + let _f = ForbidUnwind::new("stream read"); + // Send off the read request, but don't block until we're sure that the // read request is queued. match unsafe { @@ -74,12 +80,10 @@ impl StreamWatcher { result: 0, task: None, }; - unsafe { - uvll::set_data_for_uv_handle(self.handle, &rcx) - } - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_sched, task| { - rcx.task = Some(task); + do wait_until_woken_after(&mut rcx.task) { + unsafe { + uvll::set_data_for_uv_handle(self.handle, &rcx) + } } match rcx.result { n if n < 0 => Err(UvError(n as c_int)), @@ -91,12 +95,17 @@ impl StreamWatcher { } pub fn write(&mut self, buf: &[u8]) -> Result<(), UvError> { + // The ownership of the write request is dubious if this function + // unwinds. I believe that if the write_cb fails to re-schedule the task + // then the write request will be leaked. + let _f = ForbidUnwind::new("stream write"); + // Prepare the write request, either using a cached one or allocating a // new one - if self.last_write_req.is_none() { - self.last_write_req = Some(Request::new(uvll::UV_WRITE)); - } - let req = self.last_write_req.get_ref(); + let mut req = match self.last_write_req.take() { + Some(req) => req, None => Request::new(uvll::UV_WRITE), + }; + req.set_data(ptr::null::<()>()); // Send off the request, but be careful to not block until we're sure // that the write reqeust is queued. If the reqeust couldn't be queued, @@ -107,11 +116,12 @@ impl StreamWatcher { } { 0 => { let mut wcx = WriteContext { result: 0, task: None, }; - req.set_data(&wcx); - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_sched, task| { - wcx.task = Some(task); + req.defuse(); // uv callback now owns this request + + do wait_until_woken_after(&mut wcx.task) { + req.set_data(&wcx); } + self.last_write_req = Some(Request::wrap(req.handle)); match wcx.result { 0 => Ok(()), n => Err(UvError(n)), @@ -120,50 +130,24 @@ impl StreamWatcher { n => Err(UvError(n)), } } - - // This will deallocate an internally used memory, along with closing the - // handle (and freeing it). - pub fn close(&mut self) { - let mut closing_task = None; - unsafe { - uvll::set_data_for_uv_handle(self.handle, &closing_task); - } - - // Wait for this stream to close because it possibly represents a remote - // connection which may have consequences if we close asynchronously. 
- let sched: ~Scheduler = Local::take(); - do sched.deschedule_running_task_and_then |_, task| { - closing_task = Some(task); - unsafe { uvll::uv_close(self.handle, close_cb) } - } - - extern fn close_cb(handle: *uvll::uv_handle_t) { - let data: *c_void = unsafe { uvll::get_data_for_uv_handle(handle) }; - unsafe { uvll::free_handle(handle) } - - let closing_task: &mut Option = unsafe { - cast::transmute(data) - }; - let task = closing_task.take_unwrap(); - let scheduler: ~Scheduler = Local::take(); - scheduler.resume_blocked_task_immediately(task); - } - } } // This allocation callback expects to be invoked once and only once. It will // unwrap the buffer in the ReadContext stored in the stream and return it. This // will fail if it is called more than once. extern fn alloc_cb(stream: *uvll::uv_stream_t, _hint: size_t) -> Buf { + uvdebug!("alloc_cb"); let rcx: &mut ReadContext = unsafe { cast::transmute(uvll::get_data_for_uv_handle(stream)) }; - rcx.buf.take().expect("alloc_cb called more than once") + rcx.buf.take().expect("stream alloc_cb called more than once") } // When a stream has read some data, we will always forcibly stop reading and // return all the data read (even if it didn't fill the whole buffer). extern fn read_cb(handle: *uvll::uv_stream_t, nread: ssize_t, _buf: Buf) { + uvdebug!("read_cb {}", nread); + assert!(nread != uvll::ECANCELED as ssize_t); let rcx: &mut ReadContext = unsafe { cast::transmute(uvll::get_data_for_uv_handle(handle)) }; @@ -182,11 +166,11 @@ extern fn read_cb(handle: *uvll::uv_stream_t, nread: ssize_t, _buf: Buf) { // reading, however, all this does is wake up the blocked task after squirreling // away the error code as a result. extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { - if status == uvll::ECANCELED { return } + let mut req = Request::wrap(req); + assert!(status != uvll::ECANCELED); // Remember to not free the request because it is re-used between writes on // the same stream. - let req = Request::wrap(req); - let wcx: &mut WriteContext = unsafe { cast::transmute(req.get_data()) }; + let wcx: &mut WriteContext = unsafe { req.get_data() }; wcx.result = status; req.defuse(); diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index df35a4892e978..96cf024639f81 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -16,7 +16,7 @@ use std::rt::rtio::RtioTimer; use std::rt::sched::{Scheduler, SchedHandle}; use uvll; -use super::{Loop, UvHandle}; +use super::{Loop, UvHandle, ForbidUnwind}; use uvio::HomingIO; pub struct TimerWatcher { @@ -67,6 +67,11 @@ impl UvHandle for TimerWatcher { impl RtioTimer for TimerWatcher { fn sleep(&mut self, msecs: u64) { let (_m, sched) = self.fire_homing_missile_sched(); + + // If the descheduling operation unwinds after the timer has been + // started, then we need to call stop on the timer. 
+ let _f = ForbidUnwind::new("timer"); + do sched.deschedule_running_task_and_then |_sched, task| { self.action = Some(WakeTask(task)); self.start(msecs, 0); @@ -124,51 +129,43 @@ impl Drop for TimerWatcher { mod test { use super::*; use std::rt::rtio::RtioTimer; - use super::super::run_uv_loop; + use super::super::local_loop; #[test] fn oneshot() { - do run_uv_loop |l| { - let mut timer = TimerWatcher::new(l); - let port = timer.oneshot(1); - port.recv(); - let port = timer.oneshot(1); - port.recv(); - } + let mut timer = TimerWatcher::new(local_loop()); + let port = timer.oneshot(1); + port.recv(); + let port = timer.oneshot(1); + port.recv(); } #[test] fn override() { - do run_uv_loop |l| { - let mut timer = TimerWatcher::new(l); - let oport = timer.oneshot(1); - let pport = timer.period(1); - timer.sleep(1); - assert_eq!(oport.try_recv(), None); - assert_eq!(pport.try_recv(), None); - timer.oneshot(1).recv(); - } + let mut timer = TimerWatcher::new(local_loop()); + let oport = timer.oneshot(1); + let pport = timer.period(1); + timer.sleep(1); + assert_eq!(oport.try_recv(), None); + assert_eq!(pport.try_recv(), None); + timer.oneshot(1).recv(); } #[test] fn period() { - do run_uv_loop |l| { - let mut timer = TimerWatcher::new(l); - let port = timer.period(1); - port.recv(); - port.recv(); - let port = timer.period(1); - port.recv(); - port.recv(); - } + let mut timer = TimerWatcher::new(local_loop()); + let port = timer.period(1); + port.recv(); + port.recv(); + let port = timer.period(1); + port.recv(); + port.recv(); } #[test] fn sleep() { - do run_uv_loop |l| { - let mut timer = TimerWatcher::new(l); - timer.sleep(1); - timer.sleep(1); - } + let mut timer = TimerWatcher::new(local_loop()); + timer.sleep(1); + timer.sleep(1); } } diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index c072ab5156121..04e406ce987e9 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -103,6 +103,6 @@ impl HomingIO for TtyWatcher { impl Drop for TtyWatcher { fn drop(&mut self) { let _m = self.fire_homing_missile(); - self.stream.close(); + self.close(); } } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 6ae2c174e18b4..75ec5f26b336c 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -9,7 +9,7 @@ // except according to those terms. use std::c_str::CString; -use std::comm::{SharedChan, GenericChan}; +use std::comm::SharedChan; use std::libc::c_int; use std::libc; use std::path::Path; @@ -26,7 +26,7 @@ use std::libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, use std::rt::io::{FileMode, FileAccess, Open, Append, Truncate, Read, Write, ReadWrite, FileStat}; use std::rt::io::signal::Signum; -use std::task; +use std::util; use ai = std::rt::io::net::addrinfo; #[cfg(test)] use std::unstable::run_in_bare_thread; @@ -44,6 +44,13 @@ pub trait HomingIO { fn go_to_IO_home(&mut self) -> uint { use std::rt::sched::RunOnce; + unsafe { + let task: *mut Task = Local::unsafe_borrow(); + (*task).death.inhibit_kill((*task).unwinder.unwinding); + } + + let _f = ForbidUnwind::new("going home"); + let current_sched_id = do Local::borrow |sched: &mut Scheduler| { sched.sched_id() }; @@ -51,22 +58,17 @@ pub trait HomingIO { // Only need to invoke a context switch if we're not on the right // scheduler. 
if current_sched_id != self.home().sched_id { - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - /* FIXME(#8674) if the task was already killed then wake - * will return None. In that case, the home pointer will - * never be set. - * - * RESOLUTION IDEA: Since the task is dead, we should - * just abort the IO action. - */ - do task.wake().map |task| { - self.home().send(RunOnce(task)); - }; - } + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + do task.wake().map |task| { + self.home().send(RunOnce(task)); + }; } } + let current_sched_id = do Local::borrow |sched: &mut Scheduler| { + sched.sched_id() + }; + assert!(current_sched_id == self.home().sched_id); self.home().sched_id } @@ -98,25 +100,38 @@ struct HomingMissile { priv io_home: uint, } +impl HomingMissile { + pub fn check(&self, msg: &'static str) { + let local_id = Local::borrow(|sched: &mut Scheduler| sched.sched_id()); + assert!(local_id == self.io_home, "{}", msg); + } +} + impl Drop for HomingMissile { fn drop(&mut self) { + let f = ForbidUnwind::new("leaving home"); + // It would truly be a sad day if we had moved off the home I/O // scheduler while we were doing I/O. - assert_eq!(Local::borrow(|sched: &mut Scheduler| sched.sched_id()), - self.io_home); + self.check("task moved away from the home scheduler"); // If we were a homed task, then we must send ourselves back to the // original scheduler. Otherwise, we can just return and keep running if !Task::on_appropriate_sched() { - do task::unkillable { // FIXME(#8674) - let scheduler: ~Scheduler = Local::take(); - do scheduler.deschedule_running_task_and_then |_, task| { - do task.wake().map |task| { - Scheduler::run_task(task); - }; - } + let scheduler: ~Scheduler = Local::take(); + do scheduler.deschedule_running_task_and_then |_, task| { + do task.wake().map |task| { + Scheduler::run_task(task); + }; } } + + util::ignore(f); + + unsafe { + let task: *mut Task = Local::unsafe_borrow(); + (*task).death.allow_kill((*task).unwinder.unwinding); + } } } From b652bbc6700e36c9ad80105c89d7fc2e3afec111 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 7 Nov 2013 15:24:30 -0800 Subject: [PATCH 23/27] Fall back from uv tty instances more aggressively It appears that uv's support for interacting with a stdio stream as a tty when it's actually a pipe is pretty problematic. To get around this, promote a check to see if the stream is a tty to the top of the tty constructor, and bail out quickly if it's not identified as a tty. Closes #10237 --- src/librustuv/tty.rs | 20 +++++++++++++++----- src/librustuv/uvll.rs | 2 ++ src/libstd/rt/io/stdio.rs | 6 ++---- src/libstd/rt/rtio.rs | 1 - 4 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index 04e406ce987e9..4853973f1a307 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -30,8 +30,22 @@ impl TtyWatcher { pub fn new(loop_: &Loop, fd: libc::c_int, readable: bool) -> Result { - let handle = UvHandle::alloc(None::, uvll::UV_TTY); + // libuv may succeed in giving us a handle (via uv_tty_init), but if the + // handle isn't actually connected to a terminal there are frequently + // many problems in using it with libuv. To get around this, always + // return a failure if the specified file descriptor isn't actually a + // TTY. 
+ // + // Related: + // - https://github.com/joyent/libuv/issues/982 + // - https://github.com/joyent/libuv/issues/988 + if unsafe { uvll::guess_handle(fd) != uvll::UV_TTY as libc::c_int } { + return Err(UvError(uvll::EBADF)); + } + // If this file descriptor is indeed guessed to be a tty, then go ahead + // with attempting to open it as a tty. + let handle = UvHandle::alloc(None::, uvll::UV_TTY); match unsafe { uvll::uv_tty_init(loop_.handle, handle, fd as libc::c_int, readable as libc::c_int) @@ -86,10 +100,6 @@ impl RtioTTY for TtyWatcher { n => Err(uv_error_to_io_error(UvError(n))) } } - - fn isatty(&self) -> bool { - unsafe { uvll::guess_handle(self.fd) == uvll::UV_TTY as libc::c_int } - } } impl UvHandle for TtyWatcher { diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index d009201e8409a..58d182a22c30e 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -54,6 +54,7 @@ pub mod errors { pub static EPIPE: c_int = -4048; pub static ECONNABORTED: c_int = -4080; pub static ECANCELED: c_int = -4082; + pub static EBADF: c_int = -4084; } #[cfg(not(windows))] pub mod errors { @@ -67,6 +68,7 @@ pub mod errors { pub static EPIPE: c_int = -libc::EPIPE; pub static ECONNABORTED: c_int = -libc::ECONNABORTED; pub static ECANCELED : c_int = -libc::ECANCELED; + pub static EBADF : c_int = -libc::EBADF; } pub static PROCESS_SETUID: c_int = 1 << 0; diff --git a/src/libstd/rt/io/stdio.rs b/src/libstd/rt/io/stdio.rs index d33821a34b1ee..674b34639bc90 100644 --- a/src/libstd/rt/io/stdio.rs +++ b/src/libstd/rt/io/stdio.rs @@ -277,12 +277,10 @@ impl StdWriter { } } - /// Returns whether this tream is attached to a TTY instance or not. - /// - /// This is similar to libc's isatty() function + /// Returns whether this stream is attached to a TTY instance or not. pub fn isatty(&self) -> bool { match self.inner { - TTY(ref tty) => tty.isatty(), + TTY(*) => true, File(*) => false, } } diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index 1e12da8645ce7..d623914cdadc9 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -222,7 +222,6 @@ pub trait RtioTTY { fn write(&mut self, buf: &[u8]) -> Result<(), IoError>; fn set_raw(&mut self, raw: bool) -> Result<(), IoError>; fn get_winsize(&mut self) -> Result<(int, int), IoError>; - fn isatty(&self) -> bool; } pub trait PausibleIdleCallback { From 3a3eefc5c3ce95de3001d8ee830296345c2f6bc9 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 7 Nov 2013 15:26:47 -0800 Subject: [PATCH 24/27] Update to the latest libuv At this time, also point the libuv submodule to the official repo instead of my own off to the side. 
cc #10246 Closes #10329 --- .gitmodules | 2 +- mk/rt.mk | 2 +- src/librustuv/addrinfo.rs | 3 +- src/librustuv/lib.rs | 2 +- src/librustuv/net.rs | 221 +++++++++++++++----------------------- src/librustuv/process.rs | 4 +- src/librustuv/stream.rs | 13 +-- src/librustuv/uvll.rs | 216 ++++++++----------------------------- src/libuv | 2 +- src/rt/rust_uv.cpp | 177 +++++------------------------- 10 files changed, 172 insertions(+), 470 deletions(-) diff --git a/.gitmodules b/.gitmodules index 7e997334cecaf..a861cf7997866 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,7 +4,7 @@ branch = master [submodule "src/libuv"] path = src/libuv - url = https://github.com/alexcrichton/libuv.git + url = https://github.com/joyent/libuv.git branch = master [submodule "src/gyp"] path = src/gyp diff --git a/mk/rt.mk b/mk/rt.mk index 3d5e9cbcb82a1..55187fad49db0 100644 --- a/mk/rt.mk +++ b/mk/rt.mk @@ -207,7 +207,7 @@ LIBUV_MAKEFILE_$(1) := $$(CFG_BUILD_DIR)$$(RT_OUTPUT_DIR_$(1))/libuv/Makefile $$(LIBUV_MAKEFILE_$(1)): $$(LIBUV_DEPS) (cd $(S)src/libuv/ && \ - $$(CFG_PYTHON) ./gyp_uv -f make -Dtarget_arch=$$(LIBUV_ARCH_$(1)) \ + $$(CFG_PYTHON) ./gyp_uv.py -f make -Dtarget_arch=$$(LIBUV_ARCH_$(1)) \ -D ninja \ -DOS=$$(LIBUV_OSTYPE_$(1)) \ -Goutput_dir=$$(@D) --generator-output $$(@D)) diff --git a/src/librustuv/addrinfo.rs b/src/librustuv/addrinfo.rs index 56f6eda53575c..601cc9f84add0 100644 --- a/src/librustuv/addrinfo.rs +++ b/src/librustuv/addrinfo.rs @@ -141,8 +141,7 @@ pub fn accum_addrinfo(addr: &Addrinfo) -> ~[ai::Info] { let mut addrs = ~[]; loop { - let uvaddr = net::sockaddr_to_UvSocketAddr((*addr).ai_addr); - let rustaddr = net::uv_socket_addr_to_socket_addr(uvaddr); + let rustaddr = net::sockaddr_to_socket_addr((*addr).ai_addr); let mut flags = 0; do each_ai_flag |cval, aival| { diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 4da5ad4275f79..7c84ccb8f2cd7 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -47,7 +47,7 @@ via `close` and `delete` methods. use std::cast::transmute; use std::cast; -use std::libc::{c_int, malloc, free}; +use std::libc::{c_int, malloc}; use std::ptr::null; use std::ptr; use std::rt::BlockedTask; diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index bf5f6c88527e2..10b8f50e387ce 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -9,6 +9,7 @@ // except according to those terms. 
use std::cast; +use std::libc; use std::libc::{size_t, ssize_t, c_int, c_void, c_uint, c_char}; use std::ptr; use std::rt::BlockedTask; @@ -28,87 +29,61 @@ use super::{Loop, Request, UvError, Buf, status_to_io_result, wait_until_woken_after}; use uvio::HomingIO; use uvll; +use uvll::sockaddr; //////////////////////////////////////////////////////////////////////////////// /// Generic functions related to dealing with sockaddr things //////////////////////////////////////////////////////////////////////////////// -pub enum UvSocketAddr { - UvIpv4SocketAddr(*uvll::sockaddr_in), - UvIpv6SocketAddr(*uvll::sockaddr_in6), -} - -pub fn sockaddr_to_UvSocketAddr(addr: *uvll::sockaddr) -> UvSocketAddr { - unsafe { - assert!((uvll::is_ip4_addr(addr) || uvll::is_ip6_addr(addr))); - assert!(!(uvll::is_ip4_addr(addr) && uvll::is_ip6_addr(addr))); - match addr { - _ if uvll::is_ip4_addr(addr) => - UvIpv4SocketAddr(addr as *uvll::sockaddr_in), - _ if uvll::is_ip6_addr(addr) => - UvIpv6SocketAddr(addr as *uvll::sockaddr_in6), - _ => fail!(), - } - } -} - -fn socket_addr_as_uv_socket_addr(addr: SocketAddr, f: &fn(UvSocketAddr) -> T) -> T { +#[fixed_stack_segment] +fn socket_addr_as_sockaddr(addr: SocketAddr, f: &fn(*sockaddr) -> T) -> T { let malloc = match addr.ip { - Ipv4Addr(*) => uvll::malloc_ip4_addr, - Ipv6Addr(*) => uvll::malloc_ip6_addr, - }; - let wrap = match addr.ip { - Ipv4Addr(*) => UvIpv4SocketAddr, - Ipv6Addr(*) => UvIpv6SocketAddr, - }; - let free = match addr.ip { - Ipv4Addr(*) => uvll::free_ip4_addr, - Ipv6Addr(*) => uvll::free_ip6_addr, + Ipv4Addr(*) => uvll::rust_malloc_ip4_addr, + Ipv6Addr(*) => uvll::rust_malloc_ip6_addr, }; - let addr = unsafe { malloc(addr.ip.to_str(), addr.port as int) }; + let ip = addr.ip.to_str(); + let addr = ip.with_c_str(|p| unsafe { malloc(p, addr.port as c_int) }); do (|| { - f(wrap(addr)) + f(addr) }).finally { - unsafe { free(addr) }; + unsafe { libc::free(addr) }; } } -fn uv_socket_addr_as_socket_addr(addr: UvSocketAddr, f: &fn(SocketAddr) -> T) -> T { - let ip_size = match addr { - UvIpv4SocketAddr(*) => 4/*groups of*/ * 3/*digits separated by*/ + 3/*periods*/, - UvIpv6SocketAddr(*) => 8/*groups of*/ * 4/*hex digits separated by*/ + 7 /*colons*/, - }; - let ip_name = { - let buf = vec::from_elem(ip_size + 1 /*null terminated*/, 0u8); - unsafe { +#[fixed_stack_segment] +pub fn sockaddr_to_socket_addr(addr: *sockaddr) -> SocketAddr { + unsafe { + let ip_size = if uvll::rust_is_ipv4_sockaddr(addr) == 1 { + 4/*groups of*/ * 3/*digits separated by*/ + 3/*periods*/ + } else if uvll::rust_is_ipv6_sockaddr(addr) == 1 { + 8/*groups of*/ * 4/*hex digits separated by*/ + 7 /*colons*/ + } else { + fail!("unknown address?"); + }; + let ip_name = { + let buf = vec::from_elem(ip_size + 1 /*null terminated*/, 0u8); let buf_ptr = vec::raw::to_ptr(buf); - match addr { - UvIpv4SocketAddr(addr) => - uvll::uv_ip4_name(addr, buf_ptr as *c_char, ip_size as size_t), - UvIpv6SocketAddr(addr) => - uvll::uv_ip6_name(addr, buf_ptr as *c_char, ip_size as size_t), + if uvll::rust_is_ipv4_sockaddr(addr) == 1 { + uvll::uv_ip4_name(addr, buf_ptr as *c_char, ip_size as size_t); + } else { + uvll::uv_ip6_name(addr, buf_ptr as *c_char, ip_size as size_t); } + buf }; - buf - }; - let ip_port = unsafe { - let port = match addr { - UvIpv4SocketAddr(addr) => uvll::ip4_port(addr), - UvIpv6SocketAddr(addr) => uvll::ip6_port(addr), + let ip_port = { + let port = if uvll::rust_is_ipv4_sockaddr(addr) == 1 { + uvll::rust_ip4_port(addr) + } else { + uvll::rust_ip6_port(addr) + }; + port as u16 }; - 
port as u16 - }; - let ip_str = str::from_utf8_slice(ip_name).trim_right_chars(&'\x00'); - let ip_addr = FromStr::from_str(ip_str).unwrap(); + let ip_str = str::from_utf8_slice(ip_name).trim_right_chars(&'\x00'); + let ip_addr = FromStr::from_str(ip_str).unwrap(); - // finally run the closure - f(SocketAddr { ip: ip_addr, port: ip_port }) -} - -pub fn uv_socket_addr_to_socket_addr(addr: UvSocketAddr) -> SocketAddr { - use std::util; - uv_socket_addr_as_socket_addr(addr, util::id) + SocketAddr { ip: ip_addr, port: ip_port } + } } #[cfg(test)] @@ -116,7 +91,9 @@ pub fn uv_socket_addr_to_socket_addr(addr: UvSocketAddr) -> SocketAddr { fn test_ip4_conversion() { use std::rt; let ip4 = rt::test::next_test_ip4(); - assert_eq!(ip4, socket_addr_as_uv_socket_addr(ip4, uv_socket_addr_to_socket_addr)); + do socket_addr_as_sockaddr(ip4) |addr| { + assert_eq!(ip4, sockaddr_to_socket_addr(addr)); + } } #[cfg(test)] @@ -124,7 +101,9 @@ fn test_ip4_conversion() { fn test_ip6_conversion() { use std::rt; let ip6 = rt::test::next_test_ip6(); - assert_eq!(ip6, socket_addr_as_uv_socket_addr(ip6, uv_socket_addr_to_socket_addr)); + do socket_addr_as_sockaddr(ip6) |addr| { + assert_eq!(ip6, sockaddr_to_socket_addr(addr)); + } } enum SocketNameKind { @@ -133,37 +112,29 @@ enum SocketNameKind { Udp } +#[fixed_stack_segment] fn socket_name(sk: SocketNameKind, handle: *c_void) -> Result { - let getsockname = match sk { - TcpPeer => uvll::tcp_getpeername, - Tcp => uvll::tcp_getsockname, - Udp => uvll::udp_getsockname, - }; - - // Allocate a sockaddr_storage - // since we don't know if it's ipv4 or ipv6 - let r_addr = unsafe { uvll::malloc_sockaddr_storage() }; + unsafe { + let getsockname = match sk { + TcpPeer => uvll::uv_tcp_getpeername, + Tcp => uvll::uv_tcp_getsockname, + Udp => uvll::uv_udp_getsockname, + }; - let r = unsafe { - getsockname(handle, r_addr as *uvll::sockaddr_storage) - }; + // Allocate a sockaddr_storage + // since we don't know if it's ipv4 or ipv6 + let size = uvll::rust_sockaddr_size(); + let name = libc::malloc(size as size_t); + assert!(!name.is_null()); + let mut namelen = size; - if r != 0 { - return Err(uv_error_to_io_error(UvError(r))); + let ret = match getsockname(handle, name, &mut namelen) { + 0 => Ok(sockaddr_to_socket_addr(name)), + n => Err(uv_error_to_io_error(UvError(n))) + }; + libc::free(name); + ret } - - let addr = unsafe { - if uvll::is_ip6_addr(r_addr as *uvll::sockaddr) { - uv_socket_addr_to_socket_addr(UvIpv6SocketAddr(r_addr as *uvll::sockaddr_in6)) - } else { - uv_socket_addr_to_socket_addr(UvIpv4SocketAddr(r_addr as *uvll::sockaddr_in)) - } - }; - - unsafe { uvll::free_sockaddr_storage(r_addr); } - - Ok(addr) - } //////////////////////////////////////////////////////////////////////////////// @@ -210,17 +181,11 @@ impl TcpWatcher { return do task::unkillable { let tcp = TcpWatcher::new(loop_); - let ret = do socket_addr_as_uv_socket_addr(address) |addr| { + let ret = do socket_addr_as_sockaddr(address) |addr| { let mut req = Request::new(uvll::UV_CONNECT); - let result = match addr { - UvIpv4SocketAddr(addr) => unsafe { - uvll::tcp_connect(req.handle, tcp.handle, addr, - connect_cb) - }, - UvIpv6SocketAddr(addr) => unsafe { - uvll::tcp_connect6(req.handle, tcp.handle, addr, - connect_cb) - }, + let result = unsafe { + uvll::uv_tcp_connect(req.handle, tcp.handle, addr, + connect_cb) }; match result { 0 => { @@ -340,11 +305,8 @@ impl TcpListener { closing_task: None, outgoing: Tube::new(), }; - let res = socket_addr_as_uv_socket_addr(address, |addr| unsafe { - match addr 
{ - UvIpv4SocketAddr(addr) => uvll::tcp_bind(l.handle, addr), - UvIpv6SocketAddr(addr) => uvll::tcp_bind6(l.handle, addr), - } + let res = socket_addr_as_sockaddr(address, |addr| unsafe { + uvll::uv_tcp_bind(l.handle, addr) }); match res { 0 => Ok(l.install()), @@ -475,13 +437,8 @@ impl UdpWatcher { assert_eq!(unsafe { uvll::uv_udp_init(loop_.handle, udp.handle) }, 0); - let result = socket_addr_as_uv_socket_addr(address, |addr| unsafe { - match addr { - UvIpv4SocketAddr(addr) => - uvll::udp_bind(udp.handle, addr, 0u32), - UvIpv6SocketAddr(addr) => - uvll::udp_bind6(udp.handle, addr, 0u32), - } + let result = socket_addr_as_sockaddr(address, |addr| unsafe { + uvll::uv_udp_bind(udp.handle, addr, 0u32) }); match result { 0 => Ok(udp), @@ -513,7 +470,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { struct Ctx { task: Option, buf: Option, - result: Option<(ssize_t, SocketAddr)>, + result: Option<(ssize_t, Option)>, } let _m = self.fire_homing_missile(); @@ -532,7 +489,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { match cx.result.take_unwrap() { (n, _) if n < 0 => Err(uv_error_to_io_error(UvError(n as c_int))), - (n, addr) => Ok((n as uint, addr)) + (n, addr) => Ok((n as uint, addr.unwrap())) } } n => Err(uv_error_to_io_error(UvError(n))) @@ -540,14 +497,16 @@ impl rtio::RtioUdpSocket for UdpWatcher { return a; extern fn alloc_cb(handle: *uvll::uv_udp_t, - _suggested_size: size_t) -> Buf { - let cx: &mut Ctx = unsafe { - cast::transmute(uvll::get_data_for_uv_handle(handle)) - }; - cx.buf.take().expect("recv alloc_cb called more than once") + _suggested_size: size_t, + buf: *mut Buf) { + unsafe { + let cx: &mut Ctx = + cast::transmute(uvll::get_data_for_uv_handle(handle)); + *buf = cx.buf.take().expect("recv alloc_cb called more than once") + } } - extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, + extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: *Buf, addr: *uvll::sockaddr, _flags: c_uint) { assert!(nread != uvll::ECANCELED as ssize_t); let cx: &mut Ctx = unsafe { @@ -558,7 +517,7 @@ impl rtio::RtioUdpSocket for UdpWatcher { // This can happen if read returns EAGAIN/EWOULDBLOCK. By ignoring // this we just drop back to kqueue and wait for the next callback. 
if nread == 0 { - cx.buf = Some(buf); + cx.buf = Some(unsafe { *buf }); return } @@ -569,8 +528,11 @@ impl rtio::RtioUdpSocket for UdpWatcher { let cx: &mut Ctx = unsafe { cast::transmute(uvll::get_data_for_uv_handle(handle)) }; - let addr = sockaddr_to_UvSocketAddr(addr); - let addr = uv_socket_addr_to_socket_addr(addr); + let addr = if addr == ptr::null() { + None + } else { + Some(sockaddr_to_socket_addr(addr)) + }; cx.result = Some((nread, addr)); let sched: ~Scheduler = Local::take(); @@ -585,13 +547,8 @@ impl rtio::RtioUdpSocket for UdpWatcher { let mut req = Request::new(uvll::UV_UDP_SEND); let buf = slice_to_uv_buf(buf); - let result = socket_addr_as_uv_socket_addr(dst, |dst| unsafe { - match dst { - UvIpv4SocketAddr(dst) => - uvll::udp_send(req.handle, self.handle, [buf], dst, send_cb), - UvIpv6SocketAddr(dst) => - uvll::udp_send6(req.handle, self.handle, [buf], dst, send_cb), - } + let result = socket_addr_as_sockaddr(dst, |dst| unsafe { + uvll::uv_udp_send(req.handle, self.handle, [buf], dst, send_cb) }); return match result { diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 17a7510aa19b7..d0b0d6429b8be 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -78,7 +78,7 @@ impl Process { let handle = UvHandle::alloc(None::, uvll::UV_PROCESS); match unsafe { - uvll::uv_spawn(loop_.handle, handle, options) + uvll::uv_spawn(loop_.handle, handle, &options) } { 0 => { let process = ~Process { @@ -106,7 +106,7 @@ impl Process { } extern fn on_exit(handle: *uvll::uv_process_t, - exit_status: libc::c_int, + exit_status: i64, term_signal: libc::c_int) { let p: &mut Process = unsafe { UvHandle::from_uv_handle(&handle) }; diff --git a/src/librustuv/stream.rs b/src/librustuv/stream.rs index b9ccacf4df707..08b307700c7cd 100644 --- a/src/librustuv/stream.rs +++ b/src/librustuv/stream.rs @@ -135,17 +135,18 @@ impl StreamWatcher { // This allocation callback expects to be invoked once and only once. It will // unwrap the buffer in the ReadContext stored in the stream and return it. This // will fail if it is called more than once. -extern fn alloc_cb(stream: *uvll::uv_stream_t, _hint: size_t) -> Buf { +extern fn alloc_cb(stream: *uvll::uv_stream_t, _hint: size_t, buf: *mut Buf) { uvdebug!("alloc_cb"); - let rcx: &mut ReadContext = unsafe { - cast::transmute(uvll::get_data_for_uv_handle(stream)) - }; - rcx.buf.take().expect("stream alloc_cb called more than once") + unsafe { + let rcx: &mut ReadContext = + cast::transmute(uvll::get_data_for_uv_handle(stream)); + *buf = rcx.buf.take().expect("stream alloc_cb called more than once"); + } } // When a stream has read some data, we will always forcibly stop reading and // return all the data read (even if it didn't fill the whole buffer). 
-extern fn read_cb(handle: *uvll::uv_stream_t, nread: ssize_t, _buf: Buf) { +extern fn read_cb(handle: *uvll::uv_stream_t, nread: ssize_t, _buf: *Buf) { uvdebug!("read_cb {}", nread); assert!(nread != uvll::ECANCELED as ssize_t); let rcx: &mut ReadContext = unsafe { diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 58d182a22c30e..4183ce4309eeb 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -206,15 +206,16 @@ impl uv_stat_t { pub type uv_idle_cb = extern "C" fn(handle: *uv_idle_t, status: c_int); pub type uv_alloc_cb = extern "C" fn(stream: *uv_stream_t, - suggested_size: size_t) -> uv_buf_t; + suggested_size: size_t, + buf: *mut uv_buf_t); pub type uv_read_cb = extern "C" fn(stream: *uv_stream_t, nread: ssize_t, - buf: uv_buf_t); + buf: *uv_buf_t); pub type uv_udp_send_cb = extern "C" fn(req: *uv_udp_send_t, status: c_int); pub type uv_udp_recv_cb = extern "C" fn(handle: *uv_udp_t, nread: ssize_t, - buf: uv_buf_t, + buf: *uv_buf_t, addr: *sockaddr, flags: c_uint); pub type uv_close_cb = extern "C" fn(handle: *uv_handle_t); @@ -234,16 +235,13 @@ pub type uv_getaddrinfo_cb = extern "C" fn(req: *uv_getaddrinfo_t, status: c_int, res: *addrinfo); pub type uv_exit_cb = extern "C" fn(handle: *uv_process_t, - exit_status: c_int, + exit_status: i64, term_signal: c_int); pub type uv_signal_cb = extern "C" fn(handle: *uv_signal_t, signum: c_int); pub type uv_fs_cb = extern "C" fn(req: *uv_fs_t); pub type sockaddr = c_void; -pub type sockaddr_in = c_void; -pub type sockaddr_in6 = c_void; -pub type sockaddr_storage = c_void; #[cfg(unix)] pub type socklen_t = c_int; @@ -420,86 +418,12 @@ pub unsafe fn loop_new() -> *c_void { return rust_uv_loop_new(); } -pub unsafe fn udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_bind(server, addr, flags); -} - -pub unsafe fn udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_bind6(server, addr, flags); -} - -pub unsafe fn udp_send(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], - addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - let buf_ptr = vec::raw::to_ptr(buf_in); - let buf_cnt = buf_in.len() as i32; - return rust_uv_udp_send(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); -} - -pub unsafe fn udp_send6(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], - addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - let buf_ptr = vec::raw::to_ptr(buf_in); - let buf_cnt = buf_in.len() as i32; - return rust_uv_udp_send6(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); -} - pub unsafe fn get_udp_handle_from_send_req(send_req: *uv_udp_send_t) -> *uv_udp_t { #[fixed_stack_segment]; #[inline(never)]; return rust_uv_get_udp_handle_from_send_req(send_req); } -pub unsafe fn udp_getsockname(handle: *uv_udp_t, name: *sockaddr_storage) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_udp_getsockname(handle, name); -} - -pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, - addr_ptr: *sockaddr_in, after_connect_cb: uv_connect_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr); -} - -pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, - addr_ptr: *sockaddr_in6, after_connect_cb: 
uv_connect_cb) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr); -} - -pub unsafe fn tcp_bind(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_bind(tcp_server_ptr, addr_ptr); -} - -pub unsafe fn tcp_bind6(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in6) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_bind6(tcp_server_ptr, addr_ptr); -} - -pub unsafe fn tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_storage) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_getpeername(tcp_handle_ptr, name); -} - -pub unsafe fn tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_storage) -> c_int { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_tcp_getsockname(handle, name); -} - pub unsafe fn uv_write(req: *uv_write_t, stream: *uv_stream_t, buf_in: &[uv_buf_t], @@ -513,67 +437,6 @@ pub unsafe fn uv_write(req: *uv_write_t, return uv_write(req, stream, buf_ptr, buf_cnt, cb); } -pub unsafe fn is_ip4_addr(addr: *sockaddr) -> bool { - #[fixed_stack_segment]; #[inline(never)]; - - match rust_uv_is_ipv4_sockaddr(addr) { 0 => false, _ => true } -} - -pub unsafe fn is_ip6_addr(addr: *sockaddr) -> bool { - #[fixed_stack_segment]; #[inline(never)]; - - match rust_uv_is_ipv6_sockaddr(addr) { 0 => false, _ => true } -} - -pub unsafe fn malloc_ip4_addr(ip: &str, port: int) -> *sockaddr_in { - #[fixed_stack_segment]; #[inline(never)]; - do ip.with_c_str |ip_buf| { - rust_uv_ip4_addrp(ip_buf as *u8, port as libc::c_int) - } -} -pub unsafe fn malloc_ip6_addr(ip: &str, port: int) -> *sockaddr_in6 { - #[fixed_stack_segment]; #[inline(never)]; - do ip.with_c_str |ip_buf| { - rust_uv_ip6_addrp(ip_buf as *u8, port as libc::c_int) - } -} - -pub unsafe fn malloc_sockaddr_storage() -> *sockaddr_storage { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_malloc_sockaddr_storage() -} - -pub unsafe fn free_sockaddr_storage(ss: *sockaddr_storage) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_free_sockaddr_storage(ss); -} - -pub unsafe fn free_ip4_addr(addr: *sockaddr_in) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_free_ip4_addr(addr); -} - -pub unsafe fn free_ip6_addr(addr: *sockaddr_in6) { - #[fixed_stack_segment]; #[inline(never)]; - - rust_uv_free_ip6_addr(addr); -} - -pub unsafe fn ip4_port(addr: *sockaddr_in) -> c_uint { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_ip4_port(addr); -} - -pub unsafe fn ip6_port(addr: *sockaddr_in6) -> c_uint { - #[fixed_stack_segment]; #[inline(never)]; - - return rust_uv_ip6_port(addr); -} - pub unsafe fn process_pid(p: *uv_process_t) -> c_int { #[fixed_stack_segment]; #[inline(never)]; return rust_uv_process_pid(p); @@ -685,36 +548,19 @@ pub unsafe fn guess_handle(handle: c_int) -> c_int { extern { fn rust_uv_loop_new() -> *c_void; + // dealing with sockaddr things + pub fn rust_sockaddr_size() -> c_int; + pub fn rust_malloc_ip4_addr(s: *c_char, port: c_int) -> *sockaddr; + pub fn rust_malloc_ip6_addr(s: *c_char, port: c_int) -> *sockaddr; + pub fn rust_ip4_port(src: *sockaddr) -> c_uint; + pub fn rust_ip6_port(src: *sockaddr) -> c_uint; + pub fn rust_is_ipv4_sockaddr(addr: *sockaddr) -> c_int; + pub fn rust_is_ipv6_sockaddr(addr: *sockaddr) -> c_int; + fn rust_uv_handle_type_max() -> uintptr_t; fn rust_uv_req_type_max() -> uintptr_t; - fn rust_uv_ip4_addrp(ip: *u8, port: c_int) -> 
*sockaddr_in; - fn rust_uv_ip6_addrp(ip: *u8, port: c_int) -> *sockaddr_in6; - fn rust_uv_free_ip4_addr(addr: *sockaddr_in); - fn rust_uv_free_ip6_addr(addr: *sockaddr_in6); - fn rust_uv_ip4_port(src: *sockaddr_in) -> c_uint; - fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint; - fn rust_uv_tcp_connect(req: *uv_connect_t, handle: *uv_tcp_t, - cb: uv_connect_cb, - addr: *sockaddr_in) -> c_int; - fn rust_uv_tcp_bind(tcp_server: *uv_tcp_t, addr: *sockaddr_in) -> c_int; - fn rust_uv_tcp_connect6(req: *uv_connect_t, handle: *uv_tcp_t, - cb: uv_connect_cb, - addr: *sockaddr_in6) -> c_int; - fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int; - fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_storage) -> c_int; - fn rust_uv_tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_storage) -> c_int; - fn rust_uv_udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int; - fn rust_uv_udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int; - fn rust_uv_udp_send(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, - buf_cnt: c_int, addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int; - fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, - buf_cnt: c_int, addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int; fn rust_uv_get_udp_handle_from_send_req(req: *uv_udp_send_t) -> *uv_udp_t; - fn rust_uv_udp_getsockname(handle: *uv_udp_t, name: *sockaddr_storage) -> c_int; - fn rust_uv_is_ipv4_sockaddr(addr: *sockaddr) -> c_int; - fn rust_uv_is_ipv6_sockaddr(addr: *sockaddr) -> c_int; - fn rust_uv_malloc_sockaddr_storage() -> *sockaddr_storage; - fn rust_uv_free_sockaddr_storage(ss: *sockaddr_storage); + fn rust_uv_populate_uv_stat(req_in: *uv_fs_t, stat_out: *uv_stat_t); fn rust_uv_get_result_from_fs_req(req: *uv_fs_t) -> c_int; fn rust_uv_get_ptr_from_fs_req(req: *uv_fs_t) -> *libc::c_void; @@ -768,17 +614,27 @@ externfn!(fn uv_async_send(a: *uv_async_t)) // tcp bindings externfn!(fn uv_tcp_init(l: *uv_loop_t, h: *uv_tcp_t) -> c_int) -externfn!(fn uv_ip4_name(src: *sockaddr_in, dst: *c_char, +externfn!(fn uv_tcp_connect(c: *uv_connect_t, h: *uv_tcp_t, + addr: *sockaddr, cb: uv_connect_cb) -> c_int) +externfn!(fn uv_tcp_bind(t: *uv_tcp_t, addr: *sockaddr) -> c_int) +externfn!(fn uv_ip4_name(src: *sockaddr, dst: *c_char, size: size_t) -> c_int) -externfn!(fn uv_ip6_name(src: *sockaddr_in6, dst: *c_char, +externfn!(fn uv_ip6_name(src: *sockaddr, dst: *c_char, size: size_t) -> c_int) externfn!(fn uv_tcp_nodelay(h: *uv_tcp_t, enable: c_int) -> c_int) externfn!(fn uv_tcp_keepalive(h: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int) externfn!(fn uv_tcp_simultaneous_accepts(h: *uv_tcp_t, enable: c_int) -> c_int) +externfn!(fn uv_tcp_getsockname(h: *uv_tcp_t, name: *sockaddr, + len: *mut c_int) -> c_int) +externfn!(fn uv_tcp_getpeername(h: *uv_tcp_t, name: *sockaddr, + len: *mut c_int) -> c_int) +externfn!(fn uv_ip4_addr(ip: *c_char, port: c_int, addr: *sockaddr) -> c_int) +externfn!(fn uv_ip6_addr(ip: *c_char, port: c_int, addr: *sockaddr) -> c_int) // udp bindings externfn!(fn uv_udp_init(l: *uv_loop_t, h: *uv_udp_t) -> c_int) +externfn!(fn uv_udp_bind(h: *uv_udp_t, addr: *sockaddr, flags: c_uint) -> c_int) externfn!(fn uv_udp_recv_start(server: *uv_udp_t, on_alloc: uv_alloc_cb, on_recv: uv_udp_recv_cb) -> c_int) @@ -790,6 +646,22 @@ externfn!(fn uv_udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int) externfn!(fn uv_udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int) externfn!(fn 
uv_udp_set_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int) externfn!(fn uv_udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int) +externfn!(fn uv_udp_getsockname(h: *uv_udp_t, name: *sockaddr, + len: *mut c_int) -> c_int) + +pub unsafe fn uv_udp_send(req: *uv_udp_send_t, + handle: *uv_udp_t, + buf_in: &[uv_buf_t], + addr: *sockaddr, + cb: uv_udp_send_cb) -> c_int { + externfn!(fn uv_udp_send(req: *uv_write_t, stream: *uv_stream_t, + buf_in: *uv_buf_t, buf_cnt: c_int, addr: *sockaddr, + cb: uv_udp_send_cb) -> c_int) + + let buf_ptr = vec::raw::to_ptr(buf_in); + let buf_cnt = buf_in.len() as i32; + return uv_udp_send(req, handle, buf_ptr, buf_cnt, addr, cb); +} // timer bindings externfn!(fn uv_timer_init(l: *uv_loop_t, t: *uv_timer_t) -> c_int) @@ -853,7 +725,7 @@ externfn!(fn uv_freeaddrinfo(ai: *addrinfo)) // process spawning externfn!(fn uv_spawn(loop_ptr: *uv_loop_t, outptr: *uv_process_t, - options: uv_process_options_t) -> c_int) + options: *uv_process_options_t) -> c_int) externfn!(fn uv_process_kill(p: *uv_process_t, signum: c_int) -> c_int) // pipes diff --git a/src/libuv b/src/libuv index d88cf5652a1af..c6ecf97aafc85 160000 --- a/src/libuv +++ b/src/libuv @@ -1 +1 @@ -Subproject commit d88cf5652a1afb23939da0bae86c70ec521b9921 +Subproject commit c6ecf97aafc858c2ad1089fb78da6c586d61d8b6 diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index 6f619431ad711..280b016af10c2 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -36,96 +36,11 @@ rust_uv_loop_set_data(uv_loop_t* loop, void* data) { loop->data = data; } -extern "C" int -rust_uv_tcp_connect(uv_connect_t* connect_ptr, - uv_tcp_t* tcp_ptr, - uv_connect_cb cb, - sockaddr_in* addr_ptr) { - // FIXME ref #2064 - sockaddr_in addr = *addr_ptr; - int result = uv_tcp_connect(connect_ptr, tcp_ptr, addr, cb); - return result; -} - -extern "C" int -rust_uv_tcp_bind(uv_tcp_t* tcp_server, sockaddr_in* addr_ptr) { - // FIXME ref #2064 - sockaddr_in addr = *addr_ptr; - return uv_tcp_bind(tcp_server, addr); -} -extern "C" int -rust_uv_tcp_connect6(uv_connect_t* connect_ptr, - uv_tcp_t* tcp_ptr, - uv_connect_cb cb, - sockaddr_in6* addr_ptr) { - // FIXME ref #2064 - sockaddr_in6 addr = *addr_ptr; - int result = uv_tcp_connect6(connect_ptr, tcp_ptr, addr, cb); - return result; -} - -extern "C" int -rust_uv_tcp_bind6 -(uv_tcp_t* tcp_server, sockaddr_in6* addr_ptr) { - // FIXME ref #2064 - sockaddr_in6 addr = *addr_ptr; - return uv_tcp_bind6(tcp_server, addr); -} - -extern "C" int -rust_uv_tcp_getpeername -(uv_tcp_t* handle, sockaddr_storage* name) { - // sockaddr_storage is big enough to hold either - // sockaddr_in or sockaddr_in6 - int namelen = sizeof(sockaddr_in); - return uv_tcp_getpeername(handle, (sockaddr*)name, &namelen); -} - -extern "C" int -rust_uv_tcp_getsockname -(uv_tcp_t* handle, sockaddr_storage* name) { - // sockaddr_storage is big enough to hold either - // sockaddr_in or sockaddr_in6 - int namelen = sizeof(sockaddr_storage); - return uv_tcp_getsockname(handle, (sockaddr*)name, &namelen); -} - -extern "C" int -rust_uv_udp_bind(uv_udp_t* server, sockaddr_in* addr_ptr, unsigned flags) { - return uv_udp_bind(server, *addr_ptr, flags); -} - -extern "C" int -rust_uv_udp_bind6(uv_udp_t* server, sockaddr_in6* addr_ptr, unsigned flags) { - return uv_udp_bind6(server, *addr_ptr, flags); -} - -extern "C" int -rust_uv_udp_send(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, - int buf_cnt, sockaddr_in* addr_ptr, uv_udp_send_cb cb) { - return uv_udp_send(req, handle, buf_in, buf_cnt, *addr_ptr, cb); -} - -extern "C" int 
-rust_uv_udp_send6(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, - int buf_cnt, sockaddr_in6* addr_ptr, uv_udp_send_cb cb) { - return uv_udp_send6(req, handle, buf_in, buf_cnt, *addr_ptr, cb); -} - extern "C" uv_udp_t* rust_uv_get_udp_handle_from_send_req(uv_udp_send_t* send_req) { return send_req->handle; } -extern "C" int -rust_uv_udp_getsockname -(uv_udp_t* handle, sockaddr_storage* name) { - // sockaddr_storage is big enough to hold either - // sockaddr_in or sockaddr_in6 - int namelen = sizeof(sockaddr_storage); - return uv_udp_getsockname(handle, (sockaddr*)name, &namelen); -} - extern "C" uv_stream_t* rust_uv_get_stream_handle_from_connect_req(uv_connect_t* connect) { return connect->handle; @@ -171,94 +86,52 @@ rust_uv_set_data_for_req(uv_req_t* req, void* data) { req->data = data; } -extern "C" struct sockaddr_in -rust_uv_ip4_addr(const char* ip, int port) { - struct sockaddr_in addr = uv_ip4_addr(ip, port); - return addr; -} -extern "C" struct sockaddr_in6 -rust_uv_ip6_addr(const char* ip, int port) { - return uv_ip6_addr(ip, port); -} - -extern "C" struct sockaddr_in* -rust_uv_ip4_addrp(const char* ip, int port) { - struct sockaddr_in addr = uv_ip4_addr(ip, port); - struct sockaddr_in *addrp = (sockaddr_in*)malloc(sizeof(struct sockaddr_in)); - assert(addrp); - memcpy(addrp, &addr, sizeof(struct sockaddr_in)); - return addrp; -} -extern "C" struct sockaddr_in6* -rust_uv_ip6_addrp(const char* ip, int port) { - struct sockaddr_in6 addr = uv_ip6_addr(ip, port); - struct sockaddr_in6 *addrp = (sockaddr_in6*)malloc(sizeof(struct sockaddr_in6)); - assert(addrp); - memcpy(addrp, &addr, sizeof(struct sockaddr_in6)); - return addrp; -} - -extern "C" struct sockaddr_storage * -rust_uv_malloc_sockaddr_storage() { - struct sockaddr_storage *ss = (sockaddr_storage *)malloc(sizeof(struct sockaddr_storage)); - return ss; +extern "C" int +rust_sockaddr_size() { + return sizeof(struct sockaddr_storage); } -extern "C" void -rust_uv_free_sockaddr_storage(struct sockaddr_storage *ss) { - free(ss); +extern "C" struct sockaddr* +rust_malloc_ip4_addr(char *name, int port) { + struct sockaddr_in *addr = (struct sockaddr_in*) malloc(sizeof(struct sockaddr_in)); + memset(addr, 0, sizeof(struct sockaddr_in)); + assert(addr != NULL); + addr->sin_port = htons(port); + assert(uv_inet_pton(AF_INET, name, &addr->sin_addr) == 0); + addr->sin_family = AF_INET; + return (struct sockaddr*) addr; } -extern "C" void -rust_uv_free_ip4_addr(sockaddr_in *addrp) { - free(addrp); +extern "C" struct sockaddr* +rust_malloc_ip6_addr(char *name, int port) { + struct sockaddr_in6 *addr = (struct sockaddr_in6*) malloc(sizeof(struct sockaddr_in6)); + memset(addr, 0, sizeof(struct sockaddr)); + assert(addr != NULL); + addr->sin6_port = htons(port); + assert(uv_inet_pton(AF_INET6, name, &addr->sin6_addr) == 0); + addr->sin6_family = AF_INET6; + return (struct sockaddr*) addr; } -extern "C" void -rust_uv_free_ip6_addr(sockaddr_in6 *addrp) { - free(addrp); -} extern "C" unsigned int -rust_uv_ip4_port(struct sockaddr_in* src) { +rust_ip4_port(struct sockaddr_in* src) { return ntohs(src->sin_port); } extern "C" unsigned int -rust_uv_ip6_port(struct sockaddr_in6* src) { +rust_ip6_port(struct sockaddr_in6* src) { return ntohs(src->sin6_port); } extern "C" int -rust_uv_is_ipv4_sockaddr(sockaddr* addr) { +rust_is_ipv4_sockaddr(sockaddr* addr) { return addr->sa_family == AF_INET; } extern "C" int -rust_uv_is_ipv6_sockaddr(sockaddr* addr) { +rust_is_ipv6_sockaddr(sockaddr* addr) { return addr->sa_family == AF_INET6; } -extern "C" 
bool -rust_uv_is_ipv4_addrinfo(addrinfo* input) { - return input->ai_family == AF_INET; -} - -extern "C" bool -rust_uv_is_ipv6_addrinfo(addrinfo* input) { - return input->ai_family == AF_INET6; -} -extern "C" addrinfo* -rust_uv_get_next_addrinfo(addrinfo* input) { - return input->ai_next; -} -extern "C" sockaddr_in* -rust_uv_addrinfo_as_sockaddr_in(addrinfo* input) { - return (sockaddr_in*)input->ai_addr; -} -extern "C" sockaddr_in6* -rust_uv_addrinfo_as_sockaddr_in6(addrinfo* input) { - return (sockaddr_in6*)input->ai_addr; -} - extern "C" uintptr_t rust_uv_handle_type_max() { return UV_HANDLE_TYPE_MAX; From 86a321b65dcc5253f61202b2fdaac41f275344ce Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 7 Nov 2013 20:13:25 -0800 Subject: [PATCH 25/27] Another round of test fixes from previous commits --- mk/rt.mk | 2 +- src/librustuv/file.rs | 41 +++++++------- src/librustuv/idle.rs | 9 +++ src/librustuv/lib.rs | 22 +++++++- src/librustuv/net.rs | 50 ++++++++++++++++ src/librustuv/pipe.rs | 88 +++++++++++++++++++++++++++++ src/librustuv/process.rs | 23 +++----- src/librustuv/timer.rs | 78 ++++++++++++++++++++++++- src/librustuv/uvll.rs | 4 +- src/libstd/rt/io/mod.rs | 6 +- src/libstd/rt/io/native/file.rs | 16 ++++-- src/libstd/rt/io/native/process.rs | 6 +- src/libstd/rt/io/native/stdio.rs | 8 +-- src/libstd/rt/io/timer.rs | 6 +- src/libstd/rt/macros.rs | 5 +- src/libstd/run.rs | 4 +- src/rt/rust_uv.cpp | 6 +- src/test/run-pass/closure-reform.rs | 3 +- src/test/run-pass/rtio-processes.rs | 10 ++-- 19 files changed, 312 insertions(+), 75 deletions(-) diff --git a/mk/rt.mk b/mk/rt.mk index 55187fad49db0..39679cbed6961 100644 --- a/mk/rt.mk +++ b/mk/rt.mk @@ -218,7 +218,7 @@ $$(LIBUV_MAKEFILE_$(1)): $$(LIBUV_DEPS) ifdef CFG_WINDOWSY_$(1) $$(LIBUV_LIB_$(1)): $$(LIBUV_DEPS) $$(Q)$$(MAKE) -C $$(S)src/libuv -f Makefile.mingw \ - CFLAGS="$$(CFG_GCCISH_CFLAGS) $$(LIBUV_FLAGS_$$(HOST_$(1))) $$(SNAP_DEFINES)" \ + CC="$$(CC) $$(CFG_GCCISH_CFLAGS) $$(LIBUV_FLAGS_$$(HOST_$(1))) $$(SNAP_DEFINES)" \ AR="$$(AR_$(1))" \ V=$$(VERBOSE) $$(Q)cp $$(S)src/libuv/libuv.a $$@ diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index bdb1429f5b625..a5848194d05bb 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -12,7 +12,7 @@ use std::c_str::CString; use std::c_str; use std::cast::transmute; use std::cast; -use std::libc::{c_int, c_char, c_void, c_uint}; +use std::libc::{c_int, c_char, c_void, size_t}; use std::libc; use std::rt::BlockedTask; use std::rt::io::{FileStat, IoError}; @@ -20,6 +20,7 @@ use std::rt::io; use std::rt::local::Local; use std::rt::rtio; use std::rt::sched::{Scheduler, SchedHandle}; +use std::task; use std::vec; use super::{Loop, UvError, uv_error_to_io_error, wait_until_woken_after}; @@ -79,7 +80,7 @@ impl FsRequest { execute_nop(|req, cb| unsafe { uvll::uv_fs_write(loop_.handle, req, fd, vec::raw::to_ptr(buf) as *c_void, - buf.len() as c_uint, offset, cb) + buf.len() as size_t, offset, cb) }) } @@ -89,7 +90,7 @@ impl FsRequest { do execute(|req, cb| unsafe { uvll::uv_fs_read(loop_.handle, req, fd, vec::raw::to_ptr(buf) as *c_void, - buf.len() as c_uint, offset, cb) + buf.len() as size_t, offset, cb) }).map |req| { req.get_result() as int } @@ -297,24 +298,26 @@ impl Drop for FsRequest { fn execute(f: &fn(*uvll::uv_fs_t, uvll::uv_fs_cb) -> c_int) -> Result { - let mut req = FsRequest { - fired: false, - req: unsafe { uvll::malloc_req(uvll::UV_FS) } - }; - return match f(req.req, fs_cb) { - 0 => { - req.fired = true; - let mut slot = None; - do 
wait_until_woken_after(&mut slot) { - unsafe { uvll::set_data_for_req(req.req, &slot) } - } - match req.get_result() { - n if n < 0 => Err(UvError(n)), - _ => Ok(req), + return do task::unkillable { + let mut req = FsRequest { + fired: false, + req: unsafe { uvll::malloc_req(uvll::UV_FS) } + }; + match f(req.req, fs_cb) { + 0 => { + req.fired = true; + let mut slot = None; + do wait_until_woken_after(&mut slot) { + unsafe { uvll::set_data_for_req(req.req, &slot) } + } + match req.get_result() { + n if n < 0 => Err(UvError(n)), + _ => Ok(req), + } } - } - n => Err(UvError(n)) + n => Err(UvError(n)) + } }; extern fn fs_cb(req: *uvll::uv_fs_t) { diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index 83fc53dce1cd7..80481498881c4 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -126,6 +126,15 @@ mod test { tube.recv(); } + #[test] #[should_fail] + fn smoke_fail() { + let tube = Tube::new(); + let cb = ~MyCallback(tube.clone(), 1); + let mut idle = IdleWatcher::new(local_loop(), cb as ~Callback); + idle.resume(); + fail!(); + } + #[test] fn fun_combinations_of_methods() { let mut tube = Tube::new(); diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 7c84ccb8f2cd7..edb1953b9b1c3 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -154,6 +154,26 @@ pub trait UvHandle { } } +pub struct ForbidSwitch { + msg: &'static str, + sched: uint, +} + +impl ForbidSwitch { + fn new(s: &'static str) -> ForbidSwitch { + ForbidSwitch { + msg: s, sched: Local::borrow(|s: &mut Scheduler| s.sched_id()) + } + } +} + +impl Drop for ForbidSwitch { + fn drop(&mut self) { + assert!(self.sched == Local::borrow(|s: &mut Scheduler| s.sched_id()), + "didnt want a scheduler switch: {}", self.msg); + } +} + pub struct ForbidUnwind { msg: &'static str, failing_before: bool, @@ -170,7 +190,7 @@ impl ForbidUnwind { impl Drop for ForbidUnwind { fn drop(&mut self) { assert!(self.failing_before == task::failing(), - "failing sadface {}", self.msg); + "didnt want an unwind during: {}", self.msg); } } diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index 10b8f50e387ce..0fc87e4e4fa99 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -1168,6 +1168,56 @@ mod test { } } + #[should_fail] #[test] + fn tcp_listener_fail_cleanup() { + let addr = next_test_ip4(); + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let _w = w.listen().unwrap(); + fail!(); + } + + #[should_fail] #[test] + fn tcp_stream_fail_cleanup() { + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + let addr = next_test_ip4(); + + do task::spawn_unlinked { // please no linked failure + let w = TcpListener::bind(local_loop(), addr).unwrap(); + let mut w = w.listen().unwrap(); + chan.take().send(()); + w.accept(); + } + port.recv(); + let _w = TcpWatcher::connect(local_loop(), addr).unwrap(); + fail!(); + } + + #[should_fail] #[test] + fn udp_listener_fail_cleanup() { + let addr = next_test_ip4(); + let _w = UdpWatcher::bind(local_loop(), addr).unwrap(); + fail!(); + } + + #[should_fail] #[test] + fn udp_fail_other_task() { + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + + // force the handle to be created on a different scheduler, failure in + // the original task will force a homing operation back to this + // scheduler. 
+ do task::spawn_sched(task::SingleThreaded) { + let w = UdpWatcher::bind(local_loop(), addr).unwrap(); + chan.take().send(w); + } + + let _w = port.recv(); + fail!(); + } + #[should_fail] #[test] #[ignore(reason = "linked failure")] diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index 89a86a2ff7dce..1b0f352dc4df1 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -238,3 +238,91 @@ impl RtioUnixAcceptor for PipeAcceptor { impl HomingIO for PipeAcceptor { fn home<'r>(&'r mut self) -> &'r mut SchedHandle { self.listener.home() } } + +#[cfg(test)] +mod tests { + use std::cell::Cell; + use std::comm::oneshot; + use std::rt::rtio::{RtioUnixListener, RtioUnixAcceptor, RtioPipe}; + use std::rt::test::next_test_unix; + use std::task; + + use super::*; + use super::super::local_loop; + + #[test] + fn connect_err() { + match PipeWatcher::connect(local_loop(), &"path/to/nowhere".to_c_str()) { + Ok(*) => fail!(), + Err(*) => {} + } + } + + #[test] + fn bind_err() { + match PipeListener::bind(local_loop(), &"path/to/nowhere".to_c_str()) { + Ok(*) => fail!(), + Err(e) => assert_eq!(e.name(), ~"EACCES"), + } + } + + #[test] + fn bind() { + let p = next_test_unix().to_c_str(); + match PipeListener::bind(local_loop(), &p) { + Ok(*) => {} + Err(*) => fail!(), + } + } + + #[test] #[should_fail] + fn bind_fail() { + let p = next_test_unix().to_c_str(); + let _w = PipeListener::bind(local_loop(), &p).unwrap(); + fail!(); + } + + #[test] + fn connect() { + let path = next_test_unix(); + let path2 = path.clone(); + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + + do spawn { + let p = PipeListener::bind(local_loop(), &path2.to_c_str()).unwrap(); + let mut p = p.listen().unwrap(); + chan.take().send(()); + let mut client = p.accept().unwrap(); + let mut buf = [0]; + assert!(client.read(buf).unwrap() == 1); + assert_eq!(buf[0], 1); + assert!(client.write([2]).is_ok()); + } + port.recv(); + let mut c = PipeWatcher::connect(local_loop(), &path.to_c_str()).unwrap(); + assert!(c.write([1]).is_ok()); + let mut buf = [0]; + assert!(c.read(buf).unwrap() == 1); + assert_eq!(buf[0], 2); + } + + #[test] #[should_fail] + fn connect_fail() { + let path = next_test_unix(); + let path2 = path.clone(); + let (port, chan) = oneshot(); + let chan = Cell::new(chan); + + do task::spawn_unlinked { // plz no linked failure + let p = PipeListener::bind(local_loop(), &path2.to_c_str()).unwrap(); + let mut p = p.listen().unwrap(); + chan.take().send(()); + p.accept(); + } + port.recv(); + let _c = PipeWatcher::connect(local_loop(), &path.to_c_str()).unwrap(); + fail!() + + } +} diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index d0b0d6429b8be..840ae814f35c8 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -77,23 +77,18 @@ impl Process { }; let handle = UvHandle::alloc(None::, uvll::UV_PROCESS); + let process = ~Process { + handle: handle, + home: get_handle_to_current_scheduler!(), + to_wake: None, + exit_status: None, + term_signal: None, + }; match unsafe { uvll::uv_spawn(loop_.handle, handle, &options) } { - 0 => { - let process = ~Process { - handle: handle, - home: get_handle_to_current_scheduler!(), - to_wake: None, - exit_status: None, - term_signal: None, - }; - Ok(process.install()) - } - err => { - unsafe { uvll::free_handle(handle) } - Err(UvError(err)) - } + 0 => Ok(process.install()), + err => Err(UvError(err)), } } }; diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 96cf024639f81..664875dd19903 100644 --- 
a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -16,7 +16,7 @@ use std::rt::rtio::RtioTimer; use std::rt::sched::{Scheduler, SchedHandle}; use uvll; -use super::{Loop, UvHandle, ForbidUnwind}; +use super::{Loop, UvHandle, ForbidUnwind, ForbidSwitch}; use uvio::HomingIO; pub struct TimerWatcher { @@ -100,7 +100,9 @@ impl RtioTimer for TimerWatcher { } } -extern fn timer_cb(handle: *uvll::uv_timer_t, _status: c_int) { +extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) { + let _f = ForbidSwitch::new("timer callback can't switch"); + assert_eq!(status, 0); let timer: &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) }; match timer.action.take_unwrap() { @@ -168,4 +170,76 @@ mod test { timer.sleep(1); timer.sleep(1); } + + #[test] #[should_fail] + fn oneshot_fail() { + let mut timer = TimerWatcher::new(local_loop()); + let _port = timer.oneshot(1); + fail!(); + } + + #[test] #[should_fail] + fn period_fail() { + let mut timer = TimerWatcher::new(local_loop()); + let _port = timer.period(1); + fail!(); + } + + #[test] #[should_fail] + fn normal_fail() { + let _timer = TimerWatcher::new(local_loop()); + fail!(); + } + + #[test] + fn closing_channel_during_drop_doesnt_kill_everything() { + // see issue #10375 + let mut timer = TimerWatcher::new(local_loop()); + let timer_port = Cell::new(timer.period(1000)); + + do spawn { + timer_port.take().try_recv(); + } + + // when we drop the TimerWatcher we're going to destroy the channel, + // which must wake up the task on the other end + } + + #[test] + fn sender_goes_away_oneshot() { + let port = { + let mut timer = TimerWatcher::new(local_loop()); + timer.oneshot(1000) + }; + assert_eq!(port.try_recv(), None); + } + + #[test] + fn sender_goes_away_period() { + let port = { + let mut timer = TimerWatcher::new(local_loop()); + timer.period(1000) + }; + assert_eq!(port.try_recv(), None); + } + + #[test] + fn receiver_goes_away_oneshot() { + let mut timer1 = TimerWatcher::new(local_loop()); + timer1.oneshot(1); + let mut timer2 = TimerWatcher::new(local_loop()); + // while sleeping, the prevous timer should fire and not have its + // callback do something terrible. + timer2.sleep(2); + } + + #[test] + fn receiver_goes_away_period() { + let mut timer1 = TimerWatcher::new(local_loop()); + timer1.period(1); + let mut timer2 = TimerWatcher::new(local_loop()); + // while sleeping, the prevous timer should fire and not have its + // callback do something terrible. 
+ timer2.sleep(2); + } } diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 4183ce4309eeb..5f68ac5e71d0b 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -676,9 +676,9 @@ externfn!(fn uv_fs_open(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, externfn!(fn uv_fs_unlink(loop_ptr: *uv_loop_t, req: *uv_fs_t, path: *c_char, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_write(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, - len: c_uint, offset: i64, cb: uv_fs_cb) -> c_int) + len: size_t, offset: i64, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_read(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, buf: *c_void, - len: c_uint, offset: i64, cb: uv_fs_cb) -> c_int) + len: size_t, offset: i64, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_close(l: *uv_loop_t, req: *uv_fs_t, fd: c_int, cb: uv_fs_cb) -> c_int) externfn!(fn uv_fs_stat(l: *uv_loop_t, req: *uv_fs_t, path: *c_char, diff --git a/src/libstd/rt/io/mod.rs b/src/libstd/rt/io/mod.rs index e8ab4670233e3..ce9504a5b43d9 100644 --- a/src/libstd/rt/io/mod.rs +++ b/src/libstd/rt/io/mod.rs @@ -423,7 +423,11 @@ pub fn ignore_io_error(cb: &fn() -> T) -> T { /// closure if no error occurred. pub fn result(cb: &fn() -> T) -> Result { let mut err = None; - let ret = io_error::cond.trap(|e| err = Some(e)).inside(cb); + let ret = io_error::cond.trap(|e| { + if err.is_none() { + err = Some(e); + } + }).inside(cb); match err { Some(e) => Err(e), None => Ok(ret), diff --git a/src/libstd/rt/io/native/file.rs b/src/libstd/rt/io/native/file.rs index 35057f475cf5a..6d4f29182dda6 100644 --- a/src/libstd/rt/io/native/file.rs +++ b/src/libstd/rt/io/native/file.rs @@ -80,18 +80,20 @@ pub type fd_t = libc::c_int; pub struct FileDesc { priv fd: fd_t, + priv close_on_drop: bool, } impl FileDesc { /// Create a `FileDesc` from an open C file descriptor. /// /// The `FileDesc` will take ownership of the specified file descriptor and - /// close it upon destruction. + /// close it upon destruction if the `close_on_drop` flag is true, otherwise + /// it will not close the file descriptor when this `FileDesc` is dropped. /// /// Note that all I/O operations done on this object will be *blocking*, but /// they do not require the runtime to be active. - pub fn new(fd: fd_t) -> FileDesc { - FileDesc { fd: fd } + pub fn new(fd: fd_t, close_on_drop: bool) -> FileDesc { + FileDesc { fd: fd, close_on_drop: close_on_drop } } } @@ -137,7 +139,9 @@ impl Writer for FileDesc { impl Drop for FileDesc { #[fixed_stack_segment] #[inline(never)] fn drop(&mut self) { - unsafe { libc::close(self.fd); } + if self.close_on_drop { + unsafe { libc::close(self.fd); } + } } } @@ -245,8 +249,8 @@ mod tests { // opening or closing files. 
unsafe { let os::Pipe { input, out } = os::pipe(); - let mut reader = FileDesc::new(input); - let mut writer = FileDesc::new(out); + let mut reader = FileDesc::new(input, true); + let mut writer = FileDesc::new(out, true); writer.write(bytes!("test")); let mut buf = [0u8, ..4]; diff --git a/src/libstd/rt/io/native/process.rs b/src/libstd/rt/io/native/process.rs index 0fa454b94d066..f5c39de1bf44e 100644 --- a/src/libstd/rt/io/native/process.rs +++ b/src/libstd/rt/io/native/process.rs @@ -105,9 +105,9 @@ impl Process { Process { pid: res.pid, handle: res.handle, - input: in_pipe.map(|pipe| file::FileDesc::new(pipe.out)), - output: out_pipe.map(|pipe| file::FileDesc::new(pipe.input)), - error: err_pipe.map(|pipe| file::FileDesc::new(pipe.input)), + input: in_pipe.map(|pipe| file::FileDesc::new(pipe.out, true)), + output: out_pipe.map(|pipe| file::FileDesc::new(pipe.input, true)), + error: err_pipe.map(|pipe| file::FileDesc::new(pipe.input, true)), exit_code: None, } } diff --git a/src/libstd/rt/io/native/stdio.rs b/src/libstd/rt/io/native/stdio.rs index 5661725d77baa..ddfbb9a8f8c28 100644 --- a/src/libstd/rt/io/native/stdio.rs +++ b/src/libstd/rt/io/native/stdio.rs @@ -36,10 +36,8 @@ pub struct StdIn { impl StdIn { /// Duplicates the stdin file descriptor, returning an io::Reader - #[fixed_stack_segment] #[inline(never)] pub fn new() -> StdIn { - let fd = unsafe { libc::dup(libc::STDIN_FILENO) }; - StdIn { fd: file::FileDesc::new(fd) } + StdIn { fd: file::FileDesc::new(libc::STDIN_FILENO, false) } } } @@ -54,10 +52,8 @@ pub struct StdOut { impl StdOut { /// Duplicates the specified file descriptor, returning an io::Writer - #[fixed_stack_segment] #[inline(never)] pub fn new(fd: file::fd_t) -> StdOut { - let fd = unsafe { libc::dup(fd) }; - StdOut { fd: file::FileDesc::new(fd) } + StdOut { fd: file::FileDesc::new(fd, false) } } } diff --git a/src/libstd/rt/io/timer.rs b/src/libstd/rt/io/timer.rs index 36092dfbe34e6..fed6b9daa64cd 100644 --- a/src/libstd/rt/io/timer.rs +++ b/src/libstd/rt/io/timer.rs @@ -160,11 +160,7 @@ mod test { let port = timer.oneshot(100000000000); timer.sleep(1); // this should invalidate the port - let port = Cell::new(port); - let ret = do task::try { - port.take().recv(); - }; - assert!(ret.is_err()); + assert_eq!(port.try_recv(), None); } } diff --git a/src/libstd/rt/macros.rs b/src/libstd/rt/macros.rs index 2c89bfd8c764f..3ef57710344dc 100644 --- a/src/libstd/rt/macros.rs +++ b/src/libstd/rt/macros.rs @@ -42,8 +42,7 @@ macro_rules! rtassert ( macro_rules! 
rtabort ( - ($msg:expr $($arg:tt)*) => ( { - ::rt::util::abort(format!(concat!(file!(), ":", line!(), " ", $msg) - $($arg)*)); + ($($arg:tt)*) => ( { + ::rt::util::abort(format!($($arg)*)); } ) ) diff --git a/src/libstd/run.rs b/src/libstd/run.rs index 74f4ed3d55e4b..fe23944397d87 100644 --- a/src/libstd/run.rs +++ b/src/libstd/run.rs @@ -436,13 +436,13 @@ mod tests { } fn writeclose(fd: c_int, s: &str) { - let mut writer = file::FileDesc::new(fd); + let mut writer = file::FileDesc::new(fd, true); writer.write(s.as_bytes()); } fn readclose(fd: c_int) -> ~str { let mut res = ~[]; - let mut reader = file::FileDesc::new(fd); + let mut reader = file::FileDesc::new(fd, true); let mut buf = [0, ..1024]; loop { match reader.read(buf) { diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index 280b016af10c2..f3be486a25ab2 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -93,8 +93,7 @@ rust_sockaddr_size() { extern "C" struct sockaddr* rust_malloc_ip4_addr(char *name, int port) { - struct sockaddr_in *addr = (struct sockaddr_in*) malloc(sizeof(struct sockaddr_in)); - memset(addr, 0, sizeof(struct sockaddr_in)); + struct sockaddr_in *addr = (struct sockaddr_in*) calloc(1, rust_sockaddr_size()); assert(addr != NULL); addr->sin_port = htons(port); assert(uv_inet_pton(AF_INET, name, &addr->sin_addr) == 0); @@ -104,8 +103,7 @@ rust_malloc_ip4_addr(char *name, int port) { extern "C" struct sockaddr* rust_malloc_ip6_addr(char *name, int port) { - struct sockaddr_in6 *addr = (struct sockaddr_in6*) malloc(sizeof(struct sockaddr_in6)); - memset(addr, 0, sizeof(struct sockaddr)); + struct sockaddr_in6 *addr = (struct sockaddr_in6*) calloc(1, rust_sockaddr_size()); assert(addr != NULL); addr->sin6_port = htons(port); assert(uv_inet_pton(AF_INET6, name, &addr->sin6_addr) == 0); diff --git a/src/test/run-pass/closure-reform.rs b/src/test/run-pass/closure-reform.rs index 18ca64d0f2762..629a807266182 100644 --- a/src/test/run-pass/closure-reform.rs +++ b/src/test/run-pass/closure-reform.rs @@ -67,7 +67,8 @@ pub fn main() { call_that(|x, y| *x + *y - z); call_cramped(|| 1, || unsafe { - cast::transmute(&100) + static a: uint = 100; + cast::transmute(&a) }); // External functions diff --git a/src/test/run-pass/rtio-processes.rs b/src/test/run-pass/rtio-processes.rs index 14595f83ce506..f45889eeb03b6 100644 --- a/src/test/run-pass/rtio-processes.rs +++ b/src/test/run-pass/rtio-processes.rs @@ -23,8 +23,8 @@ // // See #9341 +use std::rt::io; use std::rt::io::process::{Process, ProcessConfig, CreatePipe, Ignored}; -use std::rt::io::{Reader, Writer}; use std::str; #[test] @@ -55,10 +55,10 @@ fn smoke_failure() { cwd: None, io: io, }; - let p = Process::new(args); - assert!(p.is_some()); - let mut p = p.unwrap(); - assert!(p.wait() != 0); + match io::result(|| Process::new(args)) { + Ok(*) => fail!(), + Err(*) => {} + } } #[test] From c5fdd69d3e197cef64a4f29faff5d42a95010647 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 8 Nov 2013 21:59:50 -0800 Subject: [PATCH 26/27] Carefully destroy channels at the right time. When a channel is destroyed, it may attempt scheduler operations which could move a task off of it's I/O scheduler. This is obviously a bad interaction, and some finesse is required to make it work (making destructors run at the right time). 
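Concretely, the pattern is to move the channel-holding state out of the
handle while the task is still homed on its I/O scheduler, and to let that
state drop only after the homing guard has been released. A simplified
sketch of the timer destructor as changed below illustrates the idea (the
comments describe the intent; they are not part of the change itself):

    impl Drop for TimerWatcher {
        fn drop(&mut self) {
            // While homed, stop the timer and detach the action (which owns
            // the channel), but do not drop it yet.
            let _action = {
                let _m = self.fire_homing_missile();
                self.stop();
                self.close_async_();
                self.action.take()
            };
            // The action (and its channel) is dropped here, outside of the
            // homing block, so any scheduling performed by the channel's
            // destructor cannot move the task off its home scheduler at the
            // wrong moment.
        }
    }
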
Closes #10375 --- src/librustuv/signal.rs | 26 +++++++++++++++++++++ src/librustuv/timer.rs | 50 +++++++++++++++++++++++++++++++++-------- 2 files changed, 67 insertions(+), 9 deletions(-) diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index b7a37473fb944..5486cdfc418c3 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -77,3 +77,29 @@ impl Drop for SignalWatcher { self.close_async_(); } } + +#[cfg(test)] +mod test { + use super::*; + use std::cell::Cell; + use super::super::local_loop; + use std::rt::io::signal; + use std::comm::{SharedChan, stream}; + + #[test] + fn closing_channel_during_drop_doesnt_kill_everything() { + // see issue #10375, relates to timers as well. + let (port, chan) = stream(); + let chan = SharedChan::new(chan); + let _signal = SignalWatcher::new(local_loop(), signal::Interrupt, + chan); + + let port = Cell::new(port); + do spawn { + port.take().try_recv(); + } + + // when we drop the SignalWatcher we're going to destroy the channel, + // which must wake up the task on the other end + } +} diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 664875dd19903..7cc41b2a8827f 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -14,6 +14,7 @@ use std::rt::BlockedTask; use std::rt::local::Local; use std::rt::rtio::RtioTimer; use std::rt::sched::{Scheduler, SchedHandle}; +use std::util; use uvll; use super::{Loop, UvHandle, ForbidUnwind, ForbidSwitch}; @@ -82,9 +83,13 @@ impl RtioTimer for TimerWatcher { fn oneshot(&mut self, msecs: u64) -> PortOne<()> { let (port, chan) = oneshot(); - let _m = self.fire_homing_missile(); - self.action = Some(SendOnce(chan)); - self.start(msecs, 0); + // similarly to the destructor, we must drop the previous action outside + // of the homing missile + let _prev_action = { + let _m = self.fire_homing_missile(); + self.start(msecs, 0); + util::replace(&mut self.action, Some(SendOnce(chan))) + }; return port; } @@ -93,8 +98,14 @@ impl RtioTimer for TimerWatcher { let (port, chan) = stream(); let _m = self.fire_homing_missile(); - self.action = Some(SendMany(chan)); - self.start(msecs, msecs); + + // similarly to the destructor, we must drop the previous action outside + // of the homing missile + let _prev_action = { + let _m = self.fire_homing_missile(); + self.start(msecs, msecs); + util::replace(&mut self.action, Some(SendMany(chan))) + }; return port; } @@ -120,16 +131,24 @@ extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) { impl Drop for TimerWatcher { fn drop(&mut self) { - let _m = self.fire_homing_missile(); - self.action = None; - self.stop(); - self.close_async_(); + // note that this drop is a little subtle. Dropping a channel which is + // held internally may invoke some scheduling operations. We can't take + // the channel unless we're on the home scheduler, but once we're on the + // home scheduler we should never move. Hence, we take the timer's + // action item and then move it outside of the homing block. + let _action = { + let _m = self.fire_homing_missile(); + self.stop(); + self.close_async_(); + self.action.take() + }; } } #[cfg(test)] mod test { use super::*; + use std::cell::Cell; use std::rt::rtio::RtioTimer; use super::super::local_loop; @@ -205,6 +224,19 @@ mod test { // which must wake up the task on the other end } + #[test] + fn reset_doesnt_switch_tasks() { + // similar test to the one above. 
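The `closing_channel_during_drop_doesnt_kill_everything` test above reduces to one invariant: dropping the sending half of a channel must wake a task that is blocked on the receiving half. A rough present-day equivalent, using `std::sync::mpsc` and OS threads instead of the old green-task scheduler, and therefore only an approximation of what the test exercises, looks like this:

```rust
// Approximate modern analogue of the new test: a blocked receiver must be
// woken when the sender is dropped, instead of deadlocking.
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (tx, rx) = channel::<()>();
    let handle = thread::spawn(move || {
        // recv() returns Err(RecvError) once every sender has been dropped.
        assert!(rx.recv().is_err());
    });
    drop(tx); // dropping the sending half must wake the blocked thread
    handle.join().unwrap();
}
```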
+ let mut timer = TimerWatcher::new(local_loop()); + let timer_port = Cell::new(timer.period(1000)); + + do spawn { + timer_port.take().try_recv(); + } + + timer.oneshot(1); + } + #[test] fn sender_goes_away_oneshot() { let port = { From e38a89d0b0fcc3b2f5cad600d7b3a16faeb94248 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Sat, 9 Nov 2013 11:02:16 -0800 Subject: [PATCH 27/27] Fix usage of libuv for windows --- .gitmodules | 2 +- src/librustuv/net.rs | 1 + src/librustuv/pipe.rs | 6 ++++++ src/librustuv/process.rs | 2 +- src/librustuv/signal.rs | 29 ++++++++++++----------------- src/librustuv/timer.rs | 33 ++++++++++++++++++++++++++++++--- src/librustuv/tty.rs | 2 +- src/librustuv/uvll.rs | 34 +++++++++++++++++++++++----------- src/libstd/rt/io/stdio.rs | 12 +++++++++++- src/libstd/rt/io/timer.rs | 8 ++------ src/libuv | 2 +- 11 files changed, 89 insertions(+), 42 deletions(-) diff --git a/.gitmodules b/.gitmodules index a861cf7997866..7e997334cecaf 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,7 +4,7 @@ branch = master [submodule "src/libuv"] path = src/libuv - url = https://github.com/joyent/libuv.git + url = https://github.com/alexcrichton/libuv.git branch = master [submodule "src/gyp"] path = src/gyp diff --git a/src/librustuv/net.rs b/src/librustuv/net.rs index 0fc87e4e4fa99..32c9b6c3d1729 100644 --- a/src/librustuv/net.rs +++ b/src/librustuv/net.rs @@ -883,6 +883,7 @@ mod test { } #[test] + #[ignore(cfg(windows))] // FIXME(#10102) server never sees second packet fn test_udp_twice() { let server_addr = next_test_ip4(); let client_addr = next_test_ip4(); diff --git a/src/librustuv/pipe.rs b/src/librustuv/pipe.rs index 1b0f352dc4df1..c123f916ef23f 100644 --- a/src/librustuv/pipe.rs +++ b/src/librustuv/pipe.rs @@ -251,6 +251,7 @@ mod tests { use super::super::local_loop; #[test] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work fn connect_err() { match PipeWatcher::connect(local_loop(), &"path/to/nowhere".to_c_str()) { Ok(*) => fail!(), @@ -259,6 +260,7 @@ mod tests { } #[test] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work fn bind_err() { match PipeListener::bind(local_loop(), &"path/to/nowhere".to_c_str()) { Ok(*) => fail!(), @@ -267,6 +269,7 @@ mod tests { } #[test] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work fn bind() { let p = next_test_unix().to_c_str(); match PipeListener::bind(local_loop(), &p) { @@ -276,6 +279,7 @@ mod tests { } #[test] #[should_fail] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work fn bind_fail() { let p = next_test_unix().to_c_str(); let _w = PipeListener::bind(local_loop(), &p).unwrap(); @@ -283,6 +287,7 @@ mod tests { } #[test] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work fn connect() { let path = next_test_unix(); let path2 = path.clone(); @@ -308,6 +313,7 @@ mod tests { } #[test] #[should_fail] + #[ignore(cfg(windows))] // FIXME(#10386): how windows pipes work fn connect_fail() { let path = next_test_unix(); let path2 = path.clone(); diff --git a/src/librustuv/process.rs b/src/librustuv/process.rs index 840ae814f35c8..7e75515972cb8 100644 --- a/src/librustuv/process.rs +++ b/src/librustuv/process.rs @@ -232,6 +232,6 @@ impl Drop for Process { fn drop(&mut self) { let _m = self.fire_homing_missile(); assert!(self.to_wake.is_none()); - self.close_async_(); + self.close(); } } diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index 5486cdfc418c3..da2e1d8837c45 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -30,26 +30,21 
@@ pub struct SignalWatcher { impl SignalWatcher { pub fn new(loop_: &mut Loop, signum: Signum, channel: SharedChan) -> Result<~SignalWatcher, UvError> { - let handle = UvHandle::alloc(None::, uvll::UV_SIGNAL); + let s = ~SignalWatcher { + handle: UvHandle::alloc(None::, uvll::UV_SIGNAL), + home: get_handle_to_current_scheduler!(), + channel: channel, + signal: signum, + }; assert_eq!(unsafe { - uvll::uv_signal_init(loop_.handle, handle) - + uvll::uv_signal_init(loop_.handle, s.handle) }, 0); - match unsafe { uvll::uv_signal_start(handle, signal_cb, signum as c_int) } { - 0 => { - let s = ~SignalWatcher { - handle: handle, - home: get_handle_to_current_scheduler!(), - channel: channel, - signal: signum, - }; - Ok(s.install()) - } - n => { - unsafe { uvll::free_handle(handle) } - Err(UvError(n)) - } + match unsafe { + uvll::uv_signal_start(s.handle, signal_cb, signum as c_int) + } { + 0 => Ok(s.install()), + n => Err(UvError(n)), } } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 7cc41b2a8827f..0176399030517 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -67,12 +67,27 @@ impl UvHandle for TimerWatcher { impl RtioTimer for TimerWatcher { fn sleep(&mut self, msecs: u64) { - let (_m, sched) = self.fire_homing_missile_sched(); + // As with all of the below functions, we must be extra careful when + // destroying the previous action. If the previous action was a channel, + // destroying it could invoke a context switch. For these situations, + // we must temporarily un-home ourselves, then destroy the action, and + // then re-home again. + let missile = self.fire_homing_missile(); + self.stop(); + let _missile = match util::replace(&mut self.action, None) { + None => missile, // no need to do a homing dance + Some(action) => { + util::ignore(missile); // un-home ourself + util::ignore(action); // destroy the previous action + self.fire_homing_missile() // re-home ourself + } + }; // If the descheduling operation unwinds after the timer has been // started, then we need to call stop on the timer. let _f = ForbidUnwind::new("timer"); + let sched: ~Scheduler = Local::take(); do sched.deschedule_running_task_and_then |_sched, task| { self.action = Some(WakeTask(task)); self.start(msecs, 0); @@ -87,6 +102,7 @@ impl RtioTimer for TimerWatcher { // of the homing missile let _prev_action = { let _m = self.fire_homing_missile(); + self.stop(); self.start(msecs, 0); util::replace(&mut self.action, Some(SendOnce(chan))) }; @@ -97,12 +113,11 @@ impl RtioTimer for TimerWatcher { fn period(&mut self, msecs: u64) -> Port<()> { let (port, chan) = stream(); - let _m = self.fire_homing_missile(); - // similarly to the destructor, we must drop the previous action outside // of the homing missile let _prev_action = { let _m = self.fire_homing_missile(); + self.stop(); self.start(msecs, msecs); util::replace(&mut self.action, Some(SendMany(chan))) }; @@ -236,6 +251,18 @@ mod test { timer.oneshot(1); } + #[test] + fn reset_doesnt_switch_tasks2() { + // similar test to the one above.
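The comment added to `sleep` above describes an un-home, destroy, re-home dance. The control flow can be sketched in today's Rust with a `Mutex` guard standing in for the homing missile and a `String` standing in for the stored action; all of these names are illustrative stand-ins, not the patch's types:

```rust
// Sketch of the conditional re-homing in sleep(): only pay for the
// unlock/relock round trip when there is actually an old action to destroy.
use std::sync::Mutex;

struct Watcher {
    home: Mutex<()>,        // stands in for the home scheduler handle
    action: Option<String>, // stands in for the pending timer action
}

impl Watcher {
    fn sleep(&mut self) {
        let guard = self.home.lock().unwrap();
        let _guard = match self.action.take() {
            None => guard, // nothing to destroy, stay homed
            Some(action) => {
                drop(guard);              // un-home ourselves
                drop(action);             // run the old action's destructor while un-homed
                self.home.lock().unwrap() // re-home before blocking
            }
        };
        // ...the timer would be armed and the task descheduled here...
    }
}

fn main() {
    let mut w = Watcher { home: Mutex::new(()), action: Some("old action".to_string()) };
    w.sleep();
}
```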
+ let mut timer = TimerWatcher::new(local_loop()); + let timer_port = Cell::new(timer.period(1000)); + + do spawn { + timer_port.take().try_recv(); + } + + timer.sleep(1); + } #[test] fn sender_goes_away_oneshot() { diff --git a/src/librustuv/tty.rs b/src/librustuv/tty.rs index 4853973f1a307..d3f001f39312f 100644 --- a/src/librustuv/tty.rs +++ b/src/librustuv/tty.rs @@ -113,6 +113,6 @@ impl HomingIO for TtyWatcher { impl Drop for TtyWatcher { fn drop(&mut self) { let _m = self.fire_homing_missile(); - self.close(); + self.close_async_(); } } diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index 5f68ac5e71d0b..c76d03bfe6c33 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -47,14 +47,14 @@ pub static UNKNOWN: c_int = -4094; pub mod errors { use std::libc::c_int; - pub static EACCES: c_int = -4093; - pub static ECONNREFUSED: c_int = -4079; - pub static ECONNRESET: c_int = -4078; - pub static ENOTCONN: c_int = -4054; - pub static EPIPE: c_int = -4048; - pub static ECONNABORTED: c_int = -4080; - pub static ECANCELED: c_int = -4082; - pub static EBADF: c_int = -4084; + pub static EACCES: c_int = -4092; + pub static ECONNREFUSED: c_int = -4078; + pub static ECONNRESET: c_int = -4077; + pub static ENOTCONN: c_int = -4053; + pub static EPIPE: c_int = -4047; + pub static ECONNABORTED: c_int = -4079; + pub static ECANCELED: c_int = -4081; + pub static EBADF: c_int = -4083; } #[cfg(not(windows))] pub mod errors { @@ -87,19 +87,19 @@ pub static STDIO_WRITABLE_PIPE: c_int = 0x20; #[cfg(unix)] pub type uv_buf_len_t = libc::size_t; #[cfg(windows)] -pub type uv_buf_len_t = u32; +pub type uv_buf_len_t = libc::c_ulong; // see libuv/include/uv-unix.h #[cfg(unix)] pub struct uv_buf_t { base: *u8, - len: libc::size_t, + len: uv_buf_len_t, } // see libuv/include/uv-win.h #[cfg(windows)] pub struct uv_buf_t { - len: u32, + len: uv_buf_len_t, base: *u8, } @@ -544,7 +544,19 @@ pub unsafe fn guess_handle(handle: c_int) -> c_int { // uv_support is the result of compiling rust_uv.cpp +// +// Note that this is in a cfg'd block so it doesn't get linked during testing. +// There's a bit of a conundrum when testing in that we're actually assuming +// that the tests are running in a uv loop, but they were created from the +// statically linked uv to the original rustuv crate. When we create the test +// executable, on some platforms if we re-link against uv, it actually creates +// second copies of everything. We obviously don't want this, so instead of +// dying horribly during testing, we allow all of the test rustuv's references +// to get resolved to the original rustuv crate. #[link_args = "-luv_support -luv"] +#[cfg(not(test))] +extern {} + extern { fn rust_uv_loop_new() -> *c_void; diff --git a/src/libstd/rt/io/stdio.rs b/src/libstd/rt/io/stdio.rs index 674b34639bc90..acc2e11f067e6 100644 --- a/src/libstd/rt/io/stdio.rs +++ b/src/libstd/rt/io/stdio.rs @@ -33,7 +33,8 @@ use result::{Ok, Err}; use rt::io::buffered::LineBufferedWriter; use rt::rtio::{IoFactory, RtioTTY, RtioFileStream, with_local_io, CloseAsynchronously}; -use super::{Reader, Writer, io_error, IoError, OtherIoError}; +use super::{Reader, Writer, io_error, IoError, OtherIoError, + standard_error, EndOfFile}; // And so begins the tale of acquiring a uv handle to a stdio stream on all // platforms in all situations. 
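One easy-to-miss detail in the `uvll.rs` hunk earlier in this patch is that `uv_buf_t` is not the same struct everywhere: libuv puts `len` before `base` on Windows and uses a different integer type for it, which is why the bindings are `cfg`'d per platform. A self-contained illustration of that split in current Rust follows; the field names mirror libuv's, while the exact Rust types chosen here (`usize` for `size_t`, `c_ulong` for `ULONG`) are an assumption for the sketch:

```rust
// The field order and the width of `len` differ per platform, so the FFI
// struct must be declared twice and selected with cfg attributes.
#[cfg(unix)]
#[repr(C)]
#[allow(non_camel_case_types)]
pub struct uv_buf_t {
    pub base: *const u8,
    pub len: usize, // size_t on unix
}

#[cfg(windows)]
#[repr(C)]
#[allow(non_camel_case_types)]
pub struct uv_buf_t {
    pub len: std::os::raw::c_ulong, // ULONG on windows, and it comes first
    pub base: *const u8,
}

fn main() {
    println!("uv_buf_t is {} bytes on this target", std::mem::size_of::<uv_buf_t>());
}
```

Getting either the field order or the width wrong silently corrupts every buffer handed to libuv, which is presumably why the patch spells the Windows type as `libc::c_ulong` rather than hard-coding `u32`.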
Our story begins by splitting the world into two @@ -203,6 +204,15 @@ impl Reader for StdReader { File(ref mut file) => file.read(buf).map(|i| i as uint), }; match ret { + // When reading a piped stdin, libuv will return 0-length reads when + // stdin reaches EOF. For pretty much all other streams it will + // return an actual EOF error, but apparently for stdin it's a + // little different. Hence, here we convert a 0 length read to an + // end-of-file indicator so the caller knows to stop reading. + Ok(0) => { + io_error::cond.raise(standard_error(EndOfFile)); + None + } Ok(amt) => Some(amt as uint), Err(e) => { io_error::cond.raise(e); diff --git a/src/libstd/rt/io/timer.rs b/src/libstd/rt/io/timer.rs index fed6b9daa64cd..b0cf7dee10abb 100644 --- a/src/libstd/rt/io/timer.rs +++ b/src/libstd/rt/io/timer.rs @@ -142,14 +142,10 @@ mod test { fn oneshot_twice() { do run_in_mt_newsched_task { let mut timer = Timer::new().unwrap(); - let port1 = timer.oneshot(100000000000); + let port1 = timer.oneshot(10000); let port = timer.oneshot(1); port.recv(); - let port1 = Cell::new(port1); - let ret = do task::try { - port1.take().recv(); - }; - assert!(ret.is_err()); + assert_eq!(port1.try_recv(), None); } } diff --git a/src/libuv b/src/libuv index c6ecf97aafc85..7ac7e0248b347 160000 --- a/src/libuv +++ b/src/libuv @@ -1 +1 @@ -Subproject commit c6ecf97aafc858c2ad1089fb78da6c586d61d8b6 +Subproject commit 7ac7e0248b34732e9963cdb8e31f7e612d23d14b
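The `stdio.rs` hunk in this patch turns a zero-length read from a piped stdin into an end-of-file condition so that callers stop reading. Modern `std::io` already defines a 0-byte `read` as end-of-stream, so the calling pattern that change enables looks like the loop below; `read_all_stdin` is a made-up helper for illustration, not an API from the patch:

```rust
// Read stdin to the end, treating Ok(0) as EOF rather than spinning forever.
use std::io::{self, Read};

fn read_all_stdin() -> io::Result<Vec<u8>> {
    let mut stdin = io::stdin();
    let mut out = Vec::new();
    let mut buf = [0u8; 1024];
    loop {
        match stdin.read(&mut buf)? {
            // A zero-length read means the other end of the pipe was closed.
            0 => return Ok(out),
            n => out.extend_from_slice(&buf[..n]),
        }
    }
}

fn main() -> io::Result<()> {
    let data = read_all_stdin()?;
    eprintln!("read {} bytes from stdin", data.len());
    Ok(())
}
```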