(*
* Copyright (C) 2020-2021 Anil Madhavapeddy
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*)
let src = Logs.Src.create "eio_linux" ~doc:"Effect-based IO system for Linux/io-uring"
module Log = (val Logs.src_log src : Logs.LOG)
open Eio.Std
module Fiber_context = Eio.Private.Fiber_context
module Ctf = Eio.Private.Ctf
module Suspended = Eio_utils.Suspended
module Zzz = Eio_utils.Zzz
module Lf_queue = Eio_utils.Lf_queue
(* SIGPIPE makes no sense in a modern application. *)
let () = Sys.(set_signal sigpipe Signal_ignore)
type amount = Exactly of int | Upto of int
let system_thread = Ctf.mint_id ()
let unclassified_error e = Eio.Exn.create (Eio.Exn.X e)
let wrap_error code name arg =
let ex = Eio_unix.Unix_error (code, name, arg) in
match code with
| ECONNREFUSED -> Eio.Net.err (Connection_failure (Refused ex))
| ECONNRESET | EPIPE -> Eio.Net.err (Connection_reset ex)
| _ -> unclassified_error ex
let wrap_error_fs code name arg =
let e = Eio_unix.Unix_error (code, name, arg) in
match code with
| Unix.EEXIST -> Eio.Fs.err (Already_exists e)
| Unix.ENOENT -> Eio.Fs.err (Not_found e)
| Unix.EXDEV -> Eio.Fs.err (Permission_denied e)
| _ -> wrap_error code name arg
type _ Effect.t += Close : Unix.file_descr -> int Effect.t
module FD = struct
type t = {
seekable : bool;
close_unix : bool; (* Whether closing this also closes the underlying FD. *)
mutable release_hook : Eio.Switch.hook; (* Use this on close to remove switch's [on_release] hook. *)
mutable fd : [`Open of Unix.file_descr | `Closed]
}
let get_exn op = function
| { fd = `Open fd; _ } -> fd
| { fd = `Closed ; _ } -> invalid_arg (op ^ ": file descriptor used after calling close!")
let get op = function
| { fd = `Open fd; _ } -> Ok fd
| { fd = `Closed ; _ } -> Error (Invalid_argument (op ^ ": file descriptor used after calling close!"))
let is_open = function
| { fd = `Open _; _ } -> true
| { fd = `Closed; _ } -> false
let close t =
Ctf.label "close";
let fd = get_exn "close" t in
t.fd <- `Closed;
Eio.Switch.remove_hook t.release_hook;
if t.close_unix then (
let res = Effect.perform (Close fd) in
if res < 0 then
raise (wrap_error (Uring.error_of_errno res) "close" (string_of_int (Obj.magic fd : int)))
)
let ensure_closed t =
if is_open t then close t
let is_seekable fd =
match Unix.lseek fd 0 Unix.SEEK_CUR with
| (_ : int) -> true
| exception Unix.Unix_error(Unix.ESPIPE, "lseek", "") -> false
let to_unix op t =
let fd = get_exn "to_unix" t in
match op with
| `Peek -> fd
| `Take ->
t.fd <- `Closed;
Eio.Switch.remove_hook t.release_hook;
fd
let of_unix_no_hook ~seekable ~close_unix fd =
{ seekable; close_unix; fd = `Open fd; release_hook = Eio.Switch.null_hook }
let of_unix ~sw ~seekable ~close_unix fd =
let t = of_unix_no_hook ~seekable ~close_unix fd in
t.release_hook <- Switch.on_release_cancellable sw (fun () -> ensure_closed t);
t
let placeholder ~seekable ~close_unix =
{ seekable; close_unix; fd = `Closed; release_hook = Eio.Switch.null_hook }
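(* Note (added for clarity): io_uring treats an offset of -1 as "use (and advance) the
   FD's own file position", which only makes sense for seekable FDs; for pipes and
   sockets we pass 0 instead. *)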
let uring_file_offset t =
if t.seekable then Optint.Int63.minus_one else Optint.Int63.zero
let fstat t =
(* todo: use uring *)
try
let ust = Unix.LargeFile.fstat (get_exn "fstat" t) in
let st_kind : Eio.File.Stat.kind =
match ust.st_kind with
| Unix.S_REG -> `Regular_file
| Unix.S_DIR -> `Directory
| Unix.S_CHR -> `Character_special
| Unix.S_BLK -> `Block_device
| Unix.S_LNK -> `Symbolic_link
| Unix.S_FIFO -> `Fifo
| Unix.S_SOCK -> `Socket
in
Eio.File.Stat.{
dev = ust.st_dev |> Int64.of_int;
ino = ust.st_ino |> Int64.of_int;
kind = st_kind;
perm = ust.st_perm;
nlink = ust.st_nlink |> Int64.of_int;
uid = ust.st_uid |> Int64.of_int;
gid = ust.st_gid |> Int64.of_int;
rdev = ust.st_rdev |> Int64.of_int;
size = ust.st_size |> Optint.Int63.of_int64;
atime = ust.st_atime;
mtime = ust.st_mtime;
ctime = ust.st_ctime;
}
with Unix.Unix_error (code, name, arg) -> raise @@ wrap_error_fs code name arg
end
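(* Illustrative only: a minimal sketch of how the [FD] wrapper above is meant to be used,
   kept inside a comment so this module still compiles unchanged. It assumes code running
   inside this backend's event loop (so the [Close] effect performed by [FD.close] has a
   handler) and an enclosing switch [sw]; the path is made up for the example:
     Switch.run @@ fun sw ->
     let unix_fd = Unix.openfile "/tmp/example" Unix.[O_RDONLY; O_CLOEXEC] 0 in
     let fd = FD.of_unix ~sw ~seekable:(FD.is_seekable unix_fd) ~close_unix:true unix_fd in
     (* ... use [fd] ... *)
     FD.close fd   (* otherwise the switch's [on_release] hook closes it *)
*)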
type _ Eio.Generic.ty += FD : FD.t Eio.Generic.ty
let get_fd_opt t = Eio.Generic.probe t FD
type dir_fd =
| FD of FD.t
| Cwd (* Confined to "." *)
| Fs (* Unconfined "."; also allows absolute paths *)
type _ Eio.Generic.ty += Dir_fd : dir_fd Eio.Generic.ty
let get_dir_fd_opt t = Eio.Generic.probe t Dir_fd
type rw_req = {
op : [`R|`W];
file_offset : Optint.Int63.t;
fd : FD.t;
len : amount;
buf : Uring.Region.chunk;
mutable cur_off : int;
action : int Suspended.t;
}
(* Type of user-data attached to jobs. *)
type io_job =
| Read : rw_req -> io_job
| Job_no_cancel : int Suspended.t -> io_job
| Cancel_job : io_job
| Job : int Suspended.t -> io_job (* A negative result indicates error, and may report cancellation *)
| Write : rw_req -> io_job
| Job_fn : 'a Suspended.t * (int -> [`Exit_scheduler]) -> io_job
(* When done, remove the cancel_fn from [Suspended.t] and call the callback (unless cancelled). *)
type runnable =
| IO : runnable
| Thread : 'a Suspended.t * 'a -> runnable
| Failed_thread : 'a Suspended.t * exn -> runnable
type t = {
uring: io_job Uring.t;
mem: Uring.Region.t option;
io_q: (t -> unit) Queue.t; (* waiting for room on [uring] *)
mem_q : Uring.Region.chunk Suspended.t Queue.t;
(* The queue of runnable fibers ready to be resumed. Note: other domains can also add work items here. *)
run_q : runnable Lf_queue.t;
(* When adding to [run_q] from another domain, this domain may be sleeping and so won't see the event.
In that case, [need_wakeup = true] and you must signal using [eventfd]. *)
eventfd : FD.t;
(* If [false], the main thread will check [run_q] before sleeping again
(possibly because an event has been or will be sent to [eventfd]).
It can therefore be set to [false] in either of these cases:
- By the receiving thread because it will check [run_q] before sleeping, or
- By the sending thread because it will signal the main thread later *)
need_wakeup : bool Atomic.t;
sleep_q: Zzz.t;
}
let wake_buffer =
let b = Bytes.create 8 in
Bytes.set_int64_ne b 0 1L;
b
(* This can be called from any systhread (including ones not running Eio),
and also from signal handlers or GC finalizers. It must not take any locks. *)
let wakeup t =
Atomic.set t.need_wakeup false; (* [t] will check [run_q] after getting the event below *)
match t.eventfd.fd with
| `Closed -> () (* Domain has shut down (presumably after handling the event) *)
| `Open fd ->
let sent = Unix.single_write fd wake_buffer 0 8 in
assert (sent = 8)
(* Safe to call from anywhere (other systhreads, domains, signal handlers, GC finalizers) *)
let enqueue_thread st k x =
Lf_queue.push st.run_q (Thread (k, x));
if Atomic.get st.need_wakeup then wakeup st
(* Safe to call from anywhere (other systhreads, domains, signal handlers, GC finalizers) *)
let enqueue_failed_thread st k ex =
Lf_queue.push st.run_q (Failed_thread (k, ex));
if Atomic.get st.need_wakeup then wakeup st
(* Can only be called from our own domain, so no need to check for wakeup. *)
let enqueue_at_head st k x =
Lf_queue.push_head st.run_q (Thread (k, x))
type _ Effect.t += Enter : (t -> 'a Suspended.t -> unit) -> 'a Effect.t
type _ Effect.t += Cancel : io_job Uring.job -> unit Effect.t
let enter fn = Effect.perform (Enter fn)
(* Cancellations always come from the same domain, so no need to send wake events here. *)
let rec enqueue_cancel job st =
Ctf.label "cancel";
match Uring.cancel st.uring job Cancel_job with
| None -> Queue.push (fun st -> enqueue_cancel job st) st.io_q
| Some _ -> ()
let cancel job = Effect.perform (Cancel job)
(* Cancellation
For operations that can be cancelled we need to set the fiber's cancellation function.
The typical sequence is:
1. We submit an operation, getting back a uring job (needed for cancellation).
2. We set the cancellation function. The function uses the uring job to cancel.
When the job completes, we clear the cancellation function. The function
must have been set by this point because we don't poll for completions until
the above steps have finished.
If the context is cancelled while the operation is running, the function will get removed and called,
which will submit a cancellation request to uring. We know the job is still valid at this point because
we clear the cancel function when it completes.
If the operation completes before Linux processes the cancellation, we get [ENOENT], which we ignore. *)
(* [with_cancel_hook ~action st fn] calls [fn] to create a job,
then sets the fiber's cancel function to cancel it.
If [action] is already cancelled, it schedules [action] to be discontinued.
@return Whether to retry the operation later, once there is space. *)
let with_cancel_hook ~action st fn =
match Fiber_context.get_error action.Suspended.fiber with
| Some ex -> enqueue_failed_thread st action ex; false
| None ->
match fn () with
| None -> true
| Some job ->
Fiber_context.set_cancel_fn action.fiber (fun _ -> cancel job);
false
let rec submit_rw_req st ({op; file_offset; fd; buf; len; cur_off; action} as req) =
match FD.get "submit_rw_req" fd with
| Error ex -> enqueue_failed_thread st action ex
| Ok fd ->
let {uring;io_q;_} = st in
let off = Uring.Region.to_offset buf + cur_off in
let len = match len with Exactly l | Upto l -> l in
let len = len - cur_off in
let retry = with_cancel_hook ~action st (fun () ->
match op with
|`R -> Uring.read_fixed uring ~file_offset fd ~off ~len (Read req)
|`W -> Uring.write_fixed uring ~file_offset fd ~off ~len (Write req)
)
in
if retry then (
Ctf.label "await-sqe";
(* wait until an sqe is available *)
Queue.push (fun st -> submit_rw_req st req) io_q
)
(* TODO bind from unixsupport *)
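(* -62, -11 and -4 are the negated Linux errno values ETIME, EAGAIN and EINTR. *)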
let errno_is_retry = function -62 | -11 | -4 -> true |_ -> false
let enqueue_read st action (file_offset,fd,buf,len) =
let file_offset =
match file_offset with
| Some x -> x
| None -> FD.uring_file_offset fd
in
let req = { op=`R; file_offset; len; fd; cur_off = 0; buf; action } in
Ctf.label "read";
submit_rw_req st req
let rec enqueue_readv args st action =
let (file_offset,fd,bufs) = args in
let file_offset =
match file_offset with
| Some x -> x
| None -> FD.uring_file_offset fd
in
Ctf.label "readv";
match FD.get "readv" fd with
| Error ex -> enqueue_failed_thread st action ex
| Ok fd ->
let retry = with_cancel_hook ~action st (fun () ->
Uring.readv st.uring ~file_offset fd bufs (Job action))
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_readv args st action) st.io_q
let rec enqueue_writev args st action =
let (file_offset,fd,bufs) = args in
let file_offset =
match file_offset with
| Some x -> x
| None -> FD.uring_file_offset fd
in
Ctf.label "writev";
match FD.get "writev" fd with
| Error ex -> enqueue_failed_thread st action ex
| Ok fd ->
let retry = with_cancel_hook ~action st (fun () ->
Uring.writev st.uring ~file_offset fd bufs (Job action)
)
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_writev args st action) st.io_q
let rec enqueue_poll_add fd poll_mask st action =
Ctf.label "poll_add";
match FD.get "poll_add" fd with
| Error ex -> enqueue_failed_thread st action ex
| Ok unix_fd ->
let retry = with_cancel_hook ~action st (fun () ->
Uring.poll_add st.uring unix_fd poll_mask (Job action)
)
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_poll_add fd poll_mask st action) st.io_q
let rec enqueue_poll_add_unix fd poll_mask st action cb =
Ctf.label "poll_add";
let retry = with_cancel_hook ~action st (fun () ->
Uring.poll_add st.uring fd poll_mask (Job_fn (action, cb))
)
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_poll_add_unix fd poll_mask st action cb) st.io_q
let rec enqueue_close st action fd =
Ctf.label "close";
let subm = Uring.close st.uring fd (Job_no_cancel action) in
if subm = None then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_close st action fd) st.io_q
let enqueue_write st action (file_offset,fd,buf,len) =
let file_offset =
match file_offset with
| Some x -> x
| None -> FD.uring_file_offset fd
in
let req = { op=`W; file_offset; len; fd; cur_off = 0; buf; action } in
Ctf.label "write";
submit_rw_req st req
let rec enqueue_splice ~src ~dst ~len st action =
Ctf.label "splice";
match FD.get "splice-src" src, FD.get "splice-dst" dst with
| Error ex, _
| _, Error ex -> enqueue_failed_thread st action ex
| Ok unix_src, Ok unix_dst ->
let retry = with_cancel_hook ~action st (fun () ->
Uring.splice st.uring (Job action) ~src:unix_src ~dst:unix_dst ~len
)
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_splice ~src ~dst ~len st action) st.io_q
let rec enqueue_openat2 ((access, flags, perm, resolve, dir, path) as args) st action =
Ctf.label "openat2";
let use fd =
let retry = with_cancel_hook ~action st (fun () ->
Uring.openat2 st.uring ~access ~flags ~perm ~resolve ?fd path (Job action)
)
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_openat2 args st action) st.io_q
in
match dir with
| None -> use None
| Some dir ->
match FD.get "openat2" dir with
| Error ex -> enqueue_failed_thread st action ex
| Ok fd -> use (Some fd)
let rec enqueue_unlink ((dir, fd, path) as args) st action =
Ctf.label "unlinkat";
match FD.get "unlink" fd with
| Error ex -> enqueue_failed_thread st action ex
| Ok fd ->
let retry = with_cancel_hook ~action st (fun () ->
Uring.unlink st.uring ~dir ~fd path (Job action)
)
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_unlink args st action) st.io_q
let rec enqueue_connect fd addr st action =
Ctf.label "connect";
match FD.get "connect" fd with
| Error ex -> enqueue_failed_thread st action ex
| Ok unix_fd ->
let retry = with_cancel_hook ~action st (fun () ->
Uring.connect st.uring unix_fd addr (Job action)
)
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_connect fd addr st action) st.io_q
let rec extract_fds = function
| [] -> Ok []
| x :: xs ->
match FD.get "send_msg" x with
| Error _ as e -> e
| Ok fd ->
match extract_fds xs with
| Error _ as e -> e
| Ok fds -> Ok (fd :: fds)
let rec enqueue_send_msg fd ~fds ~dst buf st action =
Ctf.label "send_msg";
match FD.get "send_msg" fd, extract_fds fds with
| Error ex, _
| _, Error ex -> enqueue_failed_thread st action ex
| Ok unix_fd, Ok unix_fds ->
let retry = with_cancel_hook ~action st (fun () ->
Uring.send_msg st.uring unix_fd ~fds:unix_fds ?dst buf (Job action)
)
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_send_msg fd ~fds ~dst buf st action) st.io_q
let rec enqueue_recv_msg fd msghdr st action =
Ctf.label "recv_msg";
match FD.get "recv_msg" fd with
| Error ex -> enqueue_failed_thread st action ex
| Ok unix_fd ->
let retry = with_cancel_hook ~action st (fun () ->
Uring.recv_msg st.uring unix_fd msghdr (Job action);
)
in
if retry then (* wait until an sqe is available *)
Queue.push (fun st -> enqueue_recv_msg fd msghdr st action) st.io_q
let rec enqueue_accept fd client_addr st action =
Ctf.label "accept";
match FD.get "accept" fd with
| Error ex -> enqueue_failed_thread st action ex
| Ok unix_fd ->
let retry = with_cancel_hook ~action st (fun () ->
Uring.accept st.uring unix_fd client_addr (Job action)
) in
if retry then (
(* wait until an sqe is available *)
Queue.push (fun st -> enqueue_accept fd client_addr st action) st.io_q
)
let rec enqueue_noop st action =
Ctf.label "noop";
let retry = (Uring.noop st.uring (Job_no_cancel action) = None) in
if retry then (
(* wait until an sqe is available *)
Queue.push (fun st -> enqueue_noop st action) st.io_q
)
let submit_pending_io st =
match Queue.take_opt st.io_q with
| None -> ()
| Some fn ->
Ctf.label "submit_pending_io";
fn st
(* Switch control to the next ready continuation.
If none is ready, wait until we get an event to wake one and then switch.
Returns only if there is nothing to do and no queued operations. *)
let rec schedule ({run_q; sleep_q; mem_q; uring; _} as st) : [`Exit_scheduler] =
(* This is not a fair scheduler *)
(* Wake up any paused fibers *)
match Lf_queue.pop run_q with
| None -> assert false (* We should always have an IO job, at least *)
| Some Thread (k, v) -> (* We already have a runnable task *)
Fiber_context.clear_cancel_fn k.fiber;
Suspended.continue k v
| Some Failed_thread (k, ex) ->
Fiber_context.clear_cancel_fn k.fiber;
Suspended.discontinue k ex
| Some IO -> (* Note: be sure to re-inject the IO task before continuing! *)
(* This is not a fair scheduler: timers always run before all other IO *)
let now = Mtime_clock.now () in
match Zzz.pop ~now sleep_q with
| `Due k ->
Lf_queue.push run_q IO; (* Re-inject IO job in the run queue *)
Suspended.continue k () (* A sleeping task is now due *)
| `Wait_until _ | `Nothing as next_due ->
(* Handle any pending events before submitting. This is faster. *)
match Uring.get_cqe_nonblocking uring with
| Some { data = runnable; result } ->
Lf_queue.push run_q IO; (* Re-inject IO job in the run queue *)
handle_complete st ~runnable result
| None ->
ignore (Uring.submit uring : int);
let timeout =
match next_due with
| `Wait_until time ->
let time = Mtime.to_uint64_ns time in
let now = Mtime.to_uint64_ns now in
let diff_ns = Int64.sub time now |> Int64.to_float in
Some (diff_ns /. 1e9)
| `Nothing -> None
in
if not (Lf_queue.is_empty st.run_q) then (
Lf_queue.push run_q IO; (* Re-inject IO job in the run queue *)
schedule st
) else if timeout = None && Uring.active_ops uring = 0 then (
(* Nothing further can happen at this point.
If there are no events in progress but also still no memory available, something has gone wrong! *)
assert (Queue.length mem_q = 0);
Lf_queue.close st.run_q; (* Just to catch bugs if something tries to enqueue later *)
`Exit_scheduler
) else (
Atomic.set st.need_wakeup true;
if Lf_queue.is_empty st.run_q then (
(* At this point we're not going to check [run_q] again before sleeping.
If [need_wakeup] is still [true], this is fine because we don't promise to do that.
If [need_wakeup = false], a wake-up event will arrive and wake us up soon. *)
Ctf.(note_hiatus Wait_for_work);
let result = Uring.wait ?timeout uring in
Ctf.note_resume system_thread;
Atomic.set st.need_wakeup false;
Lf_queue.push run_q IO; (* Re-inject IO job in the run queue *)
match result with
| None ->
(* Woken by a timeout, which is now due, or by a signal. *)
schedule st
| Some { data = runnable; result } ->
handle_complete st ~runnable result
) else (
(* Someone added a new job while we were setting [need_wakeup] to [true].
They might or might not have seen that, so we can't be sure they'll send an event. *)
Atomic.set st.need_wakeup false;
Lf_queue.push run_q IO; (* Re-inject IO job in the run queue *)
schedule st
)
)
and handle_complete st ~runnable result =
submit_pending_io st; (* If something was waiting for a slot, submit it now. *)
match runnable with
| Read req ->
complete_rw_req st req result
| Write req ->
complete_rw_req st req result
| Job k ->
Fiber_context.clear_cancel_fn k.fiber;
if result >= 0 then Suspended.continue k result
else (
match Fiber_context.get_error k.fiber with
| None -> Suspended.continue k result
| Some e ->
(* If cancelled, report that instead. *)
Suspended.discontinue k e
)
| Job_no_cancel k ->
Suspended.continue k result
| Cancel_job ->
begin match result with
| 0 (* Operation cancelled successfully *)
| -2 (* ENOENT - operation completed before cancel took effect *)
| -114 (* EALREADY - operation already in progress *)
-> ()
| errno ->
Log.warn (fun f -> f "Cancel returned unexpected error: %s" (Unix.error_message (Uring.error_of_errno errno)))
end;
schedule st
| Job_fn (k, f) ->
Fiber_context.clear_cancel_fn k.fiber;
(* Should we only do this on error, to avoid losing the return value?
We already do that with rw jobs. *)
begin match Fiber_context.get_error k.fiber with
| None -> f result
| Some e -> Suspended.discontinue k e
end
and complete_rw_req st ({len; cur_off; action; _} as req) res =
Fiber_context.clear_cancel_fn action.fiber;
match res, len with
| 0, _ -> Suspended.discontinue action End_of_file
| e, _ when e < 0 ->
begin match Fiber_context.get_error action.fiber with
| Some e -> Suspended.discontinue action e (* If cancelled, report that instead. *)
| None ->
if errno_is_retry e then (
submit_rw_req st req;
schedule st
) else (
Suspended.continue action e
)
end
| n, Exactly len when n < len - cur_off ->
req.cur_off <- req.cur_off + n;
submit_rw_req st req;
schedule st
| _, Exactly len -> Suspended.continue action len
| n, Upto _ -> Suspended.continue action n
module Low_level = struct
let alloc_buf_or_wait st k =
match st.mem with
| None -> Suspended.discontinue k (Failure "No fixed buffer available")
| Some mem ->
match Uring.Region.alloc mem with
| buf -> Suspended.continue k buf
| exception Uring.Region.No_space ->
Queue.push k st.mem_q;
schedule st
let free_buf st buf =
match Queue.take_opt st.mem_q with
| None -> Uring.Region.free buf
| Some k -> enqueue_thread st k buf
let noop () =
let result = enter enqueue_noop in
if result <> 0 then raise (unclassified_error (Eio_unix.Unix_error (Uring.error_of_errno result, "noop", "")))
type _ Effect.t += Sleep_until : Mtime.t -> unit Effect.t
let sleep_until d =
Effect.perform (Sleep_until d)
type _ Effect.t += ERead : (Optint.Int63.t option * FD.t * Uring.Region.chunk * amount) -> int Effect.t
let read_exactly ?file_offset fd buf len =
let res = Effect.perform (ERead (file_offset, fd, buf, Exactly len)) in
if res < 0 then (
raise @@ wrap_error (Uring.error_of_errno res) "read_exactly" ""
)
let read_upto ?file_offset fd buf len =
let res = Effect.perform (ERead (file_offset, fd, buf, Upto len)) in
if res < 0 then (
raise @@ wrap_error (Uring.error_of_errno res) "read_upto" ""
) else (
res
)
let readv ?file_offset fd bufs =
let res = enter (enqueue_readv (file_offset, fd, bufs)) in
if res < 0 then (
raise @@ wrap_error (Uring.error_of_errno res) "readv" ""
) else if res = 0 then (
raise End_of_file
) else (
res
)
let writev_single ?file_offset fd bufs =
let res = enter (enqueue_writev (file_offset, fd, bufs)) in
if res < 0 then (
raise @@ wrap_error (Uring.error_of_errno res) "writev" ""
) else (
res
)
let rec writev ?file_offset fd bufs =
let bytes_written = writev_single ?file_offset fd bufs in
match Cstruct.shiftv bufs bytes_written with
| [] -> ()
| bufs ->
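(* An offset of [minus_one] means "use the FD's own file position", so leave it as-is;
   an explicit offset must be advanced past the bytes already written. *)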
let file_offset =
let module I63 = Optint.Int63 in
match file_offset with
| None -> None
| Some ofs when ofs = I63.minus_one -> Some I63.minus_one
| Some ofs -> Some (I63.add ofs (I63.of_int bytes_written))
in
writev ?file_offset fd bufs
let await_readable fd =
let res = enter (enqueue_poll_add fd (Uring.Poll_mask.(pollin + pollerr))) in
if res < 0 then (
raise (unclassified_error (Eio_unix.Unix_error (Uring.error_of_errno res, "await_readable", "")))
)
let await_writable fd =
let res = enter (enqueue_poll_add fd (Uring.Poll_mask.(pollout + pollerr))) in
if res < 0 then (
raise (unclassified_error (Eio_unix.Unix_error (Uring.error_of_errno res, "await_writable", "")))
)
type _ Effect.t += EWrite : (Optint.Int63.t option * FD.t * Uring.Region.chunk * amount) -> int Effect.t
let write ?file_offset fd buf len =
let res = Effect.perform (EWrite (file_offset, fd, buf, Exactly len)) in
if res < 0 then (
raise @@ wrap_error (Uring.error_of_errno res) "write" ""
)
type _ Effect.t += Alloc : Uring.Region.chunk option Effect.t
let alloc_fixed () = Effect.perform Alloc
type _ Effect.t += Alloc_or_wait : Uring.Region.chunk Effect.t
let alloc_fixed_or_wait () = Effect.perform Alloc_or_wait
type _ Effect.t += Free : Uring.Region.chunk -> unit Effect.t
let free_fixed buf = Effect.perform (Free buf)
let splice src ~dst ~len =
let res = enter (enqueue_splice ~src ~dst ~len) in
if res > 0 then res
else if res = 0 then raise End_of_file
else raise @@ wrap_error (Uring.error_of_errno res) "splice" ""
let connect fd addr =
let res = enter (enqueue_connect fd addr) in
if res < 0 then (
let ex =
match addr with
| ADDR_UNIX _ -> wrap_error_fs (Uring.error_of_errno res) "connect" ""
| ADDR_INET _ -> wrap_error (Uring.error_of_errno res) "connect" ""
in
raise ex
)
let send_msg fd ?(fds=[]) ?dst buf =
let res = enter (enqueue_send_msg fd ~fds ~dst buf) in
if res < 0 then (
raise @@ wrap_error (Uring.error_of_errno res) "send_msg" ""
)
let recv_msg fd buf =
let addr = Uring.Sockaddr.create () in
let msghdr = Uring.Msghdr.create ~addr buf in
let res = enter (enqueue_recv_msg fd msghdr) in
if res < 0 then (
raise @@ wrap_error (Uring.error_of_errno res) "recv_msg" ""
);
addr, res
let recv_msg_with_fds ~sw ~max_fds fd buf =
let addr = Uring.Sockaddr.create () in
let msghdr = Uring.Msghdr.create ~n_fds:max_fds ~addr buf in
let res = enter (enqueue_recv_msg fd msghdr) in
if res < 0 then (
raise @@ wrap_error (Uring.error_of_errno res) "recv_msg" ""
);
let fds =
Uring.Msghdr.get_fds msghdr
|> List.map (fun fd -> FD.of_unix ~sw ~seekable:(FD.is_seekable fd) ~close_unix:true fd)
in
addr, res, fds
let with_chunk ~fallback fn =
match alloc_fixed () with
| Some chunk ->
Fun.protect ~finally:(fun () -> free_fixed chunk) @@ fun () ->
fn chunk
| None ->
fallback ()
let openat2 ~sw ?seekable ~access ~flags ~perm ~resolve ?dir path =
let res = enter (enqueue_openat2 (access, flags, perm, resolve, dir, path)) in
if res < 0 then (
Switch.check sw; (* If cancelled, report that instead. *)
raise @@ wrap_error_fs (Uring.error_of_errno res) "openat2" ""
);
let fd : Unix.file_descr = Obj.magic res in
let seekable =
match seekable with
| None -> FD.is_seekable fd
| Some x -> x
in
FD.of_unix ~sw ~seekable ~close_unix:true fd
let openat ~sw ?seekable ~access ~flags ~perm dir path =
match dir with
| FD dir -> openat2 ~sw ?seekable ~access ~flags ~perm ~resolve:Uring.Resolve.beneath ~dir path
| Cwd -> openat2 ~sw ?seekable ~access ~flags ~perm ~resolve:Uring.Resolve.beneath path
| Fs -> openat2 ~sw ?seekable ~access ~flags ~perm ~resolve:Uring.Resolve.empty path
let fstat fd = FD.fstat fd
external eio_mkdirat : Unix.file_descr -> string -> Unix.file_perm -> unit = "caml_eio_mkdirat"
external eio_renameat : Unix.file_descr -> string -> Unix.file_descr -> string -> unit = "caml_eio_renameat"
external eio_getrandom : Cstruct.buffer -> int -> int -> int = "caml_eio_getrandom"
external eio_getdents : Unix.file_descr -> string list = "caml_eio_getdents"
let getrandom { Cstruct.buffer; off; len } =
let rec loop n =
if n = len then
()
else
loop (n + eio_getrandom buffer (off + n) (len - n))
in
loop 0
(* [with_parent_dir dir path fn] runs [fn parent (basename path)],
where [parent] is a path FD for [path]'s parent, resolved using [Resolve.beneath]. *)
let with_parent_dir dir path fn =
let dir_path = Filename.dirname path in
let leaf = Filename.basename path in
Switch.run (fun sw ->
let parent =
match dir with
| FD d when dir_path = "." -> d
| _ ->
openat ~sw ~seekable:false dir dir_path
~access:`R
~flags:Uring.Open_flags.(cloexec + path + directory)
~perm:0
in
fn parent leaf
)
let mkdir_beneath ~perm dir path =
(* [mkdir] is really an operation on [path]'s parent. Get a reference to that first: *)
with_parent_dir dir path @@ fun parent leaf ->
try eio_mkdirat (FD.get_exn "mkdirat" parent) leaf perm
with Unix.Unix_error (code, name, arg) -> raise @@ wrap_error_fs code name arg
let unlink ~rmdir dir path =
(* [unlink] is really an operation on [path]'s parent. Get a reference to that first: *)
with_parent_dir dir path @@ fun parent leaf ->
let res = enter (enqueue_unlink (rmdir, parent, leaf)) in
if res <> 0 then raise @@ wrap_error_fs (Uring.error_of_errno res) "unlinkat" ""
let rename old_dir old_path new_dir new_path =
with_parent_dir old_dir old_path @@ fun old_parent old_leaf ->
with_parent_dir new_dir new_path @@ fun new_parent new_leaf ->
try
eio_renameat
(FD.get_exn "renameat-old" old_parent) old_leaf
(FD.get_exn "renameat-new" new_parent) new_leaf
with Unix.Unix_error (code, name, arg) -> raise @@ wrap_error_fs code name arg
let shutdown socket command =
try Unix.shutdown (FD.get_exn "shutdown" socket) command
with Unix.Unix_error (code, name, arg) -> raise @@ wrap_error code name arg
let accept ~sw fd =
Ctf.label "accept";
let client_addr = Uring.Sockaddr.create () in
let res = enter (enqueue_accept fd client_addr) in
if res < 0 then (
raise @@ wrap_error (Uring.error_of_errno res) "accept" ""
) else (
let unix : Unix.file_descr = Obj.magic res in
let client = FD.of_unix ~sw ~seekable:false ~close_unix:true unix in
let client_addr = Uring.Sockaddr.get client_addr in
client, client_addr
)
let open_dir ~sw dir path =
openat ~sw ~seekable:false dir path
~access:`R
~flags:Uring.Open_flags.(cloexec + directory)
~perm:0
let read_dir fd =
let rec read_all acc fd =
match eio_getdents (FD.get_exn "getdents" fd) with
| [] -> acc
| files ->
let files = List.filter (function ".." | "." -> false | _ -> true) files in
read_all (files @ acc) fd
in
Eio_unix.run_in_systhread (fun () -> read_all [] fd)
(* https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml *)
let getaddrinfo ~service node =
let to_eio_sockaddr_t {Unix.ai_family; ai_addr; ai_socktype; ai_protocol; _ } =
match ai_family, ai_socktype, ai_addr with
| (Unix.PF_INET | PF_INET6),
(Unix.SOCK_STREAM | SOCK_DGRAM),
Unix.ADDR_INET (inet_addr,port) -> (
match ai_protocol with
| 6 -> Some (`Tcp (Eio_unix.Ipaddr.of_unix inet_addr, port))
| 17 -> Some (`Udp (Eio_unix.Ipaddr.of_unix inet_addr, port))
| _ -> None)
| _ -> None
in
Eio_unix.run_in_systhread @@ fun () ->
Unix.getaddrinfo node service []
|> List.filter_map to_eio_sockaddr_t
end
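(* Illustrative only: a rough sketch (inside a comment, so nothing here is compiled) of how
   the [Low_level] API above fits together, assuming it runs inside this backend's event
   loop with an enclosing switch [sw]. [openat2], [with_chunk], [read_upto] and [FD.close]
   are the functions defined above; the path and flags are invented for the example:
     let fd =
       Low_level.openat2 ~sw ~access:`R
         ~flags:Uring.Open_flags.cloexec
         ~perm:0 ~resolve:Uring.Resolve.empty "/etc/hostname"
     in
     let fallback () = ... in   (* e.g. read via a plain [Cstruct.t] buffer instead *)
     Low_level.with_chunk ~fallback (fun chunk ->
         let got = Low_level.read_upto fd chunk (Uring.Region.length chunk) in
         ignore (got : int));
     FD.close fd
*)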
module EventFD_pool : sig
(* We need to write to event FDs from signal handlers and GC finalizers.
This means we can't take a lock, which means we can't easily prevent
the owning domain from closing the FD while we're writing to it
(which could result in us writing to an unrelated file if the FD
got reused). To avoid that, we never close event FDs but just return them
to a free pool.
The case where this matters is:
1. Some other systhread calls [wakeup].
2. [wakeup] adds an item to the run-queue and sees it needs to send a wake-up event.
3. The domain wakes up for some other reason, handles the event, then shuts down.
4. The original systhread writes to the eventfd.
*)
val get : unit -> Unix.file_descr
(* Take the next free eventfd from the pool, or create a new one if the pool's empty.
You might get a few spurious events from it as other threads are shutting down,
so you must be able to cope with that. *)
val put : Unix.file_descr -> unit
(* [put fd] adds [fd] to the free pool. *)
end = struct
external eio_eventfd : int -> Unix.file_descr = "caml_eio_eventfd"
let free = Lf_queue.create ()
let get () =
match Lf_queue.pop free with
| Some fd -> fd
| None -> eio_eventfd 0
let put fd =
Lf_queue.push free fd
end
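(* Illustrative only: the intended lifecycle, sketched as a comment. A domain takes an
   eventfd at start-up and returns it (rather than closing it) at shut-down:
     let efd = EventFD_pool.get () in
     (* ... use [efd] as this domain's wake-up FD, tolerating spurious events ... *)
     EventFD_pool.put efd
*)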
type has_fd = < fd : FD.t >
type source = < Eio.Flow.source; Eio.Flow.close; has_fd >
type sink = < Eio.Flow.sink ; Eio.Flow.close; has_fd >
let get_fd (t : <has_fd; ..>) = t#fd
(* When copying between a source with an FD and a sink with an FD, we can share the chunk
and avoid copying. *)
let fast_copy src dst =
let fallback () =
(* No chunks available. Use regular memory instead. *)
let buf = Cstruct.create 4096 in
try
while true do
let got = Low_level.readv src [buf] in
Low_level.writev dst [Cstruct.sub buf 0 got]
done
with End_of_file -> ()
in
Low_level.with_chunk ~fallback @@ fun chunk ->
let chunk_size = Uring.Region.length chunk in
try
while true do
let got = Low_level.read_upto src chunk chunk_size in
Low_level.write dst chunk got
done
with End_of_file -> ()
(* Try a fast copy using splice. If the FDs don't support that, switch to copying. *)
let _fast_copy_try_splice src dst =
try
while true do
let _ : int = Low_level.splice src ~dst ~len:max_int in
()
done
with
| End_of_file -> ()
| Eio.Exn.Io (Eio.Exn.X Eio_unix.Unix_error ((EAGAIN | EINVAL), "splice", _), _) -> fast_copy src dst
(* XXX workaround for issue #319, PR #327 *)
let fast_copy_try_splice src dst = fast_copy src dst
(* Copy using the [Read_source_buffer] optimisation.
Avoids a copy if the source already has the data. *)
let copy_with_rsb rsb dst =
try