Blob


1 /* $OpenBSD: ioev.c,v 1.42 2019/06/12 17:42:53 eric Exp $ */
2 /*
3 * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17 #define _GNU_SOURCE 1
18 #define _BSD_SOURCE 1
20 #include <sys/types.h>
21 #include <sys/queue.h>
22 #include <sys/socket.h>
24 #include <err.h>
25 #include <errno.h>
26 #include <event.h>
27 #include <fcntl.h>
28 #include <inttypes.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <stdio.h>
32 #include <unistd.h>
34 #include "openbsd-compat.h"
35 #include "ioev.h"
36 #include "iobuf.h"
38 #ifdef IO_TLS
39 #include <openssl/err.h>
40 #include <openssl/ssl.h>
41 #endif
43 enum {
44 IO_STATE_NONE,
45 IO_STATE_CONNECT,
46 IO_STATE_CONNECT_TLS,
47 IO_STATE_ACCEPT_TLS,
48 IO_STATE_UP,
50 IO_STATE_MAX,
51 };
53 #define IO_PAUSE_IN IO_IN
54 #define IO_PAUSE_OUT IO_OUT
55 #define IO_READ 0x04
56 #define IO_WRITE 0x08
57 #define IO_RW (IO_READ | IO_WRITE)
58 #define IO_RESET 0x10 /* internal */
59 #define IO_HELD 0x20 /* internal */
61 struct io {
62 int sock;
63 void *arg;
64 void (*cb)(struct io*, int, void *);
65 struct iobuf iobuf;
66 size_t lowat;
67 int timeout;
68 int flags;
69 int state;
70 struct event ev;
71 void *tls;
72 const char *error; /* only valid immediately on callback */
73 };
75 const char* io_strflags(int);
76 const char* io_evstr(short);
78 void _io_init(void);
79 void io_hold(struct io *);
80 void io_release(struct io *);
81 void io_callback(struct io*, int);
82 void io_dispatch(int, short, void *);
83 void io_dispatch_connect(int, short, void *);
84 size_t io_pending(struct io *);
85 size_t io_queued(struct io*);
86 void io_reset(struct io *, short, void (*)(int, short, void*));
87 void io_frame_enter(const char *, struct io *, int);
88 void io_frame_leave(struct io *);
90 #ifdef IO_TLS
91 void ssl_error(const char *); /* XXX external */
93 static const char* io_tls_error(void);
94 void io_dispatch_accept_tls(int, short, void *);
95 void io_dispatch_connect_tls(int, short, void *);
96 void io_dispatch_read_tls(int, short, void *);
97 void io_dispatch_write_tls(int, short, void *);
98 void io_reload_tls(struct io *io);
99 #endif
101 static struct io *current = NULL;
102 static uint64_t frame = 0;
103 static int _io_debug = 0;
105 #define io_debug(args...) do { if (_io_debug) printf(args); } while(0)
108 const char*
109 io_strio(struct io *io)
111 static char buf[128];
112 char ssl[128];
114 ssl[0] = '\0';
115 #ifdef IO_TLS
116 if (io->tls) {
117 (void)snprintf(ssl, sizeof ssl, " tls=%s:%s:%d",
118 SSL_get_version(io->tls),
119 SSL_get_cipher_name(io->tls),
120 SSL_get_cipher_bits(io->tls, NULL));
122 #endif
124 (void)snprintf(buf, sizeof buf,
125 "<io:%p fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
126 io, io->sock, io->timeout, io_strflags(io->flags), ssl,
127 io_pending(io), io_queued(io));
129 return (buf);
132 #define CASE(x) case x : return #x
134 const char*
135 io_strevent(int evt)
137 static char buf[32];
139 switch (evt) {
140 CASE(IO_CONNECTED);
141 CASE(IO_TLSREADY);
142 CASE(IO_DATAIN);
143 CASE(IO_LOWAT);
144 CASE(IO_DISCONNECTED);
145 CASE(IO_TIMEOUT);
146 CASE(IO_ERROR);
147 default:
148 (void)snprintf(buf, sizeof(buf), "IO_? %d", evt);
149 return buf;
/*
 * Put fd in non-blocking mode.  Fatal on fcntl() failure: the event
 * loop cannot safely operate on a blocking descriptor.
 *
 * Fix: the err() messages previously said "io_set_blocking", pointing
 * diagnostics at the wrong function; they now name this function.
 */
void
io_set_nonblocking(int fd)
{
	int flags;

	if ((flags = fcntl(fd, F_GETFL)) == -1)
		err(1, "io_set_nonblocking:fcntl(F_GETFL)");

	flags |= O_NONBLOCK;

	if (fcntl(fd, F_SETFL, flags) == -1)
		err(1, "io_set_nonblocking:fcntl(F_SETFL)");
}
/*
 * Disable SO_LINGER on a socket (zeroed struct linger => close()
 * returns immediately and pending data is handled in the background).
 * Fatal on setsockopt() failure.
 *
 * Fix: the err() message previously said "io_set_linger", pointing
 * diagnostics at the wrong function; it now names this function.
 */
void
io_set_nolinger(int fd)
{
	struct linger l;

	memset(&l, 0, sizeof(l));
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		err(1, "io_set_nolinger:setsockopt()");
}
177 /*
178 * Event framing must not rely on an io pointer to refer to the "same" io
179 * throughout the frame, because this is not always the case:
181 * 1) enter(addr0) -> free(addr0) -> leave(addr0) = SEGV
182 * 2) enter(addr0) -> free(addr0) -> malloc == addr0 -> leave(addr0) = BAD!
184 * In both cases, the problem is that the io is freed in the callback, so
185 * the pointer becomes invalid. If that happens, the user is required to
186 * call io_clear, so we can adapt the frame state there.
187 */
188 void
189 io_frame_enter(const char *where, struct io *io, int ev)
191 io_debug("\n=== %" PRIu64 " ===\n"
192 "io_frame_enter(%s, %s, %s)\n",
193 frame, where, io_evstr(ev), io_strio(io));
195 if (current)
196 errx(1, "io_frame_enter: interleaved frames");
198 current = io;
200 io_hold(io);
203 void
204 io_frame_leave(struct io *io)
206 io_debug("io_frame_leave(%" PRIu64 ")\n", frame);
208 if (current && current != io)
209 errx(1, "io_frame_leave: io mismatch");
211 /* io has been cleared */
212 if (current == NULL)
213 goto done;
215 /* TODO: There is a possible optimization there:
216 * In a typical half-duplex request/response scenario,
217 * the io is waiting to read a request, and when done, it queues
218 * the response in the output buffer and goes to write mode.
219 * There, the write event is set and will be triggered in the next
220 * event frame. In most case, the write call could be done
221 * immediately as part of the last read frame, thus avoiding to go
222 * through the event loop machinery. So, as an optimisation, we
223 * could detect that case here and force an event dispatching.
224 */
226 /* Reload the io if it has not been reset already. */
227 io_release(io);
228 current = NULL;
229 done:
230 io_debug("=== /%" PRIu64 "\n", frame);
232 frame += 1;
235 void
236 _io_init(void)
238 static int init = 0;
240 if (init)
241 return;
243 init = 1;
244 _io_debug = getenv("IO_DEBUG") != NULL;
247 struct io *
248 io_new(void)
250 struct io *io;
252 _io_init();
254 if ((io = calloc(1, sizeof(*io))) == NULL)
255 return NULL;
257 io->sock = -1;
258 io->timeout = -1;
260 if (iobuf_init(&io->iobuf, 0, 0) == -1) {
261 free(io);
262 return NULL;
265 return io;
268 void
269 io_free(struct io *io)
271 io_debug("io_clear(%p)\n", io);
273 /* the current io is virtually dead */
274 if (io == current)
275 current = NULL;
277 #ifdef IO_TLS
278 SSL_free(io->tls);
279 io->tls = NULL;
280 #endif
282 if (event_initialized(&io->ev))
283 event_del(&io->ev);
284 if (io->sock != -1) {
285 close(io->sock);
286 io->sock = -1;
289 iobuf_clear(&io->iobuf);
290 free(io);
293 void
294 io_hold(struct io *io)
296 io_debug("io_enter(%p)\n", io);
298 if (io->flags & IO_HELD)
299 errx(1, "io_hold: io is already held");
301 io->flags &= ~IO_RESET;
302 io->flags |= IO_HELD;
305 void
306 io_release(struct io *io)
308 if (!(io->flags & IO_HELD))
309 errx(1, "io_release: io is not held");
311 io->flags &= ~IO_HELD;
312 if (!(io->flags & IO_RESET))
313 io_reload(io);
316 void
317 io_set_fd(struct io *io, int fd)
319 io->sock = fd;
320 if (fd != -1)
321 io_reload(io);
324 void
325 io_set_callback(struct io *io, void(*cb)(struct io *, int, void *), void *arg)
327 io->cb = cb;
328 io->arg = arg;
331 void
332 io_set_timeout(struct io *io, int msec)
334 io_debug("io_set_timeout(%p, %d)\n", io, msec);
336 io->timeout = msec;
339 void
340 io_set_lowat(struct io *io, size_t lowat)
342 io_debug("io_set_lowat(%p, %zu)\n", io, lowat);
344 io->lowat = lowat;
347 void
348 io_pause(struct io *io, int dir)
350 io_debug("io_pause(%p, %x)\n", io, dir);
352 io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT);
353 io_reload(io);
356 void
357 io_resume(struct io *io, int dir)
359 io_debug("io_resume(%p, %x)\n", io, dir);
361 io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT));
362 io_reload(io);
365 void
366 io_set_read(struct io *io)
368 int mode;
370 io_debug("io_set_read(%p)\n", io);
372 mode = io->flags & IO_RW;
373 if (!(mode == 0 || mode == IO_WRITE))
374 errx(1, "io_set_read(): full-duplex or reading");
376 io->flags &= ~IO_RW;
377 io->flags |= IO_READ;
378 io_reload(io);
381 void
382 io_set_write(struct io *io)
384 int mode;
386 io_debug("io_set_write(%p)\n", io);
388 mode = io->flags & IO_RW;
389 if (!(mode == 0 || mode == IO_READ))
390 errx(1, "io_set_write(): full-duplex or writing");
392 io->flags &= ~IO_RW;
393 io->flags |= IO_WRITE;
394 io_reload(io);
397 const char *
398 io_error(struct io *io)
400 return io->error;
403 void *
404 io_tls(struct io *io)
406 return io->tls;
409 int
410 io_fileno(struct io *io)
412 return io->sock;
415 int
416 io_paused(struct io *io, int what)
418 return (io->flags & (IO_PAUSE_IN | IO_PAUSE_OUT)) == what;
421 /*
422 * Buffered output functions
423 */
425 int
426 io_write(struct io *io, const void *buf, size_t len)
428 int r;
430 r = iobuf_queue(&io->iobuf, buf, len);
432 io_reload(io);
434 return r;
437 int
438 io_writev(struct io *io, const struct iovec *iov, int iovcount)
440 int r;
442 r = iobuf_queuev(&io->iobuf, iov, iovcount);
444 io_reload(io);
446 return r;
/* Queue a NUL-terminated string (without the terminator). */
int
io_print(struct io *io, const char *s)
{
	return io_write(io, s, strlen(s));
}
/* printf-style variant of io_write(); see io_vprintf(). */
int
io_printf(struct io *io, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = io_vprintf(io, fmt, ap);
	va_end(ap);

	return n;
}
/*
 * Format into a temporary heap buffer and queue the result.
 * Returns -1 on formatting/allocation failure, otherwise the
 * io_write() result.
 */
int
io_vprintf(struct io *io, const char *fmt, va_list ap)
{
	char *tmp;
	int n;

	if ((n = vasprintf(&tmp, fmt, ap)) == -1)
		return -1;
	n = io_write(io, tmp, n);
	free(tmp);

	return n;
}
484 size_t
485 io_queued(struct io *io)
487 return iobuf_queued(&io->iobuf);
490 /*
491 * Buffered input functions
492 */
494 void *
495 io_data(struct io *io)
497 return iobuf_data(&io->iobuf);
500 size_t
501 io_datalen(struct io *io)
503 return iobuf_len(&io->iobuf);
506 char *
507 io_getline(struct io *io, size_t *sz)
509 return iobuf_getline(&io->iobuf, sz);
512 void
513 io_drop(struct io *io, size_t sz)
515 return iobuf_drop(&io->iobuf, sz);
519 #define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
520 #define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)
522 /*
523 * Setup the necessary events as required by the current io state,
524 * honouring duplex mode and i/o pauses.
525 */
526 void
527 io_reload(struct io *io)
529 short events;
531 /* io will be reloaded at release time */
532 if (io->flags & IO_HELD)
533 return;
535 iobuf_normalize(&io->iobuf);
537 #ifdef IO_TLS
538 if (io->tls) {
539 io_reload_tls(io);
540 return;
542 #endif
544 io_debug("io_reload(%p)\n", io);
546 events = 0;
547 if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
548 events = EV_READ;
549 if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
550 events |= EV_WRITE;
552 io_reset(io, events, io_dispatch);
555 /* Set the requested event. */
556 void
557 io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
559 struct timeval tv, *ptv;
561 io_debug("io_reset(%p, %s, %p) -> %s\n",
562 io, io_evstr(events), dispatch, io_strio(io));
564 /*
565 * Indicate that the event has already been reset so that reload
566 * is not called on frame_leave.
567 */
568 io->flags |= IO_RESET;
570 if (event_initialized(&io->ev) &&
571 event_pending(&io->ev, EV_READ|EV_WRITE, NULL))
572 event_del(&io->ev);
574 /*
575 * The io is paused by the user, so we don't want the timeout to be
576 * effective.
577 */
578 if (events == 0)
579 return;
581 event_set(&io->ev, io->sock, events, dispatch, io);
582 if (io->timeout >= 0) {
583 tv.tv_sec = io->timeout / 1000;
584 tv.tv_usec = (io->timeout % 1000) * 1000;
585 ptv = &tv;
586 } else
587 ptv = NULL;
589 event_add(&io->ev, ptv);
592 size_t
593 io_pending(struct io *io)
595 return iobuf_len(&io->iobuf);
598 const char*
599 io_strflags(int flags)
601 static char buf[64];
603 buf[0] = '\0';
605 switch (flags & IO_RW) {
606 case 0:
607 (void)strlcat(buf, "rw", sizeof buf);
608 break;
609 case IO_READ:
610 (void)strlcat(buf, "R", sizeof buf);
611 break;
612 case IO_WRITE:
613 (void)strlcat(buf, "W", sizeof buf);
614 break;
615 case IO_RW:
616 (void)strlcat(buf, "RW", sizeof buf);
617 break;
620 if (flags & IO_PAUSE_IN)
621 (void)strlcat(buf, ",F_PI", sizeof buf);
622 if (flags & IO_PAUSE_OUT)
623 (void)strlcat(buf, ",F_PO", sizeof buf);
625 return buf;
628 const char*
629 io_evstr(short ev)
631 static char buf[64];
632 char buf2[16];
633 int n;
635 n = 0;
636 buf[0] = '\0';
638 if (ev == 0) {
639 (void)strlcat(buf, "<NONE>", sizeof(buf));
640 return buf;
643 if (ev & EV_TIMEOUT) {
644 (void)strlcat(buf, "EV_TIMEOUT", sizeof(buf));
645 ev &= ~EV_TIMEOUT;
646 n++;
649 if (ev & EV_READ) {
650 if (n)
651 (void)strlcat(buf, "|", sizeof(buf));
652 (void)strlcat(buf, "EV_READ", sizeof(buf));
653 ev &= ~EV_READ;
654 n++;
657 if (ev & EV_WRITE) {
658 if (n)
659 (void)strlcat(buf, "|", sizeof(buf));
660 (void)strlcat(buf, "EV_WRITE", sizeof(buf));
661 ev &= ~EV_WRITE;
662 n++;
665 if (ev & EV_SIGNAL) {
666 if (n)
667 (void)strlcat(buf, "|", sizeof(buf));
668 (void)strlcat(buf, "EV_SIGNAL", sizeof(buf));
669 ev &= ~EV_SIGNAL;
670 n++;
673 if (ev) {
674 if (n)
675 (void)strlcat(buf, "|", sizeof(buf));
676 (void)strlcat(buf, "EV_?=0x", sizeof(buf));
677 (void)snprintf(buf2, sizeof(buf2), "%hx", ev);
678 (void)strlcat(buf, buf2, sizeof(buf));
681 return buf;
684 void
685 io_dispatch(__unused int fd, short ev, void *humppa)
687 struct io *io = humppa;
688 size_t w;
689 ssize_t n;
690 int saved_errno;
692 io_frame_enter("io_dispatch", io, ev);
694 if (ev == EV_TIMEOUT) {
695 io_callback(io, IO_TIMEOUT);
696 goto leave;
699 if (ev & EV_WRITE && (w = io_queued(io))) {
700 if ((n = iobuf_write(&io->iobuf, io->sock)) < 0) {
701 if (n == IOBUF_WANT_WRITE) /* kqueue bug? */
702 goto read;
703 if (n == IOBUF_CLOSED)
704 io_callback(io, IO_DISCONNECTED);
705 else {
706 saved_errno = errno;
707 io->error = strerror(errno);
708 errno = saved_errno;
709 io_callback(io, IO_ERROR);
711 goto leave;
713 if (w > io->lowat && w - n <= io->lowat)
714 io_callback(io, IO_LOWAT);
716 read:
718 if (ev & EV_READ) {
719 iobuf_normalize(&io->iobuf);
720 if ((n = iobuf_read(&io->iobuf, io->sock)) < 0) {
721 if (n == IOBUF_CLOSED)
722 io_callback(io, IO_DISCONNECTED);
723 else {
724 saved_errno = errno;
725 io->error = strerror(errno);
726 errno = saved_errno;
727 io_callback(io, IO_ERROR);
729 goto leave;
731 if (n)
732 io_callback(io, IO_DATAIN);
735 leave:
736 io_frame_leave(io);
739 void
740 io_callback(struct io *io, int evt)
742 io->cb(io, evt, io->arg);
745 int
746 io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa)
748 int sock, errno_save;
750 if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1)
751 goto fail;
753 io_set_nonblocking(sock);
754 io_set_nolinger(sock);
756 if (bsa && bind(sock, bsa, SA_LEN(bsa)) == -1)
757 goto fail;
759 if (connect(sock, sa, SA_LEN(sa)) == -1)
760 if (errno != EINPROGRESS)
761 goto fail;
763 io->sock = sock;
764 io_reset(io, EV_WRITE, io_dispatch_connect);
766 return (sock);
768 fail:
769 if (sock != -1) {
770 errno_save = errno;
771 close(sock);
772 errno = errno_save;
773 io->error = strerror(errno);
775 return (-1);
778 void
779 io_dispatch_connect(int fd, short ev, void *humppa)
781 struct io *io = humppa;
782 int r, e;
783 socklen_t sl;
785 io_frame_enter("io_dispatch_connect", io, ev);
787 if (ev == EV_TIMEOUT) {
788 close(fd);
789 io->sock = -1;
790 io_callback(io, IO_TIMEOUT);
791 } else {
792 sl = sizeof(e);
793 r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl);
794 if (r == -1) {
795 warn("io_dispatch_connect: getsockopt");
796 e = errno;
798 if (e) {
799 close(fd);
800 io->sock = -1;
801 io->error = strerror(e);
802 io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR);
804 else {
805 io->state = IO_STATE_UP;
806 io_callback(io, IO_CONNECTED);
810 io_frame_leave(io);
813 #ifdef IO_TLS
815 static const char*
816 io_tls_error(void)
818 static char buf[128];
819 unsigned long e;
821 e = ERR_peek_last_error();
822 if (e) {
823 ERR_error_string(e, buf);
824 return (buf);
827 return ("No TLS error");
830 int
831 io_start_tls(struct io *io, void *tls)
833 int mode;
835 mode = io->flags & IO_RW;
836 if (mode == 0 || mode == IO_RW)
837 errx(1, "io_start_tls(): full-duplex or unset");
839 if (io->tls)
840 errx(1, "io_start_tls(): TLS already started");
841 io->tls = tls;
843 if (SSL_set_fd(io->tls, io->sock) == 0) {
844 ssl_error("io_start_tls:SSL_set_fd");
845 return (-1);
848 if (mode == IO_WRITE) {
849 io->state = IO_STATE_CONNECT_TLS;
850 SSL_set_connect_state(io->tls);
851 io_reset(io, EV_WRITE, io_dispatch_connect_tls);
852 } else {
853 io->state = IO_STATE_ACCEPT_TLS;
854 SSL_set_accept_state(io->tls);
855 io_reset(io, EV_READ, io_dispatch_accept_tls);
858 return (0);
861 void
862 io_dispatch_accept_tls(int fd, short event, void *humppa)
864 struct io *io = humppa;
865 int e, ret;
867 io_frame_enter("io_dispatch_accept_tls", io, event);
869 if (event == EV_TIMEOUT) {
870 io_callback(io, IO_TIMEOUT);
871 goto leave;
874 if ((ret = SSL_accept(io->tls)) > 0) {
875 io->state = IO_STATE_UP;
876 io_callback(io, IO_TLSREADY);
877 goto leave;
880 switch ((e = SSL_get_error(io->tls, ret))) {
881 case SSL_ERROR_WANT_READ:
882 io_reset(io, EV_READ, io_dispatch_accept_tls);
883 break;
884 case SSL_ERROR_WANT_WRITE:
885 io_reset(io, EV_WRITE, io_dispatch_accept_tls);
886 break;
887 default:
888 io->error = io_tls_error();
889 ssl_error("io_dispatch_accept_tls:SSL_accept");
890 io_callback(io, IO_ERROR);
891 break;
894 leave:
895 io_frame_leave(io);
898 void
899 io_dispatch_connect_tls(int fd, short event, void *humppa)
901 struct io *io = humppa;
902 int e, ret;
904 io_frame_enter("io_dispatch_connect_tls", io, event);
906 if (event == EV_TIMEOUT) {
907 io_callback(io, IO_TIMEOUT);
908 goto leave;
911 if ((ret = SSL_connect(io->tls)) > 0) {
912 io->state = IO_STATE_UP;
913 io_callback(io, IO_TLSREADY);
914 goto leave;
917 switch ((e = SSL_get_error(io->tls, ret))) {
918 case SSL_ERROR_WANT_READ:
919 io_reset(io, EV_READ, io_dispatch_connect_tls);
920 break;
921 case SSL_ERROR_WANT_WRITE:
922 io_reset(io, EV_WRITE, io_dispatch_connect_tls);
923 break;
924 default:
925 io->error = io_tls_error();
926 ssl_error("io_dispatch_connect_ssl:SSL_connect");
927 io_callback(io, IO_TLSERROR);
928 break;
931 leave:
932 io_frame_leave(io);
935 void
936 io_dispatch_read_tls(int fd, short event, void *humppa)
938 struct io *io = humppa;
939 int n, saved_errno;
941 io_frame_enter("io_dispatch_read_tls", io, event);
943 if (event == EV_TIMEOUT) {
944 io_callback(io, IO_TIMEOUT);
945 goto leave;
948 again:
949 iobuf_normalize(&io->iobuf);
950 switch ((n = iobuf_read_tls(&io->iobuf, (SSL*)io->tls))) {
951 case IOBUF_WANT_READ:
952 io_reset(io, EV_READ, io_dispatch_read_tls);
953 break;
954 case IOBUF_WANT_WRITE:
955 io_reset(io, EV_WRITE, io_dispatch_read_tls);
956 break;
957 case IOBUF_CLOSED:
958 io_callback(io, IO_DISCONNECTED);
959 break;
960 case IOBUF_ERROR:
961 saved_errno = errno;
962 io->error = strerror(errno);
963 errno = saved_errno;
964 io_callback(io, IO_ERROR);
965 break;
966 case IOBUF_TLSERROR:
967 io->error = io_tls_error();
968 ssl_error("io_dispatch_read_tls:SSL_read");
969 io_callback(io, IO_ERROR);
970 break;
971 default:
972 io_debug("io_dispatch_read_tls(...) -> r=%d\n", n);
973 io_callback(io, IO_DATAIN);
974 if (current == io && IO_READING(io) && SSL_pending(io->tls))
975 goto again;
978 leave:
979 io_frame_leave(io);
982 void
983 io_dispatch_write_tls(int fd, short event, void *humppa)
985 struct io *io = humppa;
986 int n, saved_errno;
987 size_t w2, w;
989 io_frame_enter("io_dispatch_write_tls", io, event);
991 if (event == EV_TIMEOUT) {
992 io_callback(io, IO_TIMEOUT);
993 goto leave;
996 w = io_queued(io);
997 switch ((n = iobuf_write_tls(&io->iobuf, (SSL*)io->tls))) {
998 case IOBUF_WANT_READ:
999 io_reset(io, EV_READ, io_dispatch_write_tls);
1000 break;
1001 case IOBUF_WANT_WRITE:
1002 io_reset(io, EV_WRITE, io_dispatch_write_tls);
1003 break;
1004 case IOBUF_CLOSED:
1005 io_callback(io, IO_DISCONNECTED);
1006 break;
1007 case IOBUF_ERROR:
1008 saved_errno = errno;
1009 io->error = strerror(errno);
1010 errno = saved_errno;
1011 io_callback(io, IO_ERROR);
1012 break;
1013 case IOBUF_TLSERROR:
1014 io->error = io_tls_error();
1015 ssl_error("io_dispatch_write_tls:SSL_write");
1016 io_callback(io, IO_ERROR);
1017 break;
1018 default:
1019 io_debug("io_dispatch_write_tls(...) -> w=%d\n", n);
1020 w2 = io_queued(io);
1021 if (w > io->lowat && w2 <= io->lowat)
1022 io_callback(io, IO_LOWAT);
1023 break;
1026 leave:
1027 io_frame_leave(io);
1030 void
1031 io_reload_tls(struct io *io)
1033 short ev = 0;
1034 void (*dispatch)(int, short, void*) = NULL;
1036 switch (io->state) {
1037 case IO_STATE_CONNECT_TLS:
1038 ev = EV_WRITE;
1039 dispatch = io_dispatch_connect_tls;
1040 break;
1041 case IO_STATE_ACCEPT_TLS:
1042 ev = EV_READ;
1043 dispatch = io_dispatch_accept_tls;
1044 break;
1045 case IO_STATE_UP:
1046 ev = 0;
1047 if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
1048 ev = EV_READ;
1049 dispatch = io_dispatch_read_tls;
1051 else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) &&
1052 io_queued(io)) {
1053 ev = EV_WRITE;
1054 dispatch = io_dispatch_write_tls;
1056 if (!ev)
1057 return; /* paused */
1058 break;
1059 default:
1060 errx(1, "io_reload_tls(): bad state");
1063 io_reset(io, ev, dispatch);
1066 #endif /* IO_TLS */