Blob


1 /* $OpenBSD: ioev.c,v 1.42 2019/06/12 17:42:53 eric Exp $ */
2 /*
3 * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17 #define _GNU_SOURCE 1
18 #define _BSD_SOURCE 1
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>

#include <err.h>
#include <errno.h>
#include <event.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#include "openbsd-compat.h"
#include "ioev.h"
#include "iobuf.h"
38 #ifdef IO_TLS
39 #include <openssl/err.h>
40 #include <openssl/ssl.h>
41 #endif
/* Internal state machine for an io: idle, connecting, TLS handshake, or up. */
enum {
	IO_STATE_NONE,
	IO_STATE_CONNECT,	/* TCP connect in progress */
	IO_STATE_CONNECT_TLS,	/* client-side TLS handshake in progress */
	IO_STATE_ACCEPT_TLS,	/* server-side TLS handshake in progress */
	IO_STATE_UP,		/* connection established and usable */

	IO_STATE_MAX,
};
#define IO_PAUSE_IN	IO_IN			/* user paused reading */
#define IO_PAUSE_OUT	IO_OUT			/* user paused writing */
#define IO_READ		0x04			/* io is in read mode */
#define IO_WRITE	0x08			/* io is in write mode */
#define IO_RW		(IO_READ | IO_WRITE)
#define IO_RESET	0x10 /* internal: events already reset this frame */
#define IO_HELD		0x20 /* internal: held by the current event frame */
/*
 * Per-connection state: socket, user callback, buffered i/o and the
 * libevent registration.
 */
struct io {
	int		 sock;		/* file descriptor, -1 when unset */
	void		*arg;		/* opaque user argument passed to cb */
	void		(*cb)(struct io*, int, void *); /* user event callback */
	struct iobuf	 iobuf;		/* input/output buffers */
	size_t		 lowat;		/* output low-water mark for IO_LOWAT */
	int		 timeout;	/* event timeout in msec, -1 for none */
	int		 flags;		/* IO_* mode/pause/internal flags */
	int		 state;		/* IO_STATE_* */
	struct event	 ev;		/* libevent handle */
	void		*tls;		/* SSL handle when TLS is active */
	const char	*error;		/* only valid immediately on callback */
};
75 const char* io_strflags(int);
76 const char* io_evstr(short);
78 void _io_init(void);
79 void io_hold(struct io *);
80 void io_release(struct io *);
81 void io_callback(struct io*, int);
82 void io_dispatch(int, short, void *);
83 void io_dispatch_connect(int, short, void *);
84 size_t io_pending(struct io *);
85 size_t io_queued(struct io*);
86 void io_reset(struct io *, short, void (*)(int, short, void*));
87 void io_frame_enter(const char *, struct io *, int);
88 void io_frame_leave(struct io *);
90 #ifdef IO_TLS
91 void ssl_error(const char *); /* XXX external */
93 static const char* io_tls_error(void);
94 void io_dispatch_accept_tls(int, short, void *);
95 void io_dispatch_connect_tls(int, short, void *);
96 void io_dispatch_read_tls(int, short, void *);
97 void io_dispatch_write_tls(int, short, void *);
98 void io_reload_tls(struct io *io);
99 #endif
101 static struct io *current = NULL;
102 static uint64_t frame = 0;
103 static int _io_debug = 0;
105 #define io_debug(args...) do { if (_io_debug) printf(args); } while(0)
/*
 * Render a one-line debug description of the io (fd, timeout, flags,
 * TLS parameters if any, buffered byte counts) into a static buffer.
 * Not reentrant: the result is only valid until the next call.
 */
const char*
io_strio(struct io *io)
{
	static char buf[128];
	char ssl[128];

	ssl[0] = '\0';
#ifdef IO_TLS
	if (io->tls) {
		(void)snprintf(ssl, sizeof ssl, " tls=%s:%s:%d",
		    SSL_get_version(io->tls),
		    SSL_get_cipher_name(io->tls),
		    SSL_get_cipher_bits(io->tls, NULL));
	}
#endif

	(void)snprintf(buf, sizeof buf,
	    "<io:%p fd=%d to=%d fl=%s%s ib=%zu ob=%zu>",
	    io, io->sock, io->timeout, io_strflags(io->flags), ssl,
	    io_pending(io), io_queued(io));

	return (buf);
}
132 #define CASE(x) case x : return #x
134 const char*
135 io_strevent(int evt)
137 static char buf[32];
139 switch (evt) {
140 CASE(IO_CONNECTED);
141 CASE(IO_TLSREADY);
142 CASE(IO_DATAIN);
143 CASE(IO_LOWAT);
144 CASE(IO_DISCONNECTED);
145 CASE(IO_TIMEOUT);
146 CASE(IO_ERROR);
147 default:
148 (void)snprintf(buf, sizeof(buf), "IO_? %d", evt);
149 return buf;
/*
 * Set O_NONBLOCK on fd, preserving the other file status flags.
 * Aborts the process on fcntl failure.
 */
void
io_set_nonblocking(int fd)
{
	int flags;

	if ((flags = fcntl(fd, F_GETFL)) == -1)
		err(1, "io_set_nonblocking:fcntl(F_GETFL)");

	flags |= O_NONBLOCK;

	if (fcntl(fd, F_SETFL, flags) == -1)
		err(1, "io_set_nonblocking:fcntl(F_SETFL)");
	/* fixed: err() messages previously said "io_set_blocking",
	 * which named the wrong function in fatal diagnostics. */
}
/*
 * Disable SO_LINGER on a socket (zeroed struct linger == linger off),
 * so close() never blocks waiting for unsent data.  Aborts on failure.
 */
void
io_set_nolinger(int fd)
{
	struct linger l;

	memset(&l, 0, sizeof(l));
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		err(1, "io_set_nolinger:setsockopt()");
	/* fixed: err() message previously said "io_set_linger",
	 * which named the wrong function in fatal diagnostics. */
}
/*
 * Event framing must not rely on an io pointer to refer to the "same" io
 * throughout the frame, because this is not always the case:
 *
 * 1) enter(addr0) -> free(addr0) -> leave(addr0) = SEGV
 * 2) enter(addr0) -> free(addr0) -> malloc == addr0 -> leave(addr0) = BAD!
 *
 * In both cases, the problem is that the io is freed in the callback, so
 * the pointer becomes invalid. If that happens, the user is required to
 * call io_clear, so we can adapt the frame state there.
 */
/*
 * Mark the start of an event-dispatch frame for the given io.  Frames
 * must not nest: only one io may be current at a time.  The io is held
 * so that io_reload() is deferred until the frame is left.
 */
void
io_frame_enter(const char *where, struct io *io, int ev)
{
	io_debug("\n=== %" PRIu64 " ===\n"
	    "io_frame_enter(%s, %s, %s)\n",
	    frame, where, io_evstr(ev), io_strio(io));

	if (current)
		errx(1, "io_frame_enter: interleaved frames");

	current = io;

	io_hold(io);
}
/*
 * Mark the end of an event frame: release the held io (which reloads
 * its events unless they were reset during the frame) and advance the
 * frame counter.  If the io was freed during the frame, "current" was
 * already cleared by io_free() and there is nothing to release.
 */
void
io_frame_leave(struct io *io)
{
	io_debug("io_frame_leave(%" PRIu64 ")\n", frame);

	if (current && current != io)
		errx(1, "io_frame_leave: io mismatch");

	/* io has been cleared */
	if (current == NULL)
		goto done;

	/* TODO: There is a possible optimization there:
	 * In a typical half-duplex request/response scenario,
	 * the io is waiting to read a request, and when done, it queues
	 * the response in the output buffer and goes to write mode.
	 * There, the write event is set and will be triggered in the next
	 * event frame.  In most case, the write call could be done
	 * immediately as part of the last read frame, thus avoiding to go
	 * through the event loop machinery. So, as an optimisation, we
	 * could detect that case here and force an event dispatching.
	 */

	/* Reload the io if it has not been reset already. */
	io_release(io);
	current = NULL;
    done:
	io_debug("=== /%" PRIu64 "\n", frame);

	frame += 1;
}
235 void
236 _io_init()
238 static int init = 0;
240 if (init)
241 return;
243 init = 1;
244 _io_debug = getenv("IO_DEBUG") != NULL;
247 struct io *
248 io_new(void)
250 struct io *io;
252 _io_init();
254 if ((io = calloc(1, sizeof(*io))) == NULL)
255 return NULL;
257 io->sock = -1;
258 io->timeout = -1;
260 if (iobuf_init(&io->iobuf, 0, 0) == -1) {
261 free(io);
262 return NULL;
265 return io;
/*
 * Dispose of an io: detach it from the current frame if needed, free
 * the TLS handle, tear down the pending event, close the socket and
 * release the buffers.
 */
void
io_free(struct io *io)
{
	io_debug("io_clear(%p)\n", io);

	/* the current io is virtually dead */
	if (io == current)
		current = NULL;

#ifdef IO_TLS
	/* SSL_free(NULL) is a no-op */
	SSL_free(io->tls);
	io->tls = NULL;
#endif

	if (event_initialized(&io->ev))
		event_del(&io->ev);
	if (io->sock != -1) {
		close(io->sock);
		io->sock = -1;
	}

	iobuf_clear(&io->iobuf);
	free(io);
}
/*
 * Hold the io for the duration of the current frame: clears any stale
 * RESET flag and prevents io_reload() until io_release().  Fatal if
 * the io is already held.
 */
void
io_hold(struct io *io)
{
	io_debug("io_enter(%p)\n", io);

	if (io->flags & IO_HELD)
		errx(1, "io_hold: io is already held");

	io->flags &= ~IO_RESET;
	io->flags |= IO_HELD;
}
/*
 * Drop the hold placed by io_hold() and re-arm the events, unless they
 * were already reset during the frame.  Fatal if the io is not held.
 */
void
io_release(struct io *io)
{
	if (!(io->flags & IO_HELD))
		errx(1, "io_release: io is not held");

	io->flags &= ~IO_HELD;
	if (!(io->flags & IO_RESET))
		io_reload(io);
}
/*
 * Attach a file descriptor to the io and (re)arm its events.
 * Passing -1 detaches without touching the event state.
 */
void
io_set_fd(struct io *io, int fd)
{
	io->sock = fd;
	if (fd != -1)
		io_reload(io);
}
/*
 * Register the user callback invoked on io events, with its opaque
 * argument.
 */
void
io_set_callback(struct io *io, void(*cb)(struct io *, int, void *), void *arg)
{
	io->cb = cb;
	io->arg = arg;
}
/*
 * Set the event timeout in milliseconds; -1 disables the timeout.
 * Takes effect the next time the events are (re)armed.
 */
void
io_set_timeout(struct io *io, int msec)
{
	io_debug("io_set_timeout(%p, %d)\n", io, msec);

	io->timeout = msec;
}
/*
 * Set the output low-water mark: IO_LOWAT fires when the queued output
 * drops from above to at-or-below this many bytes.
 */
void
io_set_lowat(struct io *io, size_t lowat)
{
	io_debug("io_set_lowat(%p, %zu)\n", io, lowat);

	io->lowat = lowat;
}
/*
 * Pause reading and/or writing (IO_PAUSE_IN / IO_PAUSE_OUT) and re-arm
 * the events accordingly.
 */
void
io_pause(struct io *io, int dir)
{
	io_debug("io_pause(%p, %x)\n", io, dir);

	io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT);
	io_reload(io);
}
/*
 * Resume a direction previously paused with io_pause() and re-arm the
 * events accordingly.
 */
void
io_resume(struct io *io, int dir)
{
	io_debug("io_resume(%p, %x)\n", io, dir);

	io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT));
	io_reload(io);
}
365 void
366 io_set_read(struct io *io)
368 int mode;
370 io_debug("io_set_read(%p)\n", io);
372 mode = io->flags & IO_RW;
373 if (!(mode == 0 || mode == IO_WRITE))
374 errx(1, "io_set_read(): full-duplex or reading");
376 io->flags &= ~IO_RW;
377 io->flags |= IO_READ;
378 io_reload(io);
381 void
382 io_set_write(struct io *io)
384 int mode;
386 io_debug("io_set_write(%p)\n", io);
388 mode = io->flags & IO_RW;
389 if (!(mode == 0 || mode == IO_READ))
390 errx(1, "io_set_write(): full-duplex or writing");
392 io->flags &= ~IO_RW;
393 io->flags |= IO_WRITE;
394 io_reload(io);
/*
 * Return the last error string; only valid immediately within the
 * callback that reported the error.
 */
const char *
io_error(struct io *io)
{
	return io->error;
}
/*
 * Return the attached TLS handle, or NULL when TLS is not active.
 */
void *
io_tls(struct io *io)
{
	return io->tls;
}
/*
 * Return the underlying file descriptor, or -1 when unset.
 */
int
io_fileno(struct io *io)
{
	return io->sock;
}
/*
 * Return non-zero if the current pause flags exactly match "what".
 */
int
io_paused(struct io *io, int what)
{
	return (io->flags & (IO_PAUSE_IN | IO_PAUSE_OUT)) == what;
}
421 /*
422 * Buffered output functions
423 */
/*
 * Queue len bytes from buf on the output buffer and re-arm the events.
 * Returns iobuf_queue()'s result.
 */
int
io_write(struct io *io, const void *buf, size_t len)
{
	int r;

	r = iobuf_queue(&io->iobuf, buf, len);

	io_reload(io);

	return r;
}
/*
 * Queue an iovec array on the output buffer and re-arm the events.
 * Returns iobuf_queuev()'s result.
 */
int
io_writev(struct io *io, const struct iovec *iov, int iovcount)
{
	int r;

	r = iobuf_queuev(&io->iobuf, iov, iovcount);

	io_reload(io);

	return r;
}
/*
 * Queue a NUL-terminated string (without the terminator) for output.
 */
int
io_print(struct io *io, const char *s)
{
	return io_write(io, s, strlen(s));
}
/*
 * printf-style formatted write to the io output buffer; forwards to
 * io_vprintf().
 */
int
io_printf(struct io *io, const char *fmt, ...)
{
	va_list ap;
	int r;

	va_start(ap, fmt);
	r = io_vprintf(io, fmt, ap);
	va_end(ap);

	return r;
}
/*
 * Format into a temporary heap buffer and queue the result on the io.
 * Returns io_write()'s result, or -1 if formatting fails.
 */
int
io_vprintf(struct io *io, const char *fmt, va_list ap)
{
	char *s;
	int n;

	if ((n = vasprintf(&s, fmt, ap)) == -1)
		return -1;
	n = io_write(io, s, n);
	free(s);

	return n;
}
/*
 * Return the number of bytes queued for output.
 */
size_t
io_queued(struct io *io)
{
	return iobuf_queued(&io->iobuf);
}
490 /*
491 * Buffered input functions
492 */
/*
 * Return a pointer to the buffered input data.
 */
void *
io_data(struct io *io)
{
	return iobuf_data(&io->iobuf);
}
/*
 * Return the number of buffered input bytes.
 */
size_t
io_datalen(struct io *io)
{
	return iobuf_len(&io->iobuf);
}
/*
 * Extract the next line from the input buffer; size returned through
 * sz if non-NULL.  Semantics are those of iobuf_getline().
 */
char *
io_getline(struct io *io, size_t *sz)
{
	return iobuf_getline(&io->iobuf, sz);
}
512 void
513 io_drop(struct io *io, size_t sz)
515 return iobuf_drop(&io->iobuf, sz);
/* An io counts as reading unless it is in write-only mode. */
#define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE)
/* An io counts as writing unless it is in read-only mode. */
#define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ)
/*
 * Setup the necessary events as required by the current io state,
 * honouring duplex mode and i/o pauses.
 */
void
io_reload(struct io *io)
{
	short events;

	/* io will be reloaded at release time */
	if (io->flags & IO_HELD)
		return;

	iobuf_normalize(&io->iobuf);

#ifdef IO_TLS
	if (io->tls) {
		io_reload_tls(io);
		return;
	}
#endif

	io_debug("io_reload(%p)\n", io);

	events = 0;
	if (IO_READING(io) && !(io->flags & IO_PAUSE_IN))
		events = EV_READ;
	/* only poll for write when there is something queued */
	if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io))
		events |= EV_WRITE;

	io_reset(io, events, io_dispatch);
}
/*
 * Set the requested event: replace any pending event registration with
 * one for "events" dispatched through "dispatch", applying the io's
 * timeout.  events == 0 leaves the io without any pending event.
 */
void
io_reset(struct io *io, short events, void (*dispatch)(int, short, void*))
{
	struct timeval tv, *ptv;

	io_debug("io_reset(%p, %s, %p) -> %s\n",
	    io, io_evstr(events), dispatch, io_strio(io));

	/*
	 * Indicate that the event has already been reset so that reload
	 * is not called on frame_leave.
	 */
	io->flags |= IO_RESET;

	if (event_initialized(&io->ev))
		event_del(&io->ev);

	/*
	 * The io is paused by the user, so we don't want the timeout to be
	 * effective.
	 */
	if (events == 0)
		return;

	event_set(&io->ev, io->sock, events, dispatch, io);
	if (io->timeout >= 0) {
		tv.tv_sec = io->timeout / 1000;
		tv.tv_usec = (io->timeout % 1000) * 1000;
		ptv = &tv;
	} else
		ptv = NULL;

	event_add(&io->ev, ptv);
}
/*
 * Return the number of input bytes pending in the buffer.
 */
size_t
io_pending(struct io *io)
{
	return iobuf_len(&io->iobuf);
}
597 const char*
598 io_strflags(int flags)
600 static char buf[64];
602 buf[0] = '\0';
604 switch (flags & IO_RW) {
605 case 0:
606 (void)strlcat(buf, "rw", sizeof buf);
607 break;
608 case IO_READ:
609 (void)strlcat(buf, "R", sizeof buf);
610 break;
611 case IO_WRITE:
612 (void)strlcat(buf, "W", sizeof buf);
613 break;
614 case IO_RW:
615 (void)strlcat(buf, "RW", sizeof buf);
616 break;
619 if (flags & IO_PAUSE_IN)
620 (void)strlcat(buf, ",F_PI", sizeof buf);
621 if (flags & IO_PAUSE_OUT)
622 (void)strlcat(buf, ",F_PO", sizeof buf);
624 return buf;
627 const char*
628 io_evstr(short ev)
630 static char buf[64];
631 char buf2[16];
632 int n;
634 n = 0;
635 buf[0] = '\0';
637 if (ev == 0) {
638 (void)strlcat(buf, "<NONE>", sizeof(buf));
639 return buf;
642 if (ev & EV_TIMEOUT) {
643 (void)strlcat(buf, "EV_TIMEOUT", sizeof(buf));
644 ev &= ~EV_TIMEOUT;
645 n++;
648 if (ev & EV_READ) {
649 if (n)
650 (void)strlcat(buf, "|", sizeof(buf));
651 (void)strlcat(buf, "EV_READ", sizeof(buf));
652 ev &= ~EV_READ;
653 n++;
656 if (ev & EV_WRITE) {
657 if (n)
658 (void)strlcat(buf, "|", sizeof(buf));
659 (void)strlcat(buf, "EV_WRITE", sizeof(buf));
660 ev &= ~EV_WRITE;
661 n++;
664 if (ev & EV_SIGNAL) {
665 if (n)
666 (void)strlcat(buf, "|", sizeof(buf));
667 (void)strlcat(buf, "EV_SIGNAL", sizeof(buf));
668 ev &= ~EV_SIGNAL;
669 n++;
672 if (ev) {
673 if (n)
674 (void)strlcat(buf, "|", sizeof(buf));
675 (void)strlcat(buf, "EV_?=0x", sizeof(buf));
676 (void)snprintf(buf2, sizeof(buf2), "%hx", ev);
677 (void)strlcat(buf, buf2, sizeof(buf));
680 return buf;
/*
 * Main libevent callback for a plain (non-TLS) io: handle timeout,
 * flush queued output, then read incoming data, issuing the user
 * callbacks (IO_LOWAT, IO_DATAIN, IO_DISCONNECTED, IO_ERROR) along
 * the way.
 */
void
io_dispatch(__unused int fd, short ev, void *humppa)
{
	struct io *io = humppa;
	size_t w;
	ssize_t n;
	int saved_errno;

	io_frame_enter("io_dispatch", io, ev);

	if (ev == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if (ev & EV_WRITE && (w = io_queued(io))) {
		if ((n = iobuf_write(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_WANT_WRITE) /* kqueue bug? */
				goto read;
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				/* strerror() may clobber errno; keep it intact
				 * for the callback */
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		/* notify when output crossed below the low-water mark */
		if (w > io->lowat && w - n <= io->lowat)
			io_callback(io, IO_LOWAT);
	}

    read:

	if (ev & EV_READ) {
		iobuf_normalize(&io->iobuf);
		if ((n = iobuf_read(&io->iobuf, io->sock)) < 0) {
			if (n == IOBUF_CLOSED)
				io_callback(io, IO_DISCONNECTED);
			else {
				saved_errno = errno;
				io->error = strerror(errno);
				errno = saved_errno;
				io_callback(io, IO_ERROR);
			}
			goto leave;
		}
		if (n)
			io_callback(io, IO_DATAIN);
	}

leave:
	io_frame_leave(io);
}
/*
 * Invoke the user callback with the given event and the registered
 * argument.
 */
void
io_callback(struct io *io, int evt)
{
	io->cb(io, evt, io->arg);
}
/*
 * Create a non-blocking TCP socket, optionally bind it to bsa, and
 * start connecting to sa.  On success the io waits for a write event
 * signalling connect completion and the socket is returned; on failure
 * -1 is returned with io->error set (unless socket() itself failed).
 */
int
io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa)
{
	int sock, errno_save;

	if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1)
		goto fail;

	io_set_nonblocking(sock);
	io_set_nolinger(sock);

	if (bsa && bind(sock, bsa, SA_LEN(bsa)) == -1)
		goto fail;

	/* EINPROGRESS is the expected outcome for a non-blocking connect */
	if (connect(sock, sa, SA_LEN(sa)) == -1)
		if (errno != EINPROGRESS)
			goto fail;

	io->sock = sock;
	io_reset(io, EV_WRITE, io_dispatch_connect);

	return (sock);

    fail:
	if (sock != -1) {
		errno_save = errno;
		close(sock);
		errno = errno_save;
		io->error = strerror(errno);
	}
	return (-1);
}
/*
 * Completion callback for a pending connect(): query SO_ERROR and
 * report IO_CONNECTED, IO_TIMEOUT or IO_ERROR, closing the socket on
 * failure.
 */
void
io_dispatch_connect(int fd, short ev, void *humppa)
{
	struct io *io = humppa;
	int r, e;
	socklen_t sl;

	io_frame_enter("io_dispatch_connect", io, ev);

	if (ev == EV_TIMEOUT) {
		close(fd);
		io->sock = -1;
		io_callback(io, IO_TIMEOUT);
	} else {
		sl = sizeof(e);
		r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl);
		if (r == -1) {
			warn("io_dispatch_connect: getsockopt");
			e = errno;
		}
		if (e) {
			close(fd);
			io->sock = -1;
			io->error = strerror(e);
			io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR);
		}
		else {
			io->state = IO_STATE_UP;
			io_callback(io, IO_CONNECTED);
		}
	}

	io_frame_leave(io);
}
812 #ifdef IO_TLS
/*
 * Return a static string describing the most recent OpenSSL error, or
 * a placeholder when the error queue is empty.  Not reentrant.
 */
static const char*
io_tls_error(void)
{
	static char buf[128];
	unsigned long e;

	if ((e = ERR_peek_last_error()) == 0)
		return ("No TLS error");

	ERR_error_string(e, buf);
	return (buf);
}
/*
 * Attach an SSL handle to the io and start the TLS handshake in the
 * direction implied by the current mode: write mode connects (client
 * side), read mode accepts (server side).  Fatal if the mode is unset
 * or full-duplex, or if TLS was already started.  Returns 0 on
 * success, -1 if the SSL handle could not be bound to the socket.
 */
int
io_start_tls(struct io *io, void *tls)
{
	int mode;

	mode = io->flags & IO_RW;
	if (mode == 0 || mode == IO_RW)
		errx(1, "io_start_tls(): full-duplex or unset");

	if (io->tls)
		errx(1, "io_start_tls(): TLS already started");
	io->tls = tls;

	if (SSL_set_fd(io->tls, io->sock) == 0) {
		ssl_error("io_start_tls:SSL_set_fd");
		return (-1);
	}

	if (mode == IO_WRITE) {
		io->state = IO_STATE_CONNECT_TLS;
		SSL_set_connect_state(io->tls);
		io_reset(io, EV_WRITE, io_dispatch_connect_tls);
	} else {
		io->state = IO_STATE_ACCEPT_TLS;
		SSL_set_accept_state(io->tls);
		io_reset(io, EV_READ, io_dispatch_accept_tls);
	}

	return (0);
}
/*
 * Event callback driving the server-side TLS handshake: retry
 * SSL_accept(), re-arming for whichever direction OpenSSL asks for,
 * and report IO_TLSREADY on completion, or IO_TIMEOUT/IO_ERROR.
 */
void
io_dispatch_accept_tls(int fd, short event, void *humppa)
{
	struct io *io = humppa;
	int e, ret;

	io_frame_enter("io_dispatch_accept_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_accept(io->tls)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->tls, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_accept_tls);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_accept_tls);
		break;
	default:
		io->error = io_tls_error();
		ssl_error("io_dispatch_accept_tls:SSL_accept");
		io_callback(io, IO_ERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}
/*
 * Event callback driving the client-side TLS handshake: retry
 * SSL_connect(), re-arming for whichever direction OpenSSL asks for,
 * and report IO_TLSREADY on completion, or IO_TIMEOUT/IO_TLSERROR.
 */
void
io_dispatch_connect_tls(int fd, short event, void *humppa)
{
	struct io *io = humppa;
	int e, ret;

	io_frame_enter("io_dispatch_connect_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	if ((ret = SSL_connect(io->tls)) > 0) {
		io->state = IO_STATE_UP;
		io_callback(io, IO_TLSREADY);
		goto leave;
	}

	switch ((e = SSL_get_error(io->tls, ret))) {
	case SSL_ERROR_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_connect_tls);
		break;
	case SSL_ERROR_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_connect_tls);
		break;
	default:
		io->error = io_tls_error();
		ssl_error("io_dispatch_connect_ssl:SSL_connect");
		io_callback(io, IO_TLSERROR);
		break;
	}

    leave:
	io_frame_leave(io);
}
/*
 * Read event callback for an established TLS io: pull decrypted data
 * into the input buffer and deliver IO_DATAIN, re-arming for the
 * direction OpenSSL requires on WANT_READ/WANT_WRITE, and reporting
 * IO_DISCONNECTED/IO_ERROR otherwise.
 */
void
io_dispatch_read_tls(int fd, short event, void *humppa)
{
	struct io *io = humppa;
	int n, saved_errno;

	io_frame_enter("io_dispatch_read_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

    again:
	iobuf_normalize(&io->iobuf);
	switch ((n = iobuf_read_tls(&io->iobuf, (SSL*)io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_read_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_read_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* strerror() may clobber errno; keep it intact */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_TLSERROR:
		io->error = io_tls_error();
		ssl_error("io_dispatch_read_tls:SSL_read");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_read_tls(...) -> r=%d\n", n);
		io_callback(io, IO_DATAIN);
		/* drain records already buffered inside the SSL object;
		 * skip if the callback freed/retargeted the io */
		if (current == io && IO_READING(io) && SSL_pending(io->tls))
			goto again;
	}

    leave:
	io_frame_leave(io);
}
/*
 * Write event callback for an established TLS io: flush queued output
 * through OpenSSL, re-arming for the direction OpenSSL requires on
 * WANT_READ/WANT_WRITE, delivering IO_LOWAT when the queue drains
 * below the low-water mark, and IO_DISCONNECTED/IO_ERROR otherwise.
 */
void
io_dispatch_write_tls(int fd, short event, void *humppa)
{
	struct io *io = humppa;
	int n, saved_errno;
	size_t w2, w;

	io_frame_enter("io_dispatch_write_tls", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	w = io_queued(io);
	switch ((n = iobuf_write_tls(&io->iobuf, (SSL*)io->tls))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_write_tls);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_write_tls);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* strerror() may clobber errno; keep it intact */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_TLSERROR:
		io->error = io_tls_error();
		ssl_error("io_dispatch_write_tls:SSL_write");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_write_tls(...) -> w=%d\n", n);
		w2 = io_queued(io);
		/* notify when output crossed below the low-water mark */
		if (w > io->lowat && w2 <= io->lowat)
			io_callback(io, IO_LOWAT);
		break;
	}

    leave:
	io_frame_leave(io);
}
/*
 * TLS counterpart of io_reload(): choose the event and dispatcher
 * matching the current TLS state (handshake in either direction, or
 * established read/write), honouring i/o pauses.  Fatal on any other
 * state.
 */
void
io_reload_tls(struct io *io)
{
	short ev = 0;
	void (*dispatch)(int, short, void*) = NULL;

	switch (io->state) {
	case IO_STATE_CONNECT_TLS:
		ev = EV_WRITE;
		dispatch = io_dispatch_connect_tls;
		break;
	case IO_STATE_ACCEPT_TLS:
		ev = EV_READ;
		dispatch = io_dispatch_accept_tls;
		break;
	case IO_STATE_UP:
		ev = 0;
		if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) {
			ev = EV_READ;
			dispatch = io_dispatch_read_tls;
		}
		else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) &&
		    io_queued(io)) {
			ev = EV_WRITE;
			dispatch = io_dispatch_write_tls;
		}
		if (!ev)
			return; /* paused */
		break;
	default:
		errx(1, "io_reload_tls(): bad state");
	}

	io_reset(io, ev, dispatch);
}
1065 #endif /* IO_TLS */