net.c (8937B)
/*
 * Copyright (c) 2013-2022 Joris Vink <joris@coders.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/socket.h>
#include <sys/types.h>

#if defined(__linux__)
#include <endian.h>
#elif defined(__MACH__)
#include <libkern/OSByteOrder.h>
#define htobe64(x)	OSSwapHostToBigInt64(x)
#define be64toh(x)	OSSwapBigToHostInt64(x)
#else
#include <sys/endian.h>
#endif

#include "kore.h"

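/* Pool from which all netbuf structures for this worker are allocated. */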
struct kore_pool	nb_pool;

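/*
 * Initialize the netbuf pool, sized to this worker's maximum number of
 * connections plus a little slack for internal items.
 */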
void
net_init(void)
{
	u_int32_t	elm;

	/* Add some overhead so we don't roll over for internal items. */
	elm = worker_max_connections + 10;
	kore_pool_init(&nb_pool, "nb_pool", sizeof(struct netbuf), elm);
}

void
net_cleanup(void)
{
	kore_debug("net_cleanup()");
	kore_pool_cleanup(&nb_pool);
}

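/*
 * Grab a netbuf from the pool and reset all of its fields so callers
 * always start from a clean slate.
 */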
struct netbuf *
net_netbuf_get(void)
{
	struct netbuf	*nb;

	nb = kore_pool_get(&nb_pool);

	nb->cb = NULL;
	nb->buf = NULL;
	nb->owner = NULL;
	nb->extra = NULL;
	nb->file_ref = NULL;

	nb->type = 0;
	nb->s_off = 0;
	nb->b_len = 0;
	nb->m_len = 0;
	nb->flags = 0;

#if defined(KORE_USE_PLATFORM_SENDFILE)
	nb->fd_off = -1;
	nb->fd_len = -1;
#endif

	return (nb);
}

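/*
 * Queue data for sending on a connection. If the last netbuf on the send
 * queue is a regular (non-stream) buffer with room left, the data is
 * appended to it first; whatever does not fit goes into a freshly
 * allocated netbuf of at least NETBUF_SEND_PAYLOAD_MAX bytes.
 *
 * Rough usage sketch (assumed caller, not part of this file):
 *
 *	net_send_queue(c, buf, buf_len);
 *	net_send_flush(c);
 */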
void
net_send_queue(struct connection *c, const void *data, size_t len)
{
	const u_int8_t	*d;
	struct netbuf	*nb;
	size_t		avail;

	kore_debug("net_send_queue(%p, %p, %zu)", c, data, len);

	d = data;
	nb = TAILQ_LAST(&(c->send_queue), netbuf_head);
	if (nb != NULL && !(nb->flags & NETBUF_IS_STREAM) &&
	    nb->b_len < nb->m_len) {
		avail = nb->m_len - nb->b_len;
		if (len < avail) {
			memcpy(nb->buf + nb->b_len, d, len);
			nb->b_len += len;
			return;
		} else {
			memcpy(nb->buf + nb->b_len, d, avail);
			nb->b_len += avail;

			len -= avail;
			d += avail;
			if (len == 0)
				return;
		}
	}

	nb = net_netbuf_get();

	nb->owner = c;
	nb->b_len = len;
	nb->type = NETBUF_SEND;

	if (nb->b_len < NETBUF_SEND_PAYLOAD_MAX)
		nb->m_len = NETBUF_SEND_PAYLOAD_MAX;
	else
		nb->m_len = nb->b_len;

	nb->buf = kore_malloc(nb->m_len);
	memcpy(nb->buf, d, nb->b_len);

	TAILQ_INSERT_TAIL(&(c->send_queue), nb, list);
}

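/*
 * Queue a caller-owned buffer for sending without copying it. The netbuf
 * is flagged NETBUF_IS_STREAM and the optional callback is invoked when
 * the netbuf is removed from the send queue. If out is not NULL it
 * receives a pointer to the queued netbuf.
 */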
void
net_send_stream(struct connection *c, void *data, size_t len,
    int (*cb)(struct netbuf *), struct netbuf **out)
{
	struct netbuf	*nb;

	kore_debug("net_send_stream(%p, %p, %zu)", c, data, len);

	nb = net_netbuf_get();
	nb->cb = cb;
	nb->owner = c;
	nb->buf = data;
	nb->b_len = len;
	nb->m_len = nb->b_len;
	nb->type = NETBUF_SEND;
	nb->flags = NETBUF_IS_STREAM;

	TAILQ_INSERT_TAIL(&(c->send_queue), nb, list);

	if (out != NULL)
		*out = nb;
}

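/*
 * Queue a file reference for sending. On plaintext connections with
 * platform sendfile support the file is handed to the kernel via
 * sendfile; on TLS connections (or platforms without sendfile) the
 * contents referenced by ref->base are streamed instead.
 */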
void
net_send_fileref(struct connection *c, struct kore_fileref *ref)
{
	struct netbuf	*nb;

	nb = net_netbuf_get();
	nb->owner = c;
	nb->file_ref = ref;
	nb->type = NETBUF_SEND;
	nb->flags = NETBUF_IS_FILEREF;

#if defined(KORE_USE_PLATFORM_SENDFILE)
	if (c->owner->server->tls == 0) {
		nb->fd_off = 0;
		nb->fd_len = ref->size;
	} else {
		nb->buf = ref->base;
		nb->b_len = ref->size;
		nb->m_len = nb->b_len;
		nb->flags |= NETBUF_IS_STREAM;
	}
#else
	nb->buf = ref->base;
	nb->b_len = ref->size;
	nb->m_len = nb->b_len;
	nb->flags |= NETBUF_IS_STREAM;
#endif

	TAILQ_INSERT_TAIL(&(c->send_queue), nb, list);
}

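/*
 * Prepare the connection's receive netbuf for the next read of len bytes.
 * The existing buffer is reused when it can hold len bytes and its
 * capacity stays under NETBUF_SEND_PAYLOAD_MAX / 2; otherwise it is
 * reallocated to len bytes.
 */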
void
net_recv_reset(struct connection *c, size_t len, int (*cb)(struct netbuf *))
{
	kore_debug("net_recv_reset(): %p %zu", c, len);

	c->rnb->cb = cb;
	c->rnb->s_off = 0;
	c->rnb->b_len = len;

	if (c->rnb->buf != NULL && c->rnb->b_len <= c->rnb->m_len &&
	    c->rnb->m_len < (NETBUF_SEND_PAYLOAD_MAX / 2))
		return;

	kore_free(c->rnb->buf);
	c->rnb->m_len = len;
	c->rnb->buf = kore_malloc(c->rnb->m_len);
}

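/*
 * Set up the initial receive netbuf for a connection. It is a fatal error
 * to call this while a receive netbuf is already present; later reads
 * reuse the netbuf via net_recv_reset() or grow it via net_recv_expand().
 */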
void
net_recv_queue(struct connection *c, size_t len, int flags,
    int (*cb)(struct netbuf *))
{
	kore_debug("net_recv_queue(): %p %zu %d", c, len, flags);

	if (c->rnb != NULL)
		fatal("net_recv_queue(): called incorrectly");

	c->rnb = net_netbuf_get();
	c->rnb->cb = cb;
	c->rnb->owner = c;
	c->rnb->b_len = len;
	c->rnb->m_len = len;
	c->rnb->flags = flags;
	c->rnb->type = NETBUF_RECV;
	c->rnb->buf = kore_malloc(c->rnb->b_len);
}

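/*
 * Grow the expected receive length by len bytes and reallocate the
 * receive buffer accordingly, keeping any data already read.
 */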
void
net_recv_expand(struct connection *c, size_t len, int (*cb)(struct netbuf *))
{
	kore_debug("net_recv_expand(): %p %zu", c, len);

	c->rnb->cb = cb;
	c->rnb->b_len += len;
	c->rnb->m_len = c->rnb->b_len;
	c->rnb->buf = kore_realloc(c->rnb->buf, c->rnb->b_len);
}

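/*
 * Send (part of) the netbuf at the head of the connection's send queue.
 * File references go through the platform sendfile path when possible;
 * everything else is written via the connection's write method, at most
 * NETBUF_SEND_PAYLOAD_MAX bytes per call. A fully sent netbuf (or one
 * flagged NETBUF_FORCE_REMOVE) is removed from the queue.
 */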
int
net_send(struct connection *c)
{
	size_t		r, len, smin;

	c->snb = TAILQ_FIRST(&(c->send_queue));

#if defined(KORE_USE_PLATFORM_SENDFILE)
	if ((c->snb->flags & NETBUF_IS_FILEREF) &&
	    !(c->snb->flags & NETBUF_IS_STREAM)) {
		return (kore_platform_sendfile(c, c->snb));
	}
#endif

	if (c->snb->b_len != 0) {
		smin = c->snb->b_len - c->snb->s_off;
		len = MIN(NETBUF_SEND_PAYLOAD_MAX, smin);

		if (!c->write(c, len, &r))
			return (KORE_RESULT_ERROR);
		if (!(c->evt.flags & KORE_EVENT_WRITE))
			return (KORE_RESULT_OK);

		c->snb->s_off += r;
		c->snb->flags &= ~NETBUF_MUST_RESEND;
	}

	if (c->snb->s_off == c->snb->b_len ||
	    (c->snb->flags & NETBUF_FORCE_REMOVE)) {
		net_remove_netbuf(c, c->snb);
		c->snb = NULL;
	}

	return (KORE_RESULT_OK);
}

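/*
 * Keep calling net_send() while the send queue is non-empty and the
 * connection remains writable. Once the queue drains, a connection
 * flagged CONN_CLOSE_EMPTY is disconnected.
 */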
int
net_send_flush(struct connection *c)
{
	kore_debug("net_send_flush(%p)", c);

	while (!TAILQ_EMPTY(&(c->send_queue)) &&
	    (c->evt.flags & KORE_EVENT_WRITE)) {
		if (!net_send(c))
			return (KORE_RESULT_ERROR);
	}

	if ((c->flags & CONN_CLOSE_EMPTY) && TAILQ_EMPTY(&(c->send_queue))) {
		kore_connection_disconnect(c);
	}

	return (KORE_RESULT_OK);
}

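/*
 * Read into the connection's receive netbuf for as long as the connection
 * stays readable. When the buffer fills up (or NETBUF_CALL_CB_ALWAYS is
 * set) the receive callback runs; it is expected to reset or expand the
 * netbuf before more data can be received.
 */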
int
net_recv_flush(struct connection *c)
{
	size_t		r;

	kore_debug("net_recv_flush(%p)", c);

	if (c->rnb == NULL)
		return (KORE_RESULT_OK);

	while (c->evt.flags & KORE_EVENT_READ) {
		if (c->rnb->buf == NULL)
			return (KORE_RESULT_OK);

		if ((c->rnb->b_len - c->rnb->s_off) == 0)
			return (KORE_RESULT_OK);

		if (!c->read(c, &r))
			return (KORE_RESULT_ERROR);
		if (!(c->evt.flags & KORE_EVENT_READ))
			break;

		c->rnb->s_off += r;
		if (c->rnb->s_off == c->rnb->b_len ||
		    (c->rnb->flags & NETBUF_CALL_CB_ALWAYS)) {
			r = c->rnb->cb(c->rnb);
			if (r != KORE_RESULT_OK)
				return (r);
		}
	}

	return (KORE_RESULT_OK);
}

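/*
 * Remove a netbuf from the connection's send queue and return it to the
 * pool. Netbufs flagged NETBUF_MUST_RESEND are retained and marked
 * NETBUF_FORCE_REMOVE so they get dropped on a later pass. Owned buffers
 * are freed, stream callbacks are invoked and file references released.
 */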
void
net_remove_netbuf(struct connection *c, struct netbuf *nb)
{
	kore_debug("net_remove_netbuf(%p, %p)", c, nb);

	if (nb->type == NETBUF_RECV)
		fatal("net_remove_netbuf(): cannot remove recv netbuf");

	if (nb->flags & NETBUF_MUST_RESEND) {
		kore_debug("retaining %p (MUST_RESEND)", nb);
		nb->flags |= NETBUF_FORCE_REMOVE;
		return;
	}

	if (!(nb->flags & NETBUF_IS_STREAM)) {
		kore_free(nb->buf);
	} else if (nb->cb != NULL) {
		(void)nb->cb(nb);
	}

	if (nb->flags & NETBUF_IS_FILEREF)
		kore_fileref_release(nb->file_ref);

	TAILQ_REMOVE(&(c->send_queue), nb, list);

	kore_pool_put(&nb_pool, nb);
}

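/*
 * Plain-socket write method: send() up to len bytes from the current send
 * netbuf. EINTR counts as a zero-byte write and EAGAIN clears the write
 * event flag so callers stop flushing until the socket is writable again.
 */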
int
net_write(struct connection *c, size_t len, size_t *written)
{
	ssize_t		r;

	r = send(c->fd, (c->snb->buf + c->snb->s_off), len, 0);
	if (r == -1) {
		switch (errno) {
		case EINTR:
			*written = 0;
			return (KORE_RESULT_OK);
		case EAGAIN:
			c->evt.flags &= ~KORE_EVENT_WRITE;
			return (KORE_RESULT_OK);
		default:
			kore_debug("write: %s", errno_s);
			return (KORE_RESULT_ERROR);
		}
	}

	*written = (size_t)r;

	return (KORE_RESULT_OK);
}

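/*
 * Plain-socket read method: recv() into the remaining space of the
 * receive netbuf. EINTR and EAGAIN are handled as in net_write(); a
 * return value of zero means the peer closed the connection and results
 * in a disconnect.
 */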
int
net_read(struct connection *c, size_t *bytes)
{
	ssize_t		r;

	r = recv(c->fd, (c->rnb->buf + c->rnb->s_off),
	    (c->rnb->b_len - c->rnb->s_off), 0);
	if (r == -1) {
		switch (errno) {
		case EINTR:
			*bytes = 0;
			return (KORE_RESULT_OK);
		case EAGAIN:
			c->evt.flags &= ~KORE_EVENT_READ;
			return (KORE_RESULT_OK);
		default:
			kore_debug("read(): %s", errno_s);
			return (KORE_RESULT_ERROR);
		}
	}

	if (r == 0) {
		kore_connection_disconnect(c);
		c->evt.flags &= ~KORE_EVENT_READ;
		return (KORE_RESULT_OK);
	}

	*bytes = (size_t)r;

	return (KORE_RESULT_OK);
}

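/*
 * Byte-order helpers: read and write 16, 32 and 64 bit values in network
 * (big-endian) byte order from and to possibly unaligned buffers.
 */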
u_int16_t
net_read16(u_int8_t *b)
{
	u_int16_t	r;

	/* memcpy avoids an unaligned load on strict-alignment platforms. */
	memcpy(&r, b, sizeof(r));
	return (ntohs(r));
}

u_int32_t
net_read32(u_int8_t *b)
{
	u_int32_t	r;

	memcpy(&r, b, sizeof(r));
	return (ntohl(r));
}

void
net_write16(u_int8_t *p, u_int16_t n)
{
	u_int16_t	r;

	r = htons(n);
	memcpy(p, &r, sizeof(r));
}

void
net_write32(u_int8_t *p, u_int32_t n)
{
	u_int32_t	r;

	r = htonl(n);
	memcpy(p, &r, sizeof(r));
}

u_int64_t
net_read64(u_int8_t *b)
{
	u_int64_t	r;

	memcpy(&r, b, sizeof(r));
	return (be64toh(r));
}

void
net_write64(u_int8_t *p, u_int64_t n)
{
	u_int64_t	r;

	r = htobe64(n);
	memcpy(p, &r, sizeof(r));
}