FreeBSD kernel kern code
sys_socket.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_socket.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/aio.h>
#include <sys/domain.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/sigio.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/filio.h>			/* XXX */
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/ucred.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/user.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static SYSCTL_NODE(_kern_ipc, OID_AUTO, aio, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "socket AIO stats");

static int empty_results;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_results, CTLFLAG_RD, &empty_results,
    0, "socket operation returned EAGAIN");

static int empty_retries;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_retries, CTLFLAG_RD, &empty_retries,
    0, "socket operation retries");
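
/*
 * Example: the counters above are exported under kern.ipc.aio and can be
 * read from userland with sysctl(3).  A minimal sketch, assuming only the
 * OID names declared above (not part of this file):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int val;
 *		size_t len = sizeof(val);
 *
 *		if (sysctlbyname("kern.ipc.aio.empty_results", &val, &len,
 *		    NULL, 0) == -1)
 *			return (1);
 *		printf("socket AIO EAGAIN results: %d\n", val);
 *		return (0);
 *	}
 */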

static fo_rdwr_t soo_read;
static fo_rdwr_t soo_write;
static fo_ioctl_t soo_ioctl;
static fo_poll_t soo_poll;
extern fo_kqfilter_t soo_kqfilter;
static fo_stat_t soo_stat;
static fo_close_t soo_close;
static fo_fill_kinfo_t soo_fill_kinfo;
static fo_aio_queue_t soo_aio_queue;

static void soo_aio_cancel(struct kaiocb *job);

struct fileops socketops = {
	.fo_read = soo_read,
	.fo_write = soo_write,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = soo_ioctl,
	.fo_poll = soo_poll,
	.fo_kqfilter = soo_kqfilter,
	.fo_stat = soo_stat,
	.fo_close = soo_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = soo_fill_kinfo,
	.fo_aio_queue = soo_aio_queue,
	.fo_flags = DFLAG_PASSABLE
};
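
/*
 * Example: descriptor system calls reach these handlers through the
 * fileops table.  A read(2) on a socket descriptor, for instance, is
 * dispatched through the fo_read() wrapper from <sys/file.h> and lands
 * in soo_read() below (illustrative sketch):
 *
 *	error = fo_read(fp, uio, active_cred, flags, td);
 */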

static int
soo_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct socket *so = fp->f_data;
	int error;

#ifdef MAC
	error = mac_socket_check_receive(active_cred, so);
	if (error)
		return (error);
#endif
	error = soreceive(so, 0, uio, 0, 0, 0);
	return (error);
}

static int
soo_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct socket *so = fp->f_data;
	int error;

#ifdef MAC
	error = mac_socket_check_send(active_cred, so);
	if (error)
		return (error);
#endif
	error = sosend(so, 0, uio, 0, 0, 0, uio->uio_td);
	if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
		PROC_LOCK(uio->uio_td->td_proc);
		tdsignal(uio->uio_td, SIGPIPE);
		PROC_UNLOCK(uio->uio_td->td_proc);
	}
	return (error);
}
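
/*
 * Example: the EPIPE handling above gives write(2) on a socket its
 * historical SIGPIPE semantics.  A userland program can opt out per
 * socket with SO_NOSIGPIPE (illustrative sketch; 's' is assumed to be
 * a connected socket descriptor):
 *
 *	#include <sys/socket.h>
 *
 *	int on = 1;
 *
 *	(void)setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
 *
 * After this, a write to a peer-closed socket fails with EPIPE rather
 * than raising SIGPIPE.
 */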

static int
soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct socket *so = fp->f_data;
	int error = 0;

	switch (cmd) {
	case FIONBIO:
		SOCK_LOCK(so);
		if (*(int *)data)
			so->so_state |= SS_NBIO;
		else
			so->so_state &= ~SS_NBIO;
		SOCK_UNLOCK(so);
		break;

	case FIOASYNC:
		if (*(int *)data) {
			SOCK_LOCK(so);
			so->so_state |= SS_ASYNC;
			if (SOLISTENING(so)) {
				so->sol_sbrcv_flags |= SB_ASYNC;
				so->sol_sbsnd_flags |= SB_ASYNC;
			} else {
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_flags |= SB_ASYNC;
				SOCKBUF_UNLOCK(&so->so_rcv);
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_flags |= SB_ASYNC;
				SOCKBUF_UNLOCK(&so->so_snd);
			}
			SOCK_UNLOCK(so);
		} else {
			SOCK_LOCK(so);
			so->so_state &= ~SS_ASYNC;
			if (SOLISTENING(so)) {
				so->sol_sbrcv_flags &= ~SB_ASYNC;
				so->sol_sbsnd_flags &= ~SB_ASYNC;
			} else {
				SOCKBUF_LOCK(&so->so_rcv);
				so->so_rcv.sb_flags &= ~SB_ASYNC;
				SOCKBUF_UNLOCK(&so->so_rcv);
				SOCKBUF_LOCK(&so->so_snd);
				so->so_snd.sb_flags &= ~SB_ASYNC;
				SOCKBUF_UNLOCK(&so->so_snd);
			}
			SOCK_UNLOCK(so);
		}
		break;

	case FIONREAD:
		SOCK_RECVBUF_LOCK(so);
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			*(int *)data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
		}
		SOCK_RECVBUF_UNLOCK(so);
		break;

	case FIONWRITE:
		/* Unlocked read. */
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			*(int *)data = sbavail(&so->so_snd);
		}
		break;

	case FIONSPACE:
		/* Unlocked read. */
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			if ((so->so_snd.sb_hiwat < sbused(&so->so_snd)) ||
			    (so->so_snd.sb_mbmax < so->so_snd.sb_mbcnt)) {
				*(int *)data = 0;
			} else {
				*(int *)data = sbspace(&so->so_snd);
			}
		}
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)data, &so->so_sigio);
		break;

	case FIOGETOWN:
		*(int *)data = fgetown(&so->so_sigio);
		break;

	case SIOCSPGRP:
		error = fsetown(-(*(int *)data), &so->so_sigio);
		break;

	case SIOCGPGRP:
		*(int *)data = -fgetown(&so->so_sigio);
		break;

	case SIOCATMARK:
		/* Unlocked read. */
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			*(int *)data = (so->so_rcv.sb_state & SBS_RCVATMARK) != 0;
		}
		break;
	default:
		/*
		 * Interface/routing/protocol specific ioctls: interface and
		 * routing ioctls should have a different entry since a
		 * socket is unnecessary.
		 */
		if (IOCGROUP(cmd) == 'i')
			error = ifioctl(so, cmd, data, td);
		else if (IOCGROUP(cmd) == 'r') {
			CURVNET_SET(so->so_vnet);
			error = rtioctl_fib(cmd, data, so->so_fibnum);
			CURVNET_RESTORE();
		} else {
			CURVNET_SET(so->so_vnet);
			error = ((*so->so_proto->pr_usrreqs->pru_control)
			    (so, cmd, data, 0, td));
			CURVNET_RESTORE();
		}
		break;
	}
	return (error);
}
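
/*
 * Example: the generic cases above service the common socket ioctls.
 * From userland, non-blocking mode and the amount of queued input map
 * onto FIONBIO and FIONREAD (illustrative sketch; 's' is assumed to be
 * a socket descriptor, and <stdio.h> is included):
 *
 *	#include <sys/ioctl.h>
 *
 *	int on = 1, nread;
 *
 *	if (ioctl(s, FIONBIO, &on) == 0 &&
 *	    ioctl(s, FIONREAD, &nread) == 0)
 *		printf("%d bytes queued\n", nread);
 *
 * FIONBIO sets SS_NBIO in so_state; FIONREAD reports the bytes queued
 * in the receive buffer, net of control data (sb_ctl).
 */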

static int
soo_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct socket *so = fp->f_data;
#ifdef MAC
	int error;

	error = mac_socket_check_poll(active_cred, so);
	if (error)
		return (error);
#endif
	return (sopoll(so, events, fp->f_cred, td));
}

static int
soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred)
{
	struct socket *so = fp->f_data;
	int error;

	bzero((caddr_t)ub, sizeof (*ub));
	ub->st_mode = S_IFSOCK;
#ifdef MAC
	error = mac_socket_check_stat(active_cred, so);
	if (error)
		return (error);
#endif
	SOCK_LOCK(so);
	if (!SOLISTENING(so)) {
		struct sockbuf *sb;

		/*
		 * If SBS_CANTRCVMORE is set, but there's still data left
		 * in the receive buffer, the socket is still readable.
		 */
		sb = &so->so_rcv;
		SOCKBUF_LOCK(sb);
		if ((sb->sb_state & SBS_CANTRCVMORE) == 0 || sbavail(sb))
			ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
		ub->st_size = sbavail(sb) - sb->sb_ctl;
		SOCKBUF_UNLOCK(sb);

		sb = &so->so_snd;
		SOCKBUF_LOCK(sb);
		if ((sb->sb_state & SBS_CANTSENDMORE) == 0)
			ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
		SOCKBUF_UNLOCK(sb);
	}
	ub->st_uid = so->so_cred->cr_uid;
	ub->st_gid = so->so_cred->cr_gid;
	error = so->so_proto->pr_usrreqs->pru_sense(so, ub);
	SOCK_UNLOCK(so);
	return (error);
}
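
/*
 * Example: soo_stat() is what fstat(2) returns for a socket descriptor:
 * S_IFSOCK in st_mode, read/write permission bits reflecting the buffer
 * states, and st_size counting unread bytes in the receive buffer
 * (illustrative sketch; 's' is assumed to be a socket descriptor):
 *
 *	#include <sys/stat.h>
 *
 *	struct stat st;
 *
 *	if (fstat(s, &st) == 0 && S_ISSOCK(st.st_mode))
 *		printf("%jd bytes unread\n", (intmax_t)st.st_size);
 */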

/*
 * API socket close on file pointer.  We call soclose() to close the socket
 * (including initiating closing protocols).  soclose() will sorele() the
 * file reference but the actual socket will not go away until the socket's
 * ref count hits 0.
 */
static int
soo_close(struct file *fp, struct thread *td)
{
	int error = 0;
	struct socket *so;

	so = fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;

	if (so)
		error = soclose(so);
	return (error);
}

static int
soo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	struct sockaddr *sa;
	struct inpcb *inpcb;
	struct unpcb *unpcb;
	struct socket *so;
	int error;

	kif->kf_type = KF_TYPE_SOCKET;
	so = fp->f_data;
	CURVNET_SET(so->so_vnet);
	kif->kf_un.kf_sock.kf_sock_domain0 =
	    so->so_proto->pr_domain->dom_family;
	kif->kf_un.kf_sock.kf_sock_type0 = so->so_type;
	kif->kf_un.kf_sock.kf_sock_protocol0 = so->so_proto->pr_protocol;
	kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb;
	switch (kif->kf_un.kf_sock.kf_sock_domain0) {
	case AF_INET:
	case AF_INET6:
		if (kif->kf_un.kf_sock.kf_sock_protocol0 == IPPROTO_TCP) {
			if (so->so_pcb != NULL) {
				inpcb = (struct inpcb *)(so->so_pcb);
				kif->kf_un.kf_sock.kf_sock_inpcb =
				    (uintptr_t)inpcb->inp_ppcb;
				kif->kf_un.kf_sock.kf_sock_sendq =
				    sbused(&so->so_snd);
				kif->kf_un.kf_sock.kf_sock_recvq =
				    sbused(&so->so_rcv);
			}
		}
		break;
	case AF_UNIX:
		if (so->so_pcb != NULL) {
			unpcb = (struct unpcb *)(so->so_pcb);
			if (unpcb->unp_conn) {
				kif->kf_un.kf_sock.kf_sock_unpconn =
				    (uintptr_t)unpcb->unp_conn;
				kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
				    so->so_rcv.sb_state;
				kif->kf_un.kf_sock.kf_sock_snd_sb_state =
				    so->so_snd.sb_state;
				kif->kf_un.kf_sock.kf_sock_sendq =
				    sbused(&so->so_snd);
				kif->kf_un.kf_sock.kf_sock_recvq =
				    sbused(&so->so_rcv);
			}
		}
		break;
	}
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error == 0 &&
	    sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_local)) {
		bcopy(sa, &kif->kf_un.kf_sock.kf_sa_local, sa->sa_len);
		free(sa, M_SONAME);
	}
	error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
	if (error == 0 &&
	    sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_peer)) {
		bcopy(sa, &kif->kf_un.kf_sock.kf_sa_peer, sa->sa_len);
		free(sa, M_SONAME);
	}
	strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name,
	    sizeof(kif->kf_path));
	CURVNET_RESTORE();
	return (0);
}

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * completed by the AIO job so far.
 */
#define	aio_done	backend3

static STAILQ_HEAD(, task) soaio_jobs;
static struct mtx soaio_jobs_lock;
static struct task soaio_kproc_task;
static int soaio_starting, soaio_idle, soaio_queued;
static struct unrhdr *soaio_kproc_unr;

static int soaio_max_procs = MAX_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, max_procs, CTLFLAG_RW, &soaio_max_procs, 0,
    "Maximum number of kernel processes to use for async socket IO");

static int soaio_num_procs;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, num_procs, CTLFLAG_RD, &soaio_num_procs, 0,
    "Number of active kernel processes for async socket IO");

static int soaio_target_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, target_procs, CTLFLAG_RD,
    &soaio_target_procs, 0,
    "Preferred number of ready kernel processes for async socket IO");

static int soaio_lifetime;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, lifetime, CTLFLAG_RW, &soaio_lifetime, 0,
    "Maximum lifetime for idle aiod");

static void
soaio_kproc_loop(void *arg)
{
	struct proc *p;
	struct vmspace *myvm;
	struct task *task;
	int error, id, pending;

	id = (intptr_t)arg;

	/*
	 * Grab an extra reference on the daemon's vmspace so that it
	 * doesn't get freed by jobs that switch to a different
	 * vmspace.
	 */
	p = curproc;
	myvm = vmspace_acquire_ref(p);

	mtx_lock(&soaio_jobs_lock);
	MPASS(soaio_starting > 0);
	soaio_starting--;
	for (;;) {
		while (!STAILQ_EMPTY(&soaio_jobs)) {
			task = STAILQ_FIRST(&soaio_jobs);
			STAILQ_REMOVE_HEAD(&soaio_jobs, ta_link);
			soaio_queued--;
			pending = task->ta_pending;
			task->ta_pending = 0;
			mtx_unlock(&soaio_jobs_lock);

			task->ta_func(task->ta_context, pending);

			mtx_lock(&soaio_jobs_lock);
		}
		MPASS(soaio_queued == 0);

		if (p->p_vmspace != myvm) {
			mtx_unlock(&soaio_jobs_lock);
			vmspace_switch_aio(myvm);
			mtx_lock(&soaio_jobs_lock);
			continue;
		}

		soaio_idle++;
		error = mtx_sleep(&soaio_idle, &soaio_jobs_lock, 0, "-",
		    soaio_lifetime);
		soaio_idle--;
		if (error == EWOULDBLOCK && STAILQ_EMPTY(&soaio_jobs) &&
		    soaio_num_procs > soaio_target_procs)
			break;
	}
	soaio_num_procs--;
	mtx_unlock(&soaio_jobs_lock);
	free_unr(soaio_kproc_unr, id);
	kproc_exit(0);
}

static void
soaio_kproc_create(void *context, int pending)
{
	struct proc *p;
	int error, id;

	mtx_lock(&soaio_jobs_lock);
	for (;;) {
		if (soaio_num_procs < soaio_target_procs) {
			/* Must create */
		} else if (soaio_num_procs >= soaio_max_procs) {
			/*
			 * Hit the limit on kernel processes, don't
			 * create another one.
			 */
			break;
		} else if (soaio_queued <= soaio_idle + soaio_starting) {
			/*
			 * No more AIO jobs waiting for a process to be
			 * created, so stop.
			 */
			break;
		}
		soaio_starting++;
		mtx_unlock(&soaio_jobs_lock);

		id = alloc_unr(soaio_kproc_unr);
		error = kproc_create(soaio_kproc_loop, (void *)(intptr_t)id,
		    &p, 0, 0, "soaiod%d", id);
		if (error != 0) {
			free_unr(soaio_kproc_unr, id);
			mtx_lock(&soaio_jobs_lock);
			soaio_starting--;
			break;
		}

		mtx_lock(&soaio_jobs_lock);
		soaio_num_procs++;
	}
	mtx_unlock(&soaio_jobs_lock);
}

void
soaio_enqueue(struct task *task)
{

	mtx_lock(&soaio_jobs_lock);
	MPASS(task->ta_pending == 0);
	task->ta_pending++;
	STAILQ_INSERT_TAIL(&soaio_jobs, task, ta_link);
	soaio_queued++;
	if (soaio_queued <= soaio_idle)
		wakeup_one(&soaio_idle);
	else if (soaio_num_procs < soaio_max_procs)
		taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
	mtx_unlock(&soaio_jobs_lock);
}

static void
soaio_init(void)
{

	soaio_lifetime = AIOD_LIFETIME_DEFAULT;
	STAILQ_INIT(&soaio_jobs);
	mtx_init(&soaio_jobs_lock, "soaio jobs", NULL, MTX_DEF);
	soaio_kproc_unr = new_unrhdr(1, INT_MAX, NULL);
	TASK_INIT(&soaio_kproc_task, 0, soaio_kproc_create, NULL);
}
SYSINIT(soaio, SI_SUB_VFS, SI_ORDER_ANY, soaio_init, NULL);

static __inline int
soaio_ready(struct socket *so, struct sockbuf *sb)
{
	return (sb == &so->so_rcv ? soreadable(so) : sowriteable(so));
}

static void
soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct file *fp;
	size_t cnt, done, job_total_nbytes __diagused;
	long ru_before;
	int error, flags;

	SOCKBUF_UNLOCK(sb);
	aio_switch_vmspace(job);
	td = curthread;
	fp = job->fd_file;
retry:
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;

	job_total_nbytes = job->uiop->uio_resid + job->aio_done;
	done = job->aio_done;
	cnt = job->uiop->uio_resid;
	job->uiop->uio_offset = 0;
	job->uiop->uio_td = td;
	flags = MSG_NBIO;

	/*
	 * For resource usage accounting, only count a completed request
	 * as a single message to avoid counting multiple calls to
	 * sosend/soreceive on a blocking socket.
	 */

	if (sb == &so->so_rcv) {
		ru_before = td->td_ru.ru_msgrcv;
#ifdef MAC
		error = mac_socket_check_receive(fp->f_cred, so);
		if (error == 0)
#endif
			error = soreceive(so, NULL, job->uiop, NULL, NULL,
			    &flags);
		if (td->td_ru.ru_msgrcv != ru_before)
			job->msgrcv = 1;
	} else {
		if (!TAILQ_EMPTY(&sb->sb_aiojobq))
			flags |= MSG_MORETOCOME;
		ru_before = td->td_ru.ru_msgsnd;
#ifdef MAC
		error = mac_socket_check_send(fp->f_cred, so);
		if (error == 0)
#endif
			error = sosend(so, NULL, job->uiop, NULL, NULL, flags,
			    td);
		if (td->td_ru.ru_msgsnd != ru_before)
			job->msgsnd = 1;
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	done += cnt - job->uiop->uio_resid;
	job->aio_done = done;
	td->td_ucred = td_savedcred;

	if (error == EWOULDBLOCK) {
		/*
		 * The request was either partially completed or not
		 * completed at all due to racing with a read() or
		 * write() on the socket.  If the socket is
		 * non-blocking, return with any partial completion.
		 * If the socket is blocking or if no progress has
		 * been made, requeue this request at the head of the
		 * queue to try again when the socket is ready.
		 */
		MPASS(done != job_total_nbytes);
		SOCKBUF_LOCK(sb);
		if (done == 0 || !(so->so_state & SS_NBIO)) {
			empty_results++;
			if (soaio_ready(so, sb)) {
				empty_retries++;
				SOCKBUF_UNLOCK(sb);
				goto retry;
			}

			if (!aio_set_cancel_function(job, soo_aio_cancel)) {
				SOCKBUF_UNLOCK(sb);
				if (done != 0)
					aio_complete(job, done, 0);
				else
					aio_cancel(job);
				SOCKBUF_LOCK(sb);
			} else {
				TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
			}
			return;
		}
		SOCKBUF_UNLOCK(sb);
	}
	if (done != 0 && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK))
		error = 0;
	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, done, 0);
	SOCKBUF_LOCK(sb);
}

static void
soaio_process_sb(struct socket *so, struct sockbuf *sb)
{
	struct kaiocb *job;

	CURVNET_SET(so->so_vnet);
	SOCKBUF_LOCK(sb);
	while (!TAILQ_EMPTY(&sb->sb_aiojobq) && soaio_ready(so, sb)) {
		job = TAILQ_FIRST(&sb->sb_aiojobq);
		TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		soaio_process_job(so, sb, job);
	}

	/*
	 * If there are still pending requests, the socket must not be
	 * ready so set SB_AIO to request a wakeup when the socket
	 * becomes ready.
	 */
	if (!TAILQ_EMPTY(&sb->sb_aiojobq))
		sb->sb_flags |= SB_AIO;
	sb->sb_flags &= ~SB_AIO_RUNNING;
	SOCKBUF_UNLOCK(sb);

	sorele(so);
	CURVNET_RESTORE();
}

void
soaio_rcv(void *context, int pending)
{
	struct socket *so;

	so = context;
	soaio_process_sb(so, &so->so_rcv);
}

void
soaio_snd(void *context, int pending)
{
	struct socket *so;

	so = context;
	soaio_process_sb(so, &so->so_snd);
}

void
sowakeup_aio(struct socket *so, struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sb->sb_flags &= ~SB_AIO;
	if (sb->sb_flags & SB_AIO_RUNNING)
		return;
	sb->sb_flags |= SB_AIO_RUNNING;
	soref(so);
	soaio_enqueue(&sb->sb_aiotask);
}

static void
soo_aio_cancel(struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	long done;
	int opcode;

	so = job->fd_file->f_data;
	opcode = job->uaiocb.aio_lio_opcode;
	if (opcode & LIO_READ)
		sb = &so->so_rcv;
	else {
		MPASS(opcode & LIO_WRITE);
		sb = &so->so_snd;
	}

	SOCKBUF_LOCK(sb);
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
	if (TAILQ_EMPTY(&sb->sb_aiojobq))
		sb->sb_flags &= ~SB_AIO;
	SOCKBUF_UNLOCK(sb);

	done = job->aio_done;
	if (done != 0)
		aio_complete(job, done, 0);
	else
		aio_cancel(job);
}

static int
soo_aio_queue(struct file *fp, struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	int error;

	so = fp->f_data;
	error = (*so->so_proto->pr_usrreqs->pru_aio_queue)(so, job);
	if (error == 0)
		return (0);

	/* Lock through the socket, since this may be a listening socket. */
	switch (job->uaiocb.aio_lio_opcode & (LIO_WRITE | LIO_READ)) {
	case LIO_READ:
		sb = &so->so_rcv;
		SOCK_RECVBUF_LOCK(so);
		break;
	case LIO_WRITE:
		sb = &so->so_snd;
		SOCK_SENDBUF_LOCK(so);
		break;
	default:
		return (EINVAL);
	}

	if (SOLISTENING(so)) {
		if (sb == &so->so_rcv)
			SOCK_RECVBUF_UNLOCK(so);
		else
			SOCK_SENDBUF_UNLOCK(so);
		return (EINVAL);
	}

	if (!aio_set_cancel_function(job, soo_aio_cancel))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list);
	if (!(sb->sb_flags & SB_AIO_RUNNING)) {
		if (soaio_ready(so, sb))
			sowakeup_aio(so, sb);
		else
			sb->sb_flags |= SB_AIO;
	}
	SOCKBUF_UNLOCK(sb);
	return (0);
}
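
/*
 * Example: a userland POSIX AIO request on a socket enters the kernel
 * through aio_read(2)/aio_write(2) and, unless the protocol provides its
 * own pru_aio_queue handler, is queued by soo_aio_queue() above and later
 * serviced by the soaiod kernel processes.  Illustrative sketch ('s' is
 * assumed to be a socket descriptor):
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct aiocb cb;
 *	char buf[1024];
 *	ssize_t n;
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_fildes = s;
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	if (aio_read(&cb) == 0) {
 *		while (aio_error(&cb) == EINPROGRESS)
 *			sleep(1);
 *		n = aio_return(&cb);
 *	}
 *
 * In practice aio_suspend(2) or kqueue(2) would be used to wait for
 * completion rather than the polling loop shown here.
 */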