Merge branch 'tcp-pass-back-data-left-in-socket-after-receive' of git://git.kernel.org/pub/scm/linux/kernel/git/kuba/linux into for-5.19/io_uring-net

Merge net branch with the required patch for supporting the io_uring
feature that passes back whether we had more data in the socket or not.

* 'tcp-pass-back-data-left-in-socket-after-receive' of git://git.kernel.org/pub/scm/linux/kernel/git/kuba/linux:
  tcp: pass back data left in socket after receive
commit a4c7685360
Author: Jens Axboe
Date:   2022-04-29 21:11:15 -06:00

2 changed files with 15 additions and 7 deletions
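As a quick orientation before the diff: the patch adds two fields to struct msghdr, msg_get_inq (input: the caller wants the residual byte count) and msg_inq (output: bytes still queued after the receive), and teaches tcp_recvmsg() to fill them in. Below is a minimal, hedged sketch of how an in-kernel consumer such as io_uring's recv path could use the new fields. The helper name and surrounding plumbing are illustrative, only the msghdr fields and sock_recvmsg()/import_single_range() are existing kernel interfaces, and the actual io_uring wiring lands in a separate patch on this branch.

#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/*
 * Illustrative only: receive into a user buffer and report whether the
 * socket still had unread data afterwards, using the fields added here.
 */
static int example_recv_check_more(struct socket *sock, void __user *buf,
				   size_t len, bool *sock_nonempty)
{
	struct msghdr msg = { };
	struct iovec iov;
	int ret;

	ret = import_single_range(READ, buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	/* Ask the protocol to report leftover bytes after this receive. */
	msg.msg_get_inq = 1;

	ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT);

	/* For TCP with this patch, msg_inq holds the bytes still queued. */
	*sock_nonempty = msg.msg_inq > 0;
	return ret;
}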

diff --git a/include/linux/socket.h b/include/linux/socket.h
--- a/include/linux/socket.h
+++ b/include/linux/socket.h

@@ -50,6 +50,9 @@ struct linger {
 struct msghdr {
 	void		*msg_name;	/* ptr to socket address structure */
 	int		msg_namelen;	/* size of socket address structure */
+
+	int		msg_inq;	/* output, data left in socket */
+
 	struct iov_iter	msg_iter;	/* data */
 
 	/*
@@ -62,8 +65,9 @@ struct msghdr {
 		void __user	*msg_control_user;
 	};
 	bool		msg_control_is_user : 1;
-	__kernel_size_t	msg_controllen;	/* ancillary data buffer length */
+	bool		msg_get_inq : 1;/* return INQ after receive */
 	unsigned int	msg_flags;	/* flags on received message */
+	__kernel_size_t	msg_controllen;	/* ancillary data buffer length */
 	struct kiocb	*msg_iocb;	/* ptr to iocb for async requests */
 };

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c

@@ -2335,8 +2335,10 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
 	if (sk->sk_state == TCP_LISTEN)
 		goto out;
 
-	if (tp->recvmsg_inq)
+	if (tp->recvmsg_inq) {
 		*cmsg_flags = TCP_CMSG_INQ;
+		msg->msg_get_inq = 1;
+	}
 	timeo = sock_rcvtimeo(sk, nonblock);
 
 	/* Urgent data needs to be handled specially. */
@@ -2559,7 +2561,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 		int flags, int *addr_len)
 {
-	int cmsg_flags = 0, ret, inq;
+	int cmsg_flags = 0, ret;
 	struct scm_timestamping_internal tss;
 
 	if (unlikely(flags & MSG_ERRQUEUE))
@@ -2576,12 +2578,14 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 	release_sock(sk);
 	sk_defer_free_flush(sk);
 
-	if (cmsg_flags && ret >= 0) {
+	if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) {
 		if (cmsg_flags & TCP_CMSG_TS)
 			tcp_recv_timestamp(msg, sk, &tss);
-		if (cmsg_flags & TCP_CMSG_INQ) {
-			inq = tcp_inq_hint(sk);
-			put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
+		if (msg->msg_get_inq) {
+			msg->msg_inq = tcp_inq_hint(sk);
+			if (cmsg_flags & TCP_CMSG_INQ)
+				put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
+					 sizeof(msg->msg_inq), &msg->msg_inq);
 		}
 	}
 	return ret;
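For reference, the put_cmsg(TCP_CM_INQ) branch that the hunk above reshuffles is the long-standing userspace-visible TCP_INQ interface; its behaviour does not change here, the value is simply computed once into msg->msg_inq first. A hedged userspace sketch of that interface follows; fd is assumed to be a connected TCP socket, and the TCP_INQ/TCP_CM_INQ fallback defines are only for older libc headers that lack them.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_INQ
#define TCP_INQ		36
#define TCP_CM_INQ	TCP_INQ
#endif

/* Opt in once after connect(); the kernel then attaches a TCP_CM_INQ
 * control message to every subsequent recvmsg() on this socket.
 */
static int enable_inq(int fd)
{
	int one = 1;

	return setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
}

/* Receive and report how many bytes were still queued afterwards. */
static ssize_t recv_with_inq(int fd, void *buf, size_t len, int *inq)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	ssize_t ret;

	*inq = 0;
	ret = recvmsg(fd, &msg, 0);
	if (ret < 0)
		return ret;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_TCP && cm->cmsg_type == TCP_CM_INQ)
			memcpy(inq, CMSG_DATA(cm), sizeof(*inq));

	return ret;
}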