From a15d56a60760aa9dbe26343b9a0ac5228f35d445 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Thu, 22 Aug 2019 08:55:36 +0200 Subject: [PATCH 01/58] batman-adv: Only read OGM tvlv_len after buffer len check Multiple batadv_ogm_packet can be stored in an skbuff. The functions batadv_iv_ogm_send_to_if()/batadv_iv_ogm_receive() use batadv_iv_ogm_aggr_packet() to check if there is another additional batadv_ogm_packet in the skb or not before they continue processing the packet. The length for such an OGM is BATADV_OGM_HLEN + batadv_ogm_packet->tvlv_len. The check must first check that at least BATADV_OGM_HLEN bytes are available before it accesses tvlv_len (which is part of the header. Otherwise it might try read outside of the currently available skbuff to get the content of tvlv_len. Fixes: ef26157747d4 ("batman-adv: tvlv - basic infrastructure") Reported-by: syzbot+355cab184197dbbfa384@syzkaller.appspotmail.com Signed-off-by: Sven Eckelmann Acked-by: Antonio Quartulli Signed-off-by: Simon Wunderlich --- net/batman-adv/bat_iv_ogm.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 240ed70912d6..d78938e3e008 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -277,17 +277,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv) * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached * @buff_pos: current position in the skb * @packet_len: total length of the skb - * @tvlv_len: tvlv length of the previously considered OGM + * @ogm_packet: potential OGM in buffer * * Return: true if there is enough space for another OGM, false otherwise. */ -static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, - __be16 tvlv_len) +static bool +batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, + const struct batadv_ogm_packet *ogm_packet) { int next_buff_pos = 0; - next_buff_pos += buff_pos + BATADV_OGM_HLEN; - next_buff_pos += ntohs(tvlv_len); + /* check if there is enough space for the header */ + next_buff_pos += buff_pos + sizeof(*ogm_packet); + if (next_buff_pos > packet_len) + return false; + + /* check if there is enough space for the optional TVLV */ + next_buff_pos += ntohs(ogm_packet->tvlv_len); return (next_buff_pos <= packet_len) && (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); @@ -315,7 +321,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, /* adjust all flags and log packets */ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, - batadv_ogm_packet->tvlv_len)) { + batadv_ogm_packet)) { /* we might have aggregated direct link packets with an * ordinary base packet */ @@ -1704,7 +1710,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, /* unpack the aggregated packets and process them one by one */ while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb), - ogm_packet->tvlv_len)) { + ogm_packet)) { batadv_iv_ogm_process(skb, ogm_offset, if_incoming); ogm_offset += BATADV_OGM_HLEN; From 0ff0f15a32c093381ad1abc06abe85afb561ab28 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Thu, 22 Aug 2019 08:55:36 +0200 Subject: [PATCH 02/58] batman-adv: Only read OGM2 tvlv_len after buffer len check Multiple batadv_ogm2_packet can be stored in an skbuff. The functions batadv_v_ogm_send_to_if() uses batadv_v_ogm_aggr_packet() to check if there is another additional batadv_ogm2_packet in the skb or not before they continue processing the packet. 
The length for such an OGM2 is BATADV_OGM2_HLEN + batadv_ogm2_packet->tvlv_len. The check must first check that at least BATADV_OGM2_HLEN bytes are available before it accesses tvlv_len (which is part of the header. Otherwise it might try read outside of the currently available skbuff to get the content of tvlv_len. Fixes: 9323158ef9f4 ("batman-adv: OGMv2 - implement originators logic") Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/bat_v_ogm.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index fad95ef64e01..bc06e3cdfa84 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -631,17 +631,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv, * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated * @buff_pos: current position in the skb * @packet_len: total length of the skb - * @tvlv_len: tvlv length of the previously considered OGM + * @ogm2_packet: potential OGM2 in buffer * * Return: true if there is enough space for another OGM, false otherwise. */ -static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, - __be16 tvlv_len) +static bool +batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, + const struct batadv_ogm2_packet *ogm2_packet) { int next_buff_pos = 0; - next_buff_pos += buff_pos + BATADV_OGM2_HLEN; - next_buff_pos += ntohs(tvlv_len); + /* check if there is enough space for the header */ + next_buff_pos += buff_pos + sizeof(*ogm2_packet); + if (next_buff_pos > packet_len) + return false; + + /* check if there is enough space for the optional TVLV */ + next_buff_pos += ntohs(ogm2_packet->tvlv_len); return (next_buff_pos <= packet_len) && (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); @@ -818,7 +824,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, ogm_packet = (struct batadv_ogm2_packet *)skb->data; while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb), - ogm_packet->tvlv_len)) { + ogm_packet)) { batadv_v_ogm_process(skb, ogm_offset, if_incoming); ogm_offset += BATADV_OGM2_HLEN; From 86c28b2d69f93a218a9a5cef146ed0097a98687f Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Fri, 23 Aug 2019 19:00:28 -0700 Subject: [PATCH 03/58] nfp: bpf: fix latency bug when updating stack index register NFP is using Local Memory to model stack. LM_addr could be used as base of a 16 32-bit word region of Local Memory. Then, if the stack offset is beyond the current region, the local index needs to be updated. The update needs at least three cycles to take effect, therefore the sequence normally looks like: local_csr_wr[ActLMAddr3, gprB_5] nop nop nop If the local index switch happens on a narrow loads, then the instruction preparing value to zero high 32-bit of the destination register could be counted as one cycle, the sequence then could be something like: local_csr_wr[ActLMAddr3, gprB_5] nop nop immed[gprB_5, 0] However, we have zero extension optimization that zeroing high 32-bit could be eliminated, therefore above IMMED insn won't be available for which case the first sequence needs to be generated. 
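As an aside, a minimal C sketch of the nop-slot decision described above; the helper name and the "zext will be emitted" flag are illustrative assumptions for this example, not the driver's actual API (the real change is in the diff below):

	/* How many nops must follow the local_csr_wr that switches the LM index.
	 * The CSR write needs three cycles to take effect; for a narrow load the
	 * IMMED that zeroes the upper 32 bits of the destination can hide one of
	 * those cycles, but only if the zero extension was not optimised away.
	 */
	static unsigned int lm_switch_nop_count(bool narrow_load,
						bool zext_will_be_emitted)
	{
		if (narrow_load && zext_will_be_emitted)
			return 2;	/* IMMED fills one slot of the shadow */
		return 3;		/* nothing to schedule, pad fully with nops */
	}
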
Fixes: 0b4de1ff19bf ("nfp: bpf: eliminate zero extension code-gen") Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4054b70d7719..5afcb3c4c2ef 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool clr_gpr, lmem_step step) { s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; - bool first = true, last; + bool first = true, narrow_ld, last; bool needs_inc = false; swreg stack_off_reg; u8 prev_gpr = 255; @@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, needs_inc = true; } + + narrow_ld = clr_gpr && size < 8; + if (lm3) { + unsigned int nop_cnt; + emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); - /* For size < 4 one slot will be filled by zeroing of upper. */ - wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); + /* For size < 4 one slot will be filled by zeroing of upper, + * but be careful, that zeroing could be eliminated by zext + * optimization. + */ + nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3; + wrp_nops(nfp_prog, nop_cnt); } - if (clr_gpr && size < 8) + if (narrow_ld) wrp_zext(nfp_prog, meta, gpr); while (size) { From ede7c460b1da5be7b8ef4efe47f1687babf06408 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Thu, 22 Aug 2019 00:53:58 +0530 Subject: [PATCH 04/58] bpf: handle 32-bit zext during constant blinding Since BPF constant blinding is performed after the verifier pass, the ALU32 instructions inserted for doubleword immediate loads don't have a corresponding zext instruction. This is causing a kernel oops on powerpc and can be reproduced by running 'test_cgroup_storage' with bpf_jit_harden=2. Fix this by emitting BPF_ZEXT during constant blinding if prog->aux->verifier_zext is set. Fixes: a4b1d3c1ddf6cb ("bpf: verifier: insert zero extension according to analysis result") Reported-by: Michael Ellerman Signed-off-by: Naveen N. Rao Reviewed-by: Jiong Wang Signed-off-by: Daniel Borkmann --- kernel/bpf/core.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 8191a7db2777..66088a9e9b9e 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -890,7 +890,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog, static int bpf_jit_blind_insn(const struct bpf_insn *from, const struct bpf_insn *aux, - struct bpf_insn *to_buff) + struct bpf_insn *to_buff, + bool emit_zext) { struct bpf_insn *to = to_buff; u32 imm_rnd = get_random_int(); @@ -1005,6 +1006,8 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from, case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. 
*/ *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + if (emit_zext) + *to++ = BPF_ZEXT_REG(BPF_REG_AX); *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); break; @@ -1088,7 +1091,8 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) insn[1].code == 0) memcpy(aux, insn, sizeof(aux)); - rewritten = bpf_jit_blind_insn(insn, aux, insn_buff); + rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, + clone->aux->verifier_zext); if (!rewritten) continue; From c3c9e3df49f8d83db09d1f61c8bed54e7fed8662 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 19 Aug 2019 09:25:37 +0100 Subject: [PATCH 05/58] rxrpc: Improve jumbo packet counting Improve the information stored about jumbo packets so that we don't need to reparse them so much later. Signed-off-by: David Howells Reviewed-by: Jeffrey Altman --- net/rxrpc/ar-internal.h | 10 +++++++--- net/rxrpc/input.c | 23 ++++++++++++++--------- net/rxrpc/protocol.h | 9 +++++++++ 3 files changed, 30 insertions(+), 12 deletions(-) diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 145335611af6..87cff6c218b6 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -185,11 +185,15 @@ struct rxrpc_host_header { * - max 48 bytes (struct sk_buff::cb) */ struct rxrpc_skb_priv { - union { - u8 nr_jumbo; /* Number of jumbo subpackets */ - }; + u8 nr_subpackets; /* Number of subpackets */ + u8 rx_flags; /* Received packet flags */ +#define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */ union { int remain; /* amount of space remaining for next write */ + + /* List of requested ACKs on subpackets */ + unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) / + BITS_PER_LONG]; }; struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */ diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index dd47d465d1d3..ffcec5117954 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -347,7 +347,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call) } /* - * Scan a jumbo packet to validate its structure and to work out how many + * Scan a data packet to validate its structure and to work out how many * subpackets it contains. * * A jumbo packet is a collection of consecutive packets glued together with @@ -358,16 +358,21 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call) * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any * size. 
*/ -static bool rxrpc_validate_jumbo(struct sk_buff *skb) +static bool rxrpc_validate_data(struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); unsigned int offset = sizeof(struct rxrpc_wire_header); unsigned int len = skb->len; - int nr_jumbo = 1; u8 flags = sp->hdr.flags; - do { - nr_jumbo++; + for (;;) { + if (flags & RXRPC_REQUEST_ACK) + __set_bit(sp->nr_subpackets, sp->rx_req_ack); + sp->nr_subpackets++; + + if (!(flags & RXRPC_JUMBO_PACKET)) + break; + if (len - offset < RXRPC_JUMBO_SUBPKTLEN) goto protocol_error; if (flags & RXRPC_LAST_PACKET) @@ -376,9 +381,10 @@ static bool rxrpc_validate_jumbo(struct sk_buff *skb) if (skb_copy_bits(skb, offset, &flags, 1) < 0) goto protocol_error; offset += sizeof(struct rxrpc_jumbo_header); - } while (flags & RXRPC_JUMBO_PACKET); + } - sp->nr_jumbo = nr_jumbo; + if (flags & RXRPC_LAST_PACKET) + sp->rx_flags |= RXRPC_SKB_INCL_LAST; return true; protocol_error: @@ -1237,8 +1243,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) if (sp->hdr.callNumber == 0 || sp->hdr.seq == 0) goto bad_message; - if (sp->hdr.flags & RXRPC_JUMBO_PACKET && - !rxrpc_validate_jumbo(skb)) + if (!rxrpc_validate_data(skb)) goto bad_message; break; diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h index 99ce322d7caa..49bb972539aa 100644 --- a/net/rxrpc/protocol.h +++ b/net/rxrpc/protocol.h @@ -89,6 +89,15 @@ struct rxrpc_jumbo_header { #define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ #define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) +/* + * The maximum number of subpackets that can possibly fit in a UDP packet is: + * + * ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1 + * = ((65535 - 28 - 28) / 1416) + 1 + * = 46 non-terminal packets and 1 terminal packet. + */ +#define RXRPC_MAX_NR_JUMBO 47 + /*****************************************************************************/ /* * on-the-wire Rx ACK packet data payload From e2de6c4048989007b353164b19d6b7d5be4fa9e3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Aug 2019 09:51:30 +0100 Subject: [PATCH 06/58] rxrpc: Use info in skbuff instead of reparsing a jumbo packet Use the information now cached in the skbuff private data to avoid the need to reparse a jumbo packet. We can find all the subpackets by dead reckoning, so it's only necessary to note how many there are, whether the last one is flagged as LAST_PACKET and whether any have the REQUEST_ACK flag set. This is necessary as once recvmsg() can see the packet, it can start modifying it, such as doing in-place decryption. 
Fixes: 248f219cb8bc ("rxrpc: Rewrite the data and ack handling code") Signed-off-by: David Howells --- net/rxrpc/ar-internal.h | 3 +- net/rxrpc/input.c | 229 ++++++++++++++++++++-------------------- net/rxrpc/recvmsg.c | 41 ++++--- 3 files changed, 138 insertions(+), 135 deletions(-) diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 87cff6c218b6..20d7907a5bc6 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -617,8 +617,7 @@ struct rxrpc_call { #define RXRPC_TX_ANNO_LAST 0x04 #define RXRPC_TX_ANNO_RESENT 0x08 -#define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */ -#define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */ +#define RXRPC_RX_ANNO_SUBPACKET 0x3f /* Subpacket number in jumbogram */ #define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */ rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but * not hard-ACK'd packet follows this. diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index ffcec5117954..35b1a9368d80 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -405,10 +405,10 @@ protocol_error: * (that information is encoded in the ACK packet). */ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, - u8 annotation, bool *_jumbo_bad) + bool is_jumbo, bool *_jumbo_bad) { /* Discard normal packets that are duplicates. */ - if (annotation == 0) + if (is_jumbo) return; /* Skip jumbo subpackets that are duplicates. When we've had three or @@ -428,19 +428,17 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); enum rxrpc_call_state state; - unsigned int offset = sizeof(struct rxrpc_wire_header); - unsigned int ix; + unsigned int j; rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; - rxrpc_seq_t seq = sp->hdr.seq, hard_ack; - bool immediate_ack = false, jumbo_bad = false, queued; - u16 len; - u8 ack = 0, flags, annotation = 0; + rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack; + bool immediate_ack = false, jumbo_bad = false; + u8 ack = 0; _enter("{%u,%u},{%u,%u}", - call->rx_hard_ack, call->rx_top, skb->len, seq); + call->rx_hard_ack, call->rx_top, skb->len, seq0); - _proto("Rx DATA %%%u { #%u f=%02x }", - sp->hdr.serial, seq, sp->hdr.flags); + _proto("Rx DATA %%%u { #%u f=%02x n=%u }", + sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets); state = READ_ONCE(call->state); if (state >= RXRPC_CALL_COMPLETE) @@ -469,137 +467,136 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) !rxrpc_receiving_reply(call)) goto unlock; - call->ackr_prev_seq = seq; - + call->ackr_prev_seq = seq0; hard_ack = READ_ONCE(call->rx_hard_ack); - if (after(seq, hard_ack + call->rx_winsize)) { - ack = RXRPC_ACK_EXCEEDS_WINDOW; - ack_serial = serial; - goto ack; - } - flags = sp->hdr.flags; - if (flags & RXRPC_JUMBO_PACKET) { + if (sp->nr_subpackets > 1) { if (call->nr_jumbo_bad > 3) { ack = RXRPC_ACK_NOSPACE; ack_serial = serial; goto ack; } - annotation = 1; } -next_subpacket: - queued = false; - ix = seq & RXRPC_RXTX_BUFF_MASK; - len = skb->len; - if (flags & RXRPC_JUMBO_PACKET) - len = RXRPC_JUMBO_DATALEN; + for (j = 0; j < sp->nr_subpackets; j++) { + rxrpc_serial_t serial = sp->hdr.serial + j; + rxrpc_seq_t seq = seq0 + j; + unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK; + bool terminal = (j == sp->nr_subpackets - 1); + bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST); + u8 flags, annotation = j; - if (flags & RXRPC_LAST_PACKET) { - if 
(test_bit(RXRPC_CALL_RX_LAST, &call->flags) && - seq != call->rx_top) { - rxrpc_proto_abort("LSN", call, seq); - goto unlock; + _proto("Rx DATA+%u %%%u { #%x t=%u l=%u }", + j, serial, seq, terminal, last); + + if (last) { + if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && + seq != call->rx_top) { + rxrpc_proto_abort("LSN", call, seq); + goto unlock; + } + } else { + if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && + after_eq(seq, call->rx_top)) { + rxrpc_proto_abort("LSA", call, seq); + goto unlock; + } } - } else { - if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && - after_eq(seq, call->rx_top)) { - rxrpc_proto_abort("LSA", call, seq); - goto unlock; - } - } - trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); - if (before_eq(seq, hard_ack)) { - ack = RXRPC_ACK_DUPLICATE; - ack_serial = serial; - goto skip; - } + flags = 0; + if (last) + flags |= RXRPC_LAST_PACKET; + if (!terminal) + flags |= RXRPC_JUMBO_PACKET; + if (test_bit(j, sp->rx_req_ack)) + flags |= RXRPC_REQUEST_ACK; + trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); - if (flags & RXRPC_REQUEST_ACK && !ack) { - ack = RXRPC_ACK_REQUESTED; - ack_serial = serial; - } - - if (call->rxtx_buffer[ix]) { - rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad); - if (ack != RXRPC_ACK_DUPLICATE) { + if (before_eq(seq, hard_ack)) { ack = RXRPC_ACK_DUPLICATE; ack_serial = serial; + continue; } - immediate_ack = true; - goto skip; - } - /* Queue the packet. We use a couple of memory barriers here as need - * to make sure that rx_top is perceived to be set after the buffer - * pointer and that the buffer pointer is set after the annotation and - * the skb data. - * - * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window() - * and also rxrpc_fill_out_ack(). 
- */ - rxrpc_get_skb(skb, rxrpc_skb_rx_got); - call->rxtx_annotations[ix] = annotation; - smp_wmb(); - call->rxtx_buffer[ix] = skb; - if (after(seq, call->rx_top)) { - smp_store_release(&call->rx_top, seq); - } else if (before(seq, call->rx_top)) { - /* Send an immediate ACK if we fill in a hole */ - if (!ack) { - ack = RXRPC_ACK_DELAY; - ack_serial = serial; + if (call->rxtx_buffer[ix]) { + rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1, + &jumbo_bad); + if (ack != RXRPC_ACK_DUPLICATE) { + ack = RXRPC_ACK_DUPLICATE; + ack_serial = serial; + } + immediate_ack = true; + continue; } - immediate_ack = true; - } - if (flags & RXRPC_LAST_PACKET) { - set_bit(RXRPC_CALL_RX_LAST, &call->flags); - trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq); - } else { - trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq); - } - queued = true; - if (after_eq(seq, call->rx_expect_next)) { - if (after(seq, call->rx_expect_next)) { - _net("OOS %u > %u", seq, call->rx_expect_next); - ack = RXRPC_ACK_OUT_OF_SEQUENCE; - ack_serial = serial; - } - call->rx_expect_next = seq + 1; - } - -skip: - offset += len; - if (flags & RXRPC_JUMBO_PACKET) { - if (skb_copy_bits(skb, offset, &flags, 1) < 0) { - rxrpc_proto_abort("XJF", call, seq); - goto unlock; - } - offset += sizeof(struct rxrpc_jumbo_header); - seq++; - serial++; - annotation++; - if (flags & RXRPC_JUMBO_PACKET) - annotation |= RXRPC_RX_ANNO_JLAST; if (after(seq, hard_ack + call->rx_winsize)) { ack = RXRPC_ACK_EXCEEDS_WINDOW; ack_serial = serial; - if (!jumbo_bad) { - call->nr_jumbo_bad++; - jumbo_bad = true; + if (flags & RXRPC_JUMBO_PACKET) { + if (!jumbo_bad) { + call->nr_jumbo_bad++; + jumbo_bad = true; + } } + goto ack; } - _proto("Rx DATA Jumbo %%%u", serial); - goto next_subpacket; - } + if (flags & RXRPC_REQUEST_ACK && !ack) { + ack = RXRPC_ACK_REQUESTED; + ack_serial = serial; + } - if (queued && flags & RXRPC_LAST_PACKET && !ack) { - ack = RXRPC_ACK_DELAY; - ack_serial = serial; + /* Queue the packet. We use a couple of memory barriers here as need + * to make sure that rx_top is perceived to be set after the buffer + * pointer and that the buffer pointer is set after the annotation and + * the skb data. + * + * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window() + * and also rxrpc_fill_out_ack(). + */ + rxrpc_get_skb(skb, rxrpc_skb_rx_got); + call->rxtx_annotations[ix] = annotation; + smp_wmb(); + call->rxtx_buffer[ix] = skb; + if (after(seq, call->rx_top)) { + smp_store_release(&call->rx_top, seq); + } else if (before(seq, call->rx_top)) { + /* Send an immediate ACK if we fill in a hole */ + if (!ack) { + ack = RXRPC_ACK_DELAY; + ack_serial = serial; + } + immediate_ack = true; + } + + if (terminal) { + /* From this point on, we're not allowed to touch the + * packet any longer as its ref now belongs to the Rx + * ring. 
+ */ + skb = NULL; + } + + if (last) { + set_bit(RXRPC_CALL_RX_LAST, &call->flags); + if (!ack) { + ack = RXRPC_ACK_DELAY; + ack_serial = serial; + } + trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq); + } else { + trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq); + } + + if (after_eq(seq, call->rx_expect_next)) { + if (after(seq, call->rx_expect_next)) { + _net("OOS %u > %u", seq, call->rx_expect_next); + ack = RXRPC_ACK_OUT_OF_SEQUENCE; + ack_serial = serial; + } + call->rx_expect_next = seq + 1; + } } ack: @@ -612,7 +609,7 @@ ack: false, true, rxrpc_propose_ack_input_data); - if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) { + if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) { trace_rxrpc_notify_socket(call->debug_id, serial); rxrpc_notify_socket(call); } diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 9a7e1bc9791d..e49eacfaf4d6 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -177,7 +177,8 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) struct sk_buff *skb; rxrpc_serial_t serial; rxrpc_seq_t hard_ack, top; - u8 flags; + bool last = false; + u8 subpacket; int ix; _enter("%d", call->debug_id); @@ -191,10 +192,13 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) skb = call->rxtx_buffer[ix]; rxrpc_see_skb(skb, rxrpc_skb_rx_rotated); sp = rxrpc_skb(skb); - flags = sp->hdr.flags; - serial = sp->hdr.serial; - if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1; + + subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET; + serial = sp->hdr.serial + subpacket; + + if (subpacket == sp->nr_subpackets - 1 && + sp->rx_flags & RXRPC_SKB_INCL_LAST) + last = true; call->rxtx_buffer[ix] = NULL; call->rxtx_annotations[ix] = 0; @@ -203,9 +207,8 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) rxrpc_free_skb(skb, rxrpc_skb_rx_freed); - _debug("%u,%u,%02x", hard_ack, top, flags); trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack); - if (flags & RXRPC_LAST_PACKET) { + if (last) { rxrpc_end_rx_phase(call, serial); } else { /* Check to see if there's an ACK that needs sending. */ @@ -233,18 +236,19 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb, struct rxrpc_skb_priv *sp = rxrpc_skb(skb); rxrpc_seq_t seq = sp->hdr.seq; u16 cksum = sp->hdr.cksum; + u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET; _enter(""); /* For all but the head jumbo subpacket, the security checksum is in a * jumbo header immediately prior to the data. */ - if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) { + if (subpacket > 0) { __be16 tmp; if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0) BUG(); cksum = ntohs(tmp); - seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1; + seq += subpacket; } return call->conn->security->verify_packet(call, skb, offset, len, @@ -265,19 +269,18 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb, u8 *_annotation, unsigned int *_offset, unsigned int *_len) { + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); unsigned int offset = sizeof(struct rxrpc_wire_header); unsigned int len; int ret; u8 annotation = *_annotation; + u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET; /* Locate the subpacket */ + offset += subpacket * RXRPC_JUMBO_SUBPKTLEN; len = skb->len - offset; - if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) { - offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) * - RXRPC_JUMBO_SUBPKTLEN); - len = (annotation & RXRPC_RX_ANNO_JLAST) ? 
- skb->len - offset : RXRPC_JUMBO_SUBPKTLEN; - } + if (subpacket < sp->nr_subpackets - 1) + len = RXRPC_JUMBO_DATALEN; if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) { ret = rxrpc_verify_packet(call, skb, annotation, offset, len); @@ -303,6 +306,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, { struct rxrpc_skb_priv *sp; struct sk_buff *skb; + rxrpc_serial_t serial; rxrpc_seq_t hard_ack, top, seq; size_t remain; bool last; @@ -339,9 +343,12 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, rxrpc_see_skb(skb, rxrpc_skb_rx_seen); sp = rxrpc_skb(skb); - if (!(flags & MSG_PEEK)) + if (!(flags & MSG_PEEK)) { + serial = sp->hdr.serial; + serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET; trace_rxrpc_receive(call, rxrpc_receive_front, - sp->hdr.serial, seq); + serial, seq); + } if (msg) sock_recv_timestamp(msg, sock->sk, skb); From 4858e40303fba12e7506bf4354f20fdf550b3cd5 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 19 Aug 2019 09:25:36 +0100 Subject: [PATCH 07/58] rxrpc: Pass the input handler's data skb reference to the Rx ring Pass the reference held on a DATA skb in the rxrpc input handler into the Rx ring rather than getting an additional ref for this and then dropping the original ref at the end. Signed-off-by: David Howells --- net/rxrpc/input.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 35b1a9368d80..140cede77655 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -422,7 +422,8 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, } /* - * Process a DATA packet, adding the packet to the Rx ring. + * Process a DATA packet, adding the packet to the Rx ring. The caller's + * packet ref must be passed on or discarded. */ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) { @@ -441,8 +442,10 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets); state = READ_ONCE(call->state); - if (state >= RXRPC_CALL_COMPLETE) + if (state >= RXRPC_CALL_COMPLETE) { + rxrpc_free_skb(skb, rxrpc_skb_rx_freed); return; + } if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { unsigned long timo = READ_ONCE(call->next_req_timo); @@ -555,7 +558,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window() * and also rxrpc_fill_out_ack(). */ - rxrpc_get_skb(skb, rxrpc_skb_rx_got); + if (!terminal) + rxrpc_get_skb(skb, rxrpc_skb_rx_got); call->rxtx_annotations[ix] = annotation; smp_wmb(); call->rxtx_buffer[ix] = skb; @@ -616,6 +620,7 @@ ack: unlock: spin_unlock(&call->input_lock); + rxrpc_free_skb(skb, rxrpc_skb_rx_freed); _leave(" [queued]"); } @@ -1024,7 +1029,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, switch (sp->hdr.type) { case RXRPC_PACKET_TYPE_DATA: rxrpc_input_data(call, skb); - break; + goto no_free; case RXRPC_PACKET_TYPE_ACK: rxrpc_input_ack(call, skb); @@ -1051,6 +1056,8 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, break; } + rxrpc_free_skb(skb, rxrpc_skb_rx_freed); +no_free: _leave(""); } @@ -1375,8 +1382,11 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) mutex_unlock(&call->user_mutex); } + /* Process a call packet; this either discards or passes on the ref + * elsewhere. 
+ */ rxrpc_input_call_packet(call, skb); - goto discard; + goto out; discard: rxrpc_free_skb(skb, rxrpc_skb_rx_freed); From a641fd00d05a5ae38c5a3d50d4da10283b15287b Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 19 Aug 2019 09:25:37 +0100 Subject: [PATCH 08/58] rxrpc: Abstract out rxtx ring cleanup Abstract out rxtx ring cleanup into its own function from its two callers. This makes it easier to apply the same changes to both. Signed-off-by: David Howells --- net/rxrpc/call_object.c | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 217b12be9e08..c9ab2da957fe 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -421,6 +421,21 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) trace_rxrpc_call(call, op, n, here, NULL); } +/* + * Clean up the RxTx skb ring. + */ +static void rxrpc_cleanup_ring(struct rxrpc_call *call) +{ + int i; + + for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { + rxrpc_free_skb(call->rxtx_buffer[i], + (call->tx_phase ? rxrpc_skb_tx_cleaned : + rxrpc_skb_rx_cleaned)); + call->rxtx_buffer[i] = NULL; + } +} + /* * Detach a call from its owning socket. */ @@ -429,7 +444,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) const void *here = __builtin_return_address(0); struct rxrpc_connection *conn = call->conn; bool put = false; - int i; _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); @@ -479,13 +493,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) if (conn) rxrpc_disconnect_call(call); - for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { - rxrpc_free_skb(call->rxtx_buffer[i], - (call->tx_phase ? rxrpc_skb_tx_cleaned : - rxrpc_skb_rx_cleaned)); - call->rxtx_buffer[i] = NULL; - } - + rxrpc_cleanup_ring(call); _leave(""); } @@ -568,8 +576,6 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) */ void rxrpc_cleanup_call(struct rxrpc_call *call) { - int i; - _net("DESTROY CALL %d", call->debug_id); memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); @@ -580,12 +586,7 @@ void rxrpc_cleanup_call(struct rxrpc_call *call) ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); ASSERTCMP(call->conn, ==, NULL); - /* Clean up the Rx/Tx buffer */ - for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) - rxrpc_free_skb(call->rxtx_buffer[i], - (call->tx_phase ? rxrpc_skb_tx_cleaned : - rxrpc_skb_rx_cleaned)); - + rxrpc_cleanup_ring(call); rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned); call_rcu(&call->rcu, rxrpc_rcu_destroy_call); From b311e68420aa52098591988d0d6868b0b7463c0f Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 19 Aug 2019 09:25:37 +0100 Subject: [PATCH 09/58] rxrpc: Add a private skb flag to indicate transmission-phase skbs Add a flag in the private data on an skbuff to indicate that this is a transmission-phase buffer rather than a receive-phase buffer. 
Signed-off-by: David Howells --- net/rxrpc/ar-internal.h | 1 + net/rxrpc/sendmsg.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 20d7907a5bc6..63d3a91ce5e9 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -188,6 +188,7 @@ struct rxrpc_skb_priv { u8 nr_subpackets; /* Number of subpackets */ u8 rx_flags; /* Received packet flags */ #define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */ +#define RXRPC_SKB_TX_BUFFER 0x02 /* - Is transmit buffer */ union { int remain; /* amount of space remaining for next write */ diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index bae14438f869..472dc3b7d91f 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -336,6 +336,8 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, if (!skb) goto maybe_error; + sp = rxrpc_skb(skb); + sp->rx_flags |= RXRPC_SKB_TX_BUFFER; rxrpc_new_skb(skb, rxrpc_skb_tx_new); _debug("ALLOC SEND %p", skb); @@ -346,7 +348,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, skb_reserve(skb, call->conn->security_size); skb->len += call->conn->security_size; - sp = rxrpc_skb(skb); sp->remain = chunk; if (sp->remain > skb_tailroom(skb)) sp->remain = skb_tailroom(skb); From 987db9f7cd1e77e611b770a569068c43949aa6fd Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 19 Aug 2019 09:25:38 +0100 Subject: [PATCH 10/58] rxrpc: Use the tx-phase skb flag to simplify tracing Use the previously-added transmit-phase skbuff private flag to simplify the socket buffer tracing a bit. Which phase the skbuff comes from can now be divined from the skb rather than having to be guessed from the call state. We can also reduce the number of rxrpc_skb_trace values by eliminating the difference between Tx and Rx in the symbols. Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 51 ++++++++++++++++-------------------- net/rxrpc/ar-internal.h | 1 + net/rxrpc/call_event.c | 8 +++--- net/rxrpc/call_object.c | 6 ++--- net/rxrpc/conn_event.c | 6 ++--- net/rxrpc/input.c | 22 ++++++++-------- net/rxrpc/local_event.c | 4 +-- net/rxrpc/output.c | 6 ++--- net/rxrpc/peer_event.c | 10 +++---- net/rxrpc/recvmsg.c | 6 ++--- net/rxrpc/sendmsg.c | 10 +++---- net/rxrpc/skbuff.c | 15 ++++++----- 12 files changed, 69 insertions(+), 76 deletions(-) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index fa06b528c73c..e2356c51883b 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -23,20 +23,15 @@ #define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY enum rxrpc_skb_trace { - rxrpc_skb_rx_cleaned, - rxrpc_skb_rx_freed, - rxrpc_skb_rx_got, - rxrpc_skb_rx_lost, - rxrpc_skb_rx_purged, - rxrpc_skb_rx_received, - rxrpc_skb_rx_rotated, - rxrpc_skb_rx_seen, - rxrpc_skb_tx_cleaned, - rxrpc_skb_tx_freed, - rxrpc_skb_tx_got, - rxrpc_skb_tx_new, - rxrpc_skb_tx_rotated, - rxrpc_skb_tx_seen, + rxrpc_skb_cleaned, + rxrpc_skb_freed, + rxrpc_skb_got, + rxrpc_skb_lost, + rxrpc_skb_new, + rxrpc_skb_purged, + rxrpc_skb_received, + rxrpc_skb_rotated, + rxrpc_skb_seen, }; enum rxrpc_local_trace { @@ -228,20 +223,15 @@ enum rxrpc_tx_point { * Declare tracing information enums and their string mappings for display. 
*/ #define rxrpc_skb_traces \ - EM(rxrpc_skb_rx_cleaned, "Rx CLN") \ - EM(rxrpc_skb_rx_freed, "Rx FRE") \ - EM(rxrpc_skb_rx_got, "Rx GOT") \ - EM(rxrpc_skb_rx_lost, "Rx *L*") \ - EM(rxrpc_skb_rx_purged, "Rx PUR") \ - EM(rxrpc_skb_rx_received, "Rx RCV") \ - EM(rxrpc_skb_rx_rotated, "Rx ROT") \ - EM(rxrpc_skb_rx_seen, "Rx SEE") \ - EM(rxrpc_skb_tx_cleaned, "Tx CLN") \ - EM(rxrpc_skb_tx_freed, "Tx FRE") \ - EM(rxrpc_skb_tx_got, "Tx GOT") \ - EM(rxrpc_skb_tx_new, "Tx NEW") \ - EM(rxrpc_skb_tx_rotated, "Tx ROT") \ - E_(rxrpc_skb_tx_seen, "Tx SEE") + EM(rxrpc_skb_cleaned, "CLN") \ + EM(rxrpc_skb_freed, "FRE") \ + EM(rxrpc_skb_got, "GOT") \ + EM(rxrpc_skb_lost, "*L*") \ + EM(rxrpc_skb_new, "NEW") \ + EM(rxrpc_skb_purged, "PUR") \ + EM(rxrpc_skb_received, "RCV") \ + EM(rxrpc_skb_rotated, "ROT") \ + E_(rxrpc_skb_seen, "SEE") #define rxrpc_local_traces \ EM(rxrpc_local_got, "GOT") \ @@ -650,6 +640,7 @@ TRACE_EVENT(rxrpc_skb, TP_STRUCT__entry( __field(struct sk_buff *, skb ) __field(enum rxrpc_skb_trace, op ) + __field(u8, flags ) __field(int, usage ) __field(int, mod_count ) __field(const void *, where ) @@ -657,14 +648,16 @@ TRACE_EVENT(rxrpc_skb, TP_fast_assign( __entry->skb = skb; + __entry->flags = rxrpc_skb(skb)->rx_flags; __entry->op = op; __entry->usage = usage; __entry->mod_count = mod_count; __entry->where = where; ), - TP_printk("s=%p %s u=%d m=%d p=%pSR", + TP_printk("s=%p %cx %s u=%d m=%d p=%pSR", __entry->skb, + __entry->flags & RXRPC_SKB_TX_BUFFER ? 'T' : 'R', __print_symbolic(__entry->op, rxrpc_skb_traces), __entry->usage, __entry->mod_count, diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 63d3a91ce5e9..2d5294f3e62f 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -185,6 +185,7 @@ struct rxrpc_host_header { * - max 48 bytes (struct sk_buff::cb) */ struct rxrpc_skb_priv { + atomic_t nr_ring_pins; /* Number of rxtx ring pins */ u8 nr_subpackets; /* Number of subpackets */ u8 rx_flags; /* Received packet flags */ #define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */ diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index c767679bfa5d..cedbbb3a7c2e 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) continue; skb = call->rxtx_buffer[ix]; - rxrpc_see_skb(skb, rxrpc_skb_tx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); if (anno_type == RXRPC_TX_ANNO_UNACK) { if (ktime_after(skb->tstamp, max_age)) { @@ -255,18 +255,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) continue; skb = call->rxtx_buffer[ix]; - rxrpc_get_skb(skb, rxrpc_skb_tx_got); + rxrpc_get_skb(skb, rxrpc_skb_got); spin_unlock_bh(&call->lock); if (rxrpc_send_data_packet(call, skb, true) < 0) { - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); return; } if (rxrpc_is_client_call(call)) rxrpc_expose_client_call(call); - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); spin_lock_bh(&call->lock); /* We need to clear the retransmit state, but there are two diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index c9ab2da957fe..014548c259ce 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -429,9 +429,7 @@ static void rxrpc_cleanup_ring(struct rxrpc_call *call) int i; for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { - rxrpc_free_skb(call->rxtx_buffer[i], - (call->tx_phase ? 
rxrpc_skb_tx_cleaned : - rxrpc_skb_rx_cleaned)); + rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned); call->rxtx_buffer[i] = NULL; } } @@ -587,7 +585,7 @@ void rxrpc_cleanup_call(struct rxrpc_call *call) ASSERTCMP(call->conn, ==, NULL); rxrpc_cleanup_ring(call); - rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned); + rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned); call_rcu(&call->rcu, rxrpc_rcu_destroy_call); } diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index df6624c140be..a1ceef4f5cd0 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -472,7 +472,7 @@ void rxrpc_process_connection(struct work_struct *work) /* go through the conn-level event packets, releasing the ref on this * connection that each one has when we've finished with it */ while ((skb = skb_dequeue(&conn->rx_queue))) { - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); ret = rxrpc_process_event(conn, skb, &abort_code); switch (ret) { case -EPROTO: @@ -484,7 +484,7 @@ void rxrpc_process_connection(struct work_struct *work) goto requeue_and_leave; case -ECONNABORTED: default: - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); break; } } @@ -501,6 +501,6 @@ requeue_and_leave: protocol_error: if (rxrpc_abort_connection(conn, ret, abort_code) < 0) goto requeue_and_leave; - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); goto out; } diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 140cede77655..31090bdf1fae 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -233,7 +233,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK; skb = call->rxtx_buffer[ix]; annotation = call->rxtx_annotations[ix]; - rxrpc_see_skb(skb, rxrpc_skb_tx_rotated); + rxrpc_see_skb(skb, rxrpc_skb_rotated); call->rxtx_buffer[ix] = NULL; call->rxtx_annotations[ix] = 0; skb->next = list; @@ -258,7 +258,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, skb = list; list = skb->next; skb_mark_not_on_list(skb); - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } return rot_last; @@ -443,7 +443,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) state = READ_ONCE(call->state); if (state >= RXRPC_CALL_COMPLETE) { - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); return; } @@ -559,7 +559,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) * and also rxrpc_fill_out_ack(). 
*/ if (!terminal) - rxrpc_get_skb(skb, rxrpc_skb_rx_got); + rxrpc_get_skb(skb, rxrpc_skb_got); call->rxtx_annotations[ix] = annotation; smp_wmb(); call->rxtx_buffer[ix] = skb; @@ -620,7 +620,7 @@ ack: unlock: spin_unlock(&call->input_lock); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); _leave(" [queued]"); } @@ -1056,7 +1056,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, break; } - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); no_free: _leave(""); } @@ -1119,7 +1119,7 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local, skb_queue_tail(&local->event_queue, skb); rxrpc_queue_local(local); } else { - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } } @@ -1134,7 +1134,7 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) skb_queue_tail(&local->reject_queue, skb); rxrpc_queue_local(local); } else { - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } } @@ -1198,7 +1198,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) if (skb->tstamp == 0) skb->tstamp = ktime_get_real(); - rxrpc_new_skb(skb, rxrpc_skb_rx_received); + rxrpc_new_skb(skb, rxrpc_skb_received); skb_pull(skb, sizeof(struct udphdr)); @@ -1215,7 +1215,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) static int lose; if ((lose++ & 7) == 7) { trace_rxrpc_rx_lose(sp); - rxrpc_free_skb(skb, rxrpc_skb_rx_lost); + rxrpc_free_skb(skb, rxrpc_skb_lost); return 0; } } @@ -1389,7 +1389,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) goto out; discard: - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); out: trace_rxrpc_rx_done(0, 0); return 0; diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index e93a78f7c05e..3ce6d628cd75 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c @@ -90,7 +90,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local) if (skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); _debug("{%d},{%u}", local->debug_id, sp->hdr.type); switch (sp->hdr.type) { @@ -108,7 +108,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local) break; } - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } _leave(""); diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 369e516c4bdf..935bb60fff56 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -565,7 +565,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) memset(&whdr, 0, sizeof(whdr)); while ((skb = skb_dequeue(&local->reject_queue))) { - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); sp = rxrpc_skb(skb); switch (skb->mark) { @@ -581,7 +581,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) ioc = 2; break; default: - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); continue; } @@ -606,7 +606,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local) rxrpc_tx_point_reject); } - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); } _leave(""); diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 7666ec72d37e..c97ebdc043e4 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -163,11 +163,11 @@ void rxrpc_error_report(struct sock *sk) _leave("UDP socket errqueue empty"); return; } - rxrpc_new_skb(skb, 
rxrpc_skb_rx_received); + rxrpc_new_skb(skb, rxrpc_skb_received); serr = SKB_EXT_ERR(skb); if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { _leave("UDP empty message"); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); return; } @@ -177,7 +177,7 @@ void rxrpc_error_report(struct sock *sk) peer = NULL; if (!peer) { rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); _leave(" [no peer]"); return; } @@ -189,7 +189,7 @@ void rxrpc_error_report(struct sock *sk) serr->ee.ee_code == ICMP_FRAG_NEEDED)) { rxrpc_adjust_mtu(peer, serr); rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); rxrpc_put_peer(peer); _leave(" [MTU update]"); return; @@ -197,7 +197,7 @@ void rxrpc_error_report(struct sock *sk) rxrpc_store_error(peer, serr); rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); rxrpc_put_peer(peer); _leave(""); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index e49eacfaf4d6..3b0becb12041 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -190,7 +190,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) hard_ack++; ix = hard_ack & RXRPC_RXTX_BUFF_MASK; skb = call->rxtx_buffer[ix]; - rxrpc_see_skb(skb, rxrpc_skb_rx_rotated); + rxrpc_see_skb(skb, rxrpc_skb_rotated); sp = rxrpc_skb(skb); subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET; @@ -205,7 +205,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) /* Barrier against rxrpc_input_data(). */ smp_store_release(&call->rx_hard_ack, hard_ack); - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack); if (last) { @@ -340,7 +340,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, break; } smp_rmb(); - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); sp = rxrpc_skb(skb); if (!(flags & MSG_PEEK)) { diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 472dc3b7d91f..6a1547b270fe 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -176,7 +176,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, skb->tstamp = ktime_get_real(); ix = seq & RXRPC_RXTX_BUFF_MASK; - rxrpc_get_skb(skb, rxrpc_skb_tx_got); + rxrpc_get_skb(skb, rxrpc_skb_got); call->rxtx_annotations[ix] = annotation; smp_wmb(); call->rxtx_buffer[ix] = skb; @@ -248,7 +248,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, } out: - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); _leave(" = %d", ret); return ret; } @@ -289,7 +289,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, skb = call->tx_pending; call->tx_pending = NULL; - rxrpc_see_skb(skb, rxrpc_skb_tx_seen); + rxrpc_see_skb(skb, rxrpc_skb_seen); copied = 0; do { @@ -338,7 +338,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, sp = rxrpc_skb(skb); sp->rx_flags |= RXRPC_SKB_TX_BUFFER; - rxrpc_new_skb(skb, rxrpc_skb_tx_new); + rxrpc_new_skb(skb, rxrpc_skb_new); _debug("ALLOC SEND %p", skb); @@ -440,7 +440,7 @@ out: return ret; call_terminated: - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); + rxrpc_free_skb(skb, rxrpc_skb_freed); _leave(" = %d", call->error); return call->error; diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c index 9ad5045b7c2f..8e6f45f84b9b 100644 --- a/net/rxrpc/skbuff.c +++ b/net/rxrpc/skbuff.c @@ -14,7 
+14,8 @@ #include #include "ar-internal.h" -#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) +#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER) +#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) /* * Note the allocation or reception of a socket buffer. @@ -22,7 +23,7 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) { const void *here = __builtin_return_address(0); - int n = atomic_inc_return(select_skb_count(op)); + int n = atomic_inc_return(select_skb_count(skb)); trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); } @@ -33,7 +34,7 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) { const void *here = __builtin_return_address(0); if (skb) { - int n = atomic_read(select_skb_count(op)); + int n = atomic_read(select_skb_count(skb)); trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); } } @@ -44,7 +45,7 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) { const void *here = __builtin_return_address(0); - int n = atomic_inc_return(select_skb_count(op)); + int n = atomic_inc_return(select_skb_count(skb)); trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); skb_get(skb); } @@ -58,7 +59,7 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) if (skb) { int n; CHECK_SLAB_OKAY(&skb->users); - n = atomic_dec_return(select_skb_count(op)); + n = atomic_dec_return(select_skb_count(skb)); trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); kfree_skb(skb); } @@ -72,8 +73,8 @@ void rxrpc_purge_queue(struct sk_buff_head *list) const void *here = __builtin_return_address(0); struct sk_buff *skb; while ((skb = skb_dequeue((list))) != NULL) { - int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged)); - trace_rxrpc_skb(skb, rxrpc_skb_rx_purged, + int n = atomic_dec_return(select_skb_count(skb)); + trace_rxrpc_skb(skb, rxrpc_skb_purged, refcount_read(&skb->users), n, here); kfree_skb(skb); } From d0d5c0cd1e711c98703f3544c1e6fc1372898de5 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Aug 2019 10:13:46 +0100 Subject: [PATCH 11/58] rxrpc: Use skb_unshare() rather than skb_cow_data() The in-place decryption routines in AF_RXRPC's rxkad security module currently call skb_cow_data() to make sure the data isn't shared and that the skb can be written over. This has a problem, however, as the softirq handler may be still holding a ref or the Rx ring may be holding multiple refs when skb_cow_data() is called in rxkad_verify_packet() - and so skb_shared() returns true and __pskb_pull_tail() dislikes that. If this occurs, something like the following report will be generated. kernel BUG at net/core/skbuff.c:1463! ... RIP: 0010:pskb_expand_head+0x253/0x2b0 ... 
Call Trace: __pskb_pull_tail+0x49/0x460 skb_cow_data+0x6f/0x300 rxkad_verify_packet+0x18b/0xb10 [rxrpc] rxrpc_recvmsg_data.isra.11+0x4a8/0xa10 [rxrpc] rxrpc_kernel_recv_data+0x126/0x240 [rxrpc] afs_extract_data+0x51/0x2d0 [kafs] afs_deliver_fs_fetch_data+0x188/0x400 [kafs] afs_deliver_to_call+0xac/0x430 [kafs] afs_wait_for_call_to_complete+0x22f/0x3d0 [kafs] afs_make_call+0x282/0x3f0 [kafs] afs_fs_fetch_data+0x164/0x300 [kafs] afs_fetch_data+0x54/0x130 [kafs] afs_readpages+0x20d/0x340 [kafs] read_pages+0x66/0x180 __do_page_cache_readahead+0x188/0x1a0 ondemand_readahead+0x17d/0x2e0 generic_file_read_iter+0x740/0xc10 __vfs_read+0x145/0x1a0 vfs_read+0x8c/0x140 ksys_read+0x4a/0xb0 do_syscall_64+0x43/0xf0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fix this by using skb_unshare() instead in the input path for DATA packets that have a security index != 0. Non-DATA packets don't need in-place encryption and neither do unencrypted DATA packets. Fixes: 248f219cb8bc ("rxrpc: Rewrite the data and ack handling code") Reported-by: Julian Wollrath Signed-off-by: David Howells --- include/trace/events/rxrpc.h | 12 ++++++++---- net/rxrpc/ar-internal.h | 1 + net/rxrpc/input.c | 18 ++++++++++++++++++ net/rxrpc/rxkad.c | 32 +++++++++----------------------- net/rxrpc/skbuff.c | 25 ++++++++++++++++++++----- 5 files changed, 56 insertions(+), 32 deletions(-) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index e2356c51883b..a13a62db3565 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -32,6 +32,8 @@ enum rxrpc_skb_trace { rxrpc_skb_received, rxrpc_skb_rotated, rxrpc_skb_seen, + rxrpc_skb_unshared, + rxrpc_skb_unshared_nomem, }; enum rxrpc_local_trace { @@ -231,7 +233,9 @@ enum rxrpc_tx_point { EM(rxrpc_skb_purged, "PUR") \ EM(rxrpc_skb_received, "RCV") \ EM(rxrpc_skb_rotated, "ROT") \ - E_(rxrpc_skb_seen, "SEE") + EM(rxrpc_skb_seen, "SEE") \ + EM(rxrpc_skb_unshared, "UNS") \ + E_(rxrpc_skb_unshared_nomem, "US0") #define rxrpc_local_traces \ EM(rxrpc_local_got, "GOT") \ @@ -633,9 +637,9 @@ TRACE_EVENT(rxrpc_call, TRACE_EVENT(rxrpc_skb, TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op, - int usage, int mod_count, const void *where), + int usage, int mod_count, u8 flags, const void *where), - TP_ARGS(skb, op, usage, mod_count, where), + TP_ARGS(skb, op, usage, mod_count, flags, where), TP_STRUCT__entry( __field(struct sk_buff *, skb ) @@ -648,7 +652,7 @@ TRACE_EVENT(rxrpc_skb, TP_fast_assign( __entry->skb = skb; - __entry->flags = rxrpc_skb(skb)->rx_flags; + __entry->flags = flags; __entry->op = op; __entry->usage = usage; __entry->mod_count = mod_count; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 2d5294f3e62f..852e58781fda 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -1110,6 +1110,7 @@ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *); void rxrpc_packet_destructor(struct sk_buff *); void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace); +void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_purge_queue(struct sk_buff_head *); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 31090bdf1fae..d122c53c8697 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -1249,6 +1249,24 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) goto bad_message; if 
(!rxrpc_validate_data(skb)) goto bad_message; + + /* Unshare the packet so that it can be modified for in-place + * decryption. + */ + if (sp->hdr.securityIndex != 0) { + struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC); + if (!nskb) { + rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem); + goto out; + } + + if (nskb != skb) { + rxrpc_eaten_skb(skb, rxrpc_skb_received); + rxrpc_new_skb(skb, rxrpc_skb_unshared); + skb = nskb; + sp = rxrpc_skb(skb); + } + } break; case RXRPC_PACKET_TYPE_CHALLENGE: diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index ae8cd8926456..c60c520fde7c 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -187,10 +187,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, struct rxrpc_skb_priv *sp; struct rxrpc_crypt iv; struct scatterlist sg[16]; - struct sk_buff *trailer; unsigned int len; u16 check; - int nsg; int err; sp = rxrpc_skb(skb); @@ -214,15 +212,14 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, crypto_skcipher_encrypt(req); /* we want to encrypt the skbuff in-place */ - nsg = skb_cow_data(skb, 0, &trailer); - err = -ENOMEM; - if (nsg < 0 || nsg > 16) + err = -EMSGSIZE; + if (skb_shinfo(skb)->nr_frags > 16) goto out; len = data_size + call->conn->size_align - 1; len &= ~(call->conn->size_align - 1); - sg_init_table(sg, nsg); + sg_init_table(sg, ARRAY_SIZE(sg)); err = skb_to_sgvec(skb, sg, 0, len); if (unlikely(err < 0)) goto out; @@ -319,11 +316,10 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, struct rxkad_level1_hdr sechdr; struct rxrpc_crypt iv; struct scatterlist sg[16]; - struct sk_buff *trailer; bool aborted; u32 data_size, buf; u16 check; - int nsg, ret; + int ret; _enter(""); @@ -336,11 +332,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, /* Decrypt the skbuff in-place. TODO: We really want to decrypt * directly into the target buffer. */ - nsg = skb_cow_data(skb, 0, &trailer); - if (nsg < 0 || nsg > 16) - goto nomem; - - sg_init_table(sg, nsg); + sg_init_table(sg, ARRAY_SIZE(sg)); ret = skb_to_sgvec(skb, sg, offset, 8); if (unlikely(ret < 0)) return ret; @@ -388,10 +380,6 @@ protocol_error: if (aborted) rxrpc_send_abort_packet(call); return -EPROTO; - -nomem: - _leave(" = -ENOMEM"); - return -ENOMEM; } /* @@ -406,7 +394,6 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, struct rxkad_level2_hdr sechdr; struct rxrpc_crypt iv; struct scatterlist _sg[4], *sg; - struct sk_buff *trailer; bool aborted; u32 data_size, buf; u16 check; @@ -423,12 +410,11 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, /* Decrypt the skbuff in-place. TODO: We really want to decrypt * directly into the target buffer. 
*/ - nsg = skb_cow_data(skb, 0, &trailer); - if (nsg < 0) - goto nomem; - sg = _sg; - if (unlikely(nsg > 4)) { + nsg = skb_shinfo(skb)->nr_frags; + if (nsg <= 4) { + nsg = 4; + } else { sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); if (!sg) goto nomem; diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c index 8e6f45f84b9b..0348d2bf6f7d 100644 --- a/net/rxrpc/skbuff.c +++ b/net/rxrpc/skbuff.c @@ -24,7 +24,8 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) { const void *here = __builtin_return_address(0); int n = atomic_inc_return(select_skb_count(skb)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); } /* @@ -35,7 +36,8 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) const void *here = __builtin_return_address(0); if (skb) { int n = atomic_read(select_skb_count(skb)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); } } @@ -46,10 +48,21 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) { const void *here = __builtin_return_address(0); int n = atomic_inc_return(select_skb_count(skb)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); skb_get(skb); } +/* + * Note the dropping of a ref on a socket buffer by the core. + */ +void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) +{ + const void *here = __builtin_return_address(0); + int n = atomic_inc_return(&rxrpc_n_rx_skbs); + trace_rxrpc_skb(skb, op, 0, n, 0, here); +} + /* * Note the destruction of a socket buffer. */ @@ -60,7 +73,8 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) int n; CHECK_SLAB_OKAY(&skb->users); n = atomic_dec_return(select_skb_count(skb)); - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); kfree_skb(skb); } } @@ -75,7 +89,8 @@ void rxrpc_purge_queue(struct sk_buff_head *list) while ((skb = skb_dequeue((list))) != NULL) { int n = atomic_dec_return(select_skb_count(skb)); trace_rxrpc_skb(skb, rxrpc_skb_purged, - refcount_read(&skb->users), n, here); + refcount_read(&skb->users), n, + rxrpc_skb(skb)->rx_flags, here); kfree_skb(skb); } } From 3cf2f450fff304be9cf4868bf0df17f253bc5b1c Mon Sep 17 00:00:00 2001 From: Todd Seidelmann Date: Wed, 21 Aug 2019 11:47:53 -0400 Subject: [PATCH 12/58] netfilter: xt_physdev: Fix spurious error message in physdev_mt_check Simplify the check in physdev_mt_check() to emit an error message only when passed an invalid chain (ie, NF_INET_LOCAL_OUT). This avoids cluttering up the log with errors against valid rules. For large/heavily modified rulesets, current behavior can quickly overwhelm the ring buffer, because this function gets called on every change, regardless of the rule that was changed. 
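For illustration, a minimal userspace C sketch (not kernel code; the helper name and printed message are invented for this example) of the simplified test: after this patch the error is emitted, and the rule rejected, only when the rule's hook mask includes NF_INET_LOCAL_OUT, so FORWARD/POSTROUTING rules no longer clutter the log. Hook numbering mirrors the netfilter UAPI values.

    /* Standalone sketch of the simplified physdev check. */
    #include <stdio.h>

    enum {
            NF_INET_PRE_ROUTING  = 0,
            NF_INET_LOCAL_IN     = 1,
            NF_INET_FORWARD      = 2,
            NF_INET_LOCAL_OUT    = 3,
            NF_INET_POST_ROUTING = 4,
    };

    /* Returns 0 if the rule placement is acceptable, -1 otherwise. */
    static int check_physdev_out_rule(unsigned int hook_mask)
    {
            if (hook_mask & (1 << NF_INET_LOCAL_OUT)) {
                    fprintf(stderr,
                            "--physdev-out only works in FORWARD/POSTROUTING with bridged traffic\n");
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            unsigned int forward_rule = 1 << NF_INET_FORWARD;
            unsigned int output_rule  = 1 << NF_INET_LOCAL_OUT;

            /* Valid placement: no message, no error. */
            printf("FORWARD rule: %d\n", check_physdev_out_rule(forward_rule));
            /* Invalid placement: message and error, as before the patch. */
            printf("OUTPUT rule: %d\n", check_physdev_out_rule(output_rule));
            return 0;
    }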
Signed-off-by: Todd Seidelmann Acked-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_physdev.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index ead7c6022208..b92b22ce8abd 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c @@ -101,11 +101,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par) if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) && (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || info->invert & XT_PHYSDEV_OP_BRIDGED) && - par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | - (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { + par->hook_mask & (1 << NF_INET_LOCAL_OUT)) { pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n"); - if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) - return -EINVAL; + return -EINVAL; } if (!brnf_probed) { From 3a069024d371125227de3ac8fa74223fcf473520 Mon Sep 17 00:00:00 2001 From: Thomas Jarosch Date: Wed, 21 Aug 2019 16:14:28 +0200 Subject: [PATCH 13/58] netfilter: nf_conntrack_ftp: Fix debug output The find_pattern() debug output was printing the 'skip' character. This can be a NULL-byte and messes up further pr_debug() output. Output without the fix: kernel: nf_conntrack_ftp: Pattern matches! kernel: nf_conntrack_ftp: Skipped up to `<7>nf_conntrack_ftp: find_pattern `PORT': dlen = 8 kernel: nf_conntrack_ftp: find_pattern `EPRT': dlen = 8 Output with the fix: kernel: nf_conntrack_ftp: Pattern matches! kernel: nf_conntrack_ftp: Skipped up to 0x0 delimiter! kernel: nf_conntrack_ftp: Match succeeded! kernel: nf_conntrack_ftp: conntrack_ftp: match `172,17,0,100,200,207' (20 bytes at 4150681645) kernel: nf_conntrack_ftp: find_pattern `PORT': dlen = 8 Signed-off-by: Thomas Jarosch Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_ftp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 0ecb3e289ef2..8d96738b7dfd 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -322,7 +322,7 @@ static int find_pattern(const char *data, size_t dlen, i++; } - pr_debug("Skipped up to `%c'!\n", skip); + pr_debug("Skipped up to 0x%hhx delimiter!\n", skip); *numoff = i; *numlen = getnum(data + i, dlen - i, cmd, term, numoff); From 478553fd1b6f819390b64a2e13ac756c4d1a2836 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 27 Aug 2019 13:24:52 +0200 Subject: [PATCH 14/58] netfilter: conntrack: make sysctls per-namespace again When I merged the extension sysctl tables with the main one I forgot to reset them on netns creation. They currently read/write init_net settings. 
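As a rough userspace analogy (illustrative types and names only, not the kernel's ctl_table API), the bug class looks like this: a shared template table whose .data pointers reference the global defaults must have those pointers redirected to the per-namespace copy when a namespace is set up, otherwise every namespace reads and writes the same global values.

    /* Sketch of per-namespace sysctl data pointers. */
    #include <stdio.h>
    #include <string.h>

    struct ctl_entry {
            const char *name;
            int *data;                      /* where reads/writes land */
    };

    static int global_acct;                 /* stands in for init_net defaults */

    static const struct ctl_entry template[] = {
            { "nf_conntrack_acct", &global_acct },
    };

    struct netns {
            int acct;
            struct ctl_entry table[1];
    };

    static void netns_init_sysctl(struct netns *ns)
    {
            memcpy(ns->table, template, sizeof(template));
            /* The crucial step this patch restores: point at ns-local data. */
            ns->table[0].data = &ns->acct;
    }

    int main(void)
    {
            struct netns ns = { .acct = 1 };

            netns_init_sysctl(&ns);
            *ns.table[0].data = 7;          /* "sysctl write" in the new netns */
            printf("netns value: %d, init_net value: %d\n", ns.acct, global_acct);
            return 0;
    }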
Fixes: d912dec12428 ("netfilter: conntrack: merge acct and helper sysctl table with main one") Fixes: cb2833ed0044 ("netfilter: conntrack: merge ecache and timestamp sysctl tables with main one") Reported-by: Shmulik Ladkani Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_standalone.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index e0d392cb3075..0006503d2da9 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -1037,8 +1037,13 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; + table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct; + table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper; #ifdef CONFIG_NF_CONNTRACK_EVENTS table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; +#endif +#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP + table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp; #endif table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout; From 981471bd3abf4d572097645d765391533aac327d Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 25 Aug 2019 10:01:32 -0700 Subject: [PATCH 15/58] net_sched: fix a NULL pointer deref in ipt action The net pointer in struct xt_tgdtor_param is not explicitly initialized therefore is still NULL when dereferencing it. So we have to find a way to pass the correct net pointer to ipt_destroy_target(). The best way I find is just saving the net pointer inside the per netns struct tcf_idrinfo, which could make this patch smaller. Fixes: 0c66dc1ea3f0 ("netfilter: conntrack: register hooks in netns when needed by ruleset") Reported-and-tested-by: itugrok@yahoo.com Cc: Jamal Hadi Salim Cc: Jiri Pirko Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- include/net/act_api.h | 4 +++- net/sched/act_bpf.c | 2 +- net/sched/act_connmark.c | 2 +- net/sched/act_csum.c | 2 +- net/sched/act_ct.c | 2 +- net/sched/act_ctinfo.c | 2 +- net/sched/act_gact.c | 2 +- net/sched/act_ife.c | 2 +- net/sched/act_ipt.c | 11 ++++++----- net/sched/act_mirred.c | 2 +- net/sched/act_mpls.c | 2 +- net/sched/act_nat.c | 2 +- net/sched/act_pedit.c | 2 +- net/sched/act_police.c | 2 +- net/sched/act_sample.c | 2 +- net/sched/act_simple.c | 2 +- net/sched/act_skbedit.c | 2 +- net/sched/act_skbmod.c | 2 +- net/sched/act_tunnel_key.c | 2 +- net/sched/act_vlan.c | 2 +- 20 files changed, 27 insertions(+), 24 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index c61a1bf4e3de..3a1a72990fce 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -15,6 +15,7 @@ struct tcf_idrinfo { struct mutex lock; struct idr action_idr; + struct net *net; }; struct tc_action_ops; @@ -108,7 +109,7 @@ struct tc_action_net { }; static inline -int tc_action_net_init(struct tc_action_net *tn, +int tc_action_net_init(struct net *net, struct tc_action_net *tn, const struct tc_action_ops *ops) { int err = 0; @@ -117,6 +118,7 @@ int tc_action_net_init(struct tc_action_net *tn, if (!tn->idrinfo) return -ENOMEM; tn->ops = ops; + tn->idrinfo->net = net; mutex_init(&tn->idrinfo->lock); idr_init(&tn->idrinfo->action_idr); return err; diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index fd1f7e799e23..04b7bd4ec751 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -422,7 +422,7 @@ static __net_init int bpf_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, bpf_net_id); - return tc_action_net_init(tn, &act_bpf_ops); + return tc_action_net_init(net, tn, &act_bpf_ops); } static void __net_exit bpf_exit_net(struct list_head *net_list) diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 32ac04d77a45..2b43cacf82af 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -231,7 +231,7 @@ static __net_init int connmark_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, connmark_net_id); - return tc_action_net_init(tn, &act_connmark_ops); + return tc_action_net_init(net, tn, &act_connmark_ops); } static void __net_exit connmark_exit_net(struct list_head *net_list) diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 9b9288267a54..d3cfad88dc3a 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -714,7 +714,7 @@ static __net_init int csum_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, csum_net_id); - return tc_action_net_init(tn, &act_csum_ops); + return tc_action_net_init(net, tn, &act_csum_ops); } static void __net_exit csum_exit_net(struct list_head *net_list) diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 33a1a7406e87..cdd6f3818097 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -939,7 +939,7 @@ static __net_init int ct_init_net(struct net *net) tn->labels = true; } - return tc_action_net_init(&tn->tn, &act_ct_ops); + return tc_action_net_init(net, &tn->tn, &act_ct_ops); } static void __net_exit ct_exit_net(struct list_head *net_list) diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 06ef74b74911..0dbcfd1dca7b 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -376,7 +376,7 @@ static __net_init int ctinfo_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, ctinfo_net_id); - return tc_action_net_init(tn, &act_ctinfo_ops); + return tc_action_net_init(net, 
tn, &act_ctinfo_ops); } static void __net_exit ctinfo_exit_net(struct list_head *net_list) diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 8f0140c6ca58..324f1d1f6d47 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -278,7 +278,7 @@ static __net_init int gact_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, gact_net_id); - return tc_action_net_init(tn, &act_gact_ops); + return tc_action_net_init(net, tn, &act_gact_ops); } static void __net_exit gact_exit_net(struct list_head *net_list) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 92ee853d43e6..3a31e241c647 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -890,7 +890,7 @@ static __net_init int ife_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, ife_net_id); - return tc_action_net_init(tn, &act_ife_ops); + return tc_action_net_init(net, tn, &act_ife_ops); } static void __net_exit ife_exit_net(struct list_head *net_list) diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index ce2c30a591d2..214a03d405cf 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -61,12 +61,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t, return 0; } -static void ipt_destroy_target(struct xt_entry_target *t) +static void ipt_destroy_target(struct xt_entry_target *t, struct net *net) { struct xt_tgdtor_param par = { .target = t->u.kernel.target, .targinfo = t->data, .family = NFPROTO_IPV4, + .net = net, }; if (par.target->destroy != NULL) par.target->destroy(&par); @@ -78,7 +79,7 @@ static void tcf_ipt_release(struct tc_action *a) struct tcf_ipt *ipt = to_ipt(a); if (ipt->tcfi_t) { - ipt_destroy_target(ipt->tcfi_t); + ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net); kfree(ipt->tcfi_t); } kfree(ipt->tcfi_tname); @@ -180,7 +181,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, spin_lock_bh(&ipt->tcf_lock); if (ret != ACT_P_CREATED) { - ipt_destroy_target(ipt->tcfi_t); + ipt_destroy_target(ipt->tcfi_t, net); kfree(ipt->tcfi_tname); kfree(ipt->tcfi_t); } @@ -350,7 +351,7 @@ static __net_init int ipt_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, ipt_net_id); - return tc_action_net_init(tn, &act_ipt_ops); + return tc_action_net_init(net, tn, &act_ipt_ops); } static void __net_exit ipt_exit_net(struct list_head *net_list) @@ -399,7 +400,7 @@ static __net_init int xt_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, xt_net_id); - return tc_action_net_init(tn, &act_xt_ops); + return tc_action_net_init(net, tn, &act_xt_ops); } static void __net_exit xt_exit_net(struct list_head *net_list) diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index be3f88dfc37e..9d1bf508075a 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -453,7 +453,7 @@ static __net_init int mirred_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, mirred_net_id); - return tc_action_net_init(tn, &act_mirred_ops); + return tc_action_net_init(net, tn, &act_mirred_ops); } static void __net_exit mirred_exit_net(struct list_head *net_list) diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index 0f299e3b618c..e168df0e008a 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -375,7 +375,7 @@ static __net_init int mpls_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, mpls_net_id); - return tc_action_net_init(tn, &act_mpls_ops); + return tc_action_net_init(net, tn, &act_mpls_ops); } static void __net_exit 
mpls_exit_net(struct list_head *net_list) diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 7b858c11b1b5..ea4c5359e7df 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -327,7 +327,7 @@ static __net_init int nat_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, nat_net_id); - return tc_action_net_init(tn, &act_nat_ops); + return tc_action_net_init(net, tn, &act_nat_ops); } static void __net_exit nat_exit_net(struct list_head *net_list) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 17360c6faeaa..cdfaa79382a2 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -498,7 +498,7 @@ static __net_init int pedit_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, pedit_net_id); - return tc_action_net_init(tn, &act_pedit_ops); + return tc_action_net_init(net, tn, &act_pedit_ops); } static void __net_exit pedit_exit_net(struct list_head *net_list) diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 49cec3e64a4d..6315e0f8d26e 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -371,7 +371,7 @@ static __net_init int police_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, police_net_id); - return tc_action_net_init(tn, &act_police_ops); + return tc_action_net_init(net, tn, &act_police_ops); } static void __net_exit police_exit_net(struct list_head *net_list) diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 595308d60133..7eff363f9f03 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -265,7 +265,7 @@ static __net_init int sample_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, sample_net_id); - return tc_action_net_init(tn, &act_sample_ops); + return tc_action_net_init(net, tn, &act_sample_ops); } static void __net_exit sample_exit_net(struct list_head *net_list) diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 33aefa25b545..6120e56117ca 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -232,7 +232,7 @@ static __net_init int simp_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, simp_net_id); - return tc_action_net_init(tn, &act_simp_ops); + return tc_action_net_init(net, tn, &act_simp_ops); } static void __net_exit simp_exit_net(struct list_head *net_list) diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 37dced00b63d..6a8d3337c577 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -336,7 +336,7 @@ static __net_init int skbedit_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, skbedit_net_id); - return tc_action_net_init(tn, &act_skbedit_ops); + return tc_action_net_init(net, tn, &act_skbedit_ops); } static void __net_exit skbedit_exit_net(struct list_head *net_list) diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 7da3518e18ef..888437f97ba6 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -287,7 +287,7 @@ static __net_init int skbmod_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, skbmod_net_id); - return tc_action_net_init(tn, &act_skbmod_ops); + return tc_action_net_init(net, tn, &act_skbmod_ops); } static void __net_exit skbmod_exit_net(struct list_head *net_list) diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 6d0debdc9b97..2f83a79f76aa 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -600,7 +600,7 @@ static __net_init int tunnel_key_init_net(struct net *net) { 
struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); - return tc_action_net_init(tn, &act_tunnel_key_ops); + return tc_action_net_init(net, tn, &act_tunnel_key_ops); } static void __net_exit tunnel_key_exit_net(struct list_head *net_list) diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index a3c9eea1ee8a..287a30bf8930 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -334,7 +334,7 @@ static __net_init int vlan_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, vlan_net_id); - return tc_action_net_init(tn, &act_vlan_ops); + return tc_action_net_init(net, tn, &act_vlan_ops); } static void __net_exit vlan_exit_net(struct list_head *net_list) From 94acaeb50ced653bfe2c4d8037c70b107af14124 Mon Sep 17 00:00:00 2001 From: Marco Hartmann Date: Wed, 21 Aug 2019 11:00:46 +0000 Subject: [PATCH 16/58] Add genphy_c45_config_aneg() function to phy-c45.c Commit 34786005eca3 ("net: phy: prevent PHYs w/o Clause 22 regs from calling genphy_config_aneg") introduced a check that aborts phy_config_aneg() if the phy is a C45 phy. This causes phy_state_machine() to call phy_error() so that the phy ends up in PHY_HALTED state. Instead of returning -EOPNOTSUPP, call genphy_c45_config_aneg() (analogous to the C22 case) so that the state machine can run correctly. genphy_c45_config_aneg() closely resembles mv3310_config_aneg() in drivers/net/phy/marvell10g.c, excluding vendor specific configurations for 1000BaseT. Fixes: 22b56e827093 ("net: phy: replace genphy_10g_driver with genphy_c45_driver") Signed-off-by: Marco Hartmann Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy-c45.c | 26 ++++++++++++++++++++++++++ drivers/net/phy/phy.c | 2 +- include/linux/phy.h | 1 + 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 58bb25e4af10..7935593debb1 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -523,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_status); +/** + * genphy_c45_config_aneg - restart auto-negotiation or forced setup + * @phydev: target phy_device struct + * + * Description: If auto-negotiation is enabled, we configure the + * advertising, and then restart auto-negotiation. If it is not + * enabled, then we force a configuration. 
+ */ +int genphy_c45_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + int ret; + + if (phydev->autoneg == AUTONEG_DISABLE) + return genphy_c45_pma_setup_forced(phydev); + + ret = genphy_c45_an_config_aneg(phydev); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + return genphy_c45_check_and_restart_aneg(phydev, changed); +} +EXPORT_SYMBOL_GPL(genphy_c45_config_aneg); + /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index ef7aa738e0dc..6b0f89369b46 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -507,7 +507,7 @@ static int phy_config_aneg(struct phy_device *phydev) * allowed to call genphy_config_aneg() */ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) - return -EOPNOTSUPP; + return genphy_c45_config_aneg(phydev); return genphy_config_aneg(phydev); } diff --git a/include/linux/phy.h b/include/linux/phy.h index 462b90b73f93..2fb9c8ffaf10 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1107,6 +1107,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); +int genphy_c45_config_aneg(struct phy_device *phydev); /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); From bcccb0a535bb99616e4b992568371efab1ab14e8 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 25 Aug 2019 21:32:12 +0300 Subject: [PATCH 17/58] net: dsa: tag_8021q: Future-proof the reserved fields in the custom VID After witnessing the discussion in https://lkml.org/lkml/2019/8/14/151 w.r.t. ioctl extensibility, it became clear that such an issue might prevent that the 3 RSV bits inside the DSA 802.1Q tag might also suffer the same fate and be useless for further extension. So clearly specify that the reserved bits should currently be transmitted as zero and ignored on receive. The DSA tagger already does this (and has always did), and is the only known user so far (no Wireshark dissection plugin, etc). So there should be no incompatibility to speak of. Fixes: 0471dd429cea ("net: dsa: tag_8021q: Create a stable binary format") Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/tag_8021q.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 6ebbd799c4eb..67a1bc635a7b 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -28,6 +28,7 @@ * * RSV - VID[9]: * To be used for further expansion of SWITCH_ID or for other purposes. + * Must be transmitted as zero and ignored on receive. * * SWITCH_ID - VID[8:6]: * Index of switch within DSA tree. Must be between 0 and @@ -35,6 +36,7 @@ * * RSV - VID[5:4]: * To be used for further expansion of PORT or for other purposes. + * Must be transmitted as zero and ignored on receive. * * PORT - VID[3:0]: * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1. 
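For illustration, a small standalone C sketch of how the VID fields documented above can be packed and unpacked (the macro names and helpers are invented for this example; only the field positions quoted in the comment are used): the reserved bits VID[9] and VID[5:4] are transmitted as zero and simply masked off on receive, so a future sender that repurposes them does not confuse an older receiver.

    #include <stdint.h>
    #include <stdio.h>

    #define VID_SWITCH_ID_SHIFT     6
    #define VID_SWITCH_ID_MASK      (0x7 << VID_SWITCH_ID_SHIFT)   /* VID[8:6] */
    #define VID_PORT_MASK           0xf                             /* VID[3:0] */

    static uint16_t vid_pack(unsigned int switch_id, unsigned int port)
    {
            /* Reserved bits VID[9] and VID[5:4] are left at zero on transmit. */
            return ((switch_id << VID_SWITCH_ID_SHIFT) & VID_SWITCH_ID_MASK) |
                   (port & VID_PORT_MASK);
    }

    static void vid_unpack(uint16_t vid, unsigned int *switch_id,
                           unsigned int *port)
    {
            /* Masking means anything a future sender puts in the reserved
             * bits is ignored on receive.
             */
            *switch_id = (vid & VID_SWITCH_ID_MASK) >> VID_SWITCH_ID_SHIFT;
            *port = vid & VID_PORT_MASK;
    }

    int main(void)
    {
            unsigned int sw, port;
            uint16_t vid = vid_pack(3, 5);

            /* Pretend a future sender set reserved bit VID[9]; we still decode. */
            vid_unpack(vid | (1 << 9), &sw, &port);
            printf("vid=0x%03x switch=%u port=%u\n", (unsigned int)vid, sw, port);
            return 0;
    }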
From 2c1644cf6d46a8267d79ed95cb9b563839346562 Mon Sep 17 00:00:00 2001 From: Feng Sun Date: Mon, 26 Aug 2019 14:46:04 +0800 Subject: [PATCH 18/58] net: fix skb use after free in netpoll After commit baeababb5b85d5c4e6c917efe2a1504179438d3b ("tun: return NET_XMIT_DROP for dropped packets"), when tun_net_xmit drop packets, it will free skb and return NET_XMIT_DROP, netpoll_send_skb_on_dev will run into following use after free cases: 1. retry netpoll_start_xmit with freed skb; 2. queue freed skb in npinfo->txq. queue_process will also run into use after free case. hit netpoll_send_skb_on_dev first case with following kernel log: [ 117.864773] kernel BUG at mm/slub.c:306! [ 117.864773] invalid opcode: 0000 [#1] SMP PTI [ 117.864774] CPU: 3 PID: 2627 Comm: loop_printmsg Kdump: loaded Tainted: P OE 5.3.0-050300rc5-generic #201908182231 [ 117.864775] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 [ 117.864775] RIP: 0010:kmem_cache_free+0x28d/0x2b0 [ 117.864781] Call Trace: [ 117.864781] ? tun_net_xmit+0x21c/0x460 [ 117.864781] kfree_skbmem+0x4e/0x60 [ 117.864782] kfree_skb+0x3a/0xa0 [ 117.864782] tun_net_xmit+0x21c/0x460 [ 117.864782] netpoll_start_xmit+0x11d/0x1b0 [ 117.864788] netpoll_send_skb_on_dev+0x1b8/0x200 [ 117.864789] __br_forward+0x1b9/0x1e0 [bridge] [ 117.864789] ? skb_clone+0x53/0xd0 [ 117.864790] ? __skb_clone+0x2e/0x120 [ 117.864790] deliver_clone+0x37/0x50 [bridge] [ 117.864790] maybe_deliver+0x89/0xc0 [bridge] [ 117.864791] br_flood+0x6c/0x130 [bridge] [ 117.864791] br_dev_xmit+0x315/0x3c0 [bridge] [ 117.864792] netpoll_start_xmit+0x11d/0x1b0 [ 117.864792] netpoll_send_skb_on_dev+0x1b8/0x200 [ 117.864792] netpoll_send_udp+0x2c6/0x3e8 [ 117.864793] write_msg+0xd9/0xf0 [netconsole] [ 117.864793] console_unlock+0x386/0x4e0 [ 117.864793] vprintk_emit+0x17e/0x280 [ 117.864794] vprintk_default+0x29/0x50 [ 117.864794] vprintk_func+0x4c/0xbc [ 117.864794] printk+0x58/0x6f [ 117.864795] loop_fun+0x24/0x41 [printmsg_loop] [ 117.864795] kthread+0x104/0x140 [ 117.864795] ? 0xffffffffc05b1000 [ 117.864796] ? kthread_park+0x80/0x80 [ 117.864796] ret_from_fork+0x35/0x40 Signed-off-by: Feng Sun Signed-off-by: Xiaojun Zhao Signed-off-by: David S. 
Miller --- net/core/netpoll.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 2cf27da1baeb..849380a622ef 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work) txq = netdev_get_tx_queue(dev, q_index); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (netif_xmit_frozen_or_stopped(txq) || - netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { + !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) { skb_queue_head(&npinfo->txq, skb); HARD_TX_UNLOCK(dev, txq); local_irq_restore(flags); @@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, HARD_TX_UNLOCK(dev, txq); - if (status == NETDEV_TX_OK) + if (dev_xmit_complete(status)) break; } @@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, } - if (status != NETDEV_TX_OK) { + if (!dev_xmit_complete(status)) { skb_queue_tail(&npinfo->txq, skb); schedule_delayed_work(&npinfo->tx_work,0); } From 7d0a06586b2686ba80c4a2da5f91cb10ffbea736 Mon Sep 17 00:00:00 2001 From: Ka-Cheong Poon Date: Mon, 26 Aug 2019 02:39:12 -0700 Subject: [PATCH 19/58] net/rds: Fix info leak in rds6_inc_info_copy() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The rds6_inc_info_copy() function has a couple struct members which are leaking stack information. The ->tos field should hold actual information and the ->flags field needs to be zeroed out. Fixes: 3eb450367d08 ("rds: add type of service(tos) infrastructure") Fixes: b7ff8b1036f0 ("rds: Extend RDS API for IPv6 support") Reported-by: 黄ID蝴蝶 Signed-off-by: Dan Carpenter Signed-off-by: Ka-Cheong Poon Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/recv.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/rds/recv.c b/net/rds/recv.c index 853de4876088..a42ba7fa06d5 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -811,6 +811,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc, minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence); minfo6.len = be32_to_cpu(inc->i_hdr.h_len); + minfo6.tos = inc->i_conn->c_tos; if (flip) { minfo6.laddr = *daddr; @@ -824,6 +825,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc, minfo6.fport = inc->i_hdr.h_dport; } + minfo6.flags = 0; + rds_info_copy(iter, &minfo6, sizeof(minfo6)); } #endif From fdfc5c8594c24c5df883583ebd286321a80e0a67 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 26 Aug 2019 09:19:15 -0700 Subject: [PATCH 20/58] tcp: remove empty skb from write queue in error cases Vladimir Rutsky reported stuck TCP sessions after memory pressure events. Edge Trigger epoll() user would never receive an EPOLLOUT notification allowing them to retry a sendmsg(). Jason tested the case of sk_stream_alloc_skb() returning NULL, but there are other paths that could lead both sendmsg() and sendpage() to return -1 (EAGAIN), with an empty skb queued on the write queue. This patch makes sure we remove this empty skb so that Jason code can detect that the queue is empty, and call sk->sk_write_space(sk) accordingly. 
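For context, a hedged userspace sketch of the writer pattern that hung in the report (the function name is invented; it assumes 'fd' is a connected non-blocking TCP socket already registered on 'epfd' with EPOLLOUT | EPOLLET, and error handling is trimmed): after send() returns EAGAIN the application parks in epoll_wait() until EPOLLOUT arrives, which is exactly the wakeup that a stale empty skb on the write queue was suppressing.

    #include <errno.h>
    #include <sys/epoll.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    static ssize_t send_all_edge_triggered(int epfd, int fd,
                                           const char *buf, size_t len)
    {
            size_t off = 0;

            while (off < len) {
                    ssize_t n = send(fd, buf + off, len - off, MSG_NOSIGNAL);

                    if (n > 0) {
                            off += n;
                            continue;
                    }
                    if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
                            struct epoll_event ev;

                            /* Sleep until the kernel signals writability.
                             * If EPOLLOUT is never raised, this loop is the
                             * stuck session described above.
                             */
                            if (epoll_wait(epfd, &ev, 1, -1) < 0)
                                    return -1;
                            continue;
                    }
                    return -1;      /* hard error */
            }
            return off;
    }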
Fixes: ce5ec440994b ("tcp: ensure epoll edge trigger wakeup when write queue is empty") Signed-off-by: Eric Dumazet Cc: Jason Baron Reported-by: Vladimir Rutsky Cc: Soheil Hassas Yeganeh Cc: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Acked-by: Neal Cardwell Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 77b485d60b9d..61082065b26a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -935,6 +935,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) return mss_now; } +/* In some cases, both sendpage() and sendmsg() could have added + * an skb to the write queue, but failed adding payload on it. + * We need to remove it to consume less memory, but more + * importantly be able to generate EPOLLOUT for Edge Trigger epoll() + * users. + */ +static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb) +{ + if (skb && !skb->len) { + tcp_unlink_write_queue(skb, sk); + if (tcp_write_queue_empty(sk)) + tcp_chrono_stop(sk, TCP_CHRONO_BUSY); + sk_wmem_free_skb(sk, skb); + } +} + ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, size_t size, int flags) { @@ -1064,6 +1080,7 @@ out: return copied; do_error: + tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk)); if (copied) goto out; out_err: @@ -1388,18 +1405,11 @@ out_nopush: sock_zerocopy_put(uarg); return copied + copied_syn; -do_fault: - if (!skb->len) { - tcp_unlink_write_queue(skb, sk); - /* It is the one place in all of TCP, except connection - * reset, where we can be unlinking the send_head. - */ - if (tcp_write_queue_empty(sk)) - tcp_chrono_stop(sk, TCP_CHRONO_BUSY); - sk_wmem_free_skb(sk, skb); - } - do_error: + skb = tcp_write_queue_tail(sk); +do_fault: + tcp_remove_empty_skb(sk, skb); + if (copied + copied_syn) goto out; out_err: From abecec415dc2b5a5d11fe1b17963564b13f2ff20 Mon Sep 17 00:00:00 2001 From: Yash Shah Date: Tue, 27 Aug 2019 10:36:03 +0530 Subject: [PATCH 21/58] macb: bindings doc: update sifive fu540-c000 binding As per the discussion with Nicolas Ferre[0], rename the compatible property to a more appropriate and specific string. [0] https://lore.kernel.org/netdev/CAJ2_jOFEVZQat0Yprg4hem4jRrqkB72FKSeQj4p8P5KA-+rgww@mail.gmail.com/ Signed-off-by: Yash Shah Acked-by: Nicolas Ferre Reviewed-by: Paul Walmsley Reviewed-by: Rob Herring Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/macb.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index 63c73fafe26d..0b61a90f1592 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -15,10 +15,10 @@ Required properties: Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC. - Use "sifive,fu540-macb" for SiFive FU540-C000 SoC. + Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC. Or the generic form: "cdns,emac". - reg: Address and length of the register set for the device - For "sifive,fu540-macb", second range is required to specify the + For "sifive,fu540-c000-gem", second range is required to specify the address and length of the registers for GEMGXL Management block. - interrupts: Should contain macb interrupt - phy-mode: See ethernet.txt file in the same directory. 
From 6342ea886771d65d718d07049c75b25b27e60cfd Mon Sep 17 00:00:00 2001 From: Yash Shah Date: Tue, 27 Aug 2019 10:36:04 +0530 Subject: [PATCH 22/58] macb: Update compatibility string for SiFive FU540-C000 Update the compatibility string for SiFive FU540-C000 as per the new string updated in the binding doc. Reference: https://lore.kernel.org/netdev/CAJ2_jOFEVZQat0Yprg4hem4jRrqkB72FKSeQj4p8P5KA-+rgww@mail.gmail.com/ Signed-off-by: Yash Shah Acked-by: Nicolas Ferre Reviewed-by: Paul Walmsley Tested-by: Paul Walmsley Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 5ca17e62dc3e..35b59b5edf0f 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4154,7 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,emac", .data = &emac_config }, { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, { .compatible = "cdns,zynq-gem", .data = &zynq_config }, - { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config }, + { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, macb_dt_ids); From 04d37cf46a773910f75fefaa9f9488f42bfe1fe2 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Tue, 27 Aug 2019 12:29:09 +0200 Subject: [PATCH 23/58] net/sched: pfifo_fast: fix wrong dereference when qdisc is reset Now that 'TCQ_F_CPUSTATS' bit can be cleared, depending on the value of 'TCQ_F_NOLOCK' bit in the parent qdisc, we need to be sure that per-cpu counters are present when 'reset()' is called for pfifo_fast qdiscs. Otherwise, the following script: # tc q a dev lo handle 1: root htb default 100 # tc c a dev lo parent 1: classid 1:100 htb \ > rate 95Mbit ceil 100Mbit burst 64k [...] # tc f a dev lo parent 1: protocol arp basic classid 1:100 [...] # tc q a dev lo parent 1:100 handle 100: pfifo_fast [...] # tc q d dev lo root can generate the following splat: Unable to handle kernel paging request at virtual address dfff2c01bd148000 Mem abort info: ESR = 0x96000004 Exception class = DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 Data abort info: ISV = 0, ISS = 0x00000004 CM = 0, WnR = 0 [dfff2c01bd148000] address between user and kernel address ranges Internal error: Oops: 96000004 [#1] SMP [...] 
pstate: 80000005 (Nzcv daif -PAN -UAO) pc : pfifo_fast_reset+0x280/0x4d8 lr : pfifo_fast_reset+0x21c/0x4d8 sp : ffff800d09676fa0 x29: ffff800d09676fa0 x28: ffff200012ee22e4 x27: dfff200000000000 x26: 0000000000000000 x25: ffff800ca0799958 x24: ffff1001940f332b x23: 0000000000000007 x22: ffff200012ee1ab8 x21: 0000600de8a40000 x20: 0000000000000000 x19: ffff800ca0799900 x18: 0000000000000000 x17: 0000000000000002 x16: 0000000000000000 x15: 0000000000000000 x14: 0000000000000000 x13: 0000000000000000 x12: ffff1001b922e6e2 x11: 1ffff001b922e6e1 x10: 0000000000000000 x9 : 1ffff001b922e6e1 x8 : dfff200000000000 x7 : 0000000000000000 x6 : 0000000000000000 x5 : 1fffe400025dc45c x4 : 1fffe400025dc357 x3 : 00000c01bd148000 x2 : 0000600de8a40000 x1 : 0000000000000007 x0 : 0000600de8a40004 Call trace: pfifo_fast_reset+0x280/0x4d8 qdisc_reset+0x6c/0x370 htb_reset+0x150/0x3b8 [sch_htb] qdisc_reset+0x6c/0x370 dev_deactivate_queue.constprop.5+0xe0/0x1a8 dev_deactivate_many+0xd8/0x908 dev_deactivate+0xe4/0x190 qdisc_graft+0x88c/0xbd0 tc_get_qdisc+0x418/0x8a8 rtnetlink_rcv_msg+0x3a8/0xa78 netlink_rcv_skb+0x18c/0x328 rtnetlink_rcv+0x28/0x38 netlink_unicast+0x3c4/0x538 netlink_sendmsg+0x538/0x9a0 sock_sendmsg+0xac/0xf8 ___sys_sendmsg+0x53c/0x658 __sys_sendmsg+0xc8/0x140 __arm64_sys_sendmsg+0x74/0xa8 el0_svc_handler+0x164/0x468 el0_svc+0x10/0x14 Code: 910012a0 92400801 d343fc03 11000c21 (38fb6863) Fix this by testing the value of 'TCQ_F_CPUSTATS' bit in 'qdisc->flags', before dereferencing 'qdisc->cpu_qstats'. Changes since v1: - coding style improvements, thanks to Stefano Brivio Fixes: 8a53e616de29 ("net: sched: when clearing NOLOCK, clear TCQ_F_CPUSTATS, too") CC: Paolo Abeni Reported-by: Li Shuang Signed-off-by: Davide Caratti Acked-by: Paolo Abeni Reviewed-by: Stefano Brivio Signed-off-by: David S. Miller --- net/sched/sch_generic.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 11c03cf4aa74..099797e5409d 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -688,11 +688,14 @@ static void pfifo_fast_reset(struct Qdisc *qdisc) kfree_skb(skb); } - for_each_possible_cpu(i) { - struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); + if (qdisc_is_percpu_stats(qdisc)) { + for_each_possible_cpu(i) { + struct gnet_stats_queue *q; - q->backlog = 0; - q->qlen = 0; + q = per_cpu_ptr(qdisc->cpu_qstats, i); + q->backlog = 0; + q->qlen = 0; + } } } From a84d016479896b5526a2cc54784e6ffc41c9d6f6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 27 Aug 2019 03:33:12 -0700 Subject: [PATCH 24/58] mld: fix memory leak in mld_del_delrec() Similar to the fix done for IPv4 in commit e5b1c6c6277d ("igmp: fix memory leak in igmpv3_del_delrec()"), we need to make sure mca_tomb and mca_sources are not blindly overwritten. Using swap() then a call to ip6_mc_clear_src() will take care of the missing free. BUG: memory leak unreferenced object 0xffff888117d9db00 (size 64): comm "syz-executor247", pid 6918, jiffies 4294943989 (age 25.350s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 fe 88 00 00 00 00 00 00 ................ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 
backtrace: [<000000005b463030>] kmemleak_alloc_recursive include/linux/kmemleak.h:43 [inline] [<000000005b463030>] slab_post_alloc_hook mm/slab.h:522 [inline] [<000000005b463030>] slab_alloc mm/slab.c:3319 [inline] [<000000005b463030>] kmem_cache_alloc_trace+0x145/0x2c0 mm/slab.c:3548 [<00000000939cbf94>] kmalloc include/linux/slab.h:552 [inline] [<00000000939cbf94>] kzalloc include/linux/slab.h:748 [inline] [<00000000939cbf94>] ip6_mc_add1_src net/ipv6/mcast.c:2236 [inline] [<00000000939cbf94>] ip6_mc_add_src+0x31f/0x420 net/ipv6/mcast.c:2356 [<00000000d8972221>] ip6_mc_source+0x4a8/0x600 net/ipv6/mcast.c:449 [<000000002b203d0d>] do_ipv6_setsockopt.isra.0+0x1b92/0x1dd0 net/ipv6/ipv6_sockglue.c:748 [<000000001f1e2d54>] ipv6_setsockopt+0x89/0xd0 net/ipv6/ipv6_sockglue.c:944 [<00000000c8f7bdf9>] udpv6_setsockopt+0x4e/0x90 net/ipv6/udp.c:1558 [<000000005a9a0c5e>] sock_common_setsockopt+0x38/0x50 net/core/sock.c:3139 [<00000000910b37b2>] __sys_setsockopt+0x10f/0x220 net/socket.c:2084 [<00000000e9108023>] __do_sys_setsockopt net/socket.c:2100 [inline] [<00000000e9108023>] __se_sys_setsockopt net/socket.c:2097 [inline] [<00000000e9108023>] __x64_sys_setsockopt+0x26/0x30 net/socket.c:2097 [<00000000f4818160>] do_syscall_64+0x76/0x1a0 arch/x86/entry/common.c:296 [<000000008d367e8f>] entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: 1666d49e1d41 ("mld: do not remove mld souce list info when set link down") Fixes: 9c8bb163ae78 ("igmp, mld: Fix memory leak in igmpv3/mld_del_delrec()") Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: David S. Miller --- net/ipv6/mcast.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 7f3f13c37916..eaa4c2cc2fbb 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -787,14 +787,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) if (pmc) { im->idev = pmc->idev; if (im->mca_sfmode == MCAST_INCLUDE) { - im->mca_tomb = pmc->mca_tomb; - im->mca_sources = pmc->mca_sources; + swap(im->mca_tomb, pmc->mca_tomb); + swap(im->mca_sources, pmc->mca_sources); for (psf = im->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = idev->mc_qrv; } else { im->mca_crcount = idev->mc_qrv; } in6_dev_put(pmc->idev); + ip6_mc_clear_src(pmc); kfree(pmc); } spin_unlock_bh(&im->mca_lock); From ad06a566e118e57b852cab5933dbbbaebb141de3 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Tue, 27 Aug 2019 07:58:09 -0700 Subject: [PATCH 25/58] openvswitch: Properly set L4 keys on "later" IP fragments When IP fragments are reassembled before being sent to conntrack, the key from the last fragment is used. Unless there are reordering issues, the last fragment received will not contain the L4 ports, so the key for the reassembled datagram won't contain them. This patch updates the key once we have a reassembled datagram. The handle_fragments() function works on L3 headers so we pull the L3/L4 flow key update code from key_extract into a new function 'key_extract_l3l4'. Then we add a another new function ovs_flow_key_update_l3l4() and export it so that it is accessible by handle_fragments() for conntrack packet reassembly. Co-authored-by: Justin Pettit Signed-off-by: Greg Rose Acked-by: Pravin B Shelar Signed-off-by: David S. 
Miller --- net/openvswitch/conntrack.c | 5 ++ net/openvswitch/flow.c | 155 +++++++++++++++++++++--------------- net/openvswitch/flow.h | 1 + 3 files changed, 95 insertions(+), 66 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index d8da6477d6be..05249eb45082 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -525,6 +525,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, return -EPFNOSUPPORT; } + /* The key extracted from the fragment that completed this datagram + * likely didn't have an L4 header, so regenerate it. + */ + ovs_flow_key_update_l3l4(skb, key); + key->ip.frag = OVS_FRAG_TYPE_NONE; skb_clear_hash(skb); skb->ignore_df = 1; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index bc89e16e0505..005f7622edac 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -523,78 +523,15 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key) } /** - * key_extract - extracts a flow key from an Ethernet frame. + * key_extract_l3l4 - extracts L3/L4 header information. * @skb: sk_buff that contains the frame, with skb->data pointing to the - * Ethernet header + * L3 header * @key: output flow key * - * The caller must ensure that skb->len >= ETH_HLEN. - * - * Returns 0 if successful, otherwise a negative errno value. - * - * Initializes @skb header fields as follows: - * - * - skb->mac_header: the L2 header. - * - * - skb->network_header: just past the L2 header, or just past the - * VLAN header, to the first byte of the L2 payload. - * - * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6 - * on output, then just past the IP header, if one is present and - * of a correct length, otherwise the same as skb->network_header. - * For other key->eth.type values it is left untouched. - * - * - skb->protocol: the type of the data starting at skb->network_header. - * Equals to key->eth.type. */ -static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) +static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) { int error; - struct ethhdr *eth; - - /* Flags are always used as part of stats */ - key->tp.flags = 0; - - skb_reset_mac_header(skb); - - /* Link layer. */ - clear_vlan(key); - if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { - if (unlikely(eth_type_vlan(skb->protocol))) - return -EINVAL; - - skb_reset_network_header(skb); - key->eth.type = skb->protocol; - } else { - eth = eth_hdr(skb); - ether_addr_copy(key->eth.src, eth->h_source); - ether_addr_copy(key->eth.dst, eth->h_dest); - - __skb_pull(skb, 2 * ETH_ALEN); - /* We are going to push all headers that we pull, so no need to - * update skb->csum here. - */ - - if (unlikely(parse_vlan(skb, key))) - return -ENOMEM; - - key->eth.type = parse_ethertype(skb); - if (unlikely(key->eth.type == htons(0))) - return -ENOMEM; - - /* Multiple tagged packets need to retain TPID to satisfy - * skb_vlan_pop(), which will later shift the ethertype into - * skb->protocol. - */ - if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK)) - skb->protocol = key->eth.cvlan.tpid; - else - skb->protocol = key->eth.type; - - skb_reset_network_header(skb); - __skb_push(skb, skb->data - skb_mac_header(skb)); - } - skb_reset_mac_len(skb); /* Network layer. */ if (key->eth.type == htons(ETH_P_IP)) { @@ -788,6 +725,92 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) return 0; } +/** + * key_extract - extracts a flow key from an Ethernet frame. 
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the + * Ethernet header + * @key: output flow key + * + * The caller must ensure that skb->len >= ETH_HLEN. + * + * Returns 0 if successful, otherwise a negative errno value. + * + * Initializes @skb header fields as follows: + * + * - skb->mac_header: the L2 header. + * + * - skb->network_header: just past the L2 header, or just past the + * VLAN header, to the first byte of the L2 payload. + * + * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6 + * on output, then just past the IP header, if one is present and + * of a correct length, otherwise the same as skb->network_header. + * For other key->eth.type values it is left untouched. + * + * - skb->protocol: the type of the data starting at skb->network_header. + * Equals to key->eth.type. + */ +static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) +{ + struct ethhdr *eth; + + /* Flags are always used as part of stats */ + key->tp.flags = 0; + + skb_reset_mac_header(skb); + + /* Link layer. */ + clear_vlan(key); + if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { + if (unlikely(eth_type_vlan(skb->protocol))) + return -EINVAL; + + skb_reset_network_header(skb); + key->eth.type = skb->protocol; + } else { + eth = eth_hdr(skb); + ether_addr_copy(key->eth.src, eth->h_source); + ether_addr_copy(key->eth.dst, eth->h_dest); + + __skb_pull(skb, 2 * ETH_ALEN); + /* We are going to push all headers that we pull, so no need to + * update skb->csum here. + */ + + if (unlikely(parse_vlan(skb, key))) + return -ENOMEM; + + key->eth.type = parse_ethertype(skb); + if (unlikely(key->eth.type == htons(0))) + return -ENOMEM; + + /* Multiple tagged packets need to retain TPID to satisfy + * skb_vlan_pop(), which will later shift the ethertype into + * skb->protocol. + */ + if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK)) + skb->protocol = key->eth.cvlan.tpid; + else + skb->protocol = key->eth.type; + + skb_reset_network_header(skb); + __skb_push(skb, skb->data - skb_mac_header(skb)); + } + + skb_reset_mac_len(skb); + + /* Fill out L3/L4 key info, if any */ + return key_extract_l3l4(skb, key); +} + +/* In the case of conntrack fragment handling it expects L3 headers, + * add a helper. + */ +int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key) +{ + return key_extract_l3l4(skb, key); +} + int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) { int res; diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index a5506e2d4b7a..b830d5ff7af4 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -270,6 +270,7 @@ void ovs_flow_stats_clear(struct sw_flow *); u64 ovs_flow_used_time(unsigned long flow_jiffies); int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key); +int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key); int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, struct sk_buff *skb, struct sw_flow_key *key); From 0754b4e8cdf3eec6e4122e79af26ed9bab20f8f8 Mon Sep 17 00:00:00 2001 From: Justin Pettit Date: Tue, 27 Aug 2019 07:58:10 -0700 Subject: [PATCH 26/58] openvswitch: Clear the L4 portion of the key for "later" fragments. Only the first fragment in a datagram contains the L4 headers. When the Open vSwitch module parses a packet, it always sets the IP protocol field in the key, but can only set the L4 fields on the first fragment. 
The original behavior would not clear the L4 portion of the key, so garbage values would be sent in the key for "later" fragments. This patch clears the L4 fields in that circumstance to prevent sending those garbage values as part of the upcall. Signed-off-by: Justin Pettit Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- net/openvswitch/flow.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 005f7622edac..9d81d2c7bf82 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -560,6 +560,7 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) offset = nh->frag_off & htons(IP_OFFSET); if (offset) { key->ip.frag = OVS_FRAG_TYPE_LATER; + memset(&key->tp, 0, sizeof(key->tp)); return 0; } if (nh->frag_off & htons(IP_MF) || @@ -677,8 +678,10 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) return error; } - if (key->ip.frag == OVS_FRAG_TYPE_LATER) + if (key->ip.frag == OVS_FRAG_TYPE_LATER) { + memset(&key->tp, 0, sizeof(key->tp)); return 0; + } if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key->ip.frag = OVS_FRAG_TYPE_FIRST; From 36f1031c51a2538e5558fb44c6d6b88f98d3c0f2 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Tue, 27 Aug 2019 11:10:04 -0500 Subject: [PATCH 27/58] ibmvnic: Do not process reset during or after device removal Currently, the ibmvnic driver will not schedule device resets if the device is being removed, but does not check the device state before the reset is actually processed. This leads to a race where a reset is scheduled with a valid device state but is processed after the driver has been removed, resulting in an oops. Fix this by checking the device state before processing a queued reset event. Reported-by: Abdul Haleem Tested-by: Abdul Haleem Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index cebd20f3128d..fa4bb940665c 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1983,6 +1983,10 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); while (rwi) { + if (adapter->state == VNIC_REMOVING || + adapter->state == VNIC_REMOVED) + goto out; + if (adapter->force_reset_recovery) { adapter->force_reset_recovery = false; rc = do_hard_reset(adapter, rwi, reset_state); @@ -2007,7 +2011,7 @@ static void __ibmvnic_reset(struct work_struct *work) netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); } - +out: adapter->resetting = false; if (we_lock_rtnl) rtnl_unlock(); From dbf47a2a094edf58983265e323ca4bdcdb58b5ee Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Tue, 27 Aug 2019 21:49:38 +0300 Subject: [PATCH 28/58] net: sched: act_sample: fix psample group handling on overwrite Action sample doesn't properly handle psample_group pointer in overwrite case. Following issues need to be fixed: - In tcf_sample_init() function RCU_INIT_POINTER() is used to set s->psample_group, even though we neither setting the pointer to NULL, nor preventing concurrent readers from accessing the pointer in some way. Use rcu_swap_protected() instead to safely reset the pointer. - Old value of s->psample_group is not released or deallocated in any way, which results resource leak. Use psample_group_put() on non-NULL value obtained with rcu_swap_protected(). 
- The function psample_group_put() that released reference to struct psample_group pointed by rcu-pointer s->psample_group doesn't respect rcu grace period when deallocating it. Extend struct psample_group with rcu head and use kfree_rcu when freeing it. Fixes: 5c5670fae430 ("net/sched: Introduce sample tc action") Signed-off-by: Vlad Buslov Signed-off-by: David S. Miller --- include/net/psample.h | 1 + net/psample/psample.c | 2 +- net/sched/act_sample.c | 6 +++++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/net/psample.h b/include/net/psample.h index 37a4df2325b2..6b578ce69cd8 100644 --- a/include/net/psample.h +++ b/include/net/psample.h @@ -11,6 +11,7 @@ struct psample_group { u32 group_num; u32 refcount; u32 seq; + struct rcu_head rcu; }; struct psample_group *psample_group_get(struct net *net, u32 group_num); diff --git a/net/psample/psample.c b/net/psample/psample.c index 841f198ea1a8..66e4b61a350d 100644 --- a/net/psample/psample.c +++ b/net/psample/psample.c @@ -154,7 +154,7 @@ static void psample_group_destroy(struct psample_group *group) { psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP); list_del(&group->list); - kfree(group); + kfree_rcu(group, rcu); } static struct psample_group * diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 7eff363f9f03..10229124a992 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -102,13 +102,17 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); s->rate = rate; s->psample_group_num = psample_group_num; - RCU_INIT_POINTER(s->psample_group, psample_group); + rcu_swap_protected(s->psample_group, psample_group, + lockdep_is_held(&s->tcf_lock)); if (tb[TCA_SAMPLE_TRUNC_SIZE]) { s->truncate = true; s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); } spin_unlock_bh(&s->tcf_lock); + + if (psample_group) + psample_group_put(psample_group); if (goto_ch) tcf_chain_put_by_act(goto_ch); From 888a5c53c0d8be6e98bc85b677f179f77a647873 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Tue, 27 Aug 2019 15:09:33 -0400 Subject: [PATCH 29/58] tcp: inherit timestamp on mtu probe TCP associates tx timestamp requests with a byte in the bytestream. If merging skbs in tcp_mtu_probe, migrate the tstamp request. Similar to MSG_EOR, do not allow moving a timestamp from any segment in the probe but the last. This to avoid merging multiple timestamps. Tested with the packetdrill script at https://github.com/wdebruij/packetdrill/commits/mtu_probe-1 Link: http://patchwork.ozlabs.org/patch/1143278/#2232897 Fixes: 4ed2d765dfac ("net-timestamp: TCP timestamping") Signed-off-by: Willem de Bruijn Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 979520e46e33..8a645f304e6c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2053,7 +2053,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) if (len <= skb->len) break; - if (unlikely(TCP_SKB_CB(skb)->eor)) + if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb)) return false; len -= skb->len; @@ -2170,6 +2170,7 @@ static int tcp_mtu_probe(struct sock *sk) * we need to propagate it to the new skb. 
*/ TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; + tcp_skb_collapse_tstamp(nskb, skb); tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); } else { From 092e22e586236bba106a82113826a68080a03506 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Tue, 27 Aug 2019 23:18:53 +0200 Subject: [PATCH 30/58] net/sched: pfifo_fast: fix wrong dereference in pfifo_fast_enqueue Now that 'TCQ_F_CPUSTATS' bit can be cleared, depending on the value of 'TCQ_F_NOLOCK' bit in the parent qdisc, we can't assume anymore that per-cpu counters are there in the error path of skb_array_produce(). Otherwise, the following splat can be seen: Unable to handle kernel paging request at virtual address 0000600dea430008 Mem abort info: ESR = 0x96000005 Exception class = DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 Data abort info: ISV = 0, ISS = 0x00000005 CM = 0, WnR = 0 user pgtable: 64k pages, 48-bit VAs, pgdp = 000000007b97530e [0000600dea430008] pgd=0000000000000000, pud=0000000000000000 Internal error: Oops: 96000005 [#1] SMP [...] pstate: 10000005 (nzcV daif -PAN -UAO) pc : pfifo_fast_enqueue+0x524/0x6e8 lr : pfifo_fast_enqueue+0x46c/0x6e8 sp : ffff800d39376fe0 x29: ffff800d39376fe0 x28: 1ffff001a07d1e40 x27: ffff800d03e8f188 x26: ffff800d03e8f200 x25: 0000000000000062 x24: ffff800d393772f0 x23: 0000000000000000 x22: 0000000000000403 x21: ffff800cca569a00 x20: ffff800d03e8ee00 x19: ffff800cca569a10 x18: 00000000000000bf x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000 x14: ffff1001a726edd0 x13: 1fffe4000276a9a4 x12: 0000000000000000 x11: dfff200000000000 x10: ffff800d03e8f1a0 x9 : 0000000000000003 x8 : 0000000000000000 x7 : 00000000f1f1f1f1 x6 : ffff1001a726edea x5 : ffff800cca56a53c x4 : 1ffff001bf9a8003 x3 : 1ffff001bf9a8003 x2 : 1ffff001a07d1dcb x1 : 0000600dea430000 x0 : 0000600dea430008 Process ping (pid: 6067, stack limit = 0x00000000dc0aa557) Call trace: pfifo_fast_enqueue+0x524/0x6e8 htb_enqueue+0x660/0x10e0 [sch_htb] __dev_queue_xmit+0x123c/0x2de0 dev_queue_xmit+0x24/0x30 ip_finish_output2+0xc48/0x1720 ip_finish_output+0x548/0x9d8 ip_output+0x334/0x788 ip_local_out+0x90/0x138 ip_send_skb+0x44/0x1d0 ip_push_pending_frames+0x5c/0x78 raw_sendmsg+0xed8/0x28d0 inet_sendmsg+0xc4/0x5c0 sock_sendmsg+0xac/0x108 __sys_sendto+0x1ac/0x2a0 __arm64_sys_sendto+0xc4/0x138 el0_svc_handler+0x13c/0x298 el0_svc+0x8/0xc Code: f9402e80 d538d081 91002000 8b010000 (885f7c03) Fix this by testing the value of 'TCQ_F_CPUSTATS' bit in 'qdisc->flags', before dereferencing 'qdisc->cpu_qstats'. Fixes: 8a53e616de29 ("net: sched: when clearing NOLOCK, clear TCQ_F_CPUSTATS, too") CC: Paolo Abeni CC: Stefano Brivio Reported-by: Li Shuang Signed-off-by: Davide Caratti Acked-by: Paolo Abeni Signed-off-by: David S. 
Miller --- net/sched/sch_generic.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 099797e5409d..137db1cbde85 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -624,8 +624,12 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, err = skb_array_produce(q, skb); - if (unlikely(err)) - return qdisc_drop_cpu(skb, qdisc, to_free); + if (unlikely(err)) { + if (qdisc_is_percpu_stats(qdisc)) + return qdisc_drop_cpu(skb, qdisc, to_free); + else + return qdisc_drop(skb, qdisc, to_free); + } qdisc_update_stats_at_enqueue(qdisc, pkt_len); return NET_XMIT_SUCCESS; From 49d4b14113cae1410eb4654ada5b9583bad971c4 Mon Sep 17 00:00:00 2001 From: Hayes Wang Date: Wed, 28 Aug 2019 09:51:41 +0800 Subject: [PATCH 31/58] Revert "r8152: napi hangup fix after disconnect" This reverts commit 0ee1f4734967af8321ecebaf9c74221ace34f2d5. The commit 0ee1f4734967 ("r8152: napi hangup fix after disconnect") adds a check about RTL8152_UNPLUG to determine if calling napi_disable() is invalid in rtl8152_close(), when rtl8152_disconnect() is called. This avoids using napi_disable() after calling netif_napi_del(). However, commit ffa9fec30ca0 ("r8152: set RTL8152_UNPLUG only for real disconnection") means that RTL8152_UNPLUG is not always set when calling rtl8152_disconnect(). Therefore, I have to revert commit 0ee1f4734967 ("r8152: napi hangup fix after disconnect") first, and submit another patch to fix it. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index eee0f5007ee3..ad3abe26b51b 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -4021,8 +4021,7 @@ static int rtl8152_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); #endif - if (!test_bit(RTL8152_UNPLUG, &tp->flags)) - napi_disable(&tp->napi); + napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); From 973dc6cfc0e2c43ff29ca5645ceaf1ae694ea110 Mon Sep 17 00:00:00 2001 From: Hayes Wang Date: Wed, 28 Aug 2019 09:51:42 +0800 Subject: [PATCH 32/58] r8152: remove calling netif_napi_del Remove unnecessary use of netif_napi_del. This also avoids calling napi_disable() after netif_napi_del(). Signed-off-by: Hayes Wang Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ad3abe26b51b..04137ac373b0 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -5352,7 +5352,6 @@ static int rtl8152_probe(struct usb_interface *intf, return 0; out1: - netif_napi_del(&tp->napi); usb_set_intfdata(intf, NULL); out: free_netdev(netdev); @@ -5367,7 +5366,6 @@ static void rtl8152_disconnect(struct usb_interface *intf) if (tp) { rtl_set_unplug(tp); - netif_napi_del(&tp->napi); unregister_netdev(tp->netdev); cancel_delayed_work_sync(&tp->hw_phy_work); tp->rtl_ops.unload(tp); From 739d7c5752b255e89ddbb1b0474f3b88ef5cd343 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Tue, 27 Aug 2019 22:56:29 -0700 Subject: [PATCH 33/58] nfp: flower: prevent ingress block binds on internal ports Internal port TC offload is implemented through user-space applications (such as OvS) by adding filters at egress via TC clsact qdiscs.
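As a rough illustration of the bind handling the rest of this message argues for (the names below are stand-ins for this write-up, not the NFP driver's real types):

    #include <errno.h>
    #include <stdbool.h>

    /* clsact generates one bind notification for its ingress block and one
     * for its egress block; an internal port should register a callback for
     * exactly one of them (egress), a representor for the other (ingress).
     */
    enum bind_type { BIND_CLSACT_INGRESS, BIND_CLSACT_EGRESS };

    static int example_accept_bind(enum bind_type type, bool internal_port)
    {
        if (internal_port)
            return type == BIND_CLSACT_EGRESS ? 0 : -EOPNOTSUPP;

        return type == BIND_CLSACT_INGRESS ? 0 : -EOPNOTSUPP;
    }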
Indirect block offload support in the NFP driver accepts both ingress qdisc binds and egress binds if the device is an internal port. However, clsact sends bind notification for both ingress and egress block binds which can lead to the driver registering multiple callbacks and receiving multiple notifications of new filters. Fix this by rejecting ingress block bind callbacks when the port is internal and only adding filter callbacks for egress binds. Fixes: 4d12ba42787b ("nfp: flower: allow offloading of matches on 'internal' ports") Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/offload.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 9917d64694c6..457bdc60f3ee 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1409,9 +1409,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, struct nfp_flower_priv *priv = app->priv; struct flow_block_cb *block_cb; - if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && - !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && - nfp_flower_internal_port_can_offload(app, netdev))) + if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + !nfp_flower_internal_port_can_offload(app, netdev)) || + (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && + nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP; switch (f->command) { From e8024cb483abb2b0290b3ef5e34c736e9de2492f Mon Sep 17 00:00:00 2001 From: John Hurley Date: Tue, 27 Aug 2019 22:56:30 -0700 Subject: [PATCH 34/58] nfp: flower: handle neighbour events on internal ports Recent code changes to NFP allowed the offload of neighbour entries to FW when the next hop device was an internal port. This allows for offload of tunnel encap when the end-point IP address is applied to such a port. Unfortunately, the neighbour event handler still rejects events that are not associated with a repr dev and so the firmware neighbour table may get out of sync for internal ports. Fix this by allowing internal port neighbour events to be correctly processed. Fixes: 45756dfedab5 ("nfp: flower: allow tunnels to output to internal port") Signed-off-by: John Hurley Reviewed-by: Simon Horman Reviewed-by: Jakub Kicinski Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index a7a80f4b722a..f0ee982eb1b5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -328,13 +328,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, flow.daddr = *(__be32 *)n->primary_key; - /* Only concerned with route changes for representors. */ - if (!nfp_netdev_is_nfp_repr(n->dev)) - return NOTIFY_DONE; - app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); app = app_priv->app; + if (!nfp_netdev_is_nfp_repr(n->dev) && + !nfp_flower_internal_port_can_offload(app, n->dev)) + return NOTIFY_DONE; + /* Only concerned with changes to routes already added to NFP. 
*/ if (!nfp_tun_has_route(app, flow.daddr)) return NOTIFY_DONE; From 189308d5823a089b56e2299cd96589507dac7319 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 28 Aug 2019 08:31:19 +0200 Subject: [PATCH 35/58] sky2: Disable MSI on yet another ASUS boards (P6Xxxx) A similar workaround for the suspend/resume problem is needed for yet more ASUS machines, the P6X models. Like the previous fix, the BIOS doesn't provide the standard DMI_SYS_* entry, so again DMI_BOARD_* entries are used instead. Reported-and-tested-by: SteveM Signed-off-by: Takashi Iwai Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/sky2.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index a01c75ede871..e0363870f3a5 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4931,6 +4931,13 @@ static const struct dmi_system_id msi_blacklist[] = { DMI_MATCH(DMI_BOARD_NAME, "P6T"), }, }, + { + .ident = "ASUS P6X", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P6X"), + }, + }, {} }; From de20900fbe1c4fd36de25a7a5a43223254ecf0d0 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 27 Aug 2019 21:23:45 +0200 Subject: [PATCH 36/58] netfilter: nf_flow_table: clear skb tstamp before xmit If 'fq' qdisc is used and a program has requested timestamps, skb->tstamp needs to be cleared, else fq will treat these as 'transmit time'. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_ip.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index d68c801dd614..b9e7dd6e60ce 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -228,7 +228,6 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb, { skb_orphan(skb); skb_dst_set_noref(skb, dst); - skb->tstamp = 0; dst_output(state->net, state->sk, skb); return NF_STOLEN; } @@ -284,6 +283,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; iph = ip_hdr(skb); ip_decrease_ttl(iph); + skb->tstamp = 0; if (unlikely(dst_xfrm(&rt->dst))) { memset(skb->cb, 0, sizeof(struct inet_skb_parm)); @@ -512,6 +512,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; ip6h = ipv6_hdr(skb); ip6h->hop_limit--; + skb->tstamp = 0; if (unlikely(dst_xfrm(&rt->dst))) { memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); From b9500577d361522a3d9f14da8cf41dc1d824904e Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 21 Aug 2019 20:17:32 +0300 Subject: [PATCH 37/58] iwlwifi: pcie: handle switching killer Qu B0 NICs to C0 We need to use a different firmware for C0 versions of killer Qu NICs. Add structures for them and handle them in the if block that detects C0 revisions. Additionally, instead of having an inclusive check for QnJ devices, make the selection exclusive, so that switching to QnJ is the exception, not the default. This prevents us from having to add all the non-QnJ cards to an exclusion list. To do so, only go into the QnJ block if the device has an RF ID type HR and HW revision QnJ.
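Reduced to a minimal sketch (the constants below are made up for this note, not the real CSR definitions), the selection becomes a positive match on the two properties that identify QnJ rather than a list of exclusions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative values only; the real ones live in the iwlwifi CSR headers. */
    #define EXAMPLE_RF_ID_TYPE_HR   0x7
    #define EXAMPLE_HW_REV_QNJ_B0   0x361

    /* Match on what identifies QnJ instead of excluding every non-QnJ config. */
    static bool example_is_qnj(uint32_t rf_id_type, uint32_t hw_rev)
    {
        return rf_id_type == EXAMPLE_RF_ID_TYPE_HR &&
               hw_rev == EXAMPLE_HW_REV_QNJ_B0;
    }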
Cc: stable@vger.kernel.org # 5.2 Signed-off-by: Luca Coelho Link: https://lore.kernel.org/r/20190821171732.2266-1-luca@coelho.fi Signed-off-by: Johannes Berg --- .../net/wireless/intel/iwlwifi/cfg/22000.c | 24 +++++++++++++++++++ .../net/wireless/intel/iwlwifi/iwl-config.h | 2 ++ drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 4 ++++ .../net/wireless/intel/iwlwifi/pcie/trans.c | 7 +----- 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 1f500cddb3a7..55b713255b8e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -556,6 +556,30 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; +const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = { + .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { + .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + const struct iwl_cfg iwl22000_2ax_cfg_jf = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 1c1bf1b281cd..6c04f8223aff 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -577,6 +577,8 @@ extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr; extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; +extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0; +extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 7c5aaeaf7fe5..d9ed53b7c768 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1062,6 +1062,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; + else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; + else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) + iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; } /* same thing for QuZ... 
*/ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 935e35dafce5..db62c8314603 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3602,12 +3602,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - ((trans->cfg != &iwl_ax200_cfg_cc && - trans->cfg != &iwl_ax201_cfg_qu_hr && - trans->cfg != &killer1650x_2ax_cfg && - trans->cfg != &killer1650w_2ax_cfg && - trans->cfg != &iwl_ax201_cfg_quz_hr) || - trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { + trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); From c8a41c6afa27b8c3f61622dfd882b912da9d6721 Mon Sep 17 00:00:00 2001 From: Denis Kenzior Date: Tue, 27 Aug 2019 17:41:19 -0500 Subject: [PATCH 38/58] mac80211: Don't memset RXCB prior to PAE intercept ieee80211_deliver_skb_to_local_stack() intercepts EAPoL frames if mac80211 is configured to do so and forwards the contents over nl80211. During this process some additional data is also forwarded, including whether the frame was received encrypted or not. Unfortunately just prior to the call to ieee80211_deliver_skb_to_local_stack, skb->cb is cleared, resulting in incorrect data being exposed over nl80211. Fixes: 018f6fbf540d ("mac80211: Send control port frames over nl80211") Cc: stable@vger.kernel.org Signed-off-by: Denis Kenzior Link: https://lore.kernel.org/r/20190827224120.14545-2-denkenz@gmail.com Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 3c1ab870fefe..7c4aeac006fb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2452,6 +2452,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, cfg80211_rx_control_port(dev, skb, noencrypt); dev_kfree_skb(skb); } else { + memset(skb->cb, 0, sizeof(skb->cb)); + /* deliver to local stack */ if (rx->napi) napi_gro_receive(rx->napi, skb); @@ -2546,8 +2548,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) if (skb) { skb->protocol = eth_type_trans(skb, dev); - memset(skb->cb, 0, sizeof(skb->cb)); - ieee80211_deliver_skb_to_local_stack(skb, rx); } From f8b43c5cf4b62a19f2210a0f5367b84e1eff1ab9 Mon Sep 17 00:00:00 2001 From: Denis Kenzior Date: Tue, 27 Aug 2019 17:41:20 -0500 Subject: [PATCH 39/58] mac80211: Correctly set noencrypt for PAE frames The noencrypt flag was intended to be set if the "frame was received unencrypted" according to include/uapi/linux/nl80211.h. However, the current behavior is the opposite of this.
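A minimal sketch of the intended logic (the flag value below is a stand-in for this note, not mac80211's real RX_FLAG_DECRYPTED definition):

    #include <stdbool.h>
    #include <stdint.h>

    #define EXAMPLE_RX_FLAG_DECRYPTED 0x1   /* stand-in value */

    /* "noencrypt" answers "was this frame received unencrypted?", so the
     * DECRYPTED bit has to be inverted rather than passed through as-is.
     */
    static bool example_noencrypt(uint32_t rx_flags)
    {
        return !(rx_flags & EXAMPLE_RX_FLAG_DECRYPTED);
    }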
Cc: stable@vger.kernel.org Fixes: 018f6fbf540d ("mac80211: Send control port frames over nl80211") Signed-off-by: Denis Kenzior Link: https://lore.kernel.org/r/20190827224120.14545-3-denkenz@gmail.com Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 7c4aeac006fb..768d14c9a716 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2447,7 +2447,7 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) && sdata->control_port_over_nl80211)) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - bool noencrypt = status->flag & RX_FLAG_DECRYPTED; + bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); cfg80211_rx_control_port(dev, skb, noencrypt); dev_kfree_skb(skb); From daf1de9078792a4d60e36aa7ecf3aadca65277c2 Mon Sep 17 00:00:00 2001 From: wenxu Date: Wed, 28 Aug 2019 23:02:33 +0800 Subject: [PATCH 40/58] netfilter: nft_meta_bridge: Fix get NFT_META_BRI_IIFVPROTO in network byteorder Get the vlan_proto of ingress bridge in network byteorder as userspace expects. Otherwise this is inconsistent with NFT_META_PROTOCOL. Fixes: 2a3a93ef0ba5 ("netfilter: nft_meta_bridge: Add NFT_META_BRI_IIFVPROTO support") Signed-off-by: wenxu Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/nft_meta_bridge.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c index 1804e867f715..7c9e92b2f806 100644 --- a/net/bridge/netfilter/nft_meta_bridge.c +++ b/net/bridge/netfilter/nft_meta_bridge.c @@ -53,7 +53,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr, goto err; br_vlan_get_proto(br_dev, &p_proto); - nft_reg_store16(dest, p_proto); + nft_reg_store16(dest, htons(p_proto)); return; } default: From b6b4dc4c1fa7f1c99398e7dc85758049645e9588 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 29 Aug 2019 10:46:00 +0800 Subject: [PATCH 41/58] amd-xgbe: Fix error path in xgbe_mod_init() In xgbe_mod_init(), we should do cleanup if some error occurs. Reported-by: Hulk Robot Fixes: efbaa828330a ("amd-xgbe: Add support to handle device renaming") Fixes: 47f164deab22 ("amd-xgbe: Add PCI device support") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index b41f23679a08..7ce9c69e9c44 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void) ret = xgbe_platform_init(); if (ret) - return ret; + goto err_platform_init; ret = xgbe_pci_init(); if (ret) - return ret; + goto err_pci_init; return 0; + +err_pci_init: + xgbe_platform_exit(); +err_platform_init: + unregister_netdevice_notifier(&xgbe_netdev_notifier); + return ret; } static void __exit xgbe_mod_exit(void) From 3b25528e1e355c803e73aa326ce657b5606cda73 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Thu, 29 Aug 2019 11:17:24 +0800 Subject: [PATCH 42/58] net: stmmac: dwmac-rk: Don't fail if phy regulator is absent The devicetree binding lists the phy regulator as optional. As such, the driver should not bail out if it can't find a regulator. Instead it should just skip the remaining regulator-related code and continue on normally.
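In sketch form (stand-in type and callback for this note; the real driver works with struct regulator and regulator_enable()), the optional-supply handling described below looks like:

    /* Stand-in for struct regulator. */
    struct example_supply;

    /* A missing optional supply means "nothing to power on", not an error. */
    static int example_phy_power_on(struct example_supply *ldo,
                                    int (*enable)(struct example_supply *))
    {
        if (!ldo)
            return 0;   /* supply not described in DT: skip quietly */

        return enable(ldo);
    }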
Skip the remainder of phy_power_on() if a regulator supply isn't available. This also gets rid of the bogus return code. Fixes: 2e12f536635f ("net: stmmac: dwmac-rk: Use standard devicetree property for phy regulator") Signed-off-by: Chen-Yu Tsai Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 4644b2aeeba1..e2e469c37a4d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable) int ret; struct device *dev = &bsp_priv->pdev->dev; - if (!ldo) { - dev_err(dev, "no regulator found\n"); - return -1; - } + if (!ldo) + return 0; if (enable) { ret = regulator_enable(ldo); From d12040b6933f684a26773afad46dbba9778608d7 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Aug 2019 14:12:11 +0100 Subject: [PATCH 43/58] rxrpc: Fix lack of conn cleanup when local endpoint is cleaned up [ver #2] When a local endpoint ceases to be in use, such as when the kafs module is unloaded, the kernel will emit an assertion failure if there are any outstanding client connections: rxrpc: Assertion failed ------------[ cut here ]------------ kernel BUG at net/rxrpc/local_object.c:433! and even beyond that, will evince other oopses if there are service connections still present. Fix this by: (1) Removing the triggering of connection reaping when an rxrpc socket is released. These don't actually clean up the connections anyway - and further, the local endpoint may still be in use through another socket. (2) Mark the local endpoint as dead when we start the process of tearing it down. (3) When destroying a local endpoint, strip all of its client connections from the idle list and discard the ref on each that the list was holding. (4) When destroying a local endpoint, call the service connection reaper directly (rather than through a workqueue) to immediately kill off all outstanding service connections. (5) Make the service connection reaper reap connections for which the local endpoint is marked dead. Only after destroying the connections can we close the socket lest we get an oops in a workqueue that's looking at a connection or a peer. Fixes: 3d18cbb7fd0c ("rxrpc: Fix conn expiry timers") Signed-off-by: David Howells Tested-by: Marc Dionne Signed-off-by: David S.
Miller --- net/rxrpc/af_rxrpc.c | 3 --- net/rxrpc/ar-internal.h | 1 + net/rxrpc/conn_client.c | 44 ++++++++++++++++++++++++++++++++++++++++ net/rxrpc/conn_object.c | 2 +- net/rxrpc/local_object.c | 5 ++++- 5 files changed, 50 insertions(+), 5 deletions(-) diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 0dbbfd1b6487..d72ddb67bb74 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -862,7 +862,6 @@ static void rxrpc_sock_destructor(struct sock *sk) static int rxrpc_release_sock(struct sock *sk) { struct rxrpc_sock *rx = rxrpc_sk(sk); - struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); @@ -898,8 +897,6 @@ static int rxrpc_release_sock(struct sock *sk) rxrpc_release_calls_on_socket(rx); flush_workqueue(rxrpc_workqueue); rxrpc_purge_queue(&sk->sk_receive_queue); - rxrpc_queue_work(&rxnet->service_conn_reaper); - rxrpc_queue_work(&rxnet->client_conn_reaper); rxrpc_unuse_local(rx->local); rx->local = NULL; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 852e58781fda..8051dfdcf26d 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -910,6 +910,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *); void rxrpc_put_client_conn(struct rxrpc_connection *); void rxrpc_discard_expired_client_conns(struct work_struct *); void rxrpc_destroy_all_client_connections(struct rxrpc_net *); +void rxrpc_clean_up_local_conns(struct rxrpc_local *); /* * conn_event.c diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index aea82f909c60..3f1da1b49f69 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -1162,3 +1162,47 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet) _leave(""); } + +/* + * Clean up the client connections on a local endpoint. 
+ */ +void rxrpc_clean_up_local_conns(struct rxrpc_local *local) +{ + struct rxrpc_connection *conn, *tmp; + struct rxrpc_net *rxnet = local->rxnet; + unsigned int nr_active; + LIST_HEAD(graveyard); + + _enter(""); + + spin_lock(&rxnet->client_conn_cache_lock); + nr_active = rxnet->nr_active_client_conns; + + list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns, + cache_link) { + if (conn->params.local == local) { + ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE); + + trace_rxrpc_client(conn, -1, rxrpc_client_discard); + if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags)) + BUG(); + conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; + list_move(&conn->cache_link, &graveyard); + nr_active--; + } + } + + rxnet->nr_active_client_conns = nr_active; + spin_unlock(&rxnet->client_conn_cache_lock); + ASSERTCMP(nr_active, >=, 0); + + while (!list_empty(&graveyard)) { + conn = list_entry(graveyard.next, + struct rxrpc_connection, cache_link); + list_del_init(&conn->cache_link); + + rxrpc_put_connection(conn); + } + + _leave(" [culled]"); +} diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 434ef392212b..ed05b6922132 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -398,7 +398,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work) if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) continue; - if (rxnet->live) { + if (rxnet->live && !conn->params.local->dead) { idle_timestamp = READ_ONCE(conn->idle_timestamp); expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; if (conn->params.local->service_closed) diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 72a6e12a9304..36587260cabd 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -426,11 +426,14 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local) _enter("%d", local->debug_id); + local->dead = true; + mutex_lock(&rxnet->local_mutex); list_del_init(&local->link); mutex_unlock(&rxnet->local_mutex); - ASSERT(RB_EMPTY_ROOT(&local->client_conns)); + rxrpc_clean_up_local_conns(local); + rxrpc_service_connection_reaper(&rxnet->service_conn_reaper); ASSERT(!local->service); if (socket) { From 5f81d5455589df2f580e634c2e9da55b80e63c30 Mon Sep 17 00:00:00 2001 From: George McCollister Date: Thu, 29 Aug 2019 09:14:41 -0500 Subject: [PATCH 44/58] net: dsa: microchip: fill regmap_config name Use the register value width as the regmap_config name to prevent the following error when the second and third regmap_configs are initialized. "debugfs: Directory '${bus-id}' with parent 'regmap' already present!" Signed-off-by: George McCollister Reviewed-by: Marek Vasut Signed-off-by: David S. 
Miller --- drivers/net/dsa/microchip/ksz_common.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index ee7096d8af07..72ec250b9540 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -128,6 +128,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset, #define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \ { \ + .name = #width, \ .val_bits = (width), \ .reg_stride = (width) / 8, \ .reg_bits = (regbits) + (regalign), \ From efb55222d31c8c2af8b4ba42bb56b0b0beedf98e Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 30 Aug 2019 04:07:21 +0300 Subject: [PATCH 45/58] taprio: Fix kernel panic in taprio_destroy taprio_init may fail earlier than this line: list_add(&q->taprio_list, &taprio_list); i.e. due to the net device not being multi queue. Attempting to remove q from the global taprio_list when it is not part of it will result in a kernel panic. Fix it by matching list_add and list_del better to one another in the order of operations. This way we can keep the deletion unconditional and with lower complexity - O(1). Cc: Leandro Dorileo Fixes: 7b9eba7ba0c1 ("net/sched: taprio: fix picos_per_byte miscalculation") Signed-off-by: Vladimir Oltean Acked-by: Vinicius Costa Gomes Signed-off-by: David S. Miller --- net/sched/sch_taprio.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index e25d414ae12f..8c85f61e98dd 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1249,6 +1249,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, */ q->clockid = -1; + spin_lock(&taprio_list_lock); + list_add(&q->taprio_list, &taprio_list); + spin_unlock(&taprio_list_lock); + if (sch->parent != TC_H_ROOT) return -EOPNOTSUPP; @@ -1266,10 +1270,6 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, if (!opt) return -EINVAL; - spin_lock(&taprio_list_lock); - list_add(&q->taprio_list, &taprio_list); - spin_unlock(&taprio_list_lock); - for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *dev_queue; struct Qdisc *qdisc; From f04b514c0ce2f20aad757ebfb04e07b1a12d2b58 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 30 Aug 2019 04:07:22 +0300 Subject: [PATCH 46/58] taprio: Set default link speed to 10 Mbps in taprio_set_picos_per_byte The taprio budget needs to be adapted at runtime according to interface link speed. But that handling is problematic. For one thing, installing a qdisc on an interface that doesn't have carrier is not illegal. But taprio prints the following stack trace: [ 31.851373] ------------[ cut here ]------------ [ 31.856024] WARNING: CPU: 1 PID: 207 at net/sched/sch_taprio.c:481 taprio_dequeue+0x1a8/0x2d4 [ 31.864566] taprio: dequeue() called with unknown picos per byte. 
[ 31.864570] Modules linked in: [ 31.873701] CPU: 1 PID: 207 Comm: tc Not tainted 5.3.0-rc5-01199-g8838fe023cd6 #1689 [ 31.881398] Hardware name: Freescale LS1021A [ 31.885661] [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [ 31.893368] [] (show_stack) from [] (dump_stack+0xb4/0xc8) [ 31.900555] [] (dump_stack) from [] (__warn+0xe0/0xf8) [ 31.907395] [] (__warn) from [] (warn_slowpath_fmt+0x48/0x6c) [ 31.914841] [] (warn_slowpath_fmt) from [] (taprio_dequeue+0x1a8/0x2d4) [ 31.923150] [] (taprio_dequeue) from [] (__qdisc_run+0x90/0x61c) [ 31.930856] [] (__qdisc_run) from [] (net_tx_action+0x12c/0x2bc) [ 31.938560] [] (net_tx_action) from [] (__do_softirq+0x130/0x3c8) [ 31.946350] [] (__do_softirq) from [] (irq_exit+0xbc/0xd8) [ 31.953536] [] (irq_exit) from [] (__handle_domain_irq+0x60/0xb4) [ 31.961328] [] (__handle_domain_irq) from [] (gic_handle_irq+0x58/0x9c) [ 31.969638] [] (gic_handle_irq) from [] (__irq_svc+0x6c/0x90) [ 31.977076] Exception stack(0xe8167b20 to 0xe8167b68) [ 31.982100] 7b20: e9d4bd80 00000cc0 000000cf 00000000 e9d4bd80 c1f38958 00000cc0 c1f38960 [ 31.990234] 7b40: 00000001 000000cf 00000004 e9dc0800 00000000 e8167b70 c0f478ec c0f46d94 [ 31.998363] 7b60: 60070013 ffffffff [ 32.001833] [] (__irq_svc) from [] (netlink_trim+0x18/0xd8) [ 32.009104] [] (netlink_trim) from [] (netlink_broadcast_filtered+0x34/0x414) [ 32.017930] [] (netlink_broadcast_filtered) from [] (netlink_broadcast+0x20/0x28) [ 32.027102] [] (netlink_broadcast) from [] (rtnetlink_send+0x34/0x88) [ 32.035238] [] (rtnetlink_send) from [] (notify_and_destroy+0x2c/0x44) [ 32.043461] [] (notify_and_destroy) from [] (qdisc_graft+0x398/0x470) [ 32.051595] [] (qdisc_graft) from [] (tc_modify_qdisc+0x3a4/0x724) [ 32.059470] [] (tc_modify_qdisc) from [] (rtnetlink_rcv_msg+0x260/0x2ec) [ 32.067864] [] (rtnetlink_rcv_msg) from [] (netlink_rcv_skb+0xb8/0x110) [ 32.076172] [] (netlink_rcv_skb) from [] (netlink_unicast+0x1b4/0x22c) [ 32.084392] [] (netlink_unicast) from [] (netlink_sendmsg+0x33c/0x380) [ 32.092614] [] (netlink_sendmsg) from [] (sock_sendmsg+0x14/0x24) [ 32.100403] [] (sock_sendmsg) from [] (___sys_sendmsg+0x214/0x228) [ 32.108279] [] (___sys_sendmsg) from [] (__sys_sendmsg+0x50/0x8c) [ 32.116068] [] (__sys_sendmsg) from [] (ret_fast_syscall+0x0/0x54) [ 32.123938] Exception stack(0xe8167fa8 to 0xe8167ff0) [ 32.128960] 7fa0: b6fa68c8 000000f8 00000003 bea142d0 00000000 00000000 [ 32.137093] 7fc0: b6fa68c8 000000f8 0052154c 00000128 5d6468a2 00000000 00000028 00558c9c [ 32.145224] 7fe0: 00000070 bea14278 00530d64 b6e17e64 [ 32.150659] ---[ end trace 2139c9827c3e5177 ]--- This happens because the qdisc ->dequeue callback gets called. Which again is not illegal, the qdisc will dequeue even when the interface is up but doesn't have carrier (and hence SPEED_UNKNOWN), and the frames will be dropped further down the stack in dev_direct_xmit(). And, at the end of the day, for what? For calculating the initial budget of an interface which is non-operational at the moment and where frames will get dropped anyway. So if we can't figure out the link speed, default to SPEED_10 and move along. We can also remove the runtime check now. Cc: Leandro Dorileo Fixes: 7b9eba7ba0c1 ("net/sched: taprio: fix picos_per_byte miscalculation") Acked-by: Vinicius Costa Gomes Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- net/sched/sch_taprio.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 8c85f61e98dd..8d8bc2ec5cd6 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -477,11 +477,6 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch) u32 gate_mask; int i; - if (atomic64_read(&q->picos_per_byte) == -1) { - WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte."); - return NULL; - } - rcu_read_lock(); entry = rcu_dereference(q->current_entry); /* if there's no entry, it means that the schedule didn't @@ -958,12 +953,20 @@ static void taprio_set_picos_per_byte(struct net_device *dev, struct taprio_sched *q) { struct ethtool_link_ksettings ecmd; - int picos_per_byte = -1; + int speed = SPEED_10; + int picos_per_byte; + int err; - if (!__ethtool_get_link_ksettings(dev, &ecmd) && - ecmd.base.speed != SPEED_UNKNOWN) - picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, - ecmd.base.speed * 1000 * 1000); + err = __ethtool_get_link_ksettings(dev, &ecmd); + if (err < 0) + goto skip; + + if (ecmd.base.speed != SPEED_UNKNOWN) + speed = ecmd.base.speed; + +skip: + picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, + speed * 1000 * 1000); atomic64_set(&q->picos_per_byte, picos_per_byte); netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", From 1c6c09a0ae62fa3ea8f8ead2ac3920e6fff2de64 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 30 Aug 2019 04:07:23 +0300 Subject: [PATCH 47/58] net/sched: cbs: Set default link speed to 10 Mbps in cbs_set_port_rate The discussion to be made is absolutely the same as in the case of previous patch ("taprio: Set default link speed to 10 Mbps in taprio_set_picos_per_byte"). Nothing is lost when setting a default. Cc: Leandro Dorileo Fixes: e0a7683d30e9 ("net/sched: cbs: fix port_rate miscalculation") Acked-by: Vinicius Costa Gomes Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- net/sched/sch_cbs.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index 732e109c3055..810645b5c086 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -181,11 +181,6 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) s64 credits; int len; - if (atomic64_read(&q->port_rate) == -1) { - WARN_ONCE(1, "cbs: dequeue() called with unknown port rate."); - return NULL; - } - if (q->credits < 0) { credits = timediff_to_credits(now - q->last, q->idleslope); @@ -303,11 +298,19 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q, static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) { struct ethtool_link_ksettings ecmd; + int speed = SPEED_10; int port_rate = -1; + int err; - if (!__ethtool_get_link_ksettings(dev, &ecmd) && - ecmd.base.speed != SPEED_UNKNOWN) - port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT; + err = __ethtool_get_link_ksettings(dev, &ecmd); + if (err < 0) + goto skip; + + if (ecmd.base.speed != SPEED_UNKNOWN) + speed = ecmd.base.speed; + +skip: + port_rate = speed * 1000 * BYTES_PER_KBIT; atomic64_set(&q->port_rate, port_rate); netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n", From 6fdc060d7476ef73c8029b652d252c1a7b4de948 Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Fri, 30 Aug 2019 12:08:30 +0000 Subject: [PATCH 48/58] net: aquantia: fix removal of vlan 0 Due to absence of checking against the rx flow rule when vlan 0 is being removed, the other rule could be removed instead of the rule with vlan 0 Fixes: 7975d2aff5afb ("net: aquantia: add support of rx-vlan-filter offload") Signed-off-by: Dmitry Bogdanov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c index 440690b18734..b13704544a23 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c @@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id) if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) break; } - if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { + if (rule && rule->type == aq_rx_filter_vlan && + be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { struct ethtool_rxnfc cmd; cmd.fs.location = rule->aq_fsp.location; From 392349f60110dc2c3daf86464fd926afc53d6143 Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Fri, 30 Aug 2019 12:08:33 +0000 Subject: [PATCH 49/58] net: aquantia: fix limit of vlan filters Fix a limit condition of vlans on the interface before setting vlan promiscuous mode Fixes: 48dd73d08d4dd ("net: aquantia: fix vlans not working over bridged network") Signed-off-by: Dmitry Bogdanov Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c index b13704544a23..aee827f07c16 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c @@ -844,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic) return err; if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { - if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) { + if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) { err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, !(aq_nic->packet_filter & IFF_PROMISC)); aq_nic->aq_nic_cfg.is_vlan_force_promisc = false; From c2ef057ee775e229d3138add59f937d93a3a59d8 Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Fri, 30 Aug 2019 12:08:35 +0000 Subject: [PATCH 50/58] net: aquantia: reapply vlan filters on up In case of device reconfiguration the driver may reset the device invisible for other modules, vlan module in particular. So vlans will not be removed&created and vlan filters will not be configured in the device. The patch reapplies the vlan filters at device start. Fixes: 7975d2aff5afb ("net: aquantia: add support of rx-vlan-filter offload") Signed-off-by: Dmitry Bogdanov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 100722ad5c2d..b4a0fb281e69 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev) if (err < 0) goto err_exit; + err = aq_filters_vlans_update(aq_nic); + if (err < 0) + goto err_exit; + err = aq_nic_start(aq_nic); if (err < 0) goto err_exit; From 5c47e3ba6fe52465603cf9d816b3371e6881d649 Mon Sep 17 00:00:00 2001 From: Igor Russkikh Date: Fri, 30 Aug 2019 12:08:36 +0000 Subject: [PATCH 51/58] net: aquantia: linkstate irq should be oneshot Declaring threaded irq handler should also indicate the irq is oneshot. It is oneshot indeed, because HW implements irq automasking on trigger. Not declaring this causes some kernel configurations to fail on interface up, because request_threaded_irq returned an err code. The issue was originally hidden on normal x86_64 configuration with latest kernel, because depending on interrupt controller, irq driver added ONESHOT flag on its own. Issue was observed on older kernels (4.14) where no such logic exists. Fixes: 4c83f170b3ac ("net: aquantia: link status irq handling") Signed-off-by: Igor Russkikh Reported-by: Michael Symolkin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index e1392766e21e..8f66e7817811 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -393,7 +393,7 @@ int aq_nic_start(struct aq_nic_s *self) self->aq_nic_cfg.link_irq_vec); err = request_threaded_irq(irqvec, NULL, aq_linkstate_threaded_isr, - IRQF_SHARED, + IRQF_SHARED | IRQF_ONESHOT, self->ndev->name, self); if (err < 0) goto err_exit; From be6cef69ba570ebb327eba1ef6438f7af49aaf86 Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Fri, 30 Aug 2019 12:08:38 +0000 Subject: [PATCH 52/58] net: aquantia: fix out of memory condition on rx side On embedded environments with hard memory limits it is a normal, although rare, case that an skb can't be allocated on the rx path under high traffic. In such OOM cases napi_complete_done() was not called. So the napi object was left in an invalid state, as if it were still "scheduled". The kernel does not re-schedule the poll of that napi object. Consequently, the kernel can not remove that object and the system hangs on `ifconfig down` waiting for a poll. We fix this by gracefully closing the napi poll routine with a correct invocation of napi_complete_done. This was reproduced with artificially failing the allocation of skb to simulate an "out of memory" error case and check that traffic does not get stuck. Fixes: 970a2e9864b0 ("net: ethernet: aquantia: Vector operations") Signed-off-by: Igor Russkikh Signed-off-by: Dmitry Bogdanov Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 715685aa48c3..28892b8acd0e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) } } +err_exit: if (!was_tx_cleaned) work_done = budget; @@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) 1U << self->aq_ring_param.vec_idx); } } -err_exit: + return work_done; } From de5eb9e00eba00783f49da37377bd2a93d38fe42 Mon Sep 17 00:00:00 2001 From: Razvan Stefanescu Date: Fri, 30 Aug 2019 10:52:01 +0300 Subject: [PATCH 53/58] dt-bindings: net: dsa: document additional Microchip KSZ8563 switch It is a 3-Port 10/100 Ethernet Switch with 1588v2 PTP. Signed-off-by: Razvan Stefanescu Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/dsa/ksz.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt index 4ac21cef370e..113e7ac79aad 100644 --- a/Documentation/devicetree/bindings/net/dsa/ksz.txt +++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt @@ -12,6 +12,7 @@ Required properties: - "microchip,ksz8565" - "microchip,ksz9893" - "microchip,ksz9563" + - "microchip,ksz8563" Optional properties: From d9033ae95cf445150fcc5856ccf024f41f0bd0b9 Mon Sep 17 00:00:00 2001 From: Razvan Stefanescu Date: Fri, 30 Aug 2019 10:52:02 +0300 Subject: [PATCH 54/58] net: dsa: microchip: add KSZ8563 compatibility string It is a 3-Port 10/100 Ethernet Switch with 1588v2 PTP. Signed-off-by: Razvan Stefanescu Signed-off-by: David S.
Miller --- drivers/net/dsa/microchip/ksz9477_spi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index 5a9e27b337a8..098b01e4ed1a 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -81,6 +81,7 @@ static const struct of_device_id ksz9477_dt_ids[] = { { .compatible = "microchip,ksz9897" }, { .compatible = "microchip,ksz9893" }, { .compatible = "microchip,ksz9563" }, + { .compatible = "microchip,ksz8563" }, {}, }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); From 02a3f0d5a70a865d55c4b7cb2e327cb30491f7fd Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Fri, 30 Aug 2019 18:51:47 +0200 Subject: [PATCH 55/58] tc-testing: don't hardcode 'ip' in nsPlugin.py the following tdc test fails on Fedora: # ./tdc.py -e 2638 -- ns/SubPlugin.__init__ Test 2638: Add matchall and try to get it -----> prepare stage *** Could not execute: "$TC qdisc add dev $DEV1 clsact" -----> prepare stage *** Error message: "/bin/sh: ip: command not found" returncode 127; expected [0] -----> prepare stage *** Aborting test run. Let nsPlugin.py use the 'IP' variable introduced with commit 92c1a19e2fb9 ("tc-tests: added path to ip command in tdc"), so that the path to 'ip' is correctly resolved to the value we have in tdc_config.py. # ./tdc.py -e 2638 -- ns/SubPlugin.__init__ Test 2638: Add matchall and try to get it All test results: 1..1 ok 1 2638 - Add matchall and try to get it Fixes: 489ce2f42514 ("tc-testing: Restore original behaviour for namespaces in tdc") Reported-by: Hangbin Liu Signed-off-by: Davide Caratti Acked-by: Nicolas Dichtel Signed-off-by: David S. Miller --- .../tc-testing/plugin-lib/nsPlugin.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py index affa7f2d9670..9539cffa9e5e 100644 --- a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py +++ b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py @@ -64,7 +64,7 @@ class SubPlugin(TdcPlugin): cmdlist.insert(0, self.args.NAMES['NS']) cmdlist.insert(0, 'exec') cmdlist.insert(0, 'netns') - cmdlist.insert(0, 'ip') + cmdlist.insert(0, self.args.NAMES['IP']) else: pass @@ -78,16 +78,16 @@ class SubPlugin(TdcPlugin): return command def _ports_create(self): - cmd = 'ip link add $DEV0 type veth peer name $DEV1' + cmd = '$IP link add $DEV0 type veth peer name $DEV1' self._exec_cmd('pre', cmd) - cmd = 'ip link set $DEV0 up' + cmd = '$IP link set $DEV0 up' self._exec_cmd('pre', cmd) if not self.args.namespace: - cmd = 'ip link set $DEV1 up' + cmd = '$IP link set $DEV1 up' self._exec_cmd('pre', cmd) def _ports_destroy(self): - cmd = 'ip link del $DEV0' + cmd = '$IP link del $DEV0' self._exec_cmd('post', cmd) def _ns_create(self): @@ -97,16 +97,16 @@ class SubPlugin(TdcPlugin): ''' self._ports_create() if self.args.namespace: - cmd = 'ip netns add {}'.format(self.args.NAMES['NS']) + cmd = '$IP netns add {}'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) - cmd = 'ip link set $DEV1 netns {}'.format(self.args.NAMES['NS']) + cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) - cmd = 'ip -n {} link set $DEV1 up'.format(self.args.NAMES['NS']) + cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) if self.args.device: - cmd = 'ip link set $DEV2 netns {}'.format(self.args.NAMES['NS']) + cmd 
= '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) - cmd = 'ip -n {} link set $DEV2 up'.format(self.args.NAMES['NS']) + cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS']) self._exec_cmd('pre', cmd) def _ns_destroy(self): @@ -115,7 +115,7 @@ class SubPlugin(TdcPlugin): devices as well) ''' if self.args.namespace: - cmd = 'ip netns delete {}'.format(self.args.NAMES['NS']) + cmd = '$IP netns delete {}'.format(self.args.NAMES['NS']) self._exec_cmd('post', cmd) def _exec_cmd(self, stage, command): From dd1bf47a84fe64edf50f6a83fdc164d0da6fa021 Mon Sep 17 00:00:00 2001 From: "Ryan M. Collins" Date: Fri, 30 Aug 2019 14:49:55 -0400 Subject: [PATCH 56/58] net: bcmgenet: use ethtool_op_get_ts_info() This change enables the use of SW timestamping on the Raspberry Pi 4. bcmgenet's transmit function bcmgenet_xmit() implements software timestamping. However, the SOF_TIMESTAMPING_TX_SOFTWARE capability was missing and only SOF_TIMESTAMPING_RX_SOFTWARE was announced. By using ethtool_ops bcmgenet_ethtool_ops() as get_ts_info(), the SOF_TIMESTAMPING_TX_SOFTWARE capability is announced. Similar to commit a8f5cb9e7991 ("smsc95xx: use ethtool_op_get_ts_info()") Signed-off-by: Ryan M. Collins Acked-by: Florian Fainelli Acked-by: Doug Berger Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index d3a0b614dbfa..b22196880d6d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1124,6 +1124,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = { .set_coalesce = bcmgenet_set_coalesce, .get_link_ksettings = bcmgenet_get_link_ksettings, .set_link_ksettings = bcmgenet_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, }; /* Power down the unimac, based on mode. */ From dd7078f05e1b774a9e8c9f117101d97e4ccd0691 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 30 Aug 2019 22:23:12 +0200 Subject: [PATCH 57/58] enetc: Add missing call to 'pci_free_irq_vectors()' in probe and remove functions Calls to 'pci_free_irq_vectors()' are missing both in the error handling path of the probe function and in the remove function. Add them. Fixes: 19971f5ea0ab ("enetc: add PTP clock driver") Signed-off-by: Christophe JAILLET Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/enetc/enetc_ptp.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c index 2fd2586e42bf..bc594892507a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -82,7 +82,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev, n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); if (n != 1) { err = -EPERM; - goto err_irq; + goto err_irq_vectors; } ptp_qoriq->irq = pci_irq_vector(pdev, 0); @@ -107,6 +107,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev, err_no_clock: free_irq(ptp_qoriq->irq, ptp_qoriq); err_irq: + pci_free_irq_vectors(pdev); +err_irq_vectors: iounmap(base); err_ioremap: kfree(ptp_qoriq); @@ -125,6 +127,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev) enetc_phc_index = -1; ptp_qoriq_free(ptp_qoriq); + pci_free_irq_vectors(pdev); kfree(ptp_qoriq); pci_release_mem_regions(pdev); From e1e54ec7fb55501c33b117c111cb0a045b8eded2 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 31 Aug 2019 09:17:51 +0200 Subject: [PATCH 58/58] net: seeq: Fix the function used to release some memory in an error handling path In commit 99cd149efe82 ("sgiseeq: replace use of dma_cache_wback_inv"), a call to 'get_zeroed_page()' has been turned into a call to 'dma_alloc_coherent()'. Only the remove function has been updated to turn the corresponding 'free_page()' into 'dma_free_attrs()'. The error handling path of the probe function has not been updated. Fix it now. Rename the corresponding label to match the new call. Fixes: 99cd149efe82 ("sgiseeq: replace use of dma_cache_wback_inv") Signed-off-by: Christophe JAILLET Reviewed-by: Thomas Bogendoerfer Signed-off-by: David S. Miller --- drivers/net/ethernet/seeq/sgiseeq.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 7a5e6c5abb57..276c7cae7cee 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev) printk(KERN_ERR "Sgiseeq: Cannot register net device, " "aborting.\n"); err = -ENODEV; - goto err_out_free_page; + goto err_out_free_attrs; } printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); return 0; -err_out_free_page: - free_page((unsigned long) sp->srings); +err_out_free_attrs: + dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings, + sp->srings_dma, DMA_ATTR_NON_CONSISTENT); err_out_free_dev: free_netdev(dev);