sctp: implement memory accounting on rx path
sk_forward_alloc is also updated on the rx path, but to be consistent we change to use sk_mem_charge() in sctp_skb_set_owner_r(). In sctp_eat_data(), it's not enough to check sctp_memory_pressure only, which doesn't work for mem_cgroup_sockets_enabled, so we change to use sk_under_memory_pressure(). When under memory pressure, sk_mem_reclaim() and sk_rmem_schedule() should be called on both the RENEGE and CHUNK DELIVERY paths to exit the memory pressure status as soon as possible. Note that sk_rmem_schedule() uses datalen to make things easy there. Reported-by: Matteo Croce <mcroce@redhat.com> Tested-by: Matteo Croce <mcroce@redhat.com> Acked-by: Neil Horman <nhorman@tuxdriver.com> Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> Signed-off-by: Xin Long <lucien.xin@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
1033990ac5
commit
9dde27de3e
|
@ -421,7 +421,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
|
|||
/*
|
||||
* This mimics the behavior of skb_set_owner_r
|
||||
*/
|
||||
sk->sk_forward_alloc -= event->rmem_len;
|
||||
sk_mem_charge(sk, event->rmem_len);
|
||||
}
|
||||
|
||||
/* Tests if the list has one and only one entry. */
|
||||
|
|
|
@ -6412,13 +6412,15 @@ static int sctp_eat_data(const struct sctp_association *asoc,
|
|||
* in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
|
||||
* memory usage too much
|
||||
*/
|
||||
if (*sk->sk_prot_creator->memory_pressure) {
|
||||
if (sk_under_memory_pressure(sk)) {
|
||||
if (sctp_tsnmap_has_gap(map) &&
|
||||
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
|
||||
pr_debug("%s: under pressure, reneging for tsn:%u\n",
|
||||
__func__, tsn);
|
||||
deliver = SCTP_CMD_RENEGE;
|
||||
}
|
||||
} else {
|
||||
sk_mem_reclaim(sk);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -634,8 +634,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
|
|||
gfp_t gfp)
|
||||
{
|
||||
struct sctp_ulpevent *event = NULL;
|
||||
struct sk_buff *skb;
|
||||
size_t padding, len;
|
||||
struct sk_buff *skb = chunk->skb;
|
||||
struct sock *sk = asoc->base.sk;
|
||||
size_t padding, datalen;
|
||||
int rx_count;
|
||||
|
||||
/*
|
||||
|
@ -646,15 +647,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
|
|||
if (asoc->ep->rcvbuf_policy)
|
||||
rx_count = atomic_read(&asoc->rmem_alloc);
|
||||
else
|
||||
rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
|
||||
rx_count = atomic_read(&sk->sk_rmem_alloc);
|
||||
|
||||
if (rx_count >= asoc->base.sk->sk_rcvbuf) {
|
||||
datalen = ntohs(chunk->chunk_hdr->length);
|
||||
|
||||
if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
|
||||
(!sk_rmem_schedule(asoc->base.sk, chunk->skb,
|
||||
chunk->skb->truesize)))
|
||||
goto fail;
|
||||
}
|
||||
if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
|
||||
goto fail;
|
||||
|
||||
/* Clone the original skb, sharing the data. */
|
||||
skb = skb_clone(chunk->skb, gfp);
|
||||
|
@ -681,8 +679,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
|
|||
* The sender should never pad with more than 3 bytes. The receiver
|
||||
* MUST ignore the padding bytes.
|
||||
*/
|
||||
len = ntohs(chunk->chunk_hdr->length);
|
||||
padding = SCTP_PAD4(len) - len;
|
||||
padding = SCTP_PAD4(datalen) - datalen;
|
||||
|
||||
/* Fixup cloned skb with just this chunks data. */
|
||||
skb_trim(skb, chunk->chunk_end - padding - skb->data);
|
||||
|
|
|
@ -1104,7 +1104,8 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
|
|||
freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
|
||||
}
|
||||
/* If able to free enough room, accept this chunk. */
|
||||
if (freed >= needed) {
|
||||
if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
|
||||
freed >= needed) {
|
||||
int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
|
||||
/*
|
||||
* Enter partial delivery if chunk has not been
|
||||
|
|
Loading…
Reference in New Issue