Merge branch 'sctp-fully-support-memory-accounting'
Xin Long says:

====================
sctp: fully support memory accounting

sctp memory accounting is added in this patchset by using these
kernel APIs on the send side:

  - sk_mem_charge()
  - sk_mem_uncharge()
  - sk_wmem_schedule()
  - sk_under_memory_pressure()
  - sk_mem_reclaim()

and these on the receive side:

  - sk_mem_charge()
  - sk_mem_uncharge()
  - sk_rmem_schedule()
  - sk_under_memory_pressure()
  - sk_mem_reclaim()

With sctp memory accounting, we can limit the memory allocation by
either sysctl:

  # sysctl -w net.sctp.sctp_mem="10 20 50"

or cgroup:

  # echo $((8<<14)) > \
    /sys/fs/cgroup/memory/sctp_mem/memory.kmem.tcp.limit_in_bytes

When the socket is under memory pressure, the send side will block
and wait, while the receive side will renege or drop.

v1->v2:
  - add the missing Reported/Tested/Acked-bys.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c7cf89b5dd
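
Before the diff, a sketch of the pattern the patchset wires in. The toy
userspace model below is not kernel code: every name in it (toy_sock,
toy_wmem_schedule, PROTO_LIMIT, and so on) is an invented stand-in for the
real sk_wmem_schedule()/sk_mem_charge()/sk_mem_reclaim() and the
net.sctp.sctp_mem limit. It compiles as plain C and only illustrates the
bookkeeping: schedule pre-charges page-sized chunks against a protocol-wide
budget, charge consumes from the pre-charged amount, and reclaim hands whole
unused pages back.

/* Toy model of socket memory accounting; all names are invented
 * stand-ins, not the kernel's real types or helpers. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE   4096
#define PROTO_LIMIT (16 * PAGE_SIZE)   /* stand-in for sctp_mem[2] */

static long proto_allocated;           /* pages charged protocol-wide */

struct toy_sock {
	long forward_alloc;            /* bytes pre-charged to this socket */
};

/* Mirrors the role of sk_wmem_schedule(): make sure @size bytes are
 * pre-charged, taking more pages from the protocol budget if needed. */
static bool toy_wmem_schedule(struct toy_sock *sk, int size)
{
	while (sk->forward_alloc < size) {
		if ((proto_allocated + 1) * PAGE_SIZE > PROTO_LIMIT)
			return false;  /* over limit: caller must wait */
		proto_allocated++;
		sk->forward_alloc += PAGE_SIZE;
	}
	return true;
}

/* Mirrors the role of sk_mem_charge(): consume pre-charged bytes. */
static void toy_mem_charge(struct toy_sock *sk, int size)
{
	sk->forward_alloc -= size;
}

/* Mirrors the role of sk_mem_reclaim(): return whole unused pages. */
static void toy_mem_reclaim(struct toy_sock *sk)
{
	long pages = sk->forward_alloc / PAGE_SIZE;

	proto_allocated -= pages;
	sk->forward_alloc -= pages * PAGE_SIZE;
}

int main(void)
{
	struct toy_sock sk = { 0 };
	int i;

	for (i = 0; i < 40; i++) {
		if (!toy_wmem_schedule(&sk, 3000)) {
			printf("send %d: would block (budget exhausted)\n", i);
			break;
		}
		toy_mem_charge(&sk, 3000);
	}
	toy_mem_reclaim(&sk);
	printf("pages still allocated after reclaim: %ld\n", proto_allocated);
	return 0;
}

Keeping the charge granularity at whole pages means most sends touch only the
per-socket forward_alloc counter; the shared protocol-wide counter is updated
comparatively rarely.
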
include/net/sctp/sctp.h
@@ -421,7 +421,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	/*
 	 * This mimics the behavior of skb_set_owner_r
 	 */
-	sk->sk_forward_alloc -= event->rmem_len;
+	sk_mem_charge(sk, event->rmem_len);
 }
 
 /* Tests if the list has one and only one entry. */
net/sctp/sm_statefuns.c
@@ -6412,13 +6412,15 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
 	 * memory usage too much
 	 */
-	if (*sk->sk_prot_creator->memory_pressure) {
+	if (sk_under_memory_pressure(sk)) {
 		if (sctp_tsnmap_has_gap(map) &&
 		    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
 			pr_debug("%s: under pressure, reneging for tsn:%u\n",
 				 __func__, tsn);
 			deliver = SCTP_CMD_RENEGE;
-		}
+		} else {
+			sk_mem_reclaim(sk);
+		}
 	}
 
 	/*
net/sctp/socket.c
@@ -1913,7 +1913,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
 	if (sctp_wspace(asoc) < (int)msg_len)
 		sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
 
-	if (sctp_wspace(asoc) <= 0) {
+	if (sk_under_memory_pressure(sk))
+		sk_mem_reclaim(sk);
+
+	if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
 		timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
 		if (err)
@@ -8930,7 +8933,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 			goto do_error;
 		if (signal_pending(current))
 			goto do_interrupted;
-		if ((int)msg_len <= sctp_wspace(asoc))
+		if (sk_under_memory_pressure(sk))
+			sk_mem_reclaim(sk);
+		if ((int)msg_len <= sctp_wspace(asoc) &&
+		    sk_wmem_schedule(sk, msg_len))
 			break;
 
 		/* Let another process have a go. Since we are going
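
The two net/sctp/socket.c hunks above decide when a sender has to wait. A
minimal way to observe the effect from userspace might look like the sketch
below; it is an illustration, not part of the patch, and assumes an
SCTP-capable kernel, net.sctp.sctp_mem set low enough to trigger pressure,
and a peer already listening on 127.0.0.1 port 9999 (address and port are
made up for the example). Because MSG_DONTWAIT gives sctp_wait_for_sndbuf()
a zero timeout, a send that would otherwise block comes back as EAGAIN.

/* Illustrative probe only: floods a one-to-one SCTP socket with
 * non-blocking sends until the kernel refuses to accept more. */
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in peer = {
		.sin_family = AF_INET,
		.sin_port   = htons(9999),              /* assumed peer */
		.sin_addr   = { htonl(INADDR_LOOPBACK) },
	};
	char buf[8192] = { 0 };
	int fd, i;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0 || connect(fd, (struct sockaddr *)&peer, sizeof(peer))) {
		perror("socket/connect");
		return 1;
	}
	for (i = 0; i < 100000; i++) {
		if (send(fd, buf, sizeof(buf), MSG_DONTWAIT) < 0) {
			/* EAGAIN here is the send side declining to grow
			 * past the accounted limit. */
			printf("send %d: %s\n", i, strerror(errno));
			break;
		}
	}
	close(fd);
	return 0;
}
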
net/sctp/ulpevent.c
@@ -634,8 +634,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 						gfp_t gfp)
 {
 	struct sctp_ulpevent *event = NULL;
-	struct sk_buff *skb;
-	size_t padding, len;
+	struct sk_buff *skb = chunk->skb;
+	struct sock *sk = asoc->base.sk;
+	size_t padding, datalen;
 	int rx_count;
 
 	/*
@@ -646,15 +647,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	if (asoc->ep->rcvbuf_policy)
 		rx_count = atomic_read(&asoc->rmem_alloc);
 	else
-		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+		rx_count = atomic_read(&sk->sk_rmem_alloc);
 
-	if (rx_count >= asoc->base.sk->sk_rcvbuf) {
+	datalen = ntohs(chunk->chunk_hdr->length);
 
-		if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb,
-				       chunk->skb->truesize)))
-			goto fail;
-	}
+	if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
+		goto fail;
 
 	/* Clone the original skb, sharing the data. */
 	skb = skb_clone(chunk->skb, gfp);
@@ -681,8 +679,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	 * The sender should never pad with more than 3 bytes. The receiver
 	 * MUST ignore the padding bytes.
 	 */
-	len = ntohs(chunk->chunk_hdr->length);
-	padding = SCTP_PAD4(len) - len;
+	padding = SCTP_PAD4(datalen) - datalen;
 
 	/* Fixup cloned skb with just this chunks data. */
 	skb_trim(skb, chunk->chunk_end - padding - skb->data);
net/sctp/ulpqueue.c
@@ -1104,7 +1104,8 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
 	}
 	/* If able to free enough room, accept this chunk. */
-	if (freed >= needed) {
+	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
+	    freed >= needed) {
 		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
 		/*
 		 * Enter partial delivery if chunk has not been