|
@@ -194,6 +194,7 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
|
|
|
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
|
|
|
{
|
|
|
struct sock *sk = ulpq->asoc->base.sk;
|
|
|
+ struct sctp_sock *sp = sctp_sk(sk);
|
|
|
struct sk_buff_head *queue, *skb_list;
|
|
|
struct sk_buff *skb = sctp_event2skb(event);
|
|
|
int clear_pd = 0;
|
|
@@ -211,7 +212,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
|
|
|
sk_incoming_cpu_update(sk);
|
|
|
}
|
|
|
/* Check if the user wishes to receive this event. */
|
|
|
- if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
|
|
|
+ if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
|
|
|
goto out_free;
|
|
|
|
|
|
/* If we are in partial delivery mode, post to the lobby until
|
|
@@ -219,7 +220,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
|
|
|
* the association the cause of the partial delivery.
|
|
|
*/
|
|
|
|
|
|
- if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
|
|
|
+ if (atomic_read(&sp->pd_mode) == 0) {
|
|
|
queue = &sk->sk_receive_queue;
|
|
|
} else {
|
|
|
if (ulpq->pd_mode) {
|
|
@@ -231,7 +232,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
|
|
|
if ((event->msg_flags & MSG_NOTIFICATION) ||
|
|
|
(SCTP_DATA_NOT_FRAG ==
|
|
|
(event->msg_flags & SCTP_DATA_FRAG_MASK)))
|
|
|
- queue = &sctp_sk(sk)->pd_lobby;
|
|
|
+ queue = &sp->pd_lobby;
|
|
|
else {
|
|
|
clear_pd = event->msg_flags & MSG_EOR;
|
|
|
queue = &sk->sk_receive_queue;
|
|
@@ -242,10 +243,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
|
|
|
* can queue this to the receive queue instead
|
|
|
* of the lobby.
|
|
|
*/
|
|
|
- if (sctp_sk(sk)->frag_interleave)
|
|
|
+ if (sp->frag_interleave)
|
|
|
queue = &sk->sk_receive_queue;
|
|
|
else
|
|
|
- queue = &sctp_sk(sk)->pd_lobby;
|
|
|
+ queue = &sp->pd_lobby;
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -264,8 +265,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
|
|
|
if (clear_pd)
|
|
|
sctp_ulpq_clear_pd(ulpq);
|
|
|
|
|
|
- if (queue == &sk->sk_receive_queue)
|
|
|
- sctp_sk(sk)->pending_data_ready = 1;
|
|
|
+ if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
|
|
|
+ sp->data_ready_signalled = 1;
|
|
|
+ sk->sk_data_ready(sk);
|
|
|
+ }
|
|
|
return 1;
|
|
|
|
|
|
out_free:
|
|
@@ -1126,11 +1129,13 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
|
|
|
{
|
|
|
struct sctp_ulpevent *ev = NULL;
|
|
|
struct sock *sk;
|
|
|
+ struct sctp_sock *sp;
|
|
|
|
|
|
if (!ulpq->pd_mode)
|
|
|
return;
|
|
|
|
|
|
sk = ulpq->asoc->base.sk;
|
|
|
+ sp = sctp_sk(sk);
|
|
|
- if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
|
|
|
- &sctp_sk(sk)->subscribe))
|
|
|
+ if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
|
|
|
+ &sp->subscribe))
|
|
|
ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
|
|
@@ -1140,6 +1145,8 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
|
|
|
__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
|
|
|
|
|
|
/* If there is data waiting, send it up the socket now. */
|
|
|
- if (sctp_ulpq_clear_pd(ulpq) || ev)
|
|
|
- sctp_sk(sk)->pending_data_ready = 1;
|
|
|
+ if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
|
|
|
+ sp->data_ready_signalled = 1;
|
|
|
+ sk->sk_data_ready(sk);
|
|
|
+ }
|
|
|
}
|