@@ -171,9 +171,6 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
  */
 
 /* spin lock wrappers. */
-#define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
-#define sctp_spin_unlock_irqrestore(lock, flags) \
-	spin_unlock_irqrestore(lock, flags)
 #define sctp_local_bh_disable() local_bh_disable()
 #define sctp_local_bh_enable() local_bh_enable()
 #define sctp_spin_lock(lock) spin_lock(lock)
@@ -353,13 +350,13 @@ static inline void sctp_skb_list_tail(struct sk_buff_head *list,
 {
 	unsigned long flags;
 
-	sctp_spin_lock_irqsave(&head->lock, flags);
+	spin_lock_irqsave(&head->lock, flags);
 	sctp_spin_lock(&list->lock);
 
 	skb_queue_splice_tail_init(list, head);
 
 	sctp_spin_unlock(&list->lock);
-	sctp_spin_unlock_irqrestore(&head->lock, flags);
+	spin_unlock_irqrestore(&head->lock, flags);
 }
 
 /**
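
For reference, here is a minimal sketch of how sctp_skb_list_tail() reads once
this hunk is applied, reassembled from the context lines above. The name of the
second parameter ("head") is an assumption inferred from the &head->lock uses,
since the hunk header truncates the signature; <linux/skbuff.h> and
<linux/spinlock.h> are assumed to be in scope, as they are for sctp.h.

/* Sketch only: parameter name "head" is inferred, not taken verbatim
 * from the (truncated) hunk header.
 */
static inline void sctp_skb_list_tail(struct sk_buff_head *list,
				      struct sk_buff_head *head)
{
	unsigned long flags;

	/* Lock the destination queue with IRQs disabled, then nest the
	 * source queue's lock. This patch drops only the irqsave wrappers,
	 * so the inner lock still goes through sctp_spin_lock().
	 */
	spin_lock_irqsave(&head->lock, flags);
	sctp_spin_lock(&list->lock);

	/* Move every skb from list onto the tail of head; list is left
	 * reinitialized to empty.
	 */
	skb_queue_splice_tail_init(list, head);

	/* Release in reverse order of acquisition. */
	sctp_spin_unlock(&list->lock);
	spin_unlock_irqrestore(&head->lock, flags);
}

Since the removed macros expanded directly to spin_lock_irqsave() and
spin_unlock_irqrestore(), substituting the raw calls at the only call site
changes no behavior; it just removes a layer of indirection.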