@@ -1319,7 +1319,8 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	struct audit_buffer *ab = NULL;
 	struct timespec t;
 	unsigned int uninitialized_var(serial);
-	int reserve;
+	int reserve = 5; /* Allow atomic callers to go up to five
+			    entries over the normal backlog limit */
 	unsigned long timeout_start = jiffies;
 
 	if (audit_initialized != AUDIT_INITIALIZED)
@@ -1328,11 +1329,12 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	if (unlikely(audit_filter_type(type)))
 		return NULL;
 
-	if (gfp_mask & __GFP_WAIT)
-		reserve = 0;
-	else
-		reserve = 5; /* Allow atomic callers to go up to five
-			      entries over the normal backlog limit */
+	if (gfp_mask & __GFP_WAIT) {
+		if (audit_pid && audit_pid == current->pid)
+			gfp_mask &= ~__GFP_WAIT;
+		else
+			reserve = 0;
+	}
 
 	while (audit_backlog_limit
 	       && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {