[kernel] r18011 - dists/squeeze-security/linux-2.6/debian/patches/bugfix/all

Dann Frazier dannf at alioth.debian.org
Fri Aug 26 23:06:26 UTC 2011


Author: dannf
Date: Fri Aug 26 23:06:25 2011
New Revision: 18011

Log:
Remove remaining 'nmi' reference; additional fix for CVE-2011-2918

Modified:
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/perf-remove-the-nmi-parameter-from-the-swevent-and-overflow-interface.patch

Modified: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/perf-remove-the-nmi-parameter-from-the-swevent-and-overflow-interface.patch
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/perf-remove-the-nmi-parameter-from-the-swevent-and-overflow-interface.patch	Fri Aug 26 03:51:39 2011	(r18010)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/perf-remove-the-nmi-parameter-from-the-swevent-and-overflow-interface.patch	Fri Aug 26 23:06:25 2011	(r18011)
@@ -319,7 +319,7 @@
  
  static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
-index fc5ab8e..62450ac 100644
+index fc5ab8e..a7e1d8c 100644
 --- a/kernel/perf_event.c
 +++ b/kernel/perf_event.c
 @@ -1157,7 +1157,7 @@ void perf_event_task_sched_out(struct task_struct *task,
@@ -331,7 +331,23 @@
  
  	if (likely(!ctx || !cpuctx->task_ctx))
  		return;
-@@ -2773,7 +2773,7 @@ void perf_output_copy(struct perf_output_handle *handle,
+@@ -2647,12 +2647,9 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
+ {
+ 	atomic_set(&handle->data->poll, POLL_IN);
+ 
+-	if (handle->nmi) {
+-		handle->event->pending_wakeup = 1;
+-		perf_pending_queue(&handle->event->pending,
+-				   perf_pending_event);
+-	} else
+-		perf_event_wakeup(handle->event);
++	handle->event->pending_wakeup = 1;
++	perf_pending_queue(&handle->event->pending,
++			   perf_pending_event);
+ }
+ 
+ /*
+@@ -2773,7 +2770,7 @@ void perf_output_copy(struct perf_output_handle *handle,
  
  int perf_output_begin(struct perf_output_handle *handle,
  		      struct perf_event *event, unsigned int size,
@@ -340,7 +356,7 @@
  {
  	struct perf_event *output_event;
  	struct perf_mmap_data *data;
-@@ -2802,7 +2802,6 @@ int perf_output_begin(struct perf_output_handle *handle,
+@@ -2802,7 +2799,6 @@ int perf_output_begin(struct perf_output_handle *handle,
  
  	handle->data	= data;
  	handle->event	= event;
@@ -348,7 +364,7 @@
  	handle->sample	= sample;
  
  	if (!data->nr_pages)
-@@ -3126,7 +3125,7 @@ void perf_prepare_sample(struct perf_event_header *header,
+@@ -3126,7 +3122,7 @@ void perf_prepare_sample(struct perf_event_header *header,
  	}
  }
  
@@ -357,7 +373,7 @@
  				struct perf_sample_data *data,
  				struct pt_regs *regs)
  {
-@@ -3135,7 +3134,7 @@ static void perf_event_output(struct perf_event *event, int nmi,
+@@ -3135,7 +3131,7 @@ static void perf_event_output(struct perf_event *event, int nmi,
  
  	perf_prepare_sample(&header, data, event, regs);
  
@@ -366,7 +382,7 @@
  		return;
  
  	perf_output_sample(&handle, &header, data, event);
-@@ -3170,7 +3169,7 @@ perf_event_read_event(struct perf_event *event,
+@@ -3170,7 +3166,7 @@ perf_event_read_event(struct perf_event *event,
  	};
  	int ret;
  
@@ -375,7 +391,7 @@
  	if (ret)
  		return;
  
-@@ -3210,7 +3209,7 @@ static void perf_event_task_output(struct perf_event *event,
+@@ -3210,7 +3206,7 @@ static void perf_event_task_output(struct perf_event *event,
  	int ret;
  
  	size  = task_event->event_id.header.size;
@@ -384,7 +400,7 @@
  
  	if (ret)
  		return;
-@@ -3332,7 +3331,7 @@ static void perf_event_comm_output(struct perf_event *event,
+@@ -3332,7 +3328,7 @@ static void perf_event_comm_output(struct perf_event *event,
  {
  	struct perf_output_handle handle;
  	int size = comm_event->event_id.header.size;
@@ -393,7 +409,7 @@
  
  	if (ret)
  		return;
-@@ -3461,7 +3460,7 @@ static void perf_event_mmap_output(struct perf_event *event,
+@@ -3461,7 +3457,7 @@ static void perf_event_mmap_output(struct perf_event *event,
  {
  	struct perf_output_handle handle;
  	int size = mmap_event->event_id.header.size;
@@ -402,7 +418,7 @@
  
  	if (ret)
  		return;
-@@ -3632,7 +3631,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
+@@ -3632,7 +3628,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
  	if (enable)
  		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
  
@@ -411,7 +427,7 @@
  	if (ret)
  		return;
  
-@@ -3644,7 +3643,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
+@@ -3644,7 +3640,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
   * Generic event overflow handling, sampling.
   */
  
@@ -420,7 +436,7 @@
  				   int throttle, struct perf_sample_data *data,
  				   struct pt_regs *regs)
  {
-@@ -3694,23 +3693,20 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
+@@ -3694,23 +3690,20 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
  	if (events && atomic_dec_and_test(&event->event_limit)) {
  		ret = 1;
  		event->pending_kill = POLL_HUP;
@@ -450,7 +466,7 @@
  }
  
  /*
-@@ -3748,7 +3744,7 @@ again:
+@@ -3748,7 +3741,7 @@ again:
  }
  
  static void perf_swevent_overflow(struct perf_event *event,
@@ -459,7 +475,7 @@
  				    struct pt_regs *regs)
  {
  	struct hw_perf_event *hwc = &event->hw;
-@@ -3762,7 +3758,7 @@ static void perf_swevent_overflow(struct perf_event *event,
+@@ -3762,7 +3755,7 @@ static void perf_swevent_overflow(struct perf_event *event,
  		return;
  
  	for (; overflow; overflow--) {
@@ -468,7 +484,7 @@
  					    data, regs)) {
  			/*
  			 * We inhibit the overflow from happening when
-@@ -3782,7 +3778,7 @@ static void perf_swevent_unthrottle(struct perf_event *event)
+@@ -3782,7 +3775,7 @@ static void perf_swevent_unthrottle(struct perf_event *event)
  }
  
  static void perf_swevent_add(struct perf_event *event, u64 nr,
@@ -477,7 +493,7 @@
  			       struct pt_regs *regs)
  {
  	struct hw_perf_event *hwc = &event->hw;
-@@ -3796,7 +3792,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
+@@ -3796,7 +3789,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
  		return;
  
  	if (!atomic64_add_negative(nr, &hwc->period_left))
@@ -486,7 +502,7 @@
  }
  
  static int perf_swevent_is_counting(struct perf_event *event)
-@@ -3857,7 +3853,7 @@ static int perf_swevent_match(struct perf_event *event,
+@@ -3857,7 +3850,7 @@ static int perf_swevent_match(struct perf_event *event,
  
  static void perf_swevent_ctx_event(struct perf_event_context *ctx,
  				     enum perf_type_id type,
@@ -495,7 +511,7 @@
  				     struct perf_sample_data *data,
  				     struct pt_regs *regs)
  {
-@@ -3869,7 +3865,7 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
+@@ -3869,7 +3862,7 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
  	rcu_read_lock();
  	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
  		if (perf_swevent_match(event, type, event_id, regs))
@@ -504,7 +520,7 @@
  	}
  	rcu_read_unlock();
  }
-@@ -3889,7 +3885,7 @@ static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
+@@ -3889,7 +3882,7 @@ static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
  }
  
  static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
@@ -513,7 +529,7 @@
  				    struct perf_sample_data *data,
  				    struct pt_regs *regs)
  {
-@@ -3904,7 +3900,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
+@@ -3904,7 +3897,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
  	barrier();
  
  	perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
@@ -522,7 +538,7 @@
  	rcu_read_lock();
  	/*
  	 * doesn't really matter which of the child contexts the
-@@ -3912,7 +3908,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
+@@ -3912,7 +3905,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
  	 */
  	ctx = rcu_dereference(current->perf_event_ctxp);
  	if (ctx)
@@ -531,7 +547,7 @@
  	rcu_read_unlock();
  
  	barrier();
-@@ -3922,14 +3918,14 @@ out:
+@@ -3922,14 +3915,14 @@ out:
  	put_cpu_var(perf_cpu_context);
  }
  
@@ -548,7 +564,7 @@
  				&data, regs);
  }
  
-@@ -3987,7 +3983,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+@@ -3987,7 +3980,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
  
  	if (regs) {
  		if (!(event->attr.exclude_idle && current->pid == 0))
@@ -557,7 +573,7 @@
  				ret = HRTIMER_NORESTART;
  	}
  
-@@ -4153,7 +4149,7 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
+@@ -4153,7 +4146,7 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
  	if (!regs)
  		regs = task_pt_regs(current);
  



More information about the Kernel-svn-changes mailing list