# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 899f7b4b19fc364a83650b1e2466393a7bb08ba6
# Parent 349b302f29e2205c44df532e7826fc0cb7c17356
Upgrade tree to 2.6.12.6.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
diff -r 349b302f29e2 -r 899f7b4b19fc patches/linux-2.6.12/2.6.12.6.patch
--- /dev/null Fri Oct 21 09:24:35 2005
+++ b/patches/linux-2.6.12/2.6.12.6.patch Fri Oct 21 09:46:30 2005
@@ -0,0 +1,1738 @@
+diff --git a/Makefile b/Makefile
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 12
+-EXTRAVERSION =
++EXTRAVERSION = .6
+ NAME=Woozy Numbat
+
+ # *DOCUMENTATION*
+@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
+ #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
+ #Adding $(srctree) adds about 20M on i386 to the size of the output file!
+
+-ifeq ($(KBUILD_OUTPUT),)
++ifeq ($(src),$(obj))
+ __srctree =
+ else
+ __srctree = $(srctree)/
+diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
++++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+@@ -44,7 +44,7 @@
+
+ #define PFX "powernow-k8: "
+ #define BFX PFX "BIOS error: "
+-#define VERSION "version 1.40.2"
++#define VERSION "version 1.40.4"
+ #include "powernow-k8.h"
+
+ /* serialize freq changes */
+@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
+ {
+ struct powernow_k8_data *data;
+ cpumask_t oldmask = CPU_MASK_ALL;
+- int rc;
++ int rc, i;
+
+ if (!check_supported_cpu(pol->cpu))
+ return -ENODEV;
+@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
+ printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+
+- powernow_data[pol->cpu] = data;
++ for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
++ powernow_data[i] = data;
++ }
+
+ return 0;
+
+diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
+--- a/arch/i386/kernel/process.c
++++ b/arch/i386/kernel/process.c
+@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
++ memset(&info, 0, sizeof(info));
++
+ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+ info.entry_number = idx;
+diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
+--- a/arch/ia64/kernel/ptrace.c
++++ b/arch/ia64/kernel/ptrace.c
+@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
+ *data = (pt->cr_ipsr & IPSR_MASK);
+ return 0;
+
++ case PT_AR_RSC:
++ if (write_access)
++ pt->ar_rsc = *data | (3 << 2); /* force PL3 */
++ else
++ *data = pt->ar_rsc;
++ return 0;
++
+ case PT_AR_RNAT:
+ urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
+ rnat_addr = (long) ia64_rse_rnat_addr((long *)
+@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
+ case PT_AR_BSPSTORE:
+ ptr = pt_reg_addr(pt, ar_bspstore);
+ break;
+- case PT_AR_RSC:
+- ptr = pt_reg_addr(pt, ar_rsc);
+- break;
+ case PT_AR_UNAT:
+ ptr = pt_reg_addr(pt, ar_unat);
+ break;
+@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
+ static long
+ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
+ {
+- unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
++ unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
+ struct unw_frame_info info;
+ struct switch_stack *sw;
+ struct ia64_fpreg fpval;
+@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
+ /* app regs */
+
+ retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
+- retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
++ retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
+ retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
+ retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
+ retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
+@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
+ retval |= __get_user(nat_bits, &ppr->nat);
+
+ retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
++ retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
+ retval |= access_uarea(child, PT_AR_EC, &ec, 1);
+ retval |= access_uarea(child, PT_AR_LC, &lc, 1);
+ retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
+diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
+--- a/arch/ia64/kernel/signal.c
++++ b/arch/ia64/kernel/signal.c
+@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
+ static long
+ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
+ {
+- unsigned long ip, flags, nat, um, cfm;
++ unsigned long ip, flags, nat, um, cfm, rsc;
+ long err;
+
+ /* Always make any pending restarted system calls return -EINTR */
+@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
+ err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */
+ err |= __get_user(cfm, &sc->sc_cfm);
+ err |= __get_user(um, &sc->sc_um); /* user mask */
+- err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
++ err |= __get_user(rsc, &sc->sc_ar_rsc);
+ err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
+ err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
+ err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
+@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
+ err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */
+
+ scr->pt.cr_ifs = cfm | (1UL << 63);
++ scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
+
+ /* establish new instruction pointer: */
+ scr->pt.cr_iip = ip & ~0x3UL;
+diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
+--- a/arch/ppc/kernel/time.c
++++ b/arch/ppc/kernel/time.c
+@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
+
+ extern unsigned long wall_jiffies;
+
++/* used for timezone offset */
++static long timezone_offset;
++
+ DEFINE_SPINLOCK(rtc_lock);
+
+ EXPORT_SYMBOL(rtc_lock);
+@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
+ xtime.tv_sec - last_rtc_update >= 659 &&
+ abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
+ jiffies - wall_jiffies == 1) {
+- if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
++ if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
+ last_rtc_update = xtime.tv_sec+1;
+ else
+ /* Try again one minute later */
+@@ -286,7 +289,7 @@ void __init time_init(void)
+ unsigned old_stamp, stamp, elapsed;
+
+ if (ppc_md.time_init != NULL)
+- time_offset = ppc_md.time_init();
++ timezone_offset = ppc_md.time_init();
+
+ if (__USE_RTC()) {
+ /* 601 processor: dec counts down by 128 every 128ns */
+@@ -331,10 +334,10 @@ void __init time_init(void)
+ set_dec(tb_ticks_per_jiffy);
+
+ /* If platform provided a timezone (pmac), we correct the time */
+- if (time_offset) {
+- sys_tz.tz_minuteswest = -time_offset / 60;
++ if (timezone_offset) {
++ sys_tz.tz_minuteswest = -timezone_offset / 60;
+ sys_tz.tz_dsttime = 0;
+- xtime.tv_sec -= time_offset;
++ xtime.tv_sec -= timezone_offset;
+ }
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
+--- a/arch/ppc64/boot/zlib.c
++++ b/arch/ppc64/boot/zlib.c
+@@ -1307,7 +1307,7 @@ local int huft_build(
+ {
+ *t = (inflate_huft *)Z_NULL;
+ *m = 0;
+- return Z_OK;
++ return Z_DATA_ERROR;
+ }
+
+
+@@ -1351,6 +1351,7 @@ local int huft_build(
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
++ n = x[g]; /* set n to length of v */
+
+
+ /* Generate the Huffman codes and for each, make the table entries */
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
+ return(arg.pid);
+ }
+
+-static int ptrace_child(void)
++static int ptrace_child(void *arg)
+ {
+ int ret;
+ int pid = os_getpid(), ppid = getppid();
+@@ -159,16 +159,20 @@ static int ptrace_child(void)
+ _exit(ret);
+ }
+
+-static int start_ptraced_child(void)
++static int start_ptraced_child(void **stack_out)
+ {
++ void *stack;
++ unsigned long sp;
+ int pid, n, status;
+
+- pid = fork();
+- if(pid == 0)
+- ptrace_child();
+-
++ stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
++ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
++ if(stack == MAP_FAILED)
++ panic("check_ptrace : mmap failed, errno = %d", errno);
++ sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
++ pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
+ if(pid < 0)
+- panic("check_ptrace : fork failed, errno = %d", errno);
++ panic("check_ptrace : clone failed, errno = %d", errno);
+ CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+ if(n < 0)
+ panic("check_ptrace : wait failed, errno = %d", errno);
+@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
+ panic("check_ptrace : expected SIGSTOP, got status = %d",
+ status);
+
++ *stack_out = stack;
+ return(pid);
+ }
+
+@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
+ * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
+ * So only for SYSEMU features we test mustpanic, while normal host features
+ * must work anyway!*/
+-static int stop_ptraced_child(int pid, int exitcode, int mustexit)
++static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
+ {
+ int status, n, ret = 0;
+
+ if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
+- panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
++ panic("check_ptrace : ptrace failed, errno = %d", errno);
+ CATCH_EINTR(n = waitpid(pid, &status, 0));
+ if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
+ int exit_with = WEXITSTATUS(status);
+@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
+ printk("check_ptrace : child exited with exitcode %d, while "
+ "expecting %d; status 0x%x", exit_with,
+ exitcode, status);
+- if (mustexit)
++ if (mustpanic)
+ panic("\n");
+ else
+ printk("\n");
+ ret = -1;
+ }
+
++ if(munmap(stack, PAGE_SIZE) < 0)
++ panic("check_ptrace : munmap failed, errno = %d", errno);
+ return ret;
+ }
+
+@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
+
+ static void __init check_sysemu(void)
+ {
++ void *stack;
+ int pid, syscall, n, status, count=0;
+
+ printk("Checking syscall emulation patch for ptrace...");
+ sysemu_supported = 0;
+- pid = start_ptraced_child();
++ pid = start_ptraced_child(&stack);
+
+ if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
+ goto fail;
+@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
+ panic("check_sysemu : failed to modify system "
+ "call return, errno = %d", errno);
+
+- if (stop_ptraced_child(pid, 0, 0) < 0)
++ if (stop_ptraced_child(pid, stack, 0, 0) < 0)
+ goto fail_stopped;
+
+ sysemu_supported = 1;
+@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
+ set_using_sysemu(!force_sysemu_disabled);
+
+ printk("Checking advanced syscall emulation patch for ptrace...");
+- pid = start_ptraced_child();
++ pid = start_ptraced_child(&stack);
+ while(1){
+ count++;
+ if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
+@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
+ break;
+ }
+ }
+- if (stop_ptraced_child(pid, 0, 0) < 0)
++ if (stop_ptraced_child(pid, stack, 0, 0) < 0)
+ goto fail_stopped;
+
+ sysemu_supported = 2;
+@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
+ return;
+
+ fail:
+- stop_ptraced_child(pid, 1, 0);
++ stop_ptraced_child(pid, stack, 1, 0);
+ fail_stopped:
+ printk("missing\n");
+ }
+
+ void __init check_ptrace(void)
+ {
++ void *stack;
+ int pid, syscall, n, status;
+
+ printk("Checking that ptrace can change system call numbers...");
+- pid = start_ptraced_child();
++ pid = start_ptraced_child(&stack);
+
+ if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
+ panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d",
errno);
+@@ -330,7 +339,7 @@ void __init check_ptrace(void)
+ break;
+ }
+ }
+- stop_ptraced_child(pid, 0, 1);
++ stop_ptraced_child(pid, stack, 0, 1);
+ printk("OK\n");
+ check_sysemu();
+ }
+@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
+ static inline int check_skas3_ptrace_support(void)
+ {
+ struct ptrace_faultinfo fi;
++ void *stack;
+ int pid, n, ret = 1;
+
+ printf("Checking for the skas3 patch in the host...");
+- pid = start_ptraced_child();
++ pid = start_ptraced_child(&stack);
+
+ n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
+ if (n < 0) {
+@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
+ }
+
+ init_registers(pid);
+- stop_ptraced_child(pid, 1, 1);
++ stop_ptraced_child(pid, stack, 1, 1);
+
+ return(ret);
+ }
+diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
+--- a/arch/x86_64/ia32/syscall32.c
++++ b/arch/x86_64/ia32/syscall32.c
+@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
+ int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
++ int ret;
+
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma)
+@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
+ vma->vm_mm = mm;
+
+ down_write(&mm->mmap_sem);
+- insert_vm_struct(mm, vma);
++ if ((ret = insert_vm_struct(mm, vma))) {
++ up_write(&mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return ret;
++ }
+ mm->total_vm += npages;
+ up_write(&mm->mmap_sem);
+ return 0;
+diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
+--- a/arch/x86_64/kernel/setup.c
++++ b/arch/x86_64/kernel/setup.c
+@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
+ int cpu = smp_processor_id();
+ int node = 0;
+ unsigned bits;
+- if (c->x86_num_cores == 1)
+- return;
+
+ bits = 0;
+ while ((1 << bits) < c->x86_num_cores)
+diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
+--- a/arch/x86_64/kernel/smp.c
++++ b/arch/x86_64/kernel/smp.c
+@@ -284,6 +284,71 @@ struct call_data_struct {
+ static struct call_data_struct * call_data;
+
+ /*
++ * this function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ */
++static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ struct call_data_struct data;
++ int cpus = 1;
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ wmb();
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (!wait)
++ return;
++
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++}
++
++/*
++ * Run a function on another CPU
++ * <func> The function to run. This must be fast and non-blocking.
++ * <info> An arbitrary pointer to pass to the function.
++ * <nonatomic> Currently unused.
++ * <wait> If true, wait until function has completed on other CPUs.
++ * [RETURNS] 0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>
++ * or is or has executed.
++ */
++
++int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++
++ int me = get_cpu(); /* prevent preemption and reschedule on another processor */
++
++ if (cpu == me) {
++ printk("%s: trying to call self\n", __func__);
++ put_cpu();
++ return -EBUSY;
++ }
++ spin_lock_bh(&call_lock);
++
++ __smp_call_function_single(cpu, func,info,nonatomic,wait);
++
++ spin_unlock_bh(&call_lock);
++ put_cpu();
++ return 0;
++}
++
++/*
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+ */
+diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
+--- a/arch/x86_64/kernel/smpboot.c
++++ b/arch/x86_64/kernel/smpboot.c
+@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
+ {
+ unsigned long flags, i;
+
+- if (smp_processor_id() != boot_cpu_id)
+- return;
+-
+ go[MASTER] = 0;
+
+ local_irq_save(flags);
+@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
+ return tcenter - best_tm;
+ }
+
+-static __cpuinit void sync_tsc(void)
++static __cpuinit void sync_tsc(unsigned int master)
+ {
+ int i, done = 0;
+ long delta, adj, adjust_latency = 0;
+@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
+ } t[NUM_ROUNDS] __cpuinitdata;
+ #endif
+
++ printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
++ smp_processor_id(), master);
++
+ go[MASTER] = 1;
+
+- smp_call_function(sync_master, NULL, 1, 0);
++ /* It is dangerous to broadcast IPI as cpus are coming up,
++ * as they may not be ready to accept them. So since
++ * we only need to send the ipi to the boot cpu direct
++ * the message, and avoid the race.
++ */
++ smp_call_function_single(master, sync_master, NULL, 1, 0);
+
+ while (go[MASTER]) /* wait for master to be ready */
+ no_cpu_relax();
+@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
+ printk(KERN_INFO
+ "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
+ "maxerr %lu cycles)\n",
+- smp_processor_id(), boot_cpu_id, delta, rt);
++ smp_processor_id(), master, delta, rt);
+ }
+
+ static void __cpuinit tsc_sync_wait(void)
+ {
+ if (notscsync || !cpu_has_tsc)
+ return;
+- printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
+- boot_cpu_id);
+- sync_tsc();
++ sync_tsc(0);
+ }
+
+ static __init int notscsync_setup(char *s)
+diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
+--- a/drivers/acpi/pci_irq.c
++++ b/drivers/acpi/pci_irq.c
+@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
+ printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
+ pci_name(dev), ('A' + pin));
+ /* Interrupt Line values above 0xF are forbidden */
+- if (dev->irq >= 0 && (dev->irq <= 0xF)) {
++ if (dev->irq > 0 && (dev->irq <= 0xF)) {
+ printk(" - using IRQ %d\n", dev->irq);
++ acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+ return_VALUE(0);
+ }
+ else {
+diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
+--- a/drivers/char/rocket.c
++++ b/drivers/char/rocket.c
+@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port
+ ToRecv = space;
+
+ if (ToRecv <= 0)
+- return;
++ goto done;
+
+ /*
+ * if status indicates there are errored characters in the
+@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port
+ }
+ /* Push the data up to the tty layer */
+ ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
++done:
+ tty_ldisc_deref(ld);
+ }
+
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -32,12 +32,6 @@
+
+ #define TPM_BUFSIZE 2048
+
+-/* PCI configuration addresses */
+-#define PCI_GEN_PMCON_1 0xA0
+-#define PCI_GEN1_DEC 0xE4
+-#define PCI_LPC_EN 0xE6
+-#define PCI_GEN2_DEC 0xEC
+-
+ static LIST_HEAD(tpm_chip_list);
+ static DEFINE_SPINLOCK(driver_lock);
+ static int dev_mask[32];
+@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
+ EXPORT_SYMBOL_GPL(tpm_time_expired);
+
+ /*
+- * Initialize the LPC bus and enable the TPM ports
+- */
+-int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
+-{
+- u32 lpcenable, tmp;
+- int is_lpcm = 0;
+-
+- switch (pci_dev->vendor) {
+- case PCI_VENDOR_ID_INTEL:
+- switch (pci_dev->device) {
+- case PCI_DEVICE_ID_INTEL_82801CA_12:
+- case PCI_DEVICE_ID_INTEL_82801DB_12:
+- is_lpcm = 1;
+- break;
+- }
+- /* init ICH (enable LPC) */
+- pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
+- lpcenable |= 0x20000000;
+- pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
+-
+- if (is_lpcm) {
+- pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
+- &lpcenable);
+- if ((lpcenable & 0x20000000) == 0) {
+- dev_err(&pci_dev->dev,
+- "cannot enable LPC\n");
+- return -ENODEV;
+- }
+- }
+-
+- /* initialize TPM registers */
+- pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
+-
+- if (!is_lpcm)
+- tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
+- else
+- tmp =
+- (tmp & 0xFFFF0000) | (base & 0xFFF0) |
+- 0x00000001;
+-
+- pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
+-
+- if (is_lpcm) {
+- pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
+- &tmp);
+- tmp |= 0x00000004; /* enable CLKRUN */
+- pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
+- tmp);
+- }
+- tpm_write_index(0x0D, 0x55); /* unlock 4F */
+- tpm_write_index(0x0A, 0x00); /* int disable */
+- tpm_write_index(0x08, base); /* base addr lo */
+- tpm_write_index(0x09, (base & 0xFF00) >> 8); /* base addr hi */
+- tpm_write_index(0x0D, 0xAA); /* lock 4F */
+- break;
+- case PCI_VENDOR_ID_AMD:
+- /* nothing yet */
+- break;
+- }
+-
+- return 0;
+-}
+-
+-EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
+-
+-/*
+ * Internal kernel interface to transmit TPM commands
+ */
+ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
+@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
+ if (chip == NULL)
+ return -ENODEV;
+
+- spin_lock(&driver_lock);
+- tpm_lpc_bus_init(pci_dev, chip->vendor->base);
+- spin_unlock(&driver_lock);
+-
+ return 0;
+ }
+
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
+ }
+
+ extern void tpm_time_expired(unsigned long);
+-extern int tpm_lpc_bus_init(struct pci_dev *, u16);
+-
+ extern int tpm_register_hardware(struct pci_dev *,
+ struct tpm_vendor_specific *);
+ extern int tpm_open(struct inode *, struct file *);
+diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
+--- a/drivers/char/tpm/tpm_atmel.c
++++ b/drivers/char/tpm/tpm_atmel.c
+@@ -22,7 +22,10 @@
+ #include "tpm.h"
+
+ /* Atmel definitions */
+-#define TPM_ATML_BASE 0x400
++enum tpm_atmel_addr {
++ TPM_ATMEL_BASE_ADDR_LO = 0x08,
++ TPM_ATMEL_BASE_ADDR_HI = 0x09
++};
+
+ /* write status bits */
+ #define ATML_STATUS_ABORT 0x01
+@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
+ .cancel = tpm_atml_cancel,
+ .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
+ .req_complete_val = ATML_STATUS_DATA_AVAIL,
+- .base = TPM_ATML_BASE,
+ .miscdev = { .fops = &atmel_ops, },
+ };
+
+@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
+ {
+ u8 version[4];
+ int rc = 0;
++ int lo, hi;
+
+ if (pci_enable_device(pci_dev))
+ return -EIO;
+
+- if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
+- rc = -ENODEV;
+- goto out_err;
+- }
++ lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
++ hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
++
++ tpm_atmel.base = (hi<<8)|lo;
++ dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
+
+ /* verify that it is an Atmel part */
+ if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
+diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
+--- a/drivers/char/tpm/tpm_nsc.c
++++ b/drivers/char/tpm/tpm_nsc.c
+@@ -24,6 +24,10 @@
+ /* National definitions */
+ #define TPM_NSC_BASE 0x360
+ #define TPM_NSC_IRQ 0x07
++#define TPM_NSC_BASE0_HI 0x60
++#define TPM_NSC_BASE0_LO 0x61
++#define TPM_NSC_BASE1_HI 0x62
++#define TPM_NSC_BASE1_LO 0x63
+
+ #define NSC_LDN_INDEX 0x07
+ #define NSC_SID_INDEX 0x20
+@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
+ .cancel = tpm_nsc_cancel,
+ .req_complete_mask = NSC_STATUS_OBF,
+ .req_complete_val = NSC_STATUS_OBF,
+- .base = TPM_NSC_BASE,
+ .miscdev = { .fops = &nsc_ops, },
+
+ };
+@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
+ const struct pci_device_id *pci_id)
+ {
+ int rc = 0;
++ int lo, hi;
++
++ hi = tpm_read_index(TPM_NSC_BASE0_HI);
++ lo = tpm_read_index(TPM_NSC_BASE0_LO);
++
++ tpm_nsc.base = (hi<<8) | lo;
+
+ if (pci_enable_device(pci_dev))
+ return -EIO;
+
+- if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
+- rc = -ENODEV;
+- goto out_err;
+- }
+-
+ /* verify that it is a National part (SID) */
+ if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
+ rc = -ENODEV;
+diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
+--- a/drivers/char/tty_ioctl.c
++++ b/drivers/char/tty_ioctl.c
+@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
+ ld = tty_ldisc_ref(tty);
+ switch (arg) {
+ case TCIFLUSH:
+- if (ld->flush_buffer)
++ if (ld && ld->flush_buffer)
+ ld->flush_buffer(tty);
+ break;
+ case TCIOFLUSH:
+- if (ld->flush_buffer)
++ if (ld && ld->flush_buffer)
+ ld->flush_buffer(tty);
+ /* fall through */
+ case TCOFLUSH:
+diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
+--- a/drivers/media/video/cx88/cx88-video.c
++++ b/drivers/media/video/cx88/cx88-video.c
+@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] =
+ .default_value = 0,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },
+- .off = 0,
++ .off = 128,
+ .reg = MO_HUE,
+ .mask = 0x00ff,
+ .shift = 0,
+diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
+--- a/drivers/net/e1000/e1000_main.c
++++ b/drivers/net/e1000/e1000_main.c
+@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
+ tso = e1000_tso(adapter, skb);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
++ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
+--- a/drivers/net/hamradio/Kconfig
++++ b/drivers/net/hamradio/Kconfig
+@@ -17,7 +17,7 @@ config MKISS
+
+ config 6PACK
+ tristate "Serial port 6PACK driver"
+- depends on AX25 && BROKEN_ON_SMP
++ depends on AX25
+ ---help---
+ 6pack is a transmission protocol for the data exchange between your
+ PC and your TNC (the Terminal Node Controller acts as a kind of
+diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
+--- a/drivers/net/shaper.c
++++ b/drivers/net/shaper.c
+@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
+ {
+ struct shaper *shaper = dev->priv;
+ struct sk_buff *ptr;
+-
+- if (down_trylock(&shaper->sem))
+- return -1;
+
++ spin_lock(&shaper->lock);
+ ptr=shaper->sendq.prev;
+
+ /*
+@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
+ shaper->stats.collisions++;
+ }
+ shaper_kick(shaper);
+- up(&shaper->sem);
++ spin_unlock(&shaper->lock);
+ return 0;
+ }
+
+@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
+ {
+ struct shaper *shaper = (struct shaper *)data;
+
+- if (!down_trylock(&shaper->sem)) {
+- shaper_kick(shaper);
+- up(&shaper->sem);
+- } else
+- mod_timer(&shaper->timer, jiffies);
++ spin_lock(&shaper->lock);
++ shaper_kick(shaper);
++ spin_unlock(&shaper->lock);
+ }
+
+ /*
+@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
+
+
+ /*
+- * Flush the shaper queues on a closedown
+- */
+-
+-static void shaper_flush(struct shaper *shaper)
+-{
+- struct sk_buff *skb;
+-
+- down(&shaper->sem);
+- while((skb=skb_dequeue(&shaper->sendq))!=NULL)
+- dev_kfree_skb(skb);
+- shaper_kick(shaper);
+- up(&shaper->sem);
+-}
+-
+-/*
+ * Bring the interface up. We just disallow this until a
+ * bind.
+ */
+@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
+ static int shaper_close(struct net_device *dev)
+ {
+ struct shaper *shaper=dev->priv;
+- shaper_flush(shaper);
++ struct sk_buff *skb;
++
++ while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
++ dev_kfree_skb(skb);
++
++ spin_lock_bh(&shaper->lock);
++ shaper_kick(shaper);
++ spin_unlock_bh(&shaper->lock);
++
+ del_timer_sync(&shaper->timer);
+ return 0;
+ }
+@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
+ init_timer(&sh->timer);
+ sh->timer.function=shaper_timer;
+ sh->timer.data=(unsigned long)sh;
++ spin_lock_init(&sh->lock);
+ }
+
+ /*
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
+ /* FIXME, once all of the existing PCI drivers have been fixed to set
+ * the pci shutdown function, this test can go away. */
+ if (!drv->driver.shutdown)
+- drv->driver.shutdown = pci_device_shutdown,
++ drv->driver.shutdown = pci_device_shutdown;
+ drv->driver.owner = drv->owner;
+ drv->driver.kobj.ktype = &pci_driver_kobj_type;
+ pci_init_dynids(&drv->dynids);
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t
+ rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+- if (!rport)
++ if (!rport) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate fc remote port!\n");
++ return;
++ }
+
+ if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
+ fcport->os_target_id = rport->scsi_target_id;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1150,7 +1150,7 @@ iospace_error_exit:
+ */
+ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
+ {
+- int ret;
++ int ret = -ENODEV;
+ device_reg_t __iomem *reg;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *ha;
+@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
+ fc_port_t *fcport;
+
+ if (pci_enable_device(pdev))
+- return -1;
++ goto probe_out;
+
+ host = scsi_host_alloc(&qla2x00_driver_template,
+ sizeof(scsi_qla_host_t));
+@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
+
+ /* Configure PCI I/O space */
+ ret = qla2x00_iospace_config(ha);
+- if (ret != 0) {
+- goto probe_alloc_failed;
+- }
++ if (ret)
++ goto probe_failed;
+
+ /* Sanitize the information from PCI BIOS. */
+ host->irq = pdev->irq;
+@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
+ qla_printk(KERN_WARNING, ha,
+ "[ERROR] Failed to allocate memory for adapter\n");
+
+- goto probe_alloc_failed;
++ ret = -ENOMEM;
++ goto probe_failed;
+ }
+
+- pci_set_drvdata(pdev, ha);
+- host->this_id = 255;
+- host->cmd_per_lun = 3;
+- host->unique_id = ha->instance;
+- host->max_cmd_len = MAX_CMDSZ;
+- host->max_channel = ha->ports - 1;
+- host->max_id = ha->max_targets;
+- host->max_lun = ha->max_luns;
+- host->transportt = qla2xxx_transport_template;
+- if (scsi_add_host(host, &pdev->dev))
+- goto probe_alloc_failed;
+-
+- qla2x00_alloc_sysfs_attr(ha);
+-
+ if (qla2x00_initialize_adapter(ha) &&
+ !(ha->device_flags & DFLG_NO_CABLE)) {
+
+@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
+ "Adapter flags %x.\n",
+ ha->host_no, ha->device_flags));
+
++ ret = -ENODEV;
+ goto probe_failed;
+ }
+
+- qla2x00_init_host_attr(ha);
+-
+ /*
+ * Startup the kernel thread for this host adapter
+ */
+@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
+ qla_printk(KERN_WARNING, ha,
+ "Unable to start DPC thread!\n");
+
++ ret = -ENODEV;
+ goto probe_failed;
+ }
+ wait_for_completion(&ha->dpc_inited);
+
++ host->this_id = 255;
++ host->cmd_per_lun = 3;
++ host->unique_id = ha->instance;
++ host->max_cmd_len = MAX_CMDSZ;
++ host->max_channel = ha->ports - 1;
++ host->max_lun = MAX_LUNS;
++ host->transportt = qla2xxx_transport_template;
++
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ ret = request_irq(host->irq, qla2100_intr_handler,
+ SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
+ else
+ ret = request_irq(host->irq, qla2300_intr_handler,
+ SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
+- if (ret != 0) {
++ if (ret) {
+ qla_printk(KERN_WARNING, ha,
+ "Failed to reserve interrupt %d already in use.\n",
+ host->irq);
+@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
+ msleep(10);
+ }
+
++ pci_set_drvdata(pdev, ha);
+ ha->flags.init_done = 1;
+ num_hosts++;
+
++ ret = scsi_add_host(host, &pdev->dev);
++ if (ret)
++ goto probe_failed;
++
++ qla2x00_alloc_sysfs_attr(ha);
++
++ qla2x00_init_host_attr(ha);
++
+ qla_printk(KERN_INFO, ha, "\n"
+ " QLogic Fibre Channel HBA Driver: %s\n"
+ " QLogic %s - %s\n"
+@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
+ probe_failed:
+ fc_remove_host(ha->host);
+
+- scsi_remove_host(host);
+-
+-probe_alloc_failed:
+ qla2x00_free_device(ha);
+
+ scsi_host_put(host);
+@@ -1394,7 +1394,8 @@ probe_alloc_failed:
+ probe_disable_device:
+ pci_disable_device(pdev);
+
+- return -1;
++probe_out:
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(qla2x00_probe_one);
+
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -2969,23 +2969,22 @@ static void * dev_seq_start(struct seq_f
+ {
+ struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
+
++ s->private = it;
+ if (! it)
+ return NULL;
++
+ if (NULL == sg_dev_arr)
+- goto err1;
++ return NULL;
+ it->index = *pos;
+ it->max = sg_last_dev();
+ if (it->index >= it->max)
+- goto err1;
++ return NULL;
+ return it;
+-err1:
+- kfree(it);
+- return NULL;
+ }
+
+ static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
+ {
+- struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
++ struct sg_proc_deviter * it = s->private;
+
+ *pos = ++it->index;
+ return (it->index < it->max) ? it : NULL;
+@@ -2993,7 +2992,9 @@ static void * dev_seq_next(struct seq_fi
+
+ static void dev_seq_stop(struct seq_file *s, void *v)
+ {
+- kfree (v);
++ struct sg_proc_deviter * it = s->private;
++
++ kfree (it);
+ }
+
+ static int sg_proc_open_dev(struct inode *inode, struct file *file)
+diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
+--- a/drivers/usb/net/usbnet.c
++++ b/drivers/usb/net/usbnet.c
+@@ -1922,7 +1922,7 @@ static int genelink_rx_fixup (struct usb
+
+ // copy the packet data to the new skb
+ memcpy(skb_put(gl_skb, size), packet->packet_data, size);
+- skb_return (dev, skb);
++ skb_return (dev, gl_skb);
+ }
+
+ // advance to the next packet
+diff --git a/fs/bio.c b/fs/bio.c
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
+ */
+ bio->bi_vcnt = bio_src->bi_vcnt;
+ bio->bi_size = bio_src->bi_size;
++ bio->bi_idx = bio_src->bi_idx;
+ bio_phys_segments(q, bio);
+ bio_hw_segments(q, bio);
+ }
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
+ struct char_device_struct *cd = NULL, **cp;
+ int i = major_to_index(major);
+
+- up(&chrdevs_lock);
++ down(&chrdevs_lock);
+ for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
+ if ((*cp)->major == major &&
+ (*cp)->baseminor == baseminor &&
+diff --git a/fs/exec.c b/fs/exec.c
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
+ }
+ sig->group_exit_task = NULL;
+ sig->notify_count = 0;
++ sig->real_timer.data = (unsigned long)current;
+ spin_unlock_irq(lock);
+
+ /*
+diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
+--- a/fs/isofs/compress.c
++++ b/fs/isofs/compress.c
+@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
+ cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
+ brelse(bh);
+
++ if (cstart > cend)
++ goto eio;
++
+ csize = cend-cstart;
+
++ if (csize > deflateBound(1UL << zisofs_block_shift))
++ goto eio;
++
+ /* Now page[] contains an array of pages, any of which can be NULL,
+ and the locks on which we hold. We should now read the data and
+ release the pages. If the pages are NULL the decompressed data
+diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
+--- a/include/asm-i386/string.h
++++ b/include/asm-i386/string.h
+@@ -116,7 +116,8 @@ __asm__ __volatile__(
+ "orb $1,%%al\n"
+ "3:"
+ :"=a" (__res), "=&S" (d0), "=&D" (d1)
+- :"1" (cs),"2" (ct));
++ :"1" (cs),"2" (ct)
++ :"memory");
+ return __res;
+ }
+
+@@ -138,8 +139,9 @@ __asm__ __volatile__(
+ "3:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "4:"
+- :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+- :"1" (cs),"2" (ct),"3" (count));
++ :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
++ :"1" (cs),"2" (ct),"3" (count)
++ :"memory");
+ return __res;
+ }
+
+@@ -158,7 +160,9 @@ __asm__ __volatile__(
+ "movl $1,%1\n"
+ "2:\tmovl %1,%0\n\t"
+ "decl %0"
+- :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
++ :"=a" (__res), "=&S" (d0)
++ :"1" (s),"0" (c)
++ :"memory");
+ return __res;
+ }
+
+@@ -175,7 +179,9 @@ __asm__ __volatile__(
+ "leal -1(%%esi),%0\n"
+ "2:\ttestb %%al,%%al\n\t"
+ "jne 1b"
+- :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
++ :"=g" (__res), "=&S" (d0), "=&a" (d1)
++ :"0" (0),"1" (s),"2" (c)
++ :"memory");
+ return __res;
+ }
+
+@@ -189,7 +195,9 @@ __asm__ __volatile__(
+ "scasb\n\t"
+ "notl %0\n\t"
+ "decl %0"
+- :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
++ :"=c" (__res), "=&D" (d0)
++ :"1" (s),"a" (0), "0" (0xffffffffu)
++ :"memory");
+ return __res;
+ }
+
+@@ -333,7 +341,9 @@ __asm__ __volatile__(
+ "je 1f\n\t"
+ "movl $1,%0\n"
+ "1:\tdecl %0"
+- :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
++ :"=D" (__res), "=&c" (d0)
++ :"a" (c),"0" (cs),"1" (count)
++ :"memory");
+ return __res;
+ }
+
+@@ -369,7 +379,7 @@ __asm__ __volatile__(
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+- : "=&c" (d0), "=&D" (d1)
++ :"=&c" (d0), "=&D" (d1)
+ :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+ :"memory");
+ return (s);
+@@ -392,7 +402,8 @@ __asm__ __volatile__(
+ "jne 1b\n"
+ "3:\tsubl %2,%0"
+ :"=a" (__res), "=&d" (d0)
+- :"c" (s),"1" (count));
++ :"c" (s),"1" (count)
++ :"memory");
+ return __res;
+ }
+ /* end of additional stuff */
+@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
+ "dec %%edi\n"
+ "1:"
+ : "=D" (addr), "=c" (size)
+- : "0" (addr), "1" (size), "a" (c));
++ : "0" (addr), "1" (size), "a" (c)
++ : "memory");
+ return addr;
+ }
+
+diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
+--- a/include/asm-x86_64/smp.h
++++ b/include/asm-x86_64/smp.h
+@@ -46,6 +46,8 @@ extern int pic_mode;
+ extern int smp_num_siblings;
+ extern void smp_flush_tlb(void);
+ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
++extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
++ int retry, int wait);
+ extern void smp_send_reschedule(int cpu);
+ extern void smp_invalidate_rcv(void); /* Process an NMI */
+ extern void zap_low_mappings(void);
+diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
+--- a/include/linux/if_shaper.h
++++ b/include/linux/if_shaper.h
+@@ -23,7 +23,7 @@ struct shaper
+ __u32 shapeclock;
+ unsigned long recovery; /* Time we can next clock a packet out on an empty queue */
+- struct semaphore sem;
++ spinlock_t lock;
+ struct net_device_stats stats;
+ struct net_device *dev;
+ int (*hard_start_xmit) (struct sk_buff *skb,
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
+ {
+ int hlen = skb_headlen(skb);
+
+- if (offset + len <= hlen)
++ if (hlen - offset >= len)
+ return skb->data + offset;
+
+ if (skb_copy_bits(skb, offset, buffer, len) < 0)
+diff --git a/include/linux/zlib.h b/include/linux/zlib.h
+--- a/include/linux/zlib.h
++++ b/include/linux/zlib.h
+@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp
+ stream state was inconsistent (such as zalloc or state being NULL).
+ */
+
++static inline unsigned long deflateBound(unsigned long s)
++{
++ return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
++}
++
+ extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
+ /*
+ Dynamically update the compression level and compression strategy. The
+diff --git a/kernel/module.c b/kernel/module.c
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
+ /* Created by linker magic */
+ extern char __per_cpu_start[], __per_cpu_end[];
+
+-static void *percpu_modalloc(unsigned long size, unsigned long align)
++static void *percpu_modalloc(unsigned long size, unsigned long align,
++ const char *name)
+ {
+ unsigned long extra;
+ unsigned int i;
+ void *ptr;
+
+- BUG_ON(align > SMP_CACHE_BYTES);
++ if (align > SMP_CACHE_BYTES) {
++ printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
++ name, align, SMP_CACHE_BYTES);
++ align = SMP_CACHE_BYTES;
++ }
+
+ ptr = __per_cpu_start;
+ for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
+@@ -347,7 +352,8 @@ static int percpu_modinit(void)
+ }
+ __initcall(percpu_modinit);
+ #else /* ... !CONFIG_SMP */
+-static inline void *percpu_modalloc(unsigned long size, unsigned long align)
++static inline void *percpu_modalloc(unsigned long size, unsigned long align,
++ const char *name)
+ {
+ return NULL;
+ }
+@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
+ if (pcpuindex) {
+ /* We have a special allocation for this section. */
+ percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
+- sechdrs[pcpuindex].sh_addralign);
++ sechdrs[pcpuindex].sh_addralign,
++ mod->name);
+ if (!percpu) {
+ err = -ENOMEM;
+ goto free_mod;
+diff --git a/kernel/signal.c b/kernel/signal.c
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -686,7 +686,7 @@ static void handle_stop_signal(int sig,
+ {
+ struct task_struct *t;
+
+- if (p->flags & SIGNAL_GROUP_EXIT)
++ if (p->signal->flags & SIGNAL_GROUP_EXIT)
+ /*
+ * The process is in the middle of dying already.
+ */
+diff --git a/lib/inflate.c b/lib/inflate.c
+--- a/lib/inflate.c
++++ b/lib/inflate.c
+@@ -326,7 +326,7 @@ DEBG("huft1 ");
+ {
+ *t = (struct huft *)NULL;
+ *m = 0;
+- return 0;
++ return 2;
+ }
+
+ DEBG("huft2 ");
+@@ -374,6 +374,7 @@ DEBG("huft5 ");
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
++ n = x[g]; /* set n to length of v */
+
+ DEBG("h6 ");
+
+@@ -410,12 +411,13 @@ DEBG1("1 ");
+ DEBG1("2 ");
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+- while (++j < z) /* try smaller tables up to z bits */
+- {
+- if ((f <<= 1) <= *++xp)
+- break; /* enough codes to use up j bits */
+- f -= *xp; /* else deduct codes from patterns */
+- }
++ if (j < z)
++ while (++j < z) /* try smaller tables up to z bits */
++ {
++ if ((f <<= 1) <= *++xp)
++ break; /* enough codes to use up j bits */
++ f -= *xp; /* else deduct codes from patterns */
++ }
+ }
+ DEBG1("3 ");
+ z = 1 << j; /* table entries for j-bit table */
+diff --git a/mm/memory.c b/mm/memory.c
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
+ {
+ pgd_t *pgd;
+ unsigned long next;
+- unsigned long end = addr + size;
++ unsigned long end = addr + PAGE_ALIGN(size);
+ struct mm_struct *mm = vma->vm_mm;
+ int err;
+
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
+ struct mempolicy *new;
+ DECLARE_BITMAP(nodes, MAX_NUMNODES);
+
+- if (mode > MPOL_MAX)
++ if (mode < 0 || mode > MPOL_MAX)
+ return -EINVAL;
+ err = get_nodes(nodes, nmask, maxnode, mode);
+ if (err)
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
+ if (!vlandev)
+ continue;
+
++ if (netif_carrier_ok(dev)) {
++ if (!netif_carrier_ok(vlandev))
++ netif_carrier_on(vlandev);
++ } else {
++ if (netif_carrier_ok(vlandev))
++ netif_carrier_off(vlandev);
++ }
++
+ if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
+ vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK)
+ | flgs;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -349,12 +349,12 @@ static void icmp_push_reply(struct icmp_
+ {
+ struct sk_buff *skb;
+
+- ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
+- icmp_param->data_len+icmp_param->head_len,
+- icmp_param->head_len,
+- ipc, rt, MSG_DONTWAIT);
+-
+- if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
++ if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
++ icmp_param->data_len+icmp_param->head_len,
++ icmp_param->head_len,
++ ipc, rt, MSG_DONTWAIT) < 0)
++ ip_flush_pending_frames(icmp_socket->sk);
++ else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
+ struct icmphdr *icmph = skb->h.icmph;
+ unsigned int csum = 0;
+ struct sk_buff *skb1;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
+ #ifdef CONFIG_NETFILTER_DEBUG
+ nf_debug_ip_loopback_xmit(newskb);
+ #endif
+- nf_reset(newskb);
+ netif_rx(newskb);
+ return 0;
+ }
+@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
+ nf_debug_ip_finish_output2(skb);
+ #endif /*CONFIG_NETFILTER_DEBUG*/
+
+- nf_reset(skb);
+-
+ if (hh) {
+ int hh_alen;
+
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -848,6 +848,9 @@ mc_msf_out:
+
+ case IP_IPSEC_POLICY:
+ case IP_XFRM_POLICY:
++ err = -EPERM;
++ if (!capable(CAP_NET_ADMIN))
++ break;
+ err = xfrm_user_policy(sk, optname, optval, optlen);
+ break;
+
+diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
+--- a/net/ipv4/netfilter/ip_conntrack_core.c
++++ b/net/ipv4/netfilter/ip_conntrack_core.c
+@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
+ schedule();
+ goto i_see_dead_people;
+ }
++ /* wait until all references to ip_conntrack_untracked are dropped */
++ while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
++ schedule();
+
+ kmem_cache_destroy(ip_conntrack_cachep);
+ kmem_cache_destroy(ip_conntrack_expect_cachep);
+diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
+--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
++++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
+@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+ {
++#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
++ /* Previously seen (loopback)? Ignore. Do this before
++ fragment check. */
++ if ((*pskb)->nfct)
++ return NF_ACCEPT;
++#endif
++
+ /* Gather fragments. */
+ if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+ *pskb = ip_ct_gather_frags(*pskb,
+diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
++++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
+@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
+ enum ip_nat_manip_type maniptype,
+ const struct ip_conntrack *conntrack)
+ {
+- static u_int16_t port, *portptr;
++ static u_int16_t port;
++ u_int16_t *portptr;
+ unsigned int range_size, min, i;
+
+ if (maniptype == IP_NAT_MANIP_SRC)
+diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
+--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
++++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
+@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
+ enum ip_nat_manip_type maniptype,
+ const struct ip_conntrack *conntrack)
+ {
+- static u_int16_t port, *portptr;
++ static u_int16_t port;
++ u_int16_t *portptr;
+ unsigned int range_size, min, i;
+
+ if (maniptype == IP_NAT_MANIP_SRC)
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -198,12 +198,13 @@ resubmit:
+ if (!raw_sk) {
+ if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
+- icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
++ icmpv6_send(skb, ICMPV6_PARAMPROB,
++ ICMPV6_UNK_NEXTHDR, nhoff,
++ skb->dev);
+ }
+- } else {
++ } else
+ IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+- kfree_skb(skb);
+- }
++ kfree_skb(skb);
+ }
+ rcu_read_unlock();
+ return 0;
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -503,6 +503,9 @@ done:
+ break;
+ case IPV6_IPSEC_POLICY:
+ case IPV6_XFRM_POLICY:
++ retv = -EPERM;
++ if (!capable(CAP_NET_ADMIN))
++ break;
+ retv = xfrm_user_policy(sk, optname, optval, optlen);
+ break;
+
+diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
+--- a/net/ipv6/netfilter/ip6_queue.c
++++ b/net/ipv6/netfilter/ip6_queue.c
+@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
+ static void
+ ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
+ {
++ local_bh_disable();
+ nf_reinject(entry->skb, entry->info, verdict);
++ local_bh_enable();
+ kfree(entry);
+ }
+
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -315,8 +315,8 @@ err:
+ static void netlink_remove(struct sock *sk)
+ {
+ netlink_table_grab();
+- nl_table[sk->sk_protocol].hash.entries--;
+- sk_del_node_init(sk);
++ if (sk_del_node_init(sk))
++ nl_table[sk->sk_protocol].hash.entries--;
+ if (nlk_sk(sk)->groups)
+ __sk_del_bind_node(sk);
+ netlink_table_ungrab();
+@@ -429,7 +429,12 @@ retry:
+ err = netlink_insert(sk, pid);
+ if (err == -EADDRINUSE)
+ goto retry;
+- return 0;
++
++ /* If 2 threads race to autobind, that is fine. */
++ if (err == -EBUSY)
++ err = 0;
++
++ return err;
+ }
+
+ static inline int netlink_capable(struct socket *sock, unsigned int flag)
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
+ dst_release(skb->dst);
+ skb->dst = NULL;
+
++ /* drop conntrack reference */
++ nf_reset(skb);
++
+ spkt = (struct sockaddr_pkt*)skb->cb;
+
+ skb_push(skb, skb->data-skb->mac.raw);
+@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
+ dst_release(skb->dst);
+ skb->dst = NULL;
+
++ /* drop conntrack reference */
++ nf_reset(skb);
++
+ spin_lock(&sk->sk_receive_queue.lock);
+ po->stats.tp_packets++;
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
+ if (nr > XFRM_MAX_DEPTH)
+ return NULL;
+
++ if (p->dir > XFRM_POLICY_OUT)
++ return NULL;
++
+ xp = xfrm_policy_alloc(GFP_KERNEL);
+ if (xp == NULL) {
+ *dir = -ENOBUFS;
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
+
+ if (keyring->description) {
+ write_lock(&keyring_name_lock);
+- list_del(&keyring->type_data.link);
++
++ if (keyring->type_data.link.next != NULL &&
++ !list_empty(&keyring->type_data.link))
++ list_del(&keyring->type_data.link);
++
+ write_unlock(&keyring_name_lock);
+ }
+
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
+ keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+- goto error;
++ goto error2;
+ }
+ }
+ else if (IS_ERR(keyring)) {
diff -r 349b302f29e2 -r 899f7b4b19fc patches/linux-2.6.12/2.6.12.5.patch
--- a/patches/linux-2.6.12/2.6.12.5.patch Fri Oct 21 09:24:35 2005
+++ /dev/null Fri Oct 21 09:46:30 2005
@@ -1,1614 +0,0 @@
-diff --git a/Makefile b/Makefile
---- a/Makefile
-+++ b/Makefile
-@@ -1,7 +1,7 @@
- VERSION = 2
- PATCHLEVEL = 6
- SUBLEVEL = 12
--EXTRAVERSION =
-+EXTRAVERSION = .5
- NAME=Woozy Numbat
-
- # *DOCUMENTATION*
-@@ -1149,7 +1149,7 @@ endif # KBUILD_EXTMOD
- #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
- #Adding $(srctree) adds about 20M on i386 to the size of the output file!
-
--ifeq ($(KBUILD_OUTPUT),)
-+ifeq ($(src),$(obj))
- __srctree =
- else
- __srctree = $(srctree)/
-diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
---- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
-+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
-@@ -44,7 +44,7 @@
-
- #define PFX "powernow-k8: "
- #define BFX PFX "BIOS error: "
--#define VERSION "version 1.40.2"
-+#define VERSION "version 1.40.4"
- #include "powernow-k8.h"
-
- /* serialize freq changes */
-@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
- {
- struct powernow_k8_data *data;
- cpumask_t oldmask = CPU_MASK_ALL;
-- int rc;
-+ int rc, i;
-
- if (!check_supported_cpu(pol->cpu))
- return -ENODEV;
-@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
- printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
- data->currfid, data->currvid);
-
-- powernow_data[pol->cpu] = data;
-+ for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
-+ powernow_data[i] = data;
-+ }
-
- return 0;
-
-diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
---- a/arch/i386/kernel/process.c
-+++ b/arch/i386/kernel/process.c
-@@ -827,6 +827,8 @@ asmlinkage int sys_get_thread_area(struc
- if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- return -EINVAL;
-
-+ memset(&info, 0, sizeof(info));
-+
- desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-
- info.entry_number = idx;
-diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
---- a/arch/ia64/kernel/ptrace.c
-+++ b/arch/ia64/kernel/ptrace.c
-@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
- *data = (pt->cr_ipsr & IPSR_MASK);
- return 0;
-
-+ case PT_AR_RSC:
-+ if (write_access)
-+ pt->ar_rsc = *data | (3 << 2); /* force PL3 */
-+ else
-+ *data = pt->ar_rsc;
-+ return 0;
-+
- case PT_AR_RNAT:
- urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
- rnat_addr = (long) ia64_rse_rnat_addr((long *)
-@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
- case PT_AR_BSPSTORE:
- ptr = pt_reg_addr(pt, ar_bspstore);
- break;
-- case PT_AR_RSC:
-- ptr = pt_reg_addr(pt, ar_rsc);
-- break;
- case PT_AR_UNAT:
- ptr = pt_reg_addr(pt, ar_unat);
- break;
-@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
- static long
- ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
- {
-- unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
-+ unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
- struct unw_frame_info info;
- struct switch_stack *sw;
- struct ia64_fpreg fpval;
-@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
- /* app regs */
-
- retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
-- retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
-+ retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
- retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
- retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
- retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
-@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
- retval |= __get_user(nat_bits, &ppr->nat);
-
- retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
-+ retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
- retval |= access_uarea(child, PT_AR_EC, &ec, 1);
- retval |= access_uarea(child, PT_AR_LC, &lc, 1);
- retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
-diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
---- a/arch/ia64/kernel/signal.c
-+++ b/arch/ia64/kernel/signal.c
-@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
- static long
- restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
- {
-- unsigned long ip, flags, nat, um, cfm;
-+ unsigned long ip, flags, nat, um, cfm, rsc;
- long err;
-
- /* Always make any pending restarted system calls return -EINTR */
-@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
- err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */
- err |= __get_user(cfm, &sc->sc_cfm);
- err |= __get_user(um, &sc->sc_um); /* user mask */
-- err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
-+ err |= __get_user(rsc, &sc->sc_ar_rsc);
- err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
- err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
- err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
-@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
- err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */
-
- scr->pt.cr_ifs = cfm | (1UL << 63);
-+ scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
-
- /* establish new instruction pointer: */
- scr->pt.cr_iip = ip & ~0x3UL;
-diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
---- a/arch/ppc/kernel/time.c
-+++ b/arch/ppc/kernel/time.c
-@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
-
- extern unsigned long wall_jiffies;
-
-+/* used for timezone offset */
-+static long timezone_offset;
-+
- DEFINE_SPINLOCK(rtc_lock);
-
- EXPORT_SYMBOL(rtc_lock);
-@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
- xtime.tv_sec - last_rtc_update >= 659 &&
- abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
- jiffies - wall_jiffies == 1) {
-- if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
-+ if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
- last_rtc_update = xtime.tv_sec+1;
- else
- /* Try again one minute later */
-@@ -286,7 +289,7 @@ void __init time_init(void)
- unsigned old_stamp, stamp, elapsed;
-
- if (ppc_md.time_init != NULL)
-- time_offset = ppc_md.time_init();
-+ timezone_offset = ppc_md.time_init();
-
- if (__USE_RTC()) {
- /* 601 processor: dec counts down by 128 every 128ns */
-@@ -331,10 +334,10 @@ void __init time_init(void)
- set_dec(tb_ticks_per_jiffy);
-
- /* If platform provided a timezone (pmac), we correct the time */
-- if (time_offset) {
-- sys_tz.tz_minuteswest = -time_offset / 60;
-+ if (timezone_offset) {
-+ sys_tz.tz_minuteswest = -timezone_offset / 60;
- sys_tz.tz_dsttime = 0;
-- xtime.tv_sec -= time_offset;
-+ xtime.tv_sec -= timezone_offset;
- }
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
---- a/arch/ppc64/boot/zlib.c
-+++ b/arch/ppc64/boot/zlib.c
-@@ -1307,7 +1307,7 @@ local int huft_build(
- {
- *t = (inflate_huft *)Z_NULL;
- *m = 0;
-- return Z_OK;
-+ return Z_DATA_ERROR;
- }
-
-
-@@ -1351,6 +1351,7 @@ local int huft_build(
- if ((j = *p++) != 0)
- v[x[j]++] = i;
- } while (++i < n);
-+ n = x[g]; /* set n to length of v */
-
-
- /* Generate the Huffman codes and for each, make the table entries */
-diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
---- a/arch/um/kernel/process.c
-+++ b/arch/um/kernel/process.c
-@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
- return(arg.pid);
- }
-
--static int ptrace_child(void)
-+static int ptrace_child(void *arg)
- {
- int ret;
- int pid = os_getpid(), ppid = getppid();
-@@ -159,16 +159,20 @@ static int ptrace_child(void)
- _exit(ret);
- }
-
--static int start_ptraced_child(void)
-+static int start_ptraced_child(void **stack_out)
- {
-+ void *stack;
-+ unsigned long sp;
- int pid, n, status;
-
-- pid = fork();
-- if(pid == 0)
-- ptrace_child();
--
-+ stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
-+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-+ if(stack == MAP_FAILED)
-+ panic("check_ptrace : mmap failed, errno = %d", errno);
-+ sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
-+ pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
- if(pid < 0)
-- panic("check_ptrace : fork failed, errno = %d", errno);
-+ panic("check_ptrace : clone failed, errno = %d", errno);
- CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
- if(n < 0)
- panic("check_ptrace : wait failed, errno = %d", errno);
-@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
- panic("check_ptrace : expected SIGSTOP, got status = %d",
- status);
-
-+ *stack_out = stack;
- return(pid);
- }
-
-@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
- * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
- * So only for SYSEMU features we test mustpanic, while normal host features
- * must work anyway!*/
--static int stop_ptraced_child(int pid, int exitcode, int mustexit)
-+static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
- {
- int status, n, ret = 0;
-
- if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
-- panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
-+ panic("check_ptrace : ptrace failed, errno = %d", errno);
- CATCH_EINTR(n = waitpid(pid, &status, 0));
- if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
- int exit_with = WEXITSTATUS(status);
-@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
- printk("check_ptrace : child exited with exitcode %d, while "
- "expecting %d; status 0x%x", exit_with,
- exitcode, status);
-- if (mustexit)
-+ if (mustpanic)
- panic("\n");
- else
- printk("\n");
- ret = -1;
- }
-
-+ if(munmap(stack, PAGE_SIZE) < 0)
-+ panic("check_ptrace : munmap failed, errno = %d", errno);
- return ret;
- }
-
-@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
-
- static void __init check_sysemu(void)
- {
-+ void *stack;
- int pid, syscall, n, status, count=0;
-
- printk("Checking syscall emulation patch for ptrace...");
- sysemu_supported = 0;
-- pid = start_ptraced_child();
-+ pid = start_ptraced_child(&stack);
-
- if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
- goto fail;
-@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
- panic("check_sysemu : failed to modify system "
- "call return, errno = %d", errno);
-
-- if (stop_ptraced_child(pid, 0, 0) < 0)
-+ if (stop_ptraced_child(pid, stack, 0, 0) < 0)
- goto fail_stopped;
-
- sysemu_supported = 1;
-@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
- set_using_sysemu(!force_sysemu_disabled);
-
- printk("Checking advanced syscall emulation patch for ptrace...");
-- pid = start_ptraced_child();
-+ pid = start_ptraced_child(&stack);
- while(1){
- count++;
- if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
-@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
- break;
- }
- }
-- if (stop_ptraced_child(pid, 0, 0) < 0)
-+ if (stop_ptraced_child(pid, stack, 0, 0) < 0)
- goto fail_stopped;
-
- sysemu_supported = 2;
-@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
- return;
-
- fail:
-- stop_ptraced_child(pid, 1, 0);
-+ stop_ptraced_child(pid, stack, 1, 0);
- fail_stopped:
- printk("missing\n");
- }
-
- void __init check_ptrace(void)
- {
-+ void *stack;
- int pid, syscall, n, status;
-
- printk("Checking that ptrace can change system call numbers...");
-- pid = start_ptraced_child();
-+ pid = start_ptraced_child(&stack);
-
- 	if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
- panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d",
errno);
-@@ -330,7 +339,7 @@ void __init check_ptrace(void)
- break;
- }
- }
-- stop_ptraced_child(pid, 0, 1);
-+ stop_ptraced_child(pid, stack, 0, 1);
- printk("OK\n");
- check_sysemu();
- }
-@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
- static inline int check_skas3_ptrace_support(void)
- {
- struct ptrace_faultinfo fi;
-+ void *stack;
- int pid, n, ret = 1;
-
- printf("Checking for the skas3 patch in the host...");
-- pid = start_ptraced_child();
-+ pid = start_ptraced_child(&stack);
-
- n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
- if (n < 0) {
-@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
- }
-
- init_registers(pid);
-- stop_ptraced_child(pid, 1, 1);
-+ stop_ptraced_child(pid, stack, 1, 1);
-
- return(ret);
- }
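
The arch/um changes above replace fork() in the host-probing helpers with clone() onto a page obtained from mmap(), so start_ptraced_child() hands its caller the stack it allocated and stop_ptraced_child() can munmap() it once the child is reaped. A runnable userspace sketch of that clone-with-private-stack pattern, with the ptrace machinery left out (the child function is only a placeholder):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

static int child_fn(void *arg)
{
	/* Runs on the stack handed to clone(); a real caller would raise
	 * SIGSTOP here and let the parent ptrace it, as ptrace_child() does. */
	return 0;
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *stack = mmap(NULL, page, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Stacks grow down: start the child just below the top of the page,
	 * the same sp computation the start_ptraced_child() hunk uses. */
	unsigned long sp = (unsigned long) stack + page - sizeof(void *);
	int pid = clone(child_fn, (void *) sp, SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");
		return 1;
	}

	int status;
	waitpid(pid, &status, 0);
	munmap(stack, page);	/* reclaimed by stop_ptraced_child() in the patch */
	printf("child %d exited with status %d\n", pid, WEXITSTATUS(status));
	return 0;
}
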
-diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
---- a/arch/x86_64/ia32/syscall32.c
-+++ b/arch/x86_64/ia32/syscall32.c
-@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
- int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
- struct vm_area_struct *vma;
- struct mm_struct *mm = current->mm;
-+ int ret;
-
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!vma)
-@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
- vma->vm_mm = mm;
-
- down_write(&mm->mmap_sem);
-- insert_vm_struct(mm, vma);
-+ if ((ret = insert_vm_struct(mm, vma))) {
-+ up_write(&mm->mmap_sem);
-+ kmem_cache_free(vm_area_cachep, vma);
-+ return ret;
-+ }
- mm->total_vm += npages;
- up_write(&mm->mmap_sem);
- return 0;
-diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
---- a/arch/x86_64/kernel/setup.c
-+++ b/arch/x86_64/kernel/setup.c
-@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
- int cpu = smp_processor_id();
- int node = 0;
- unsigned bits;
-- if (c->x86_num_cores == 1)
-- return;
-
- bits = 0;
- while ((1 << bits) < c->x86_num_cores)
-diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
---- a/arch/x86_64/kernel/smp.c
-+++ b/arch/x86_64/kernel/smp.c
-@@ -284,6 +284,71 @@ struct call_data_struct {
- static struct call_data_struct * call_data;
-
- /*
-+ * this function sends a 'generic call function' IPI to one other CPU
-+ * in the system.
-+ */
-+static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-+ int nonatomic, int wait)
-+{
-+ struct call_data_struct data;
-+ int cpus = 1;
-+
-+ data.func = func;
-+ data.info = info;
-+ atomic_set(&data.started, 0);
-+ data.wait = wait;
-+ if (wait)
-+ atomic_set(&data.finished, 0);
-+
-+ call_data = &data;
-+ wmb();
-+ /* Send a message to all other CPUs and wait for them to respond */
-+ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
-+
-+ /* Wait for response */
-+ while (atomic_read(&data.started) != cpus)
-+ cpu_relax();
-+
-+ if (!wait)
-+ return;
-+
-+ while (atomic_read(&data.finished) != cpus)
-+ cpu_relax();
-+}
-+
-+/*
-+ * Run a function on another CPU
-+ * <func> The function to run. This must be fast and non-blocking.
-+ * <info> An arbitrary pointer to pass to the function.
-+ * <nonatomic> Currently unused.
-+ * <wait> If true, wait until function has completed on other CPUs.
-+ * [RETURNS] 0 on success, else a negative status code.
-+ *
-+ * Does not return until the remote CPU is nearly ready to execute <func>
-+ * or is or has executed.
-+ */
-+
-+int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-+ int nonatomic, int wait)
-+{
-+
-+	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-+
-+ if (cpu == me) {
-+ printk("%s: trying to call self\n", __func__);
-+ put_cpu();
-+ return -EBUSY;
-+ }
-+ spin_lock_bh(&call_lock);
-+
-+ __smp_call_function_single(cpu, func,info,nonatomic,wait);
-+
-+ spin_unlock_bh(&call_lock);
-+ put_cpu();
-+ return 0;
-+}
-+
-+/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
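
The hunk above adds smp_call_function_single(): run one function on one other CPU via a directed IPI, optionally waiting for it to finish, instead of broadcasting with smp_call_function(). The smpboot.c hunk that follows is its first user, so the TSC-sync IPI only ever targets a CPU known to be up. A hedged, kernel-context sketch of a caller (not buildable outside a 2.6.12-era tree; the callback and target CPU are made up):

#include <linux/kernel.h>
#include <linux/smp.h>

static void poke_cpu(void *info)
{
	/* Runs on the target CPU from the IPI handler: must be fast,
	 * non-blocking and must not sleep. */
	printk(KERN_INFO "hello from CPU %d\n", smp_processor_id());
}

static void example(void)
{
	/* Ask CPU 0 to run poke_cpu() and wait for completion (wait = 1).
	 * Per the hunk, calling this for the CPU we are already running on
	 * returns -EBUSY rather than self-IPIing. */
	int ret = smp_call_function_single(0, poke_cpu, NULL, 1, 1);

	if (ret)
		printk(KERN_WARNING "IPI to CPU 0 failed: %d\n", ret);
}
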
-diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
---- a/arch/x86_64/kernel/smpboot.c
-+++ b/arch/x86_64/kernel/smpboot.c
-@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
- {
- unsigned long flags, i;
-
-- if (smp_processor_id() != boot_cpu_id)
-- return;
--
- go[MASTER] = 0;
-
- local_irq_save(flags);
-@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
- return tcenter - best_tm;
- }
-
--static __cpuinit void sync_tsc(void)
-+static __cpuinit void sync_tsc(unsigned int master)
- {
- int i, done = 0;
- long delta, adj, adjust_latency = 0;
-@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
- } t[NUM_ROUNDS] __cpuinitdata;
- #endif
-
-+ printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
-+ smp_processor_id(), master);
-+
- go[MASTER] = 1;
-
-- smp_call_function(sync_master, NULL, 1, 0);
-+ /* It is dangerous to broadcast IPI as cpus are coming up,
-+ * as they may not be ready to accept them. So since
-+ * we only need to send the ipi to the boot cpu direct
-+ * the message, and avoid the race.
-+ */
-+ smp_call_function_single(master, sync_master, NULL, 1, 0);
-
- while (go[MASTER]) /* wait for master to be ready */
- no_cpu_relax();
-@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
- printk(KERN_INFO
- "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
- "maxerr %lu cycles)\n",
-- smp_processor_id(), boot_cpu_id, delta, rt);
-+ smp_processor_id(), master, delta, rt);
- }
-
- static void __cpuinit tsc_sync_wait(void)
- {
- if (notscsync || !cpu_has_tsc)
- return;
-- printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
-- boot_cpu_id);
-- sync_tsc();
-+ sync_tsc(0);
- }
-
- static __init int notscsync_setup(char *s)
-diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
---- a/drivers/acpi/pci_irq.c
-+++ b/drivers/acpi/pci_irq.c
-@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
- printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
- pci_name(dev), ('A' + pin));
- /* Interrupt Line values above 0xF are forbidden */
-- if (dev->irq >= 0 && (dev->irq <= 0xF)) {
-+ if (dev->irq > 0 && (dev->irq <= 0xF)) {
- printk(" - using IRQ %d\n", dev->irq);
-+ acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
- return_VALUE(0);
- }
- else {
-diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
---- a/drivers/char/rocket.c
-+++ b/drivers/char/rocket.c
-@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port
- ToRecv = space;
-
- if (ToRecv <= 0)
-- return;
-+ goto done;
-
- /*
- * if status indicates there are errored characters in the
-@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port
- }
- /* Push the data up to the tty layer */
- ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
-+done:
- tty_ldisc_deref(ld);
- }
-
-diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
---- a/drivers/char/tpm/tpm.c
-+++ b/drivers/char/tpm/tpm.c
-@@ -32,12 +32,6 @@
-
- #define TPM_BUFSIZE 2048
-
--/* PCI configuration addresses */
--#define PCI_GEN_PMCON_1 0xA0
--#define PCI_GEN1_DEC 0xE4
--#define PCI_LPC_EN 0xE6
--#define PCI_GEN2_DEC 0xEC
--
- static LIST_HEAD(tpm_chip_list);
- static DEFINE_SPINLOCK(driver_lock);
- static int dev_mask[32];
-@@ -61,72 +55,6 @@ void tpm_time_expired(unsigned long ptr)
- EXPORT_SYMBOL_GPL(tpm_time_expired);
-
- /*
-- * Initialize the LPC bus and enable the TPM ports
-- */
--int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
--{
-- u32 lpcenable, tmp;
-- int is_lpcm = 0;
--
-- switch (pci_dev->vendor) {
-- case PCI_VENDOR_ID_INTEL:
-- switch (pci_dev->device) {
-- case PCI_DEVICE_ID_INTEL_82801CA_12:
-- case PCI_DEVICE_ID_INTEL_82801DB_12:
-- is_lpcm = 1;
-- break;
-- }
-- /* init ICH (enable LPC) */
-- pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
-- lpcenable |= 0x20000000;
-- pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
--
-- if (is_lpcm) {
-- pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
-- &lpcenable);
-- if ((lpcenable & 0x20000000) == 0) {
-- dev_err(&pci_dev->dev,
-- "cannot enable LPC\n");
-- return -ENODEV;
-- }
-- }
--
-- /* initialize TPM registers */
-- pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
--
-- if (!is_lpcm)
-- tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
-- else
-- tmp =
-- (tmp & 0xFFFF0000) | (base & 0xFFF0) |
-- 0x00000001;
--
-- pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
--
-- if (is_lpcm) {
-- pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
-- &tmp);
-- tmp |= 0x00000004; /* enable CLKRUN */
-- pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
-- tmp);
-- }
-- tpm_write_index(0x0D, 0x55); /* unlock 4F */
-- tpm_write_index(0x0A, 0x00); /* int disable */
-- tpm_write_index(0x08, base); /* base addr lo */
-- 		tpm_write_index(0x09, (base & 0xFF00) >> 8); /* base addr hi */
-- tpm_write_index(0x0D, 0xAA); /* lock 4F */
-- break;
-- case PCI_VENDOR_ID_AMD:
-- /* nothing yet */
-- break;
-- }
--
-- return 0;
--}
--
--EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
--
--/*
- * Internal kernel interface to transmit TPM commands
- */
- static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
-@@ -590,10 +518,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
- if (chip == NULL)
- return -ENODEV;
-
-- spin_lock(&driver_lock);
-- tpm_lpc_bus_init(pci_dev, chip->vendor->base);
-- spin_unlock(&driver_lock);
--
- return 0;
- }
-
-diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
---- a/drivers/char/tpm/tpm.h
-+++ b/drivers/char/tpm/tpm.h
-@@ -79,8 +79,6 @@ static inline void tpm_write_index(int i
- }
-
- extern void tpm_time_expired(unsigned long);
--extern int tpm_lpc_bus_init(struct pci_dev *, u16);
--
- extern int tpm_register_hardware(struct pci_dev *,
- struct tpm_vendor_specific *);
- extern int tpm_open(struct inode *, struct file *);
-diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
---- a/drivers/char/tpm/tpm_atmel.c
-+++ b/drivers/char/tpm/tpm_atmel.c
-@@ -22,7 +22,10 @@
- #include "tpm.h"
-
- /* Atmel definitions */
--#define TPM_ATML_BASE 0x400
-+enum tpm_atmel_addr {
-+ TPM_ATMEL_BASE_ADDR_LO = 0x08,
-+ TPM_ATMEL_BASE_ADDR_HI = 0x09
-+};
-
- /* write status bits */
- #define ATML_STATUS_ABORT 0x01
-@@ -127,7 +130,6 @@ static struct tpm_vendor_specific tpm_at
- .cancel = tpm_atml_cancel,
- .req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
- .req_complete_val = ATML_STATUS_DATA_AVAIL,
-- .base = TPM_ATML_BASE,
- .miscdev = { .fops = &atmel_ops, },
- };
-
-@@ -136,14 +138,16 @@ static int __devinit tpm_atml_init(struc
- {
- u8 version[4];
- int rc = 0;
-+ int lo, hi;
-
- if (pci_enable_device(pci_dev))
- return -EIO;
-
-- if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
-- rc = -ENODEV;
-- goto out_err;
-- }
-+ lo = tpm_read_index( TPM_ATMEL_BASE_ADDR_LO );
-+ hi = tpm_read_index( TPM_ATMEL_BASE_ADDR_HI );
-+
-+ tpm_atmel.base = (hi<<8)|lo;
-+ dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
-
- /* verify that it is an Atmel part */
- if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
-diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
---- a/drivers/char/tpm/tpm_nsc.c
-+++ b/drivers/char/tpm/tpm_nsc.c
-@@ -24,6 +24,10 @@
- /* National definitions */
- #define TPM_NSC_BASE 0x360
- #define TPM_NSC_IRQ 0x07
-+#define TPM_NSC_BASE0_HI 0x60
-+#define TPM_NSC_BASE0_LO 0x61
-+#define TPM_NSC_BASE1_HI 0x62
-+#define TPM_NSC_BASE1_LO 0x63
-
- #define NSC_LDN_INDEX 0x07
- #define NSC_SID_INDEX 0x20
-@@ -234,7 +238,6 @@ static struct tpm_vendor_specific tpm_ns
- .cancel = tpm_nsc_cancel,
- .req_complete_mask = NSC_STATUS_OBF,
- .req_complete_val = NSC_STATUS_OBF,
-- .base = TPM_NSC_BASE,
- .miscdev = { .fops = &nsc_ops, },
-
- };
-@@ -243,15 +246,16 @@ static int __devinit tpm_nsc_init(struct
- const struct pci_device_id *pci_id)
- {
- int rc = 0;
-+ int lo, hi;
-+
-+ hi = tpm_read_index(TPM_NSC_BASE0_HI);
-+ lo = tpm_read_index(TPM_NSC_BASE0_LO);
-+
-+ tpm_nsc.base = (hi<<8) | lo;
-
- if (pci_enable_device(pci_dev))
- return -EIO;
-
-- if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
-- rc = -ENODEV;
-- goto out_err;
-- }
--
- /* verify that it is a National part (SID) */
- if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
- rc = -ENODEV;
-diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
---- a/drivers/char/tty_ioctl.c
-+++ b/drivers/char/tty_ioctl.c
-@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
- ld = tty_ldisc_ref(tty);
- switch (arg) {
- case TCIFLUSH:
-- if (ld->flush_buffer)
-+ if (ld && ld->flush_buffer)
- ld->flush_buffer(tty);
- break;
- case TCIOFLUSH:
-- if (ld->flush_buffer)
-+ if (ld && ld->flush_buffer)
- ld->flush_buffer(tty);
- /* fall through */
- case TCOFLUSH:
-diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
---- a/drivers/media/video/cx88/cx88-video.c
-+++ b/drivers/media/video/cx88/cx88-video.c
-@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] =
- .default_value = 0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
-- .off = 0,
-+ .off = 128,
- .reg = MO_HUE,
- .mask = 0x00ff,
- .shift = 0,
-diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
---- a/drivers/net/e1000/e1000_main.c
-+++ b/drivers/net/e1000/e1000_main.c
-@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
- tso = e1000_tso(adapter, skb);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
-+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
- return NETDEV_TX_OK;
- }
-
-diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
---- a/drivers/net/hamradio/Kconfig
-+++ b/drivers/net/hamradio/Kconfig
-@@ -17,7 +17,7 @@ config MKISS
-
- config 6PACK
- tristate "Serial port 6PACK driver"
-- depends on AX25 && BROKEN_ON_SMP
-+ depends on AX25
- ---help---
- 6pack is a transmission protocol for the data exchange between your
- PC and your TNC (the Terminal Node Controller acts as a kind of
-diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
---- a/drivers/net/shaper.c
-+++ b/drivers/net/shaper.c
-@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
- {
- struct shaper *shaper = dev->priv;
- struct sk_buff *ptr;
--
-- if (down_trylock(&shaper->sem))
-- return -1;
-
-+ spin_lock(&shaper->lock);
- ptr=shaper->sendq.prev;
-
- /*
-@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
- shaper->stats.collisions++;
- }
- shaper_kick(shaper);
-- up(&shaper->sem);
-+ spin_unlock(&shaper->lock);
- return 0;
- }
-
-@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
- {
- struct shaper *shaper = (struct shaper *)data;
-
-- if (!down_trylock(&shaper->sem)) {
-- shaper_kick(shaper);
-- up(&shaper->sem);
-- } else
-- mod_timer(&shaper->timer, jiffies);
-+ spin_lock(&shaper->lock);
-+ shaper_kick(shaper);
-+ spin_unlock(&shaper->lock);
- }
-
- /*
-@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
-
-
- /*
-- * Flush the shaper queues on a closedown
-- */
--
--static void shaper_flush(struct shaper *shaper)
--{
-- struct sk_buff *skb;
--
-- down(&shaper->sem);
-- while((skb=skb_dequeue(&shaper->sendq))!=NULL)
-- dev_kfree_skb(skb);
-- shaper_kick(shaper);
-- up(&shaper->sem);
--}
--
--/*
- * Bring the interface up. We just disallow this until a
- * bind.
- */
-@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
- static int shaper_close(struct net_device *dev)
- {
- struct shaper *shaper=dev->priv;
-- shaper_flush(shaper);
-+ struct sk_buff *skb;
-+
-+ while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
-+ dev_kfree_skb(skb);
-+
-+ spin_lock_bh(&shaper->lock);
-+ shaper_kick(shaper);
-+ spin_unlock_bh(&shaper->lock);
-+
- del_timer_sync(&shaper->timer);
- return 0;
- }
-@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
- init_timer(&sh->timer);
- sh->timer.function=shaper_timer;
- sh->timer.data=(unsigned long)sh;
-+ spin_lock_init(&sh->lock);
- }
-
- /*
-diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
---- a/drivers/pci/pci-driver.c
-+++ b/drivers/pci/pci-driver.c
-@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
- /* FIXME, once all of the existing PCI drivers have been fixed to set
- * the pci shutdown function, this test can go away. */
- if (!drv->driver.shutdown)
-- drv->driver.shutdown = pci_device_shutdown,
-+ drv->driver.shutdown = pci_device_shutdown;
- drv->driver.owner = drv->owner;
- drv->driver.kobj.ktype = &pci_driver_kobj_type;
- pci_init_dynids(&drv->dynids);
-diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
---- a/drivers/scsi/qla2xxx/qla_init.c
-+++ b/drivers/scsi/qla2xxx/qla_init.c
-@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t
- rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
-
- fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
-- if (!rport)
-+ if (!rport) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate fc remote port!\n");
-+ return;
-+ }
-
- if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
- fcport->os_target_id = rport->scsi_target_id;
-diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
---- a/drivers/scsi/qla2xxx/qla_os.c
-+++ b/drivers/scsi/qla2xxx/qla_os.c
-@@ -1150,7 +1150,7 @@ iospace_error_exit:
- */
- int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
- {
-- int ret;
-+ int ret = -ENODEV;
- device_reg_t __iomem *reg;
- struct Scsi_Host *host;
- scsi_qla_host_t *ha;
-@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
- fc_port_t *fcport;
-
- if (pci_enable_device(pdev))
-- return -1;
-+ goto probe_out;
-
- host = scsi_host_alloc(&qla2x00_driver_template,
- sizeof(scsi_qla_host_t));
-@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
-
- /* Configure PCI I/O space */
- ret = qla2x00_iospace_config(ha);
-- if (ret != 0) {
-- goto probe_alloc_failed;
-- }
-+ if (ret)
-+ goto probe_failed;
-
- /* Sanitize the information from PCI BIOS. */
- host->irq = pdev->irq;
-@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to allocate memory for adapter\n");
-
-- goto probe_alloc_failed;
-+ ret = -ENOMEM;
-+ goto probe_failed;
- }
-
-- pci_set_drvdata(pdev, ha);
-- host->this_id = 255;
-- host->cmd_per_lun = 3;
-- host->unique_id = ha->instance;
-- host->max_cmd_len = MAX_CMDSZ;
-- host->max_channel = ha->ports - 1;
-- host->max_id = ha->max_targets;
-- host->max_lun = ha->max_luns;
-- host->transportt = qla2xxx_transport_template;
-- if (scsi_add_host(host, &pdev->dev))
-- goto probe_alloc_failed;
--
-- qla2x00_alloc_sysfs_attr(ha);
--
- if (qla2x00_initialize_adapter(ha) &&
- !(ha->device_flags & DFLG_NO_CABLE)) {
-
-@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
- "Adapter flags %x.\n",
- ha->host_no, ha->device_flags));
-
-+ ret = -ENODEV;
- goto probe_failed;
- }
-
-- qla2x00_init_host_attr(ha);
--
- /*
- * Startup the kernel thread for this host adapter
- */
-@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
- qla_printk(KERN_WARNING, ha,
- "Unable to start DPC thread!\n");
-
-+ ret = -ENODEV;
- goto probe_failed;
- }
- wait_for_completion(&ha->dpc_inited);
-
-+ host->this_id = 255;
-+ host->cmd_per_lun = 3;
-+ host->unique_id = ha->instance;
-+ host->max_cmd_len = MAX_CMDSZ;
-+ host->max_channel = ha->ports - 1;
-+ host->max_lun = MAX_LUNS;
-+ host->transportt = qla2xxx_transport_template;
-+
- if (IS_QLA2100(ha) || IS_QLA2200(ha))
- ret = request_irq(host->irq, qla2100_intr_handler,
- SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
- else
- ret = request_irq(host->irq, qla2300_intr_handler,
- SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
-- if (ret != 0) {
-+ if (ret) {
- qla_printk(KERN_WARNING, ha,
- "Failed to reserve interrupt %d already in use.\n",
- host->irq);
-@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
- msleep(10);
- }
-
-+ pci_set_drvdata(pdev, ha);
- ha->flags.init_done = 1;
- num_hosts++;
-
-+ ret = scsi_add_host(host, &pdev->dev);
-+ if (ret)
-+ goto probe_failed;
-+
-+ qla2x00_alloc_sysfs_attr(ha);
-+
-+ qla2x00_init_host_attr(ha);
-+
- qla_printk(KERN_INFO, ha, "\n"
- " QLogic Fibre Channel HBA Driver: %s\n"
- " QLogic %s - %s\n"
-@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
- probe_failed:
- fc_remove_host(ha->host);
-
-- scsi_remove_host(host);
--
--probe_alloc_failed:
- qla2x00_free_device(ha);
-
- scsi_host_put(host);
-@@ -1394,7 +1394,8 @@ probe_alloc_failed:
- probe_disable_device:
- pci_disable_device(pdev);
-
-- return -1;
-+probe_out:
-+ return ret;
- }
- EXPORT_SYMBOL_GPL(qla2x00_probe_one);
-
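
The qla2xxx probe rework above is mostly error-path discipline: `ret` starts as -ENODEV, every early `return -1` becomes a goto to a label that unwinds only what was actually set up, and scsi_add_host()/sysfs registration move to the very end so nothing is exposed to userspace until the adapter is fully initialised. A stand-alone sketch of that goto-unwind shape (the resources and helper names here are invented for illustration):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the real setup steps. */
static void *grab_iospace(void)  { return malloc(16); }
static void *alloc_adapter(void) { return malloc(64); }
static int   start_thread(void)  { return 0; }

static int probe_one(void)
{
	int ret = -ENODEV;          /* assume failure until proven otherwise */
	void *iospace = NULL, *adapter = NULL;

	iospace = grab_iospace();
	if (!iospace)
		goto out;           /* nothing to unwind yet */

	adapter = alloc_adapter();
	if (!adapter) {
		ret = -ENOMEM;
		goto free_iospace;
	}

	ret = start_thread();
	if (ret)
		goto free_adapter;

	printf("probe succeeded\n");
	return 0;                   /* success: keep everything */

free_adapter:
	free(adapter);
free_iospace:
	free(iospace);
out:
	return ret;
}

int main(void)
{
	return probe_one() ? 1 : 0;
}
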
-diff --git a/fs/bio.c b/fs/bio.c
---- a/fs/bio.c
-+++ b/fs/bio.c
-@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
- */
- bio->bi_vcnt = bio_src->bi_vcnt;
- bio->bi_size = bio_src->bi_size;
-+ bio->bi_idx = bio_src->bi_idx;
- bio_phys_segments(q, bio);
- bio_hw_segments(q, bio);
- }
-diff --git a/fs/char_dev.c b/fs/char_dev.c
---- a/fs/char_dev.c
-+++ b/fs/char_dev.c
-@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
- struct char_device_struct *cd = NULL, **cp;
- int i = major_to_index(major);
-
-- up(&chrdevs_lock);
-+ down(&chrdevs_lock);
- for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
- if ((*cp)->major == major &&
- (*cp)->baseminor == baseminor &&
-diff --git a/fs/exec.c b/fs/exec.c
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
- }
- sig->group_exit_task = NULL;
- sig->notify_count = 0;
-+ sig->real_timer.data = (unsigned long)current;
- spin_unlock_irq(lock);
-
- /*
-diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
---- a/fs/isofs/compress.c
-+++ b/fs/isofs/compress.c
-@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
- cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
- brelse(bh);
-
-+ if (cstart > cend)
-+ goto eio;
-+
- csize = cend-cstart;
-
-+ if (csize > deflateBound(1UL << zisofs_block_shift))
-+ goto eio;
-+
- /* Now page[] contains an array of pages, any of which can be NULL,
- and the locks on which we hold. We should now read the data and
- release the pages. If the pages are NULL the decompressed data
-diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
---- a/include/asm-i386/string.h
-+++ b/include/asm-i386/string.h
-@@ -116,7 +116,8 @@ __asm__ __volatile__(
- "orb $1,%%al\n"
- "3:"
- :"=a" (__res), "=&S" (d0), "=&D" (d1)
-- :"1" (cs),"2" (ct));
-+ :"1" (cs),"2" (ct)
-+ :"memory");
- return __res;
- }
-
-@@ -138,8 +139,9 @@ __asm__ __volatile__(
- "3:\tsbbl %%eax,%%eax\n\t"
- "orb $1,%%al\n"
- "4:"
-- :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
-- :"1" (cs),"2" (ct),"3" (count));
-+ :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
-+ :"1" (cs),"2" (ct),"3" (count)
-+ :"memory");
- return __res;
- }
-
-@@ -158,7 +160,9 @@ __asm__ __volatile__(
- "movl $1,%1\n"
- "2:\tmovl %1,%0\n\t"
- "decl %0"
-- :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
-+ :"=a" (__res), "=&S" (d0)
-+ :"1" (s),"0" (c)
-+ :"memory");
- return __res;
- }
-
-@@ -175,7 +179,9 @@ __asm__ __volatile__(
- "leal -1(%%esi),%0\n"
- "2:\ttestb %%al,%%al\n\t"
- "jne 1b"
-- :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
-+ :"=g" (__res), "=&S" (d0), "=&a" (d1)
-+ :"0" (0),"1" (s),"2" (c)
-+ :"memory");
- return __res;
- }
-
-@@ -189,7 +195,9 @@ __asm__ __volatile__(
- "scasb\n\t"
- "notl %0\n\t"
- "decl %0"
-- :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
-+ :"=c" (__res), "=&D" (d0)
-+ :"1" (s),"a" (0), "0" (0xffffffffu)
-+ :"memory");
- return __res;
- }
-
-@@ -333,7 +341,9 @@ __asm__ __volatile__(
- "je 1f\n\t"
- "movl $1,%0\n"
- "1:\tdecl %0"
-- :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
-+ :"=D" (__res), "=&c" (d0)
-+ :"a" (c),"0" (cs),"1" (count)
-+ :"memory");
- return __res;
- }
-
-@@ -369,7 +379,7 @@ __asm__ __volatile__(
- "je 2f\n\t"
- "stosb\n"
- "2:"
-- : "=&c" (d0), "=&D" (d1)
-+ :"=&c" (d0), "=&D" (d1)
- :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
- :"memory");
- return (s);
-@@ -392,7 +402,8 @@ __asm__ __volatile__(
- "jne 1b\n"
- "3:\tsubl %2,%0"
- :"=a" (__res), "=&d" (d0)
-- :"c" (s),"1" (count));
-+ :"c" (s),"1" (count)
-+ :"memory");
- return __res;
- }
- /* end of additional stuff */
-@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
- "dec %%edi\n"
- "1:"
- : "=D" (addr), "=c" (size)
-- : "0" (addr), "1" (size), "a" (c));
-+ : "0" (addr), "1" (size), "a" (c)
-+ : "memory");
- return addr;
- }
-
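
Every string.h hunk above makes the same one-line change: the inline-asm string primitives read or write memory that never appears in their operand lists, so each statement gains a "memory" clobber, which stops GCC from caching values in registers or reordering loads and stores across the asm. The zero-instruction form of the same idea is the classic compiler barrier, shown here as a small runnable sketch:

#include <stdio.h>

/* Emits no instructions, but the "memory" clobber makes GCC assume the
 * asm may read or write any memory, so pending stores must be emitted
 * before it and cached values reloaded after it -- the property the
 * string.h hunks rely on. */
#define barrier()	__asm__ __volatile__("" : : : "memory")

static int shared;

int main(void)
{
	shared = 1;
	barrier();	/* without this, GCC may drop the store above as dead */
	shared = 2;
	printf("%d\n", shared);
	return 0;
}
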
-diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
---- a/include/asm-x86_64/smp.h
-+++ b/include/asm-x86_64/smp.h
-@@ -46,6 +46,8 @@ extern int pic_mode;
- extern int smp_num_siblings;
- extern void smp_flush_tlb(void);
- extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
-+ int retry, int wait);
- extern void smp_send_reschedule(int cpu);
- extern void smp_invalidate_rcv(void); /* Process an NMI */
- extern void zap_low_mappings(void);
-diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h
---- a/include/linux/if_shaper.h
-+++ b/include/linux/if_shaper.h
-@@ -23,7 +23,7 @@ struct shaper
- __u32 shapeclock;
- unsigned long recovery; /* Time we can next clock a packet out on
- an empty queue */
-- struct semaphore sem;
-+ spinlock_t lock;
- struct net_device_stats stats;
- struct net_device *dev;
- int (*hard_start_xmit) (struct sk_buff *skb,
-diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
---- a/include/linux/skbuff.h
-+++ b/include/linux/skbuff.h
-@@ -1192,7 +1192,7 @@ static inline void *skb_header_pointer(c
- {
- int hlen = skb_headlen(skb);
-
-- if (offset + len <= hlen)
-+ if (hlen - offset >= len)
- return skb->data + offset;
-
- if (skb_copy_bits(skb, offset, buffer, len) < 0)
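
The skb_header_pointer() change above is an integer-overflow fix: with an attacker-influenced `offset`, `offset + len` can wrap so the naive comparison passes and the fast path returns a pointer past the end of the linear skb data, while `hlen - offset >= len` fails instead. A small demonstration of the two tests; unsigned arithmetic plus an explicit `offset <= hlen` guard are used here so the wrap-around is well defined, whereas the kernel operands are plain ints:

#include <stdio.h>

static int naive_ok(unsigned int hlen, unsigned int offset, unsigned int len)
{
	return offset + len <= hlen;		/* the sum can wrap to a small value */
}

static int safe_ok(unsigned int hlen, unsigned int offset, unsigned int len)
{
	return offset <= hlen && hlen - offset >= len;
}

int main(void)
{
	unsigned int hlen = 100, len = 16;
	unsigned int offset = 0xfffffff0u;	/* hostile offset */

	printf("naive check: %s\n", naive_ok(hlen, offset, len) ? "ACCEPTS" : "rejects");
	printf("safe  check: %s\n", safe_ok(hlen, offset, len) ? "ACCEPTS" : "rejects");
	return 0;
}
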
-diff --git a/include/linux/zlib.h b/include/linux/zlib.h
---- a/include/linux/zlib.h
-+++ b/include/linux/zlib.h
-@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp
- stream state was inconsistent (such as zalloc or state being NULL).
- */
-
-+static inline unsigned long deflateBound(unsigned long s)
-+{
-+ return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
-+}
-+
- extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
- /*
- Dynamically update the compression level and compression strategy. The
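
The deflateBound() helper added above is a conservative worst-case bound on deflate output for s input bytes (roughly s + s/8 + s/64 + 11); the fs/isofs hunk earlier in this patch uses it to reject on-disk compressed extents whose claimed size could not have come from deflating a single zisofs block. A quick stand-alone check of the formula:

#include <stdio.h>

/* Mirrors the deflateBound() inline added to <linux/zlib.h>. */
static unsigned long deflate_bound(unsigned long s)
{
	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
}

int main(void)
{
	/* zisofs block sizes are powers of two; 32 KB (shift 15) is typical. */
	unsigned int shifts[] = { 12, 15, 16 };

	for (int i = 0; i < 3; i++) {
		unsigned long block = 1UL << shifts[i];
		printf("block %6lu bytes -> compressed size must be <= %lu\n",
		       block, deflate_bound(block));
	}
	return 0;
}
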
-diff --git a/kernel/module.c b/kernel/module.c
---- a/kernel/module.c
-+++ b/kernel/module.c
-@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
- /* Created by linker magic */
- extern char __per_cpu_start[], __per_cpu_end[];
-
--static void *percpu_modalloc(unsigned long size, unsigned long align)
-+static void *percpu_modalloc(unsigned long size, unsigned long align,
-+ const char *name)
- {
- unsigned long extra;
- unsigned int i;
- void *ptr;
-
-- BUG_ON(align > SMP_CACHE_BYTES);
-+ if (align > SMP_CACHE_BYTES) {
-+ printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
-+ name, align, SMP_CACHE_BYTES);
-+ align = SMP_CACHE_BYTES;
-+ }
-
- ptr = __per_cpu_start;
- for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-@@ -347,7 +352,8 @@ static int percpu_modinit(void)
- }
- __initcall(percpu_modinit);
- #else /* ... !CONFIG_SMP */
--static inline void *percpu_modalloc(unsigned long size, unsigned long align)
-+static inline void *percpu_modalloc(unsigned long size, unsigned long align,
-+ const char *name)
- {
- return NULL;
- }
-@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
- if (pcpuindex) {
- /* We have a special allocation for this section. */
- percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
-- sechdrs[pcpuindex].sh_addralign);
-+ sechdrs[pcpuindex].sh_addralign,
-+ mod->name);
- if (!percpu) {
- err = -ENOMEM;
- goto free_mod;
-diff --git a/lib/inflate.c b/lib/inflate.c
---- a/lib/inflate.c
-+++ b/lib/inflate.c
-@@ -326,7 +326,7 @@ DEBG("huft1 ");
- {
- *t = (struct huft *)NULL;
- *m = 0;
-- return 0;
-+ return 2;
- }
-
- DEBG("huft2 ");
-@@ -374,6 +374,7 @@ DEBG("huft5 ");
- if ((j = *p++) != 0)
- v[x[j]++] = i;
- } while (++i < n);
-+ n = x[g]; /* set n to length of v */
-
- DEBG("h6 ");
-
-@@ -410,12 +411,13 @@ DEBG1("1 ");
- DEBG1("2 ");
- f -= a + 1; /* deduct codes from patterns left */
- xp = c + k;
-- while (++j < z) /* try smaller tables up to z bits */
-- {
-- if ((f <<= 1) <= *++xp)
-- break; /* enough codes to use up j bits */
-- f -= *xp; /* else deduct codes from patterns */
-- }
-+ if (j < z)
-+ while (++j < z) /* try smaller tables up to z bits */
-+ {
-+ if ((f <<= 1) <= *++xp)
-+ break; /* enough codes to use up j bits */
-+ f -= *xp; /* else deduct codes from patterns */
-+ }
- }
- DEBG1("3 ");
- z = 1 << j; /* table entries for j-bit table */
-diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
---- a/lib/zlib_inflate/inftrees.c
-+++ b/lib/zlib_inflate/inftrees.c
-@@ -141,7 +141,7 @@ static int huft_build(
- {
- *t = NULL;
- *m = 0;
-- return Z_OK;
-+ return Z_DATA_ERROR;
- }
-
-
-diff --git a/mm/memory.c b/mm/memory.c
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -1164,7 +1164,7 @@ int remap_pfn_range(struct vm_area_struc
- {
- pgd_t *pgd;
- unsigned long next;
-- unsigned long end = addr + size;
-+ unsigned long end = addr + PAGE_ALIGN(size);
- struct mm_struct *mm = vma->vm_mm;
- int err;
-
-diff --git a/mm/mempolicy.c b/mm/mempolicy.c
---- a/mm/mempolicy.c
-+++ b/mm/mempolicy.c
-@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
- struct mempolicy *new;
- DECLARE_BITMAP(nodes, MAX_NUMNODES);
-
-- if (mode > MPOL_MAX)
-+ if (mode < 0 || mode > MPOL_MAX)
- return -EINVAL;
- err = get_nodes(nodes, nmask, maxnode, mode);
- if (err)
-diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
---- a/net/8021q/vlan.c
-+++ b/net/8021q/vlan.c
-@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
- if (!vlandev)
- continue;
-
-+ if (netif_carrier_ok(dev)) {
-+ if (!netif_carrier_ok(vlandev))
-+ netif_carrier_on(vlandev);
-+ } else {
-+ if (netif_carrier_ok(vlandev))
-+ netif_carrier_off(vlandev);
-+ }
-+
- if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
- vlandev->state = (vlandev->state &~
VLAN_LINK_STATE_MASK)
- | flgs;
-diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
---- a/net/ipv4/ip_output.c
-+++ b/net/ipv4/ip_output.c
-@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
- #ifdef CONFIG_NETFILTER_DEBUG
- nf_debug_ip_loopback_xmit(newskb);
- #endif
-- nf_reset(newskb);
- netif_rx(newskb);
- return 0;
- }
-@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
- nf_debug_ip_finish_output2(skb);
- #endif /*CONFIG_NETFILTER_DEBUG*/
-
-- nf_reset(skb);
--
- if (hh) {
- int hh_alen;
-
-diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
---- a/net/ipv4/netfilter/ip_conntrack_core.c
-+++ b/net/ipv4/netfilter/ip_conntrack_core.c
-@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
- schedule();
- goto i_see_dead_people;
- }
-+ /* wait until all references to ip_conntrack_untracked are dropped */
-+ while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
-+ schedule();
-
- kmem_cache_destroy(ip_conntrack_cachep);
- kmem_cache_destroy(ip_conntrack_expect_cachep);
-diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
---- a/net/ipv4/netfilter/ip_conntrack_standalone.c
-+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
-@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
- {
-+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
-+ /* Previously seen (loopback)? Ignore. Do this before
-+ fragment check. */
-+ if ((*pskb)->nfct)
-+ return NF_ACCEPT;
-+#endif
-+
- /* Gather fragments. */
- if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
- *pskb = ip_ct_gather_frags(*pskb,
-diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
---- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
-+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
-@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
- enum ip_nat_manip_type maniptype,
- const struct ip_conntrack *conntrack)
- {
-- static u_int16_t port, *portptr;
-+ static u_int16_t port;
-+ u_int16_t *portptr;
- unsigned int range_size, min, i;
-
- if (maniptype == IP_NAT_MANIP_SRC)
-diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
---- a/net/ipv4/netfilter/ip_nat_proto_udp.c
-+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
-@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
- enum ip_nat_manip_type maniptype,
- const struct ip_conntrack *conntrack)
- {
-- static u_int16_t port, *portptr;
-+ static u_int16_t port;
-+ u_int16_t *portptr;
- unsigned int range_size, min, i;
-
- if (maniptype == IP_NAT_MANIP_SRC)
-diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
---- a/net/ipv6/netfilter/ip6_queue.c
-+++ b/net/ipv6/netfilter/ip6_queue.c
-@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
- static void
- ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
- {
-+ local_bh_disable();
- nf_reinject(entry->skb, entry->info, verdict);
-+ local_bh_enable();
- kfree(entry);
- }
-
-diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
---- a/net/netlink/af_netlink.c
-+++ b/net/netlink/af_netlink.c
-@@ -315,8 +315,8 @@ err:
- static void netlink_remove(struct sock *sk)
- {
- netlink_table_grab();
-- nl_table[sk->sk_protocol].hash.entries--;
-- sk_del_node_init(sk);
-+ if (sk_del_node_init(sk))
-+ nl_table[sk->sk_protocol].hash.entries--;
- if (nlk_sk(sk)->groups)
- __sk_del_bind_node(sk);
- netlink_table_ungrab();
-@@ -429,7 +429,12 @@ retry:
- err = netlink_insert(sk, pid);
- if (err == -EADDRINUSE)
- goto retry;
-- return 0;
-+
-+ /* If 2 threads race to autobind, that is fine. */
-+ if (err == -EBUSY)
-+ err = 0;
-+
-+ return err;
- }
-
- static inline int netlink_capable(struct socket *sock, unsigned int flag)
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
---- a/net/packet/af_packet.c
-+++ b/net/packet/af_packet.c
-@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
- dst_release(skb->dst);
- skb->dst = NULL;
-
-+ /* drop conntrack reference */
-+ nf_reset(skb);
-+
- spkt = (struct sockaddr_pkt*)skb->cb;
-
- skb_push(skb, skb->data-skb->mac.raw);
-@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
- dst_release(skb->dst);
- skb->dst = NULL;
-
-+ /* drop conntrack reference */
-+ nf_reset(skb);
-+
- spin_lock(&sk->sk_receive_queue.lock);
- po->stats.tp_packets++;
- __skb_queue_tail(&sk->sk_receive_queue, skb);
-diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
---- a/net/xfrm/xfrm_user.c
-+++ b/net/xfrm/xfrm_user.c
-@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
- if (nr > XFRM_MAX_DEPTH)
- return NULL;
-
-+ if (p->dir > XFRM_POLICY_OUT)
-+ return NULL;
-+
- xp = xfrm_policy_alloc(GFP_KERNEL);
- if (xp == NULL) {
- *dir = -ENOBUFS;
-diff --git a/security/keys/keyring.c b/security/keys/keyring.c
---- a/security/keys/keyring.c
-+++ b/security/keys/keyring.c
-@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
-
- if (keyring->description) {
- write_lock(&keyring_name_lock);
-- list_del(&keyring->type_data.link);
-+
-+ if (keyring->type_data.link.next != NULL &&
-+ !list_empty(&keyring->type_data.link))
-+ list_del(&keyring->type_data.link);
-+
- write_unlock(&keyring_name_lock);
- }
-
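
keyring_destroy() can run for a keyring whose name was never linked onto the global keyring name list (for example when setup failed part-way, leaving the embedded list node in its zeroed state), and a plain list_del() would then follow NULL pointers; the added test only unlinks entries that were really inserted. A stand-alone miniature of the same guard, with a tiny kernel-style list re-implemented so the example compiles on its own:

#include <stddef.h>
#include <stdio.h>

/* Minimal circular doubly linked list in the style of the kernel's list.h. */
struct list_head { struct list_head *next, *prev; };

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
	entry->next = entry->prev = NULL;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct keyring { const char *name; struct list_head link; };

static struct list_head name_list = { &name_list, &name_list };

static void destroy(struct keyring *k)
{
	/* The guard added by the patch: only unlink if the node was ever
	 * inserted (next != NULL) and is still on a list. */
	if (k->link.next != NULL && !list_empty(&k->link))
		list_del(&k->link);
	printf("%s destroyed\n", k->name);
}

int main(void)
{
	struct keyring linked   = { "linked",   { NULL, NULL } };
	struct keyring orphaned = { "orphaned", { NULL, NULL } };

	list_add(&linked.link, &name_list);
	destroy(&linked);    /* unlinks safely */
	destroy(&orphaned);  /* never inserted: the guard skips list_del() */
	return 0;
}
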
-diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
---- a/security/keys/process_keys.c
-+++ b/security/keys/process_keys.c
-@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
- keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
- if (IS_ERR(keyring)) {
- ret = PTR_ERR(keyring);
-- goto error;
-+ goto error2;
- }
- }
- else if (IS_ERR(keyring)) {