[kernel] r13197 - in dists/lenny/linux-2.6/debian: . patches/bugfix/parisc patches/series
Dann Frazier
dannf at alioth.debian.org
Sun Mar 22 20:18:52 UTC 2009
Author: dannf
Date: Sun Mar 22 20:18:50 2009
New Revision: 13197
Log:
[parisc] Fix the loading of large kernel modules (Closes: #401439)
Added:
dists/lenny/linux-2.6/debian/patches/bugfix/parisc/fix-loading-large-kmods.patch
Modified:
dists/lenny/linux-2.6/debian/changelog
dists/lenny/linux-2.6/debian/patches/series/14
Modified: dists/lenny/linux-2.6/debian/changelog
==============================================================================
--- dists/lenny/linux-2.6/debian/changelog (original)
+++ dists/lenny/linux-2.6/debian/changelog Sun Mar 22 20:18:50 2009
@@ -35,6 +35,7 @@
* Add -fwrapv to CFLAGS to prevent gcc from optimizing out certain
wrap tests. (Closes: #520548)
* Bump ABI to 2.
+ * [parisc] Fix the loading of large kernel modules (Closes: #401439)
[ Martin Michlmayr ]
* rt2x00: Fix VGC lower bound initialization. (Closes: #510607)
@@ -85,7 +86,7 @@
* [openvz] 397500c autofs4: fix ia32 compat mode.
* [openvz] 0328e3d pidns: update leader_pid at pidns attach.
- -- dann frazier <dannf at debian.org> Sun, 22 Mar 2009 13:45:48 -0600
+ -- dann frazier <dannf at debian.org> Sun, 22 Mar 2009 14:09:23 -0600
linux-2.6 (2.6.26-13lenny2) stable-security; urgency=high
Added: dists/lenny/linux-2.6/debian/patches/bugfix/parisc/fix-loading-large-kmods.patch
==============================================================================
--- (empty file)
+++ dists/lenny/linux-2.6/debian/patches/bugfix/parisc/fix-loading-large-kmods.patch Sun Mar 22 20:18:50 2009
@@ -0,0 +1,541 @@
+This file includes the cumulative changes from the following 3 git changesets,
+adjusted to apply to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+commit 2f0f2a334bc38b61a9afca951185cd3844ee709d
+Author: Denys Vlasenko <vda.linux at googlemail.com>
+Date: Tue Jul 22 19:24:27 2008 -0500
+
+ module: turn longs into ints for module sizes
+
+ This shrinks module.o and each *.ko file.
+
+ And finally, structure members which hold length of module
+ code (four such members there) and count of symbols
+ are converted from longs to ints.
+
+ We cannot possibly have a module where 32 bits won't
+ be enough to hold such counts.
+
+ For one, module loading checks module size for sanity
+ before loading, so such insanely big module will fail
+ that test first.
+
+ Signed-off-by: Denys Vlasenko <vda.linux at googlemail.com>
+ Signed-off-by: Rusty Russell <rusty at rustcorp.com.au>
+
+commit 088af9a6e05d51e7c3dc85d45d8b7a52c3ee08d7
+Author: Helge Deller <deller at gmx.de>
+Date: Wed Dec 31 12:31:18 2008 +0100
+
+ module: fix module loading failure of large kernel modules for parisc
+
+ When creating the final layout of a kernel module in memory, allow the
+ module loader to reserve some additional memory in front of a given section.
+ This is currently only needed for the parisc port which needs to put the
+ stub entries there to fulfill the 17/22bit PCREL relocations with large
+ kernel modules like xfs.
+
+ Signed-off-by: Helge Deller <deller at gmx.de>
+ Signed-off-by: Rusty Russell <rusty at rustcorp.com.au> (renamed fn)
+
+commit c298be74492bece102f3379d14015638f1fd1fac
+Author: Helge Deller <deller at gmx.de>
+Date: Thu Jan 1 22:25:30 2009 +0100
+
+ parisc: fix module loading failure of large kernel modules
+
+ On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
+ ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
+ fail to reach their PLT stub if we only create one big stub array for
+ all sections at the beginning of the core or init section.
+
+ With this patch we now instead add individual PLT stub entries
+ directly in front of the code sections where the stubs are actually
+ called. This reduces the distance between the PCREL location and the
+ stub entry so that the relocations can be fulfilled.
+
+ While calculating the final layout of the kernel module in memory, the
+ kernel module loader calls arch_mod_section_prepend() to request the
+ to be reserved amount of memory in front of each individual section.
+
+ Tested with 32- and 64bit kernels.
+
+ Signed-off-by: Helge Deller <deller at gmx.de>
+ Signed-off-by: Rusty Russell <rusty at rustcorp.com.au>
+
+diff -urpN a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+--- a/arch/parisc/kernel/module.c 2008-07-13 15:51:29.000000000 -0600
++++ b/arch/parisc/kernel/module.c 2009-03-22 13:33:50.000000000 -0600
+@@ -6,6 +6,7 @@
+ *
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ * Copyright (C) 2003 Randolph Chung <tausq at debian . org>
++ * Copyright (C) 2008 Helge Deller <deller at gmx.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -24,6 +25,19 @@
+ *
+ *
+ * Notes:
++ * - PLT stub handling
++ * On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
++ * ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
++ * fail to reach their PLT stub if we only create one big stub array for
++ * all sections at the beginning of the core or init section.
++ * Instead we now insert individual PLT stub entries directly in front
++ * of the code sections where the stubs are actually called.
++ * This reduces the distance between the PCREL location and the stub entry
++ * so that the relocations can be fulfilled.
++ * While calculating the final layout of the kernel module in memory, the
++ * kernel module loader calls arch_mod_section_prepend() to request the
++ * to be reserved amount of memory in front of each individual section.
++ *
+ * - SEGREL32 handling
+ * We are not doing SEGREL32 handling correctly. According to the ABI, we
+ * should do a value offset, like this:
+@@ -56,9 +70,13 @@
+ #define DEBUGP(fmt...)
+ #endif
+
++#define RELOC_REACHABLE(val, bits) \
++ (( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
++ ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \
++ 0 : 1)
++
+ #define CHECK_RELOC(val, bits) \
+- if ( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
+- ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) { \
++ if (!RELOC_REACHABLE(val, bits)) { \
+ printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
+ me->name, strtab + sym->st_name, (unsigned long)val, bits); \
+ return -ENOEXEC; \
+@@ -90,13 +108,6 @@ static inline int in_local(struct module
+ return in_init(me, loc) || in_core(me, loc);
+ }
+
+-static inline int in_local_section(struct module *me, void *loc, void *dot)
+-{
+- return (in_init(me, loc) && in_init(me, dot)) ||
+- (in_core(me, loc) && in_core(me, dot));
+-}
+-
+-
+ #ifndef CONFIG_64BIT
+ struct got_entry {
+ Elf32_Addr addr;
+@@ -256,23 +267,42 @@ static inline unsigned long count_stubs(
+ /* Free memory returned from module_alloc */
+ void module_free(struct module *mod, void *module_region)
+ {
++ kfree(mod->arch.section);
++ mod->arch.section = NULL;
++
+ vfree(module_region);
+ /* FIXME: If module_region == mod->init_region, trim exception
+ table entries. */
+ }
+
++/* Additional bytes needed in front of individual sections */
++unsigned int arch_mod_section_prepend(struct module *mod,
++ unsigned int section)
++{
++ /* size needed for all stubs of this section (including
++ * one additional for correct alignment of the stubs) */
++ return (mod->arch.section[section].stub_entries + 1)
++ * sizeof(struct stub_entry);
++}
++
+ #define CONST
+ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+ CONST Elf_Shdr *sechdrs,
+ CONST char *secstrings,
+ struct module *me)
+ {
+- unsigned long gots = 0, fdescs = 0, stubs = 0, init_stubs = 0;
++ unsigned long gots = 0, fdescs = 0, len;
+ unsigned int i;
+
++ len = hdr->e_shnum * sizeof(me->arch.section[0]);
++ me->arch.section = kzalloc(len, GFP_KERNEL);
++ if (!me->arch.section)
++ return -ENOMEM;
++
+ for (i = 1; i < hdr->e_shnum; i++) {
+- const Elf_Rela *rels = (void *)hdr + sechdrs[i].sh_offset;
++ const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
+ unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
++ unsigned int count, s;
+
+ if (strncmp(secstrings + sechdrs[i].sh_name,
+ ".PARISC.unwind", 14) == 0)
+@@ -288,11 +318,23 @@ int module_frob_arch_sections(CONST Elf_
+ */
+ gots += count_gots(rels, nrels);
+ fdescs += count_fdescs(rels, nrels);
+- if(strncmp(secstrings + sechdrs[i].sh_name,
+- ".rela.init", 10) == 0)
+- init_stubs += count_stubs(rels, nrels);
+- else
+- stubs += count_stubs(rels, nrels);
++
++ /* XXX: By sorting the relocs and finding duplicate entries
++ * we could reduce the number of necessary stubs and save
++ * some memory. */
++ count = count_stubs(rels, nrels);
++ if (!count)
++ continue;
++
++ /* so we need relocation stubs. reserve necessary memory. */
++ /* sh_info gives the section for which we need to add stubs. */
++ s = sechdrs[i].sh_info;
++
++ /* each code section should only have one relocation section */
++ WARN_ON(me->arch.section[s].stub_entries);
++
++ /* store number of stubs we need for this section */
++ me->arch.section[s].stub_entries += count;
+ }
+
+ /* align things a bit */
+@@ -304,18 +346,8 @@ int module_frob_arch_sections(CONST Elf_
+ me->arch.fdesc_offset = me->core_size;
+ me->core_size += fdescs * sizeof(Elf_Fdesc);
+
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.stub_offset = me->core_size;
+- me->core_size += stubs * sizeof(struct stub_entry);
+-
+- me->init_size = ALIGN(me->init_size, 16);
+- me->arch.init_stub_offset = me->init_size;
+- me->init_size += init_stubs * sizeof(struct stub_entry);
+-
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+- me->arch.stub_max = stubs;
+- me->arch.init_stub_max = init_stubs;
+
+ return 0;
+ }
+@@ -378,23 +410,27 @@ enum elf_stub_type {
+ };
+
+ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
+- enum elf_stub_type stub_type, int init_section)
++ enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
+ {
+- unsigned long i;
+ struct stub_entry *stub;
+
+- if(init_section) {
+- i = me->arch.init_stub_count++;
+- BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max);
+- stub = me->module_init + me->arch.init_stub_offset +
+- i * sizeof(struct stub_entry);
+- } else {
+- i = me->arch.stub_count++;
+- BUG_ON(me->arch.stub_count > me->arch.stub_max);
+- stub = me->module_core + me->arch.stub_offset +
+- i * sizeof(struct stub_entry);
++ /* initialize stub_offset to point in front of the section */
++ if (!me->arch.section[targetsec].stub_offset) {
++ loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
++ sizeof(struct stub_entry);
++ /* get correct alignment for the stubs */
++ loc0 = ALIGN(loc0, sizeof(struct stub_entry));
++ me->arch.section[targetsec].stub_offset = loc0;
+ }
+
++ /* get address of stub entry */
++ stub = (void *) me->arch.section[targetsec].stub_offset;
++ me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
++
++ /* do not write outside available stub area */
++ BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
++
++
+ #ifndef CONFIG_64BIT
+ /* for 32-bit the stub looks like this:
+ * ldil L'XXX,%r1
+@@ -487,15 +523,19 @@ int apply_relocate_add(Elf_Shdr *sechdrs
+ Elf32_Addr val;
+ Elf32_Sword addend;
+ Elf32_Addr dot;
++ Elf_Addr loc0;
++ unsigned int targetsec = sechdrs[relsec].sh_info;
+ //unsigned long dp = (unsigned long)$global$;
+ register unsigned long dp asm ("r27");
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+- sechdrs[relsec].sh_info);
++ targetsec);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+- loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
++ loc = (void *)sechdrs[targetsec].sh_addr
+ + rel[i].r_offset;
++ /* This is the start of the target section */
++ loc0 = sechdrs[targetsec].sh_addr;
+ /* This is the symbol it is referring to */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+@@ -567,19 +607,32 @@ int apply_relocate_add(Elf_Shdr *sechdrs
+ break;
+ case R_PARISC_PCREL17F:
+ /* 17-bit PC relative address */
+- val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
++ /* calculate direct call offset */
++ val += addend;
+ val = (val - dot - 8)/4;
+- CHECK_RELOC(val, 17)
++ if (!RELOC_REACHABLE(val, 17)) {
++ /* direct distance too far, create
++ * stub entry instead */
++ val = get_stub(me, sym->st_value, addend,
++ ELF_STUB_DIRECT, loc0, targetsec);
++ val = (val - dot - 8)/4;
++ CHECK_RELOC(val, 17);
++ }
+ *loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
+ break;
+ case R_PARISC_PCREL22F:
+ /* 22-bit PC relative address; only defined for pa20 */
+- val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
+- DEBUGP("STUB FOR %s loc %lx+%lx at %lx\n",
+- strtab + sym->st_name, (unsigned long)loc, addend,
+- val)
++ /* calculate direct call offset */
++ val += addend;
+ val = (val - dot - 8)/4;
+- CHECK_RELOC(val, 22);
++ if (!RELOC_REACHABLE(val, 22)) {
++ /* direct distance too far, create
++ * stub entry instead */
++ val = get_stub(me, sym->st_value, addend,
++ ELF_STUB_DIRECT, loc0, targetsec);
++ val = (val - dot - 8)/4;
++ CHECK_RELOC(val, 22);
++ }
+ *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
+ break;
+
+@@ -608,13 +661,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs
+ Elf64_Addr val;
+ Elf64_Sxword addend;
+ Elf64_Addr dot;
++ Elf_Addr loc0;
++ unsigned int targetsec = sechdrs[relsec].sh_info;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+- sechdrs[relsec].sh_info);
++ targetsec);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+- loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
++ loc = (void *)sechdrs[targetsec].sh_addr
+ + rel[i].r_offset;
++ /* This is the start of the target section */
++ loc0 = sechdrs[targetsec].sh_addr;
+ /* This is the symbol it is referring to */
+ sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ + ELF64_R_SYM(rel[i].r_info);
+@@ -670,42 +727,40 @@ int apply_relocate_add(Elf_Shdr *sechdrs
+ DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
+ strtab + sym->st_name,
+ loc, val);
++ val += addend;
+ /* can we reach it locally? */
+- if(!in_local_section(me, (void *)val, (void *)dot)) {
+-
+- if (in_local(me, (void *)val))
+- /* this is the case where the
+- * symbol is local to the
+- * module, but in a different
+- * section, so stub the jump
+- * in case it's more than 22
+- * bits away */
+- val = get_stub(me, val, addend, ELF_STUB_DIRECT,
+- in_init(me, loc));
+- else if (strncmp(strtab + sym->st_name, "$$", 2)
++ if (in_local(me, (void *)val)) {
++ /* this is the case where the symbol is local
++ * to the module, but in a different section,
++ * so stub the jump in case it's more than 22
++ * bits away */
++ val = (val - dot - 8)/4;
++ if (!RELOC_REACHABLE(val, 22)) {
++ /* direct distance too far, create
++ * stub entry instead */
++ val = get_stub(me, sym->st_value,
++ addend, ELF_STUB_DIRECT,
++ loc0, targetsec);
++ } else {
++ /* Ok, we can reach it directly. */
++ val = sym->st_value;
++ val += addend;
++ }
++ } else {
++ val = sym->st_value;
++ if (strncmp(strtab + sym->st_name, "$$", 2)
+ == 0)
+ val = get_stub(me, val, addend, ELF_STUB_MILLI,
+- in_init(me, loc));
++ loc0, targetsec);
+ else
+ val = get_stub(me, val, addend, ELF_STUB_GOT,
+- in_init(me, loc));
++ loc0, targetsec);
+ }
+ DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
+ strtab + sym->st_name, loc, sym->st_value,
+ addend, val);
+- /* FIXME: local symbols work as long as the
+- * core and init pieces aren't separated too
+- * far. If this is ever broken, you will trip
+- * the check below. The way to fix it would
+- * be to generate local stubs to go between init
+- * and core */
+- if((Elf64_Sxword)(val - dot - 8) > 0x800000 -1 ||
+- (Elf64_Sxword)(val - dot - 8) < -0x800000) {
+- printk(KERN_ERR "Module %s, symbol %s is out of range for PCREL22F relocation\n",
+- me->name, strtab + sym->st_name);
+- return -ENOEXEC;
+- }
+ val = (val - dot - 8)/4;
++ CHECK_RELOC(val, 22);
+ *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
+ break;
+ case R_PARISC_DIR64:
+@@ -792,12 +847,8 @@ int module_finalize(const Elf_Ehdr *hdr,
+ addr = (u32 *)entry->addr;
+ printk("INSNS: %x %x %x %x\n",
+ addr[0], addr[1], addr[2], addr[3]);
+- printk("stubs used %ld, stubs max %ld\n"
+- "init_stubs used %ld, init stubs max %ld\n"
+- "got entries used %ld, gots max %ld\n"
++ printk("got entries used %ld, gots max %ld\n"
+ "fdescs used %ld, fdescs max %ld\n",
+- me->arch.stub_count, me->arch.stub_max,
+- me->arch.init_stub_count, me->arch.init_stub_max,
+ me->arch.got_count, me->arch.got_max,
+ me->arch.fdesc_count, me->arch.fdesc_max);
+ #endif
+@@ -827,7 +878,10 @@ int module_finalize(const Elf_Ehdr *hdr,
+ me->name, me->arch.got_count, MAX_GOTS);
+ return -EINVAL;
+ }
+-
++
++ kfree(me->arch.section);
++ me->arch.section = NULL;
++
+ /* no symbol table */
+ if(symhdr == NULL)
+ return 0;
+diff -urpN a/include/asm-parisc/module.h b/include/asm-parisc/module.h
+--- a/include/asm-parisc/module.h 2008-07-13 15:51:29.000000000 -0600
++++ b/include/asm-parisc/module.h 2009-03-22 13:33:50.000000000 -0600
+@@ -23,8 +23,10 @@ struct mod_arch_specific
+ {
+ unsigned long got_offset, got_count, got_max;
+ unsigned long fdesc_offset, fdesc_count, fdesc_max;
+- unsigned long stub_offset, stub_count, stub_max;
+- unsigned long init_stub_offset, init_stub_count, init_stub_max;
++ struct {
++ unsigned long stub_offset;
++ unsigned int stub_entries;
++ } *section;
+ int unwind_section;
+ struct unwind_table *unwind;
+ };
+diff -urpN a/include/linux/module.h b/include/linux/module.h
+--- a/include/linux/module.h 2008-07-13 15:51:29.000000000 -0600
++++ b/include/linux/module.h 2009-03-22 13:31:53.000000000 -0600
+@@ -285,10 +285,10 @@ struct module
+ void *module_core;
+
+ /* Here are the sizes of the init and core sections */
+- unsigned long init_size, core_size;
++ unsigned int init_size, core_size;
+
+ /* The size of the executable code in each section. */
+- unsigned long init_text_size, core_text_size;
++ unsigned int init_text_size, core_text_size;
+
+ /* The handle returned from unwind_add_table. */
+ void *unwind_info;
+@@ -322,7 +322,7 @@ struct module
+ #ifdef CONFIG_KALLSYMS
+ /* We keep the symbol and string tables for kallsyms. */
+ Elf_Sym *symtab;
+- unsigned long num_symtab;
++ unsigned int num_symtab;
+ char *strtab;
+
+ /* Section attributes */
+diff -urpN a/include/linux/moduleloader.h b/include/linux/moduleloader.h
+--- a/include/linux/moduleloader.h 2008-07-13 15:51:29.000000000 -0600
++++ b/include/linux/moduleloader.h 2009-03-22 13:33:14.000000000 -0600
+@@ -13,6 +13,9 @@ int module_frob_arch_sections(Elf_Ehdr *
+ char *secstrings,
+ struct module *mod);
+
++/* Additional bytes needed by arch in front of individual sections */
++unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
++
+ /* Allocator used for allocating struct module, core sections and init
+ sections. Returns NULL on failure. */
+ void *module_alloc(unsigned long size);
+diff -urpN a/kernel/module.c b/kernel/module.c
+--- a/kernel/module.c 2009-03-20 17:03:33.000000000 -0600
++++ b/kernel/module.c 2009-03-22 13:33:14.000000000 -0600
+@@ -1525,11 +1525,21 @@ static int simplify_symbols(Elf_Shdr *se
+ return ret;
+ }
+
++/* Additional bytes needed by arch in front of individual sections */
++unsigned int __weak arch_mod_section_prepend(struct module *mod,
++ unsigned int section)
++{
++ /* default implementation just returns zero */
++ return 0;
++}
++
+ /* Update size with this section: return offset. */
+-static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
++static long get_offset(struct module *mod, unsigned int *size,
++ Elf_Shdr *sechdr, unsigned int section)
+ {
+ long ret;
+
++ *size += arch_mod_section_prepend(mod, section);
+ ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
+ *size = ret + sechdr->sh_size;
+ return ret;
+@@ -1569,7 +1579,7 @@ static void layout_sections(struct modul
+ || strncmp(secstrings + s->sh_name,
+ ".init", 5) == 0)
+ continue;
+- s->sh_entsize = get_offset(&mod->core_size, s);
++ s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
+ DEBUGP("\t%s\n", secstrings + s->sh_name);
+ }
+ if (m == 0)
+@@ -1587,7 +1597,7 @@ static void layout_sections(struct modul
+ || strncmp(secstrings + s->sh_name,
+ ".init", 5) != 0)
+ continue;
+- s->sh_entsize = (get_offset(&mod->init_size, s)
++ s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
+ | INIT_OFFSET_MASK);
+ DEBUGP("\t%s\n", secstrings + s->sh_name);
+ }
+@@ -2510,7 +2520,7 @@ static int m_show(struct seq_file *m, vo
+ struct module *mod = list_entry(p, struct module, list);
+ char buf[8];
+
+- seq_printf(m, "%s %lu",
++ seq_printf(m, "%s %u",
+ mod->name, mod->init_size + mod->core_size);
+ print_unload_info(m, mod);
+
Modified: dists/lenny/linux-2.6/debian/patches/series/14
==============================================================================
--- dists/lenny/linux-2.6/debian/patches/series/14 (original)
+++ dists/lenny/linux-2.6/debian/patches/series/14 Sun Mar 22 20:18:50 2009
@@ -29,3 +29,4 @@
- bugfix/all/stable/2.6.26.6-abi-1.patch
- bugfix/all/stable/2.6.26.8-abi-1.patch
- bugfix/all/CVE-2009-0029/mips-enable-syscall-wrappers-no-abi-change.patch
++ bugfix/parisc/fix-loading-large-kmods.patch
More information about the Kernel-svn-changes
mailing list