
[Xen-devel] [xen-tip:linux-next 7/7] arch/x86/xen/setup.c:761: undefined reference to `xen_saved_max_mem_size'



tree:   https://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git linux-next
head:   137d14ed474f7b652fc3ca58bf3b10c9cf4c09a1
commit: 137d14ed474f7b652fc3ca58bf3b10c9cf4c09a1 [7/7] x86/xen: dont add memory above max allowed allocation
config: x86_64-randconfig-s1-02172339 (attached as .config)
compiler: gcc-6 (Debian 6.5.0-2) 6.5.0 20181026
reproduce:
        git checkout 137d14ed474f7b652fc3ca58bf3b10c9cf4c09a1
        # save the attached .config to linux build tree
        make ARCH=x86_64 

All errors (new ones prefixed by >>):

   ld: arch/x86/xen/setup.o: in function `xen_memory_setup':
>> arch/x86/xen/setup.c:761: undefined reference to `xen_saved_max_mem_size'

vim +761 arch/x86/xen/setup.c
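
The failing reference at line 761 of the excerpt below is guarded by CONFIG_MEMORY_HOTPLUG, so the link error suggests that the definition of xen_saved_max_mem_size is built under a different, narrower Kconfig option that this randconfig does not enable. A minimal sketch of that failure mode, using hypothetical file and symbol names (not the actual Xen sources):

    /* user.c - always built; the reference is compiled whenever CONFIG_A=y */
    #ifdef CONFIG_A
    extern unsigned long saved_max_size;    /* declaration only */
    #endif

    void setup(void)
    {
    #ifdef CONFIG_A
            saved_max_size = 0;             /* ld must resolve this symbol */
    #endif
    }

    /* provider.c - the sole definition, but only compiled when CONFIG_B=y.
     * With CONFIG_A=y and CONFIG_B=n the reference above has nothing to
     * link against, which shows up as an "undefined reference" at ld time.
     */
    #ifdef CONFIG_B
    unsigned long saved_max_size;
    #endif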

   736  
   737  /**
   738   * machine_specific_memory_setup - Hook for machine specific memory setup.
   739   **/
   740  char * __init xen_memory_setup(void)
   741  {
   742          unsigned long max_pfn, pfn_s, n_pfns;
   743          phys_addr_t mem_end, addr, size, chunk_size;
   744          u32 type;
   745          int rc;
   746          struct xen_memory_map memmap;
   747          unsigned long max_pages;
   748          unsigned long extra_pages = 0;
   749          int i;
   750          int op;
   751  
   752          xen_parse_512gb();
   753          max_pfn = xen_get_pages_limit();
   754          max_pfn = min(max_pfn, xen_start_info->nr_pages);
   755          mem_end = PFN_PHYS(max_pfn);
   756  
   757          memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
   758          set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
   759  
   760  #ifdef CONFIG_MEMORY_HOTPLUG
 > 761          xen_saved_max_mem_size = max_mem_size;
   762  #endif
   763  
   764          op = xen_initial_domain() ?
   765                  XENMEM_machine_memory_map :
   766                  XENMEM_memory_map;
   767          rc = HYPERVISOR_memory_op(op, &memmap);
   768          if (rc == -ENOSYS) {
   769                  BUG_ON(xen_initial_domain());
   770                  memmap.nr_entries = 1;
   771                  xen_e820_table.entries[0].addr = 0ULL;
   772                  xen_e820_table.entries[0].size = mem_end;
   773                  /* 8MB slack (to balance backend allocations). */
   774                  xen_e820_table.entries[0].size += 8ULL << 20;
   775                  xen_e820_table.entries[0].type = E820_TYPE_RAM;
   776                  rc = 0;
   777          }
   778          BUG_ON(rc);
   779          BUG_ON(memmap.nr_entries == 0);
   780          xen_e820_table.nr_entries = memmap.nr_entries;
   781  
   782          /*
   783           * Xen won't allow a 1:1 mapping to be created to UNUSABLE
   784           * regions, so if we're using the machine memory map leave the
   785           * region as RAM as it is in the pseudo-physical map.
   786           *
   787           * UNUSABLE regions in domUs are not handled and will need
   788           * a patch in the future.
   789           */
   790          if (xen_initial_domain())
   791                  xen_ignore_unusable();
   792  
   793          /* Make sure the Xen-supplied memory map is well-ordered. */
   794          e820__update_table(&xen_e820_table);
   795  
   796          max_pages = xen_get_max_pages();
   797  
   798          /* How many extra pages do we need due to remapping? */
   799          max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
   800  
   801          if (max_pages > max_pfn)
   802                  extra_pages += max_pages - max_pfn;
   803  
   804          /*
   805           * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
   806           * factor the base size.  On non-highmem systems, the base
   807           * size is the full initial memory allocation; on highmem it
   808           * is limited to the max size of lowmem, so that it doesn't
   809           * get completely filled.
   810           *
   811           * Make sure we have no memory above max_pages, as this area
   812           * isn't handled by the p2m management.
   813           *
   814           * In principle there could be a problem in lowmem systems if
   815           * the initial memory is also very large with respect to
   816           * lowmem, but we won't try to deal with that here.
   817           */
   818          extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
   819                             extra_pages, max_pages - max_pfn);
   820          i = 0;
   821          addr = xen_e820_table.entries[0].addr;
   822          size = xen_e820_table.entries[0].size;
   823          while (i < xen_e820_table.nr_entries) {
   824                  bool discard = false;
   825  
   826                  chunk_size = size;
   827                  type = xen_e820_table.entries[i].type;
   828  
   829                  if (type == E820_TYPE_RAM) {
   830                          if (addr < mem_end) {
   831                                  chunk_size = min(size, mem_end - addr);
   832                          } else if (extra_pages) {
   833                                  chunk_size = min(size, PFN_PHYS(extra_pages));
   834                                  pfn_s = PFN_UP(addr);
   835                                  n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
   836                                  extra_pages -= n_pfns;
   837                                  xen_add_extra_mem(pfn_s, n_pfns);
   838                                  xen_max_p2m_pfn = pfn_s + n_pfns;
   839                          } else
   840                                  discard = true;
   841                  }
   842  
   843                  if (!discard)
   844                          xen_align_and_add_e820_region(addr, chunk_size, type);
   845  
   846                  addr += chunk_size;
   847                  size -= chunk_size;
   848                  if (size == 0) {
   849                          i++;
   850                          if (i < xen_e820_table.nr_entries) {
   851                                  addr = xen_e820_table.entries[i].addr;
   852                                  size = xen_e820_table.entries[i].size;
   853                          }
   854                  }
   855          }
   856  
   857          /*
   858           * Set the rest as identity mapped, in case PCI BARs are
   859           * located here.
   860           */
   861          set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
   862  
   863          /*
   864           * In domU, the ISA region is normal, usable memory, but we
   865           * reserve ISA memory anyway because too many things poke
   866           * about in there.
   867           */
   868          e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);
   869  
   870          e820__update_table(e820_table);
   871  
   872          /*
   873           * Check whether the kernel itself conflicts with the target E820 map.
   874           * Failing now is better than running into weird problems later due
   875           * to relocating (and even reusing) pages with kernel text or data.
   876           */
   877          if (xen_is_e820_reserved(__pa_symbol(_text),
   878                          __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
   879                  xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
   880                  BUG();
   881          }
   882  
   883          /*
   884           * Check for a conflict of the hypervisor supplied page tables with
   885           * the target E820 map.
   886           */
   887          xen_pt_check_e820();
   888  
   889          xen_reserve_xen_mfnlist();
   890  
   891          /* Check for a conflict of the initrd with the target E820 map. */
   892          if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
   893                                   boot_params.hdr.ramdisk_size)) {
   894                  phys_addr_t new_area, start, size;
   895  
   896                  new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
   897                  if (!new_area) {
   898                          xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
   899                          BUG();
   900                  }
   901  
   902                  start = boot_params.hdr.ramdisk_image;
   903                  size = boot_params.hdr.ramdisk_size;
   904                  xen_phys_memcpy(new_area, start, size);
   905                  pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
   906                          start, start + size, new_area, new_area + size);
   907                  memblock_free(start, size);
   908                  boot_params.hdr.ramdisk_image = new_area;
   909                  boot_params.ext_ramdisk_image = new_area >> 32;
   910          }
   911  
   912          /*
   913           * Set identity map on non-RAM pages and prepare remapping the
   914           * underlying RAM.
   915           */
   916          xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
   917  
   918          pr_info("Released %ld page(s)\n", xen_released_pages);
   919  
   920          return "Xen";
   921  }
   922  
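
The conventional fix for this kind of mismatch is to put the same Kconfig guard on the use site as on the definition (or to add a stub definition for the remaining configurations). Assuming xen_saved_max_mem_size is provided by the Xen balloon driver and only built under CONFIG_XEN_BALLOON_MEMORY_HOTPLUG, which is an assumption based on the symbol's name rather than anything stated in this report, a sketch of the use-site change would be:

    /* Sketch only: tie the assignment in xen_memory_setup() to the option
     * assumed to build the definition (CONFIG_XEN_BALLOON_MEMORY_HOTPLUG)
     * instead of the broader CONFIG_MEMORY_HOTPLUG, so that randconfigs
     * with hotplug enabled but Xen balloon hotplug support disabled no
     * longer emit an unresolvable reference.
     */
    #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
            xen_saved_max_mem_size = max_mem_size;
    #endif

Either direction works, narrowing the guard at the use site or widening the definition's availability, as long as no configuration compiles the reference without also compiling the definition.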

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip
