
Re: [Xen-devel] [PATCH v7 4/7] x86/libelf: pass the destination vCPU to libelf for Dom0 build



On Thu, Feb 23, 2017 at 06:47:24AM -0700, Jan Beulich wrote:
> >>> On 22.02.17 at 15:24, <roger.pau@xxxxxxxxxx> wrote:
> > Allow setting the destination vCPU for libelf, so that elf_load_image can take
> > it into account when loading the kernel for Dom0. This is needed for PVHv2 Dom0
> > build, so that hvm_copy_to_guest_phys can be called with a Dom0 vCPU instead of
> > current (that contains the idle vCPU at this point).
> > 
> > Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> 
> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
> albeit ...
> 
> > --- a/xen/common/libelf/libelf-loader.c
> > +++ b/xen/common/libelf/libelf-loader.c
> > @@ -146,6 +146,25 @@ void elf_set_verbose(struct elf_binary *elf)
> >      elf->verbose = 1;
> >  }
> >  
> > +static elf_errorstatus elf_memcpy(struct vcpu *v, void *dst, void *src,
> > +                                  uint64_t size)
> > +{
> > +    int rc = 0;
> > +
> > +#ifdef CONFIG_X86
> > +    if ( is_hvm_vcpu(v) )
> > +    {
> > +        rc = hvm_copy_to_guest_phys((paddr_t)dst, src, size, v);
> > +        rc = rc != HVMCOPY_okay ? -1 : 0;
> > +    }
> > +    else
> > +#endif
> > +        rc = src == NULL ? raw_clear_guest(dst, size) :
> > +                           raw_copy_to_guest(dst, src, size);
> > +
> > +    return rc;
> > +}
> 
> ... elf_errorstatus is not a correct type for the return values of
> raw_{copy_to,clear}_guest(). Nevertheless that's in line with ...
> 
> > static elf_errorstatus elf_load_image(struct elf_binary *elf, elf_ptrval dst, elf_ptrval src, uint64_t filesz, uint64_t memsz)
> > {
> >     elf_errorstatus rc;
> 
> ... the variable declared here having been used ...
> 
> > @@ -153,10 +172,12 @@ static elf_errorstatus elf_load_image(struct elf_binary *elf, elf_ptrval dst, el
> >          return -1;
> >      /* We trust the dom0 kernel image completely, so we don't care
> >       * about overruns etc. here. */
> > -    rc = raw_copy_to_guest(ELF_UNSAFE_PTR(dst), ELF_UNSAFE_PTR(src), filesz);
> > +    rc = elf_memcpy(elf->vcpu, ELF_UNSAFE_PTR(dst), ELF_UNSAFE_PTR(src),
> > +                    filesz);
> >      if ( rc != 0 )
> >          return -1;
> > -    rc = raw_clear_guest(ELF_UNSAFE_PTR(dst + filesz), memsz - filesz);
> > +    rc = elf_memcpy(elf->vcpu, ELF_UNSAFE_PTR(dst + filesz), NULL,
> > +                    memsz - filesz);
> 
> ... the same (wrong) way, so should be good enough for now.
> Ideally the setting of rc in elf_memcpy() would be corrected, though.

Would you like to squash the following chunk on top of this patch?

---8<---
diff --git a/xen/common/libelf/libelf-loader.c b/xen/common/libelf/libelf-loader.c
index 371061c..1acecab 100644
--- a/xen/common/libelf/libelf-loader.c
+++ b/xen/common/libelf/libelf-loader.c
@@ -149,20 +149,22 @@ void elf_set_verbose(struct elf_binary *elf)
 static elf_errorstatus elf_memcpy(struct vcpu *v, void *dst, void *src,
                                   uint64_t size)
 {
-    int rc = 0;
+    unsigned int res;
 
 #ifdef CONFIG_X86
     if ( is_hvm_vcpu(v) )
     {
+        enum hvm_copy_result rc;
+
         rc = hvm_copy_to_guest_phys((paddr_t)dst, src, size, v);
-        rc = rc != HVMCOPY_okay ? -1 : 0;
+        return rc != HVMCOPY_okay ? -1 : 0;
     }
     else
 #endif
-        rc = src == NULL ? raw_clear_guest(dst, size) :
-                           raw_copy_to_guest(dst, src, size);
+        res = src == NULL ? raw_clear_guest(dst, size) :
+                            raw_copy_to_guest(dst, src, size);
 
-    return rc;
+    return res != 0 ? -1 : 0;
 }
 
 static elf_errorstatus elf_load_image(struct elf_binary *elf, elf_ptrval dst, elf_ptrval src, uint64_t filesz, uint64_t memsz)
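
For reference, with the above chunk squashed in, elf_memcpy() would end up roughly as
follows (a sketch only; hvm_copy_to_guest_phys(), raw_copy_to_guest() and
raw_clear_guest() are the existing Xen helpers and are assumed unchanged):

static elf_errorstatus elf_memcpy(struct vcpu *v, void *dst, void *src,
                                  uint64_t size)
{
    unsigned int res;

#ifdef CONFIG_X86
    if ( is_hvm_vcpu(v) )
    {
        enum hvm_copy_result rc;

        /* Copy directly into the guest's physmap using the Dom0 vCPU. */
        rc = hvm_copy_to_guest_phys((paddr_t)dst, src, size, v);
        return rc != HVMCOPY_okay ? -1 : 0;
    }
    else
#endif
        /* PV path: clear or copy depending on whether a source was given. */
        res = src == NULL ? raw_clear_guest(dst, size) :
                            raw_copy_to_guest(dst, src, size);

    return res != 0 ? -1 : 0;
}

The HVM branch returns directly, so the enum hvm_copy_result never passes through the
unsigned variable, which addresses the rc typing concern raised above.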

