
[Xen-devel] [PATCH RFC 8/8] golang/xenlight: Implement cpupool operations



Include some useful "utility" functions (a short usage sketch follows the lists below):
- CpupoolFindByName
- CpupoolMakeFree

Still need to implement the following functions:
- libxl_cpupool_rename
- libxl_cpupool_cpuadd_node
- libxl_cpupool_cpuremove_node
- libxl_cpupool_movedomain
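
As an illustration of how the utility functions are meant to be used
(the pool name is made up, and the snippet is written as if it lived
inside the xenlight package itself, so no import path is assumed):

    // Free up the CPUs currently assigned to the named pool, if it exists.
    func freeCpusOfPool(ctx *Context, name string) error {
            info, found := ctx.CpupoolFindByName(name)
            if !found {
                    return nil // no such pool; nothing to do
            }
            // Remove this pool's CPUs from whichever pools currently hold them.
            return ctx.CpupoolMakeFree(info.Cpumap)
    }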

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
Signed-off-by: Ronald Rojas <ronladred@xxxxxxxxx>
---
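Not part of the patch itself: a rough sketch of how the new calls are
expected to chain together. The pool name, the "credit" scheduler and
CPU 3 are arbitrary choices for illustration, error handling is
abbreviated, and the snippet assumes it is compiled as part of the
xenlight package. Note that CpupoolCreate returns the error first.

    func cpupoolSmokeTest(ctx *Context, cpumap Bitmap) error {
            // Look up the scheduler by name, as SchedulerFromString allows.
            sched, err := SchedulerFromString("credit")
            if err != nil {
                    return err
            }

            // Create a pool over the given cpumap...
            err, poolid := ctx.CpupoolCreate("test-pool", sched, cpumap)
            if err != nil {
                    return err
            }

            // ...add a further CPU (assumed to be free), then tear it down.
            if err := ctx.CpupoolCpuadd(poolid, 3); err != nil {
                    return err
            }
            return ctx.CpupoolDestroy(poolid)
    }
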
 tools/golang/xenlight/xenlight.go | 238 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 238 insertions(+)

diff --git a/tools/golang/xenlight/xenlight.go b/tools/golang/xenlight/xenlight.go
index 0990f07..c856284 100644
--- a/tools/golang/xenlight/xenlight.go
+++ b/tools/golang/xenlight/xenlight.go
@@ -273,6 +273,244 @@ func SchedulerFromString(name string) (s Scheduler, err error) {
        return
 }
 
+
+// libxl_cpupoolinfo = Struct("cpupoolinfo", [
+//     ("poolid",      uint32),
+//     ("pool_name",   string),
+//     ("sched",       libxl_scheduler),
+//     ("n_dom",       uint32),
+//     ("cpumap",      libxl_bitmap)
+//     ], dir=DIR_OUT)
+
+type CpupoolInfo struct {
+       Poolid      uint32
+       PoolName    string
+       Scheduler   Scheduler
+       DomainCount int
+       Cpumap      Bitmap
+}
+
+func (cci C.libxl_cpupoolinfo) CToGo() (gci CpupoolInfo) {
+       gci.Poolid = uint32(cci.poolid)
+       gci.PoolName = C.GoString(cci.pool_name)
+       gci.Scheduler = Scheduler(cci.sched)
+       gci.DomainCount = int(cci.n_dom)
+       gci.Cpumap = cci.cpumap.CToGo()
+
+       return
+}
+
+// libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx*, int *nb_pool_out);
+// void libxl_cpupoolinfo_list_free(libxl_cpupoolinfo *list, int nb_pool);
+func (Ctx *Context) ListCpupool() (list []CpupoolInfo) {
+       err := Ctx.CheckOpen()
+       if err != nil {
+               return
+       }
+
+       var nbPool C.int
+
+       c_cpupool_list := C.libxl_list_cpupool(Ctx.ctx, &nbPool)
+
+       defer C.libxl_cpupoolinfo_list_free(c_cpupool_list, nbPool)
+
+       if int(nbPool) == 0 {
+               return
+       }
+
+       // Convert the C array into a Go slice in place (the huge array type is only a cast, not an allocation)
+       cpupoolListSlice := (*[1 << 30]C.libxl_cpupoolinfo)(unsafe.Pointer(c_cpupool_list))[:nbPool:nbPool]
+
+       for i := range cpupoolListSlice {
+               info := cpupoolListSlice[i].CToGo()
+
+               list = append(list, info)
+       }
+
+       return
+}
+
+// int libxl_cpupool_info(libxl_ctx *ctx, libxl_cpupoolinfo *info, uint32_t poolid);
+func (Ctx *Context) CpupoolInfo(Poolid uint32) (pool CpupoolInfo, err error) {
+       err = Ctx.CheckOpen()
+       if err != nil {
+               return
+       }
+
+       var c_cpupool C.libxl_cpupoolinfo
+
+       ret := C.libxl_cpupool_info(Ctx.ctx, &c_cpupool, C.uint32_t(Poolid))
+       if ret != 0 {
+               err = Error(-ret)
+               return
+       }
+       defer C.libxl_cpupoolinfo_dispose(&c_cpupool)
+
+       pool = c_cpupool.CToGo()
+
+       return
+}
+
+// int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
+//                          libxl_scheduler sched,
+//                          libxl_bitmap cpumap, libxl_uuid *uuid,
+//                          uint32_t *poolid);
+// FIXME: uuid
+// FIXME: Setting poolid
+func (Ctx *Context) CpupoolCreate(Name string, Scheduler Scheduler, Cpumap Bitmap) (err error, Poolid uint32) {
+       err = Ctx.CheckOpen()
+       if err != nil {
+               return
+       }
+
+       poolid := C.uint32_t(0)
+       name := C.CString(Name)
+       defer C.free(unsafe.Pointer(name))
+
+       // For now, just do what xl does, and make a new uuid every time we create the pool
+       var uuid C.libxl_uuid
+       C.libxl_uuid_generate(&uuid)
+
+       cbm := Cpumap.GoToC()
+       defer C.libxl_bitmap_dispose(&cbm)
+
+       ret := C.libxl_cpupool_create(Ctx.ctx, name, C.libxl_scheduler(Scheduler),
+               cbm, &uuid, &poolid)
+       if ret != 0 {
+               err = Error(-ret)
+               return
+       }
+
+       Poolid = uint32(poolid)
+
+       return
+}
+
+// int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid);
+func (Ctx *Context) CpupoolDestroy(Poolid uint32) (err error) {
+       err = Ctx.CheckOpen()
+       if err != nil {
+               return
+       }
+
+       ret := C.libxl_cpupool_destroy(Ctx.ctx, C.uint32_t(Poolid))
+       if ret != 0 {
+               err = Error(-ret)
+               return
+       }
+
+       return
+}
+
+// int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu);
+func (Ctx *Context) CpupoolCpuadd(Poolid uint32, Cpu int) (err error) {
+       err = Ctx.CheckOpen()
+       if err != nil {
+               return
+       }
+
+       ret := C.libxl_cpupool_cpuadd(Ctx.ctx, C.uint32_t(Poolid), C.int(Cpu))
+       if ret != 0 {
+               err = Error(-ret)
+               return
+       }
+
+       return
+}
+
+// int libxl_cpupool_cpuadd_cpumap(libxl_ctx *ctx, uint32_t poolid,
+//                                 const libxl_bitmap *cpumap);
+func (Ctx *Context) CpupoolCpuaddCpumap(Poolid uint32, Cpumap Bitmap) (err error) {
+       err = Ctx.CheckOpen()
+       if err != nil {
+               return
+       }
+
+       cbm := Cpumap.GoToC()
+       defer C.libxl_bitmap_dispose(&cbm)
+
+       ret := C.libxl_cpupool_cpuadd_cpumap(Ctx.ctx, C.uint32_t(Poolid), &cbm)
+       if ret != 0 {
+               err = Error(-ret)
+               return
+       }
+
+       return
+}
+
+// int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
+func (Ctx *Context) CpupoolCpuremove(Poolid uint32, Cpu int) (err error) {
+       err = Ctx.CheckOpen()
+       if err != nil {
+               return
+       }
+
+       ret := C.libxl_cpupool_cpuremove(Ctx.ctx, C.uint32_t(Poolid), C.int(Cpu))
+       if ret != 0 {
+               err = Error(-ret)
+               return
+       }
+
+       return
+}
+
+// int libxl_cpupool_cpuremove_cpumap(libxl_ctx *ctx, uint32_t poolid,
+//                                    const libxl_bitmap *cpumap);
+func (Ctx *Context) CpupoolCpuremoveCpumap(Poolid uint32, Cpumap Bitmap) (err error) {
+       err = Ctx.CheckOpen()
+       if err != nil {
+               return
+       }
+
+       cbm := Cpumap.GoToC()
+       defer C.libxl_bitmap_dispose(&cbm)
+
+       ret := C.libxl_cpupool_cpuremove_cpumap(Ctx.ctx, C.uint32_t(Poolid), &cbm)
+       if ret != 0 {
+               err = Error(-ret)
+               return
+       }
+
+       return
+}
+
+// int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid);
+// int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
+// int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus);
+// int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+
+//
+// Utility functions
+//
+func (Ctx *Context) CpupoolFindByName(name string) (info CpupoolInfo, found bool) {
+       plist := Ctx.ListCpupool()
+
+       for i := range plist {
+               if plist[i].PoolName == name {
+                       found = true
+                       info = plist[i]
+                       return
+               }
+       }
+       return
+}
+
+func (Ctx *Context) CpupoolMakeFree(Cpumap Bitmap) (err error) {
+       plist := Ctx.ListCpupool()
+
+       for i := range plist {
+               var Intersection Bitmap
+               Intersection = Cpumap.And(plist[i].Cpumap)
+               if !Intersection.IsEmpty() {
+                       err = Ctx.CpupoolCpuremoveCpumap(plist[i].Poolid, Intersection)
+                       if err != nil {
+                               return
+                       }
+               }
+       }
+       return
+}
+
 /*
  * Bitmap operations
  */
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
