Modifying pillar to use native QEMU cpu-pinning option
Signed-off-by: Roja Eswaran <roja@zededa.com>
roja-zededa committed Oct 1, 2024
1 parent fa910bf commit 5e78a47
Showing 5 changed files with 46 additions and 35 deletions.
46 changes: 17 additions & 29 deletions pkg/pillar/cmd/domainmgr/domainmgr.go
@@ -1243,38 +1243,26 @@ func setCgroupCpuset(config *types.DomainConfig, status *types.DomainStatus) err
log.Warnf("Failed to find cgroups directory for %s", config.DisplayName)
return nil
}
err = controller.Update(&specs.LinuxResources{CPU: &specs.LinuxCPU{Cpus: status.VmConfig.CPUs}})

// Convert a list of CPUs to a CPU string
cpuStrings := make([]string, 0)
for _, cpu := range status.VmConfig.CPUs {
cpuStrings = append(cpuStrings, strconv.Itoa(cpu))
}
cpuMask := strings.Join(cpuStrings, ",")

err = controller.Update(&specs.LinuxResources{CPU: &specs.LinuxCPU{Cpus: cpuMask}})

if err != nil {
log.Warnf("Failed to update CPU set for %s", config.DisplayName)
return err
}
log.Functionf("Adjust the cgroups cpuset of %s to %s", config.DisplayName, status.VmConfig.CPUs)
log.Functionf("Adjust the cgroups cpuset of %s to %v", config.DisplayName, status.VmConfig.CPUs)
return nil
}

// constructNonPinnedCpumaskString returns a cpumask that contains at least CPUs reserved for the system
// services. Hence, it can never be empty.
func constructNonPinnedCpumaskString(ctx *domainContext) string {
result := ""
for _, cpu := range ctx.cpuAllocator.GetAllFree() {
addToMask(cpu, &result)
}
return result
}

func addToMask(cpu int, s *string) {
if s == nil {
return
}
if *s == "" {
*s = fmt.Sprintf("%d", cpu)
} else {
*s = fmt.Sprintf("%s,%d", *s, cpu)
}
}

func updateNonPinnedCPUs(ctx *domainContext, config *types.DomainConfig, status *types.DomainStatus) error {
status.VmConfig.CPUs = constructNonPinnedCpumaskString(ctx)
status.VmConfig.CPUs = ctx.cpuAllocator.GetAllFree()
err := setCgroupCpuset(config, status)
if err != nil {
return errors.New("failed to redistribute CPUs between VMs, can affect the inter-VM isolation")
@@ -1292,10 +1280,10 @@ func assignCPUs(ctx *domainContext, config *types.DomainConfig, status *types.Do
return errors.New("failed to allocate necessary amount of CPUs")
}
for _, cpu := range cpusToAssign {
addToMask(cpu, &status.VmConfig.CPUs)
status.VmConfig.CPUs = append(status.VmConfig.CPUs, cpu)
}
} else { // VM has no pinned CPUs, assign all the CPUs from the shared set
status.VmConfig.CPUs = constructNonPinnedCpumaskString(ctx)
status.VmConfig.CPUs = ctx.cpuAllocator.GetAllFree()
}
return nil
}
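The branch above can be pictured with a simplified, self-contained sketch; the allocator type below is a hypothetical stand-in for pillar's CPU allocator, reduced to the two operations used here, and the CPU values are invented:

package main

import "fmt"

// allocator is a hypothetical stand-in for the CPU allocator used by domainmgr.
type allocator struct {
    free []int
}

// GetAllFree returns the CPUs currently left in the shared pool.
func (a *allocator) GetAllFree() []int { return a.free }

// Allocate reserves n CPUs from the pool, or reports failure.
func (a *allocator) Allocate(n int) ([]int, bool) {
    if n > len(a.free) {
        return nil, false
    }
    cpus := a.free[:n]
    a.free = a.free[n:]
    return cpus, true
}

func main() {
    alloc := &allocator{free: []int{2, 3, 4, 5}}

    // Pinned VM: gets dedicated CPUs appended to its []int CPU list.
    var pinned []int
    if cpus, ok := alloc.Allocate(2); ok {
        pinned = append(pinned, cpus...)
    }
    fmt.Println(pinned) // [2 3]

    // Non-pinned VM: shares whatever remains in the free set.
    fmt.Println(alloc.GetAllFree()) // [4 5]
}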
@@ -1330,7 +1318,7 @@ func handleCreate(ctx *domainContext, key string, config *types.DomainConfig) {
Service: config.Service,
}

status.VmConfig.CPUs = ""
status.VmConfig.CPUs = make([]int, 0)

// Note that the -emu interface doesn't exist until after boot of the domU, but we
// initialize the VifList here with the VifUsed.
@@ -1545,7 +1533,7 @@ func doActivate(ctx *domainContext, config types.DomainConfig,
publishDomainStatus(ctx, status)
return
}
log.Functionf("CPUs for %s assigned: %s", config.DisplayName, status.VmConfig.CPUs)
log.Functionf("CPUs for %s assigned: %v", config.DisplayName, status.VmConfig.CPUs)
}

if errDescription := reserveAdapters(ctx, config); errDescription != nil {
@@ -1932,7 +1920,7 @@ func doCleanup(ctx *domainContext, status *types.DomainStatus) {
}
triggerCPUNotification()
}
status.VmConfig.CPUs = ""
status.VmConfig.CPUs = nil
}
releaseAdapters(ctx, status.IoAdapterList, status.UUIDandVersion.UUID,
status)
8 changes: 6 additions & 2 deletions pkg/pillar/containerd/oci.go
@@ -447,8 +447,12 @@ func (s *ociSpec) UpdateFromDomain(dom *types.DomainConfig, status *types.Domain
s.Linux.Resources.Memory.Limit = &m
s.Linux.Resources.CPU.Period = &p
s.Linux.Resources.CPU.Quota = &q
if status.VmConfig.CPUs != "" {
s.Linux.Resources.CPU.Cpus = status.VmConfig.CPUs
if len(status.VmConfig.CPUs) != 0 {
cpusAsString := make([]string, len(status.VmConfig.CPUs))
for i, cpu := range status.VmConfig.CPUs {
cpusAsString[i] = fmt.Sprintf("%d", cpu)
}
s.Linux.Resources.CPU.Cpus = strings.Join(cpusAsString, ",")
}

s.Linux.CgroupsPath = fmt.Sprintf("/%s/%s", ctrdServicesNamespace, dom.GetTaskName())
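As a rough sketch with invented CPU values, the joined string lands in the OCI runtime spec's cpuset field roughly like this:

package main

import (
    "fmt"
    "strings"

    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    pinned := []int{4, 5} // hypothetical pinned CPUs
    parts := make([]string, len(pinned))
    for i, cpu := range pinned {
        parts[i] = fmt.Sprintf("%d", cpu)
    }
    resources := &specs.LinuxResources{
        CPU: &specs.LinuxCPU{Cpus: strings.Join(parts, ",")},
    }
    fmt.Println(resources.CPU.Cpus) // "4,5"
}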
14 changes: 14 additions & 0 deletions pkg/pillar/hypervisor/kvm.go
@@ -846,6 +846,20 @@ func (ctx KvmContext) Setup(status types.DomainStatus, config types.DomainConfig
"-readconfig", file.Name(),
"-pidfile", kvmStateDir+domainName+"/pid")

// Add CPU affinity as a parameter to qemu.
// It cannot be configured in the .ini file, so we need to add it here.
// The arguments are in the format: -object thread-context,id=tc1,cpu-affinity=0-1,cpu-affinity=6-7
// The thread-context object was introduced in QEMU 7.2.
if config.CPUsPinned {
// Create the thread-context object string
threadContext := "thread-context,id=tc1"
for _, cpu := range status.CPUs {
// Add the cpu-affinity arguments to the thread-context object
threadContext += fmt.Sprintf(",cpu-affinity=%d", cpu)
}
args = append(args, "-object", threadContext)
}

spec, err := ctx.setupSpec(&status, &config, status.OCIConfigDir)

if err != nil {
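For a VM pinned to CPUs 2 and 3 (invented values), the appended arguments would come out as in this small sketch; the thread-context object requires QEMU 7.2 or newer:

package main

import "fmt"

func main() {
    pinned := []int{2, 3} // hypothetical pinned CPUs
    threadContext := "thread-context,id=tc1"
    for _, cpu := range pinned {
        threadContext += fmt.Sprintf(",cpu-affinity=%d", cpu)
    }
    args := []string{"-object", threadContext}
    fmt.Println(args)
    // Output: [-object thread-context,id=tc1,cpu-affinity=2,cpu-affinity=3]
}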
10 changes: 8 additions & 2 deletions pkg/pillar/hypervisor/xen.go
@@ -260,9 +260,15 @@ func (ctx xenContext) CreateDomConfig(domainName string,
maxCpus = vCpus
}
file.WriteString(fmt.Sprintf("maxvcpus = %d\n", maxCpus))
if config.CPUs != "" {
file.WriteString(fmt.Sprintf("cpus = \"%s\"\n", config.CPUs))

if len(config.CPUs) > 0 {
cpusString := make([]string, 0)
for _, curCpu := range config.CPUs {
cpusString = append(cpusString, strconv.Itoa(curCpu))
}
file.WriteString(fmt.Sprintf("cpus = \"%s\"\n", strings.Join(cpusString, ",")))
}

if config.DeviceTree != "" {
file.WriteString(fmt.Sprintf("device_tree = \"%s\"\n",
config.DeviceTree))
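For example, with invented pinned CPUs 2 and 3, the generated xl domain config would contain the line:

cpus = "2,3"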
3 changes: 1 addition & 2 deletions pkg/pillar/types/domainmgrtypes.go
@@ -242,8 +242,7 @@ type VmConfig struct {
ExtraArgs string // added to bootargs
BootLoader string // default ""
// For CPU pinning
CPUs string // default "", list of "1,2"
// Needed for device passthru
CPUs []int // default nil, list of [1,2]
DeviceTree string // default ""; sets device_tree
// Example: device_tree="guest-gpio.dtb"
DtDev []string // default nil; sets dtdev
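To illustrate the type change with made-up values, the struct below is a trimmed-down, hypothetical stand-in for types.VmConfig:

package main

import "fmt"

// vmConfig is a hypothetical, trimmed-down stand-in for types.VmConfig.
type vmConfig struct {
    CPUs []int // previously: CPUs string, e.g. "1,2"
}

func main() {
    cfg := vmConfig{CPUs: []int{1, 2}}
    fmt.Println(cfg.CPUs) // [1 2]
    // Consumers (cgroups, QEMU, Xen) now format this list themselves.
}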
