Skip to content

Commit

Permalink
Merge pull request #16692 from dcbw/sdn-sync-hostports-less
Browse files Browse the repository at this point in the history
Automatic merge from submit-queue (batch tested with PRs 16741, 16692).

sdn: only sync HostPorts when we need to

That is: sync the first time a pod is started (when there will be active hostports), or when there are currently active hostports. Otherwise the syncer runs iptables-restore for no good reason.

@openshift/networking @knobunc @danwinship
  • Loading branch information
openshift-merge-robot authored Oct 10, 2017
2 parents 7f854d9 + b3fc39c commit e610f2a
Showing 1 changed file with 35 additions and 10 deletions.
45 changes: 35 additions & 10 deletions pkg/network/node/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,10 @@ type podManager struct {
ovs *ovsController

enableHostports bool
// true if hostports have been synced at least once
hostportsSynced bool
// true if at least one running pod has a hostport mapping
activeHostports bool

// Things only accessed through the processCNIRequests() goroutine
// and thus can be set from Start()
Expand Down Expand Up @@ -188,12 +192,33 @@ func (m *podManager) getPod(request *cniserver.PodRequest) *kubehostport.PodPort
}

// Return a list of Kubernetes RunningPod objects for hostport operations
func (m *podManager) getRunningPods() []*kubehostport.PodPortMapping {
pods := make([]*kubehostport.PodPortMapping, 0)
func (m *podManager) shouldSyncHostports(newPod *kubehostport.PodPortMapping) []*kubehostport.PodPortMapping {
if m.hostportSyncer == nil {
return nil
}

newActiveHostports := false
mappings := make([]*kubehostport.PodPortMapping, 0)
for _, runningPod := range m.runningPods {
pods = append(pods, runningPod.podPortMapping)
mappings = append(mappings, runningPod.podPortMapping)
if !newActiveHostports && len(runningPod.podPortMapping.PortMappings) > 0 {
newActiveHostports = true
}
}
if newPod != nil && len(newPod.PortMappings) > 0 {
newActiveHostports = true
}
return pods

// Sync the first time a pod is started (to clear out stale mappings
// if kubelet crashed), or when there are any/will be active hostports.
// Otherwise don't bother.
if !m.hostportsSynced || m.activeHostports || newActiveHostports {
m.hostportsSynced = true
m.activeHostports = newActiveHostports
return mappings
}

return nil
}

// Add a request to the podManager CNI request queue
Expand Down Expand Up @@ -513,8 +538,8 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *running
defer func() {
if !success {
m.ipamDel(req.SandboxID)
if m.hostportSyncer != nil {
if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
if mappings := m.shouldSyncHostports(nil); mappings != nil {
if err := m.hostportSyncer.SyncHostports(Tun0, mappings); err != nil {
glog.Warningf("failed syncing hostports: %v", err)
}
}
Expand All @@ -527,8 +552,8 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *running
return nil, nil, err
}
podPortMapping := kubehostport.ConstructPodPortMapping(&v1Pod, podIP)
if m.hostportSyncer != nil {
if err := m.hostportSyncer.OpenPodHostportsAndSync(podPortMapping, Tun0, m.getRunningPods()); err != nil {
if mappings := m.shouldSyncHostports(podPortMapping); mappings != nil {
if err := m.hostportSyncer.OpenPodHostportsAndSync(podPortMapping, Tun0, mappings); err != nil {
return nil, nil, err
}
}
Expand Down Expand Up @@ -651,8 +676,8 @@ func (m *podManager) teardown(req *cniserver.PodRequest) error {
errList = append(errList, err)
}

if m.hostportSyncer != nil {
if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil {
if mappings := m.shouldSyncHostports(nil); mappings != nil {
if err := m.hostportSyncer.SyncHostports(Tun0, mappings); err != nil {
errList = append(errList, err)
}
}
Expand Down

0 comments on commit e610f2a

Please sign in to comment.