avoid klog Info calls without verbosity
In the following code pattern, the log message gets logged with v=0 in the JSON
output although conceptually it has a higher verbosity:

   if klog.V(5).Enabled() {
       klog.Info("hello world")
   }

Having the actual verbosity in the JSON output is relevant, for example when
filtering for only the important info messages. The solution is to use
klog.V(5).Info or something similar.
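
For example, the snippet above can be reduced to a single call (a minimal
sketch, assuming k8s.io/klog/v2):

    klog.V(5).Info("hello world")

The message is still emitted only at -v=5 or higher, and the JSON output now
records the actual verbosity instead of v=0.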

Whether the outer if is necessary at all depends on how complex the parameters
are. The return value of klog.V can be captured in a variable and used
multiple times, which avoids the overhead of repeated calls and avoids
repeating the verbosity level.
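
A minimal sketch of that variant, again assuming k8s.io/klog/v2; here
expensiveDetails is a hypothetical helper standing in for costly argument
preparation:

    klogV := klog.V(5)
    if klogV.Enabled() {
        // Only prepare the expensive arguments when the message will actually
        // be emitted, and log through klogV so the verbosity is preserved.
        klogV.InfoS("hello world", "details", expensiveDetails())
    }
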
pohly committed Jan 12, 2022
1 parent 9350521 commit 9eaa2dc
Showing 26 changed files with 184 additions and 130 deletions.
7 changes: 4 additions & 3 deletions cmd/kube-scheduler/app/options/configfile.go
@@ -82,7 +82,8 @@ func encodeConfig(cfg *config.KubeSchedulerConfiguration) (*bytes.Buffer, error)

// LogOrWriteConfig logs the completed component config and writes it into the given file name as YAML, if either is enabled
func LogOrWriteConfig(fileName string, cfg *config.KubeSchedulerConfiguration, completedProfiles []config.KubeSchedulerProfile) error {
if !(klog.V(2).Enabled() || len(fileName) > 0) {
klogV := klog.V(2)
if !klogV.Enabled() && len(fileName) == 0 {
return nil
}
cfg.Profiles = completedProfiles
@@ -92,8 +93,8 @@ func LogOrWriteConfig(fileName string, cfg *config.KubeSchedulerConfiguration, c
return err
}

if klog.V(2).Enabled() {
klog.Info("Using component config", "\n-------------------------Configuration File Contents Start Here---------------------- \n", buf.String(), "\n------------------------------------Configuration File Contents End Here---------------------------------\n")
if klogV.Enabled() {
klogV.Info("Using component config", "\n-------------------------Configuration File Contents Start Here---------------------- \n", buf.String(), "\n------------------------------------Configuration File Contents End Here---------------------------------\n")
}

if len(fileName) > 0 {
1 change: 1 addition & 0 deletions go.mod
@@ -36,6 +36,7 @@ require (
github.com/emicklei/go-restful v2.9.5+incompatible
github.com/evanphx/json-patch v4.12.0+incompatible
github.com/fsnotify/fsnotify v1.4.9
github.com/go-logr/logr v1.2.0
github.com/go-ozzo/ozzo-validation v3.5.0+incompatible // indirect
github.com/godbus/dbus/v5 v5.0.4
github.com/gogo/protobuf v1.3.2
4 changes: 2 additions & 2 deletions pkg/controller/nodelifecycle/node_lifecycle_controller.go
@@ -1042,8 +1042,8 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t
} else {
transitionTime = nodeHealth.readyTransitionTimestamp
}
if klog.V(5).Enabled() {
klog.Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, nodeHealth.status, node.Status)
if klogV := klog.V(5); klogV.Enabled() {
klogV.Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, nodeHealth.status, node.Status)
} else {
klog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", node.Name)
}
20 changes: 6 additions & 14 deletions pkg/controller/replicaset/replica_set.go
@@ -37,7 +37,7 @@ import (
"time"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -219,12 +219,10 @@ func (rsc *ReplicaSetController) getReplicaSetsWithSameController(rs *apps.Repli
}
}

if klog.V(2).Enabled() {
var relatedNames []string
for _, r := range relatedRSs {
relatedNames = append(relatedNames, r.Name)
}
klog.InfoS("Found related ReplicaSets", "replicaSet", klog.KObj(rs), "relatedReplicaSets", relatedNames)
// The if check is used to avoid the overhead for the KObjs call, see
// https://github.com/kubernetes/kubernetes/issues/106945.
if klogV := klog.V(2); klogV.Enabled() {
klogV.InfoS("Found related ReplicaSets", "replicaSet", klog.KObj(rs), "relatedReplicaSets", klog.KObjs(relatedRSs))
}

return relatedRSs
@@ -791,13 +789,7 @@ func (rsc *ReplicaSetController) getIndirectlyRelatedPods(rs *apps.ReplicaSet) (
relatedPods = append(relatedPods, pod)
}
}
if klog.V(4).Enabled() {
var relatedNames []string
for _, related := range relatedPods {
relatedNames = append(relatedNames, related.Name)
}
klog.Infof("Found %d related pods for %v %s/%s: %v", len(relatedPods), rsc.Kind, rs.Namespace, rs.Name, strings.Join(relatedNames, ", "))
}
klog.V(4).InfoS("Found related pods", "kind", rsc.Kind, "object", klog.KObj(rs), "pods", klog.KObjs(relatedPods))
return relatedPods, nil
}

6 changes: 3 additions & 3 deletions pkg/controller/resourcequota/resource_quota_controller.go
@@ -25,7 +25,7 @@ import (

"k8s.io/klog/v2"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -435,8 +435,8 @@ func (rq *Controller) Sync(discoveryFunc NamespacedResourcesFunc, period time.Du
defer rq.workerLock.Unlock()

// Something has changed, so track the new state and perform a sync.
if klog.V(2).Enabled() {
klog.Infof("syncing resource quota controller with updated resources from discovery: %s", printDiff(oldResources, newResources))
if klogV := klog.V(2); klogV.Enabled() {
klogV.Infof("syncing resource quota controller with updated resources from discovery: %s", printDiff(oldResources, newResources))
}

// Perform the monitor resync and wait for controllers to report cache sync.
47 changes: 15 additions & 32 deletions pkg/controller/volume/attachdetach/reconciler/reconciler.go
@@ -171,9 +171,7 @@ func (rc *reconciler) reconcile() {
// See https://github.com/kubernetes/kubernetes/issues/93902
attachState := rc.actualStateOfWorld.GetAttachState(attachedVolume.VolumeName, attachedVolume.NodeName)
if attachState == cache.AttachStateDetached {
if klog.V(5).Enabled() {
klog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached--skipping", ""))
}
klog.V(5).InfoS("Volume detached--skipping", "volume", attachedVolume)
continue
}

@@ -187,7 +185,7 @@ func (rc *reconciler) reconcile() {
timeout := elapsedTime > rc.maxWaitForUnmountDuration
// Check whether volume is still mounted. Skip detach if it is still mounted unless timeout
if attachedVolume.MountedByNode && !timeout {
klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Cannot detach volume because it is still mounted", ""))
klog.V(5).InfoS("Cannot detach volume because it is still mounted", "volume", attachedVolume)
continue
}

@@ -207,21 +205,21 @@ func (rc *reconciler) reconcile() {
err = rc.nodeStatusUpdater.UpdateNodeStatuses()
if err != nil {
// Skip detaching this volume if unable to update node status
klog.Errorf(attachedVolume.GenerateErrorDetailed("UpdateNodeStatuses failed while attempting to report volume as attached", err).Error())
klog.ErrorS(err, "UpdateNodeStatuses failed while attempting to report volume as attached", "volume", attachedVolume)
continue
}

// Trigger detach volume which requires verifying safe to detach step
// If timeout is true, skip verifySafeToDetach check
klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
klog.V(5).InfoS("Starting attacherDetacher.DetachVolume", "volume", attachedVolume)
verifySafeToDetach := !timeout
err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
if err == nil {
if !timeout {
klog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
klog.InfoS("attacherDetacher.DetachVolume started", "volume", attachedVolume)
} else {
metrics.RecordForcedDetachMetric()
klog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
klog.InfoS("attacherDetacher.DetachVolume started: this volume is not safe to detach, but maxWaitForUnmountDuration expired, force detaching", "duration", rc.maxWaitForUnmountDuration, "volume", attachedVolume)
}
}
if err != nil {
Expand All @@ -233,7 +231,7 @@ func (rc *reconciler) reconcile() {
if !exponentialbackoff.IsExponentialBackoff(err) {
// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors.
klog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
klog.ErrorS(err, "attacherDetacher.DetachVolume failed to start", "volume", attachedVolume)
}
}
}
@@ -254,17 +252,13 @@ func (rc *reconciler) attachDesiredVolumes() {
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
// Don't even try to start an operation if there is already one running for the given volume and node.
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
if klog.V(10).Enabled() {
klog.Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
klog.V(10).Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
continue
}
} else {
// Don't even try to start an operation if there is already one running for the given volume
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
if klog.V(10).Enabled() {
klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
klog.V(10).Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
continue
}
}
@@ -277,9 +271,7 @@ func (rc *reconciler) attachDesiredVolumes() {
attachState := rc.actualStateOfWorld.GetAttachState(volumeToAttach.VolumeName, volumeToAttach.NodeName)
if attachState == cache.AttachStateAttached {
// Volume/Node exists, touch it to reset detachRequestedTime
if klog.V(5).Enabled() {
klog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
}
klog.V(5).InfoS("Volume attached--touching", "volume", volumeToAttach)
rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName)
continue
}
@@ -296,17 +288,15 @@ func (rc *reconciler) attachDesiredVolumes() {
}

// Volume/Node doesn't exist, spawn a goroutine to attach it
if klog.V(5).Enabled() {
klog.Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
}
klog.V(5).InfoS("Starting attacherDetacher.AttachVolume", "volume", volumeToAttach)
err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
if err == nil {
klog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
klog.InfoS("attacherDetacher.AttachVolume started", "volume", volumeToAttach)
}
if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors.
klog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error())
klog.ErrorS(err, "attacherDetacher.AttachVolume failed to start", "volume", volumeToAttach)
}
}
}
@@ -339,9 +329,7 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
}
// Log detailed message to system admin
nodeList := strings.Join(otherNodesStr, ", ")
detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already exclusively attached to node %s and can't be attached to another", nodeList))
klog.Warning(detailedMsg)
klog.InfoS("Multi-Attach error: volume is already exclusively attached and can't be attached to another node", "attachedTo", otherNodesStr, "volume", volumeToAttach)
return
}

@@ -377,10 +365,5 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
}

// Log all pods for system admin
podNames := []string{}
for _, pod := range pods {
podNames = append(podNames, pod.Namespace+"/"+pod.Name)
}
detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already used by pods %s on node %s", strings.Join(podNames, ", "), strings.Join(otherNodesStr, ", ")))
klog.Warningf(detailedMsg)
klog.InfoS("Multi-Attach error: volume is already used by pods", "pods", klog.KObjs(pods), "attachedTo", otherNodesStr, "volume", volumeToAttach)
}
14 changes: 8 additions & 6 deletions pkg/kubelet/eviction/helpers.go
@@ -766,30 +766,32 @@ func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObserv
}

func debugLogObservations(logPrefix string, observations signalObservations) {
if !klog.V(3).Enabled() {
klogV := klog.V(3)
if !klogV.Enabled() {
return
}
for k, v := range observations {
if !v.time.IsZero() {
klog.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", v.available, "capacity", v.capacity, "time", v.time)
klogV.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", v.available, "capacity", v.capacity, "time", v.time)
} else {
klog.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", v.available, "capacity", v.capacity)
klogV.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", v.available, "capacity", v.capacity)
}
}
}

func debugLogThresholdsWithObservation(logPrefix string, thresholds []evictionapi.Threshold, observations signalObservations) {
if !klog.V(3).Enabled() {
klogV := klog.V(3)
if !klogV.Enabled() {
return
}
for i := range thresholds {
threshold := thresholds[i]
observed, found := observations[threshold.Signal]
if found {
quantity := evictionapi.GetThresholdQuantity(threshold.Value, observed.capacity)
klog.InfoS("Eviction manager: threshold observed resource", "log", logPrefix, "signal", threshold.Signal, "quantity", quantity, "resource", observed.available)
klogV.InfoS("Eviction manager: threshold observed resource", "log", logPrefix, "signal", threshold.Signal, "quantity", quantity, "resource", observed.available)
} else {
klog.InfoS("Eviction manager: threshold had no observation", "log", logPrefix, "signal", threshold.Signal)
klogV.InfoS("Eviction manager: threshold had no observation", "log", logPrefix, "signal", threshold.Signal)
}
}
}
20 changes: 15 additions & 5 deletions pkg/kubelet/kubelet.go
@@ -1796,16 +1796,26 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu
return err
}
var runningContainers []string
var containers []string
type container struct {
Name string
State string
ExitCode int
FinishedAt string
}
var containers []container
klogV := klog.V(4)
klogVEnabled := klogV.Enabled()
for _, s := range podStatus.ContainerStatuses {
if s.State == kubecontainer.ContainerStateRunning {
runningContainers = append(runningContainers, s.ID.String())
}
containers = append(containers, fmt.Sprintf("(%s state=%s exitCode=%d finishedAt=%s)", s.Name, s.State, s.ExitCode, s.FinishedAt.UTC().Format(time.RFC3339Nano)))
if klogVEnabled {
containers = append(containers, container{Name: s.Name, State: string(s.State), ExitCode: s.ExitCode, FinishedAt: s.FinishedAt.UTC().Format(time.RFC3339Nano)})
}
}
if klog.V(4).Enabled() {
sort.Strings(containers)
klog.InfoS("Post-termination container state", "pod", klog.KObj(pod), "podUID", pod.UID, "containers", strings.Join(containers, " "))
if klogVEnabled {
sort.Slice(containers, func(i, j int) bool { return containers[i].Name < containers[j].Name })
klog.V(4).InfoS("Post-termination container state", "pod", klog.KObj(pod), "podUID", pod.UID, "containers", containers)
}
if len(runningContainers) > 0 {
return fmt.Errorf("detected running containers after a successful KillPod, CRI violation: %v", runningContainers)
8 changes: 4 additions & 4 deletions pkg/kubelet/pleg/generic.go
@@ -407,13 +407,13 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace)
if err != nil {
if klog.V(6).Enabled() {
klog.V(6).ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
klog.ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
} else {
klog.V(4).ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
klog.ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
}
} else {
if klog.V(6).Enabled() {
klog.V(6).InfoS("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
if klogV := klog.V(6); klogV.Enabled() {
klogV.InfoS("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
} else {
klog.V(4).InfoS("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
}
4 changes: 2 additions & 2 deletions pkg/kubelet/status/status_manager.go
@@ -439,7 +439,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp

// Perform some more extensive logging of container termination state to assist in
// debugging production races (generally not needed).
if klog.V(5).Enabled() {
if klogV := klog.V(5); klogV.Enabled() {
var containers []string
for _, s := range append(append([]v1.ContainerStatus(nil), status.InitContainerStatuses...), status.ContainerStatuses...) {
var current, previous string
@@ -466,7 +466,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
containers = append(containers, fmt.Sprintf("(%s state=%s previous=%s)", s.Name, current, previous))
}
sort.Strings(containers)
klog.InfoS("updateStatusInternal", "version", cachedStatus.version+1, "pod", klog.KObj(pod), "podUID", pod.UID, "containers", strings.Join(containers, " "))
klogV.InfoS("updateStatusInternal", "version", cachedStatus.version+1, "pod", klog.KObj(pod), "podUID", pod.UID, "containers", strings.Join(containers, " "))
}

// The intent here is to prevent concurrent updates to a pod's status from
6 changes: 3 additions & 3 deletions pkg/registry/flowcontrol/ensurer/strategy.go
@@ -248,9 +248,9 @@ func ensureConfiguration(wrapper configurationWrapper, strategy ensureStrategy,
return fmt.Errorf("failed to determine whether auto-update is required for %s type=%s name=%q error=%w", wrapper.TypeName(), configurationType, name, err)
}
if !update {
if klog.V(5).Enabled() {
// TODO: if we use structured logging here the diff gets escaped and very awkward to read in the log
klog.Infof("No update required for the %s type=%s name=%q diff: %s", wrapper.TypeName(), configurationType, name, cmp.Diff(current, bootstrap))
if klogV := klog.V(5); klogV.Enabled() {
klogV.InfoS("No update required", "wrapper", wrapper.TypeName(), "type", configurationType, "name", name,
"diff", cmp.Diff(current, bootstrap))
}
return nil
}
@@ -69,12 +69,10 @@ func (r *resourceAllocationScorer) score(

score := r.scorer(requested, allocatable)

if klog.V(10).Enabled() {
klog.InfoS("Listing internal info for allocatable resources, requested resources and score", "pod",
klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name,
"allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score,
)
}
klog.V(10).InfoS("Listing internal info for allocatable resources, requested resources and score", "pod",
klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name,
"allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score,
)

return score, nil
}
@@ -107,9 +105,7 @@ func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(nodeInfo
return nodeInfo.Allocatable.ScalarResources[resource], (nodeInfo.Requested.ScalarResources[resource] + podRequest)
}
}
if klog.V(10).Enabled() {
klog.InfoS("Requested resource is omitted for node score calculation", "resourceName", resource)
}
klog.V(10).InfoS("Requested resource is omitted for node score calculation", "resourceName", resource)
return 0, 0
}
