Pkg linting issues #5022

Merged: 4 commits, Oct 1, 2023
2 changes: 1 addition & 1 deletion pkg/certificates/certificate_manager.go
@@ -68,7 +68,7 @@ func (cm CertManager) AddCertificateRotation(ctx context.Context, mgr manager.Ma
if err != nil {
return err
}
- extraDNSNames := []string{}
+ var extraDNSNames []string
extraDNSNames = append(extraDNSNames, getDNSNames(cm.OperatorService, cm.K8sClusterDomain)...)
extraDNSNames = append(extraDNSNames, getDNSNames(cm.WebhookService, cm.K8sClusterDomain)...)
extraDNSNames = append(extraDNSNames, getDNSNames(cm.MetricsServerService, cm.K8sClusterDomain)...)
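The change above follows Go's code-review guidance on declaring empty slices: `var s []T` declares a nil slice that works with append, len, and range exactly like `[]T{}`, without allocating anything up front, which is presumably what the linter flagged. A minimal standalone sketch of the equivalence (not KEDA code); the same substitution recurs later in this PR in rabbitmq_scaler_test.go, scale_jobs.go, and metrics_test.go:

```go
package main

import "fmt"

func main() {
	// Lint-preferred form: a nil slice, safe to append to, range over, and measure.
	var extra []string
	fmt.Println(extra == nil, len(extra)) // true 0

	// The literal form replaced in this PR allocates an empty, non-nil slice for no benefit.
	literal := []string{}
	fmt.Println(literal == nil, len(literal)) // false 0

	// Both behave identically once values are appended.
	extra = append(extra, "operator", "webhook")
	literal = append(literal, "operator", "webhook")
	fmt.Println(len(extra) == len(literal)) // true
}
```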
8 changes: 4 additions & 4 deletions pkg/metricscollector/metricscollectors.go
@@ -35,7 +35,7 @@ type MetricsCollector interface {
// RecordScalerLatency create a measurement of the latency to external metric
RecordScalerLatency(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64)

- // RecordScaledObjectLatency create a measurement of the latency executing scalable object loop
+ // RecordScalableObjectLatency create a measurement of the latency executing scalable object loop
RecordScalableObjectLatency(namespace string, name string, isScaledObject bool, value float64)

// RecordScalerActive create a measurement of the activity of the scaler
@@ -44,7 +44,7 @@ type MetricsCollector interface {
// RecordScalerError counts the number of errors occurred in trying get an external metric used by the HPA
RecordScalerError(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, err error)

- // RecordScaleObjectError counts the number of errors with the scaled object
+ // RecordScaledObjectError counts the number of errors with the scaled object
RecordScaledObjectError(namespace string, scaledObject string, err error)

IncrementTriggerTotal(triggerType string)
@@ -82,7 +82,7 @@ func RecordScalerLatency(namespace string, scaledObject string, scaler string, s
}
}

- // RecordScaledObjectLatency create a measurement of the latency executing scalable object loop
+ // RecordScalableObjectLatency create a measurement of the latency executing scalable object loop
func RecordScalableObjectLatency(namespace string, name string, isScaledObject bool, value float64) {
for _, element := range collectors {
element.RecordScalableObjectLatency(namespace, name, isScaledObject, value)
@@ -103,7 +103,7 @@ func RecordScalerError(namespace string, scaledObject string, scaler string, sca
}
}

- // RecordScaleObjectError counts the number of errors with the scaled object
+ // RecordScaledObjectError counts the number of errors with the scaled object
func RecordScaledObjectError(namespace string, scaledObject string, err error) {
for _, element := range collectors {
element.RecordScaledObjectError(namespace, scaledObject, err)
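Most of the comment edits in this PR apply Go's convention that a doc comment on an exported identifier must begin with that identifier's exact name; checks such as revive's exported rule or staticcheck's ST1020 report mismatches like a method named RecordScaledObjectError documented as "RecordScaleObjectError". A small hedged sketch with an illustrative package and function (not from KEDA):

```go
// Package example illustrates the doc-comment convention these fixes follow.
package example

// RecordLatency records one latency observation in milliseconds.
// Starting the sentence with "RecordLatency" is what the lint checks look for;
// a comment that opens with a different or misspelled name is reported and
// also reads wrong in `go doc` output.
func RecordLatency(ms float64) {
	_ = ms // body elided; only the doc comment matters for this sketch
}
```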
4 changes: 2 additions & 2 deletions pkg/metricscollector/opentelemetry.go
@@ -111,7 +111,7 @@ func (o *OtelMetrics) RecordScalerLatency(namespace string, scaledObject string,
}
}

- // RecordScaledObjectLatency create a measurement of the latency executing scalable object loop
+ // RecordScalableObjectLatency create a measurement of the latency executing scalable object loop
func (o *OtelMetrics) RecordScalableObjectLatency(namespace string, name string, isScaledObject bool, value float64) {
resourceType := "scaledjob"
if isScaledObject {
@@ -167,7 +167,7 @@ func (o *OtelMetrics) RecordScalerError(namespace string, scaledObject string, s
}
}

- // RecordScaleObjectError counts the number of errors with the scaled object
+ // RecordScaledObjectError counts the number of errors with the scaled object
func (o *OtelMetrics) RecordScaledObjectError(namespace string, scaledObject string, err error) {
opt := api.WithAttributes(
attribute.Key("namespace").String(namespace),
4 changes: 2 additions & 2 deletions pkg/metricscollector/prommetrics.go
@@ -158,7 +158,7 @@ func (p *PromMetrics) RecordScalerLatency(namespace string, scaledObject string,
scalerMetricsLatency.With(getLabels(namespace, scaledObject, scaler, scalerIndex, metric)).Set(value)
}

- // RecordScaledObjectLatency create a measurement of the latency executing scalable object loop
+ // RecordScalableObjectLatency create a measurement of the latency executing scalable object loop
func (p *PromMetrics) RecordScalableObjectLatency(namespace string, name string, isScaledObject bool, value float64) {
resourceType := "scaledjob"
if isScaledObject {
@@ -192,7 +192,7 @@ func (p *PromMetrics) RecordScalerError(namespace string, scaledObject string, s
}
}

- // RecordScaleObjectError counts the number of errors with the scaled object
+ // RecordScaledObjectError counts the number of errors with the scaled object
func (p *PromMetrics) RecordScaledObjectError(namespace string, scaledObject string, err error) {
labels := prometheus.Labels{"namespace": namespace, "scaledObject": scaledObject}
if err != nil {
6 changes: 3 additions & 3 deletions pkg/scalers/azure/azure_cloud_environment.go
@@ -13,13 +13,13 @@ const (
// PrivateCloud cloud type
PrivateCloud string = "Private"

- // Default Endpoint key in trigger metadata
+ // DefaultEndpointSuffixKey is the default endpoint key in trigger metadata
DefaultEndpointSuffixKey string = "endpointSuffix"

- // Default Storage Endpoint key in trigger metadata
+ // DefaultStorageSuffixKey is the default storage endpoint key in trigger metadata
DefaultStorageSuffixKey string = "storageEndpointSuffix"

- // Default Active Directory Endpoint Key in trigger metadata
+ // DefaultActiveDirectoryEndpointKey is the default active directory endpoint key in trigger metadata
DefaultActiveDirectoryEndpointKey string = "activeDirectoryEndpoint"
)

8 changes: 4 additions & 4 deletions pkg/scalers/azure/azure_eventhub_checkpoint.go
@@ -100,22 +100,22 @@ func GetCheckpointFromBlobStorage(ctx context.Context, httpClient util.HTTPDoer,

func newCheckpointer(info EventHubInfo, partitionID string) checkpointer {
switch {
case (info.CheckpointStrategy == "goSdk"):
case info.CheckpointStrategy == "goSdk":
return &goSdkCheckpointer{
containerName: info.BlobContainer,
partitionID: partitionID,
}
case (info.CheckpointStrategy == "dapr"):
case info.CheckpointStrategy == "dapr":
return &daprCheckpointer{
containerName: info.BlobContainer,
partitionID: partitionID,
}
case (info.CheckpointStrategy == "blobMetadata"):
case info.CheckpointStrategy == "blobMetadata":
return &blobMetadataCheckpointer{
containerName: info.BlobContainer,
partitionID: partitionID,
}
case (info.CheckpointStrategy == "azureFunction" || info.BlobContainer == ""):
case info.CheckpointStrategy == "azureFunction" || info.BlobContainer == "":
return &azureFunctionCheckpointer{
containerName: "azure-webjobs-eventhub",
partitionID: partitionID,
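In an expression-less `switch`, every `case` is already a standalone boolean condition, so the parentheses removed above were redundant; one of the checks in the project's lint setup presumably flags them. A hedged sketch of the same shape, using an illustrative helper (checkpointKind is not a real KEDA function and does not reproduce newCheckpointer's exact ordering):

```go
package main

import "fmt"

// checkpointKind shows the expression-less switch shape: each case is a bare
// boolean condition, so no parentheses are needed around it.
func checkpointKind(strategy, blobContainer string) string {
	switch {
	case strategy == "goSdk":
		return "go-sdk"
	case strategy == "dapr":
		return "dapr"
	case strategy == "azureFunction" || blobContainer == "":
		return "azure-functions"
	default:
		return "blob-metadata"
	}
}

func main() {
	fmt.Println(checkpointKind("", ""))       // azure-functions
	fmt.Println(checkpointKind("dapr", "c1")) // dapr
}
```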
@@ -26,7 +26,7 @@ type azureManagedPrometheusHTTPRoundTripper struct {
resourceURL string
}

- // Tries to get a round tripper.
+ // TryAndGetAzureManagedPrometheusHTTPRoundTripper tries to get a round tripper.
// If the pod identity represents azure auth, it creates a round tripper and returns that. Returns error if fails to create one.
// If its not azure auth, then this becomes a no-op. Neither returns round tripper nor error.
func TryAndGetAzureManagedPrometheusHTTPRoundTripper(logger logr.Logger, podIdentity kedav1alpha1.AuthPodIdentity, triggerMetadata map[string]string) (http.RoundTripper, error) {
@@ -68,7 +68,7 @@ func TryAndGetAzureManagedPrometheusHTTPRoundTripper(logger logr.Logger, podIden
return nil, nil
}

- // Sets Auhtorization header for requests
+ // RoundTrip sets authorization header for requests
func (rt *azureManagedPrometheusHTTPRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
token, err := rt.chainedCredential.GetToken(req.Context(), policy.TokenRequestOptions{Scopes: []string{rt.resourceURL}})

2 changes: 1 addition & 1 deletion pkg/scalers/azure_servicebus_scaler.go
@@ -227,7 +227,7 @@ func (s *azureServiceBusScaler) Close(context.Context) error {
return nil
}

- // Returns the metric spec to be used by the HPA
+ // GetMetricSpecForScaling returns the metric spec to be used by the HPA
func (s *azureServiceBusScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
metricName := ""

2 changes: 1 addition & 1 deletion pkg/scalers/cpu_memory_scaler.go
@@ -118,7 +118,7 @@ func (s *cpuMemoryScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSp
return []v2.MetricSpec{metricSpec}
}

- // GetMetrics no need for cpu/memory scaler and always active for cpu/memory scaler
+ // GetMetricsAndActivity no need for cpu/memory scaler and always active for cpu/memory scaler
func (s *cpuMemoryScaler) GetMetricsAndActivity(_ context.Context, _ string) ([]external_metrics.ExternalMetricValue, bool, error) {
return nil, true, nil
}
4 changes: 2 additions & 2 deletions pkg/scalers/datadog_scaler.go
@@ -396,7 +396,7 @@ func (s *datadogScaler) GetMetricsAndActivity(ctx context.Context, metricName st
return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationQueryValue, nil
}

- // Find the largest value in a slice of floats
+ // MaxFloatFromSlice finds the largest value in a slice of floats
func MaxFloatFromSlice(results []float64) float64 {
max := results[0]
for _, result := range results {
@@ -407,7 +407,7 @@ func MaxFloatFromSlice(results []float64) float64 {
return max
}

- // Find the average value in a slice of floats
+ // AvgFloatFromSlice finds the average value in a slice of floats
func AvgFloatFromSlice(results []float64) float64 {
total := 0.0
for _, result := range results {
2 changes: 1 addition & 1 deletion pkg/scalers/gcp_cloud_tasks_scaler.go
@@ -35,7 +35,7 @@ type gcpCloudTaskMetadata struct {
scalerIndex int
}

- // NewCloudTaskScaler creates a new cloudTaskScaler
+ // NewGcpCloudTasksScaler creates a new cloudTaskScaler
func NewGcpCloudTasksScaler(config *ScalerConfig) (Scaler, error) {
metricType, err := GetMetricTargetType(config)
if err != nil {
2 changes: 1 addition & 1 deletion pkg/scalers/gcp_stackdriver_client.go
@@ -47,7 +47,7 @@ func NewStackDriverClient(ctx context.Context, credentials string) (*StackDriver
}, nil
}

- // NewStackDriverClient creates a new stackdriver client with the credentials underlying
+ // NewStackDriverClientPodIdentity creates a new stackdriver client with the credentials underlying
func NewStackDriverClientPodIdentity(ctx context.Context) (*StackDriverClient, error) {
client, err := monitoring.NewMetricClient(ctx)
if err != nil {
2 changes: 1 addition & 1 deletion pkg/scalers/pulsar_scaler.go
@@ -298,7 +298,7 @@ func (s *pulsarScaler) getMsgBackLog(ctx context.Context) (int64, bool, error) {
return v.Msgbacklog, found, nil
}

- // GetGetMetricsAndActivityMetrics returns value for a supported metric and an error if there is a problem getting the metric
+ // GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
func (s *pulsarScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
msgBacklog, found, err := s.getMsgBackLog(ctx)
if err != nil {
6 changes: 3 additions & 3 deletions pkg/scalers/rabbitmq_scaler_test.go
@@ -335,7 +335,7 @@ func Test_getVhostAndPathFromURL(t *testing.T) {
}

func TestGetQueueInfo(t *testing.T) {
- allTestData := []getQueueInfoTestData{}
+ var allTestData []getQueueInfoTestData
allTestData = append(allTestData, testQueueInfoTestDataSingleVhost...)
for _, testData := range testQueueInfoTestData {
for _, vhostAnsSubpathsData := range getVhostAndPathFromURLTestData {
@@ -477,7 +477,7 @@ var testRegexQueueInfoTestData = []getQueueInfoTestData{
}

func TestGetQueueInfoWithRegex(t *testing.T) {
- allTestData := []getQueueInfoTestData{}
+ var allTestData []getQueueInfoTestData
for _, testData := range testRegexQueueInfoTestData {
for _, vhostAndSubpathsData := range getVhostAndPathFromURLTestData {
testData := testData
@@ -563,7 +563,7 @@ var testRegexPageSizeTestData = []getRegexPageSizeTestData{
}

func TestGetPageSizeWithRegex(t *testing.T) {
- allTestData := []getRegexPageSizeTestData{}
+ var allTestData []getRegexPageSizeTestData
for _, testData := range testRegexPageSizeTestData {
for _, vhostAndSubpathsData := range getVhostAndPathFromURLTestData {
testData := testData
4 changes: 2 additions & 2 deletions pkg/scalers/scaler.go
@@ -42,10 +42,10 @@ func init() {

// Scaler interface
type Scaler interface {
- // The scaler returns the metric values and activity for a metric Name
+ // GetMetricsAndActivity returns the metric values and activity for a metric Name
GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error)

- // Returns the metrics based on which this scaler determines that the ScaleTarget scales. This is used to construct the HPA spec that is created for
+ // GetMetricSpecForScaling returns the metrics based on which this scaler determines that the ScaleTarget scales. This is used to construct the HPA spec that is created for
// this scaled object. The labels used should match the selectors used in GetMetrics
GetMetricSpecForScaling(ctx context.Context) []v2.MetricSpec

8 changes: 4 additions & 4 deletions pkg/scalers/solace_scaler.go
@@ -62,7 +62,7 @@ const (
solaceTriggermsgrxrate = "msgrcvrate"
)

- // Struct for Observed Metric Values
+ // SolaceMetricValues is the struct for Observed Metric Values
type SolaceMetricValues struct {
// Observed Message Count
msgCount int
@@ -134,7 +134,7 @@ type solaceSEMPMetadata struct {
ResponseCode int `json:"responseCode"`
}

- // Constructor for SolaceScaler
+ // NewSolaceScaler is the constructor for SolaceScaler
func NewSolaceScaler(config *ScalerConfig) (Scaler, error) {
// Create HTTP Client
httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false)
@@ -428,9 +428,9 @@ func (s *SolaceScaler) GetMetricsAndActivity(ctx context.Context, metricName str
return []external_metrics.ExternalMetricValue{}, false, err
}
return []external_metrics.ExternalMetricValue{metric},
- (metricValues.msgCount > s.metadata.activationMsgCountTarget ||
+ metricValues.msgCount > s.metadata.activationMsgCountTarget ||
metricValues.msgSpoolUsage > s.metadata.activationMsgSpoolUsageTarget ||
- metricValues.msgRcvRate > s.metadata.activationMsgRxRateTarget),
+ metricValues.msgRcvRate > s.metadata.activationMsgRxRateTarget,
nil
}

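The parentheses dropped in the final hunk above wrapped a multi-line boolean expression inside a multi-value return. They are not needed for line continuation: Go only inserts an implicit semicolon after a line ending in a token that can terminate a statement, and a trailing `||` keeps the expression open, so the activity condition parses identically without the wrapping parentheses. A compact sketch under made-up names (metrics, thresholds, and isActive are illustrative, not KEDA types):

```go
package main

import "fmt"

type metrics struct {
	msgCount, msgSpoolUsage, msgRcvRate int
}

type thresholds struct {
	activationMsgCount, activationSpoolUsage, activationRxRate int
}

// isActive mirrors the (value, active) return shape: the boolean spans
// several lines, each ending in ||, so no parentheses are required.
func isActive(m metrics, t thresholds) (int, bool) {
	return m.msgCount,
		m.msgCount > t.activationMsgCount ||
			m.msgSpoolUsage > t.activationSpoolUsage ||
			m.msgRcvRate > t.activationRxRate
}

func main() {
	_, active := isActive(metrics{msgCount: 5}, thresholds{activationMsgCount: 3})
	fmt.Println(active) // true: 5 > 3
}
```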
2 changes: 1 addition & 1 deletion pkg/scaling/cache/scalers_cache.go
@@ -59,7 +59,7 @@ func (c *ScalersCache) GetScalers() ([]scalers.Scaler, []scalers.ScalerConfig) {
return scalersList, configsList
}

- // GetPushScaler returns array of push scalers stored in the cache
+ // GetPushScalers returns array of push scalers stored in the cache
func (c *ScalersCache) GetPushScalers() []scalers.PushScaler {
var result []scalers.PushScaler
for _, s := range c.Scalers {
4 changes: 2 additions & 2 deletions pkg/scaling/executor/scale_jobs.go
@@ -290,8 +290,8 @@ func (e *scaleExecutor) cleanUp(ctx context.Context, scaledJob *kedav1alpha1.Sca
return err
}

- completedJobs := []batchv1.Job{}
- failedJobs := []batchv1.Job{}
+ var completedJobs []batchv1.Job
+ var failedJobs []batchv1.Job
for _, job := range jobs.Items {
job := job
finishedJobConditionType := e.getFinishedJobConditionType(&job)
2 changes: 1 addition & 1 deletion pkg/scaling/scaledjob/metrics_test.go
@@ -10,7 +10,7 @@ import (

func TestTargetAverageValue(t *testing.T) {
// count = 0
- specs := []v2.MetricSpec{}
+ var specs []v2.MetricSpec
metricName := "s0-messageCount"
targetAverageValue := getTargetAverageValue(specs)
assert.Equal(t, float64(0), targetAverageValue)
2 changes: 1 addition & 1 deletion pkg/util/conver_types.go
@@ -16,7 +16,7 @@ limitations under the License.

package util

- // String returns a pointer to the string value passed in.
+ // StringPointer returns a pointer to the string value passed in.
func StringPointer(v string) *string {
return &v
}