Skip to content

Commit

Permalink
Linter fixes - unconvert, revive:empty-lines, revive:var-naming, revive:unused-parameter (influxdata#9036)
Browse files Browse the repository at this point in the history

Co-authored-by: Pawel Zak <Pawel Zak>
  • Loading branch information
zak-pawel authored Mar 24, 2021
1 parent 9aaaf72 commit 5de640b
Show file tree
Hide file tree
Showing 66 changed files with 337 additions and 336 deletions.
4 changes: 2 additions & 2 deletions internal/process/process.go
Original file line number Diff line number Diff line change
Expand Up @@ -126,12 +126,12 @@ func (p *Process) cmdLoop(ctx context.Context) error {
}

p.Log.Errorf("Process %s exited: %v", p.Cmd.Path, err)
p.Log.Infof("Restarting in %s...", time.Duration(p.RestartDelay))
p.Log.Infof("Restarting in %s...", p.RestartDelay)

select {
case <-ctx.Done():
return nil
case <-time.After(time.Duration(p.RestartDelay)):
case <-time.After(p.RestartDelay):
// Continue the loop and restart the process
if err := p.cmdStart(); err != nil {
return err
Expand Down
4 changes: 2 additions & 2 deletions metric/metric.go
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,7 @@ func convertField(v interface{}) interface{} {
case uint:
return uint64(v)
case uint64:
return uint64(v)
return v
case []byte:
return string(v)
case int32:
Expand Down Expand Up @@ -340,7 +340,7 @@ func convertField(v interface{}) interface{} {
}
case *uint64:
if v != nil {
return uint64(*v)
return *v
}
case *[]byte:
if v != nil {
Expand Down
16 changes: 8 additions & 8 deletions plugins/inputs/aliyuncms/aliyuncms.go
Original file line number Diff line number Diff line change
Expand Up @@ -458,11 +458,11 @@ L:
metric.requestDimensions = make([]map[string]string, 0, len(s.discoveryData))

//Preparing tags & dims...
for instanceId, elem := range s.discoveryData {
for instanceID, elem := range s.discoveryData {
//Start filing tags
//Remove old value if exist
delete(metric.discoveryTags, instanceId)
metric.discoveryTags[instanceId] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags))
delete(metric.discoveryTags, instanceID)
metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaulTags))

for _, tagQueryPath := range metric.TagsQueryPath {
tagKey, tagValue, err := parseTag(tagQueryPath, elem)
Expand All @@ -471,11 +471,11 @@ L:
continue
}
if err == nil && tagValue == "" { //Nothing found
s.Log.Debugf("Data by query path %q: is not found, for instance %q", tagQueryPath, instanceId)
s.Log.Debugf("Data by query path %q: is not found, for instance %q", tagQueryPath, instanceID)
continue
}

metric.discoveryTags[instanceId][tagKey] = tagValue
metric.discoveryTags[instanceID][tagKey] = tagValue
}

//Adding default tags if not already there
Expand All @@ -489,17 +489,17 @@ L:

if err == nil && tagValue == "" { //Nothing found
s.Log.Debugf("Data by query path %q: is not found, for instance %q",
defaultTagQP, instanceId)
defaultTagQP, instanceID)
continue
}

metric.discoveryTags[instanceId][tagKey] = tagValue
metric.discoveryTags[instanceID][tagKey] = tagValue
}

//Preparing dimensions (first adding dimensions that comes from discovery data)
metric.requestDimensions = append(
metric.requestDimensions,
map[string]string{s.dimensionKey: instanceId})
map[string]string{s.dimensionKey: instanceID})
}

//Get final dimension (need to get full lis of
Expand Down
26 changes: 13 additions & 13 deletions plugins/inputs/aliyuncms/discovery.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ type discoveryTool struct {
cli map[string]aliyunSdkClient //API client, which perform discovery request

respRootKey string //Root key in JSON response where to look for discovery data
respObjectIdKey string //Key in element of array under root key, that stores object ID
respObjectIDKey string //Key in element of array under root key, that stores object ID
//for ,majority of cases it would be InstanceId, for OSS it is BucketName. This key is also used in dimension filtering// )
wg sync.WaitGroup //WG for primary discovery goroutine
interval time.Duration //Discovery interval
Expand All @@ -69,9 +69,9 @@ type discoveryTool struct {
lg telegraf.Logger //Telegraf logger (should be provided)
}

//getRpcReqFromDiscoveryRequest - utility function to map between aliyun request primitives
//getRPCReqFromDiscoveryRequest - utility function to map between aliyun request primitives
//discoveryRequest represents different type of discovery requests
func getRpcReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) {
func getRPCReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) {
if reflect.ValueOf(req).Type().Kind() != reflect.Ptr ||
reflect.ValueOf(req).IsNil() {
return nil, errors.Errorf("Not expected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind())
Expand Down Expand Up @@ -109,7 +109,7 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred
cli = map[string]aliyunSdkClient{}
parseRootKey = regexp.MustCompile(`Describe(.*)`)
responseRootKey string
responseObjectIdKey string
responseObjectIDKey string
err error
noDiscoverySupportErr = errors.Errorf("no discovery support for project %q", project)
)
Expand All @@ -127,13 +127,13 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred
switch project {
case "acs_ecs_dashboard":
dscReq[region] = ecs.CreateDescribeInstancesRequest()
responseObjectIdKey = "InstanceId"
responseObjectIDKey = "InstanceId"
case "acs_rds_dashboard":
dscReq[region] = rds.CreateDescribeDBInstancesRequest()
responseObjectIdKey = "DBInstanceId"
responseObjectIDKey = "DBInstanceId"
case "acs_slb_dashboard":
dscReq[region] = slb.CreateDescribeLoadBalancersRequest()
responseObjectIdKey = "LoadBalancerId"
responseObjectIDKey = "LoadBalancerId"
case "acs_memcache":
return nil, noDiscoverySupportErr
case "acs_ocs":
Expand All @@ -152,7 +152,7 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred
//req.InitWithApiInfo("oss", "2014-08-15", "DescribeDBInstances", "oss", "openAPI")
case "acs_vpc_eip":
dscReq[region] = vpc.CreateDescribeEipAddressesRequest()
responseObjectIdKey = "AllocationId"
responseObjectIDKey = "AllocationId"
case "acs_kvstore":
return nil, noDiscoverySupportErr
case "acs_mns_new":
Expand Down Expand Up @@ -253,7 +253,7 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred
//Getting response root key (if not set already). This is to be able to parse discovery responses
//As they differ per object type
//Discovery requests are of the same type per every region, so pick the first one
rpcReq, err := getRpcReqFromDiscoveryRequest(dscReq[regions[0]])
rpcReq, err := getRPCReqFromDiscoveryRequest(dscReq[regions[0]])
//This means that the discovery request is not of proper type/kind
if err != nil {
return nil, errors.Errorf("Can't parse rpc request object from discovery request %v", dscReq[regions[0]])
Expand Down Expand Up @@ -283,7 +283,7 @@ func NewDiscoveryTool(regions []string, project string, lg telegraf.Logger, cred
req: dscReq,
cli: cli,
respRootKey: responseRootKey,
respObjectIdKey: responseObjectIdKey,
respObjectIDKey: responseObjectIDKey,
rateLimit: rateLimit,
interval: discoveryInterval,
reqDefaultPageSize: 20,
Expand Down Expand Up @@ -380,8 +380,8 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com

for _, raw := range discoveryData {
if elem, ok := raw.(map[string]interface{}); ok {
if objectId, ok := elem[dt.respObjectIdKey].(string); ok {
preparedData[objectId] = elem
if objectID, ok := elem[dt.respObjectIDKey].(string); ok {
preparedData[objectID] = elem
}
} else {
return nil, errors.Errorf("Can't parse input data element, not a map[string]interface{} type")
Expand All @@ -407,7 +407,7 @@ func (dt *discoveryTool) getDiscoveryDataAllRegions(limiter chan bool) (map[stri
return nil, errors.Errorf("Error building common discovery request: not valid region %q", region)
}

rpcReq, err := getRpcReqFromDiscoveryRequest(dscReq)
rpcReq, err := getRPCReqFromDiscoveryRequest(dscReq)
if err != nil {
return nil, err
}
Expand Down
2 changes: 1 addition & 1 deletion plugins/inputs/apcupsd/apcupsd.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ import (

const defaultAddress = "tcp://127.0.0.1:3551"

var defaultTimeout = internal.Duration{Duration: time.Duration(time.Second * 5)}
var defaultTimeout = internal.Duration{Duration: time.Second * 5}

type ApcUpsd struct {
Servers []string
Expand Down
8 changes: 4 additions & 4 deletions plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,7 @@ func (c *CiscoTelemetryMDT) initBgpV4() {
c.nxpathMap[key]["aspath"] = "string"
}

func (c *CiscoTelemetryMDT) initCpu() {
func (c *CiscoTelemetryMDT) initCPU() {
key := "show processes cpu"
c.nxpathMap[key] = make(map[string]string, 5)
c.nxpathMap[key]["kernel_percent"] = "float"
Expand Down Expand Up @@ -654,7 +654,7 @@ func (c *CiscoTelemetryMDT) initPimVrf() {
c.nxpathMap[key]["table-id"] = "string"
}

func (c *CiscoTelemetryMDT) initIpMroute() {
func (c *CiscoTelemetryMDT) initIPMroute() {
key := "show ip mroute summary vrf all"
c.nxpathMap[key] = make(map[string]string, 40)
c.nxpathMap[key]["nat-mode"] = "string"
Expand Down Expand Up @@ -842,7 +842,7 @@ func (c *CiscoTelemetryMDT) initDb() {
c.initPower()
c.initMemPhys()
c.initBgpV4()
c.initCpu()
c.initCPU()
c.initResources()
c.initPtpCorrection()
c.initTrans()
Expand All @@ -861,7 +861,7 @@ func (c *CiscoTelemetryMDT) initDb() {
c.initPimStats()
c.initIntfBrief()
c.initPimVrf()
c.initIpMroute()
c.initIPMroute()
c.initIpv6Mroute()
c.initVpc()
c.initBgp()
Expand Down
2 changes: 1 addition & 1 deletion plugins/inputs/cloud_pubsub/pubsub.go
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error {
if err != nil {
return fmt.Errorf("unable to base64 decode message: %v", err)
}
data = []byte(strData)
data = strData
} else {
data = msg.Data()
}
Expand Down
10 changes: 5 additions & 5 deletions plugins/inputs/couchdb/couchdb.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,9 +125,9 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
if c.client == nil {
c.client = &http.Client{
Transport: &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
ResponseHeaderTimeout: 3 * time.Second,
},
Timeout: time.Duration(4 * time.Second),
Timeout: 4 * time.Second,
}
}

Expand All @@ -147,7 +147,7 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
defer response.Body.Close()

if response.StatusCode != 200 {
return fmt.Errorf("Failed to get stats from couchdb: HTTP responded %d", response.StatusCode)
return fmt.Errorf("failed to get stats from couchdb: HTTP responded %d", response.StatusCode)
}

stats := Stats{}
Expand Down Expand Up @@ -287,9 +287,9 @@ func init() {
return &CouchDB{
client: &http.Client{
Transport: &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
ResponseHeaderTimeout: 3 * time.Second,
},
Timeout: time.Duration(4 * time.Second),
Timeout: 4 * time.Second,
},
}
})
Expand Down
6 changes: 3 additions & 3 deletions plugins/inputs/csgo/csgo.go
Original file line number Diff line number Diff line change
Expand Up @@ -176,15 +176,15 @@ func requestServer(url string, rconPw string) (string, error) {
}
defer remoteConsole.Close()

reqId, err := remoteConsole.Write("stats")
reqID, err := remoteConsole.Write("stats")
if err != nil {
return "", err
}

resp, respReqId, err := remoteConsole.Read()
resp, respReqID, err := remoteConsole.Read()
if err != nil {
return "", err
} else if reqId != respReqId {
} else if reqID != respReqID {
return "", errors.New("response/request mismatch")
} else {
return resp, nil
Expand Down
4 changes: 2 additions & 2 deletions plugins/inputs/directory_monitor/directory_monitor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ func TestCSVGZImport(t *testing.T) {

func TestMultipleJSONFileImports(t *testing.T) {
acc := testutil.Accumulator{}
testJsonFile := "test.json"
testJSONFile := "test.json"

// Establish process directory and finished directory.
finishedDirectory, err := ioutil.TempDir("", "finished")
Expand Down Expand Up @@ -110,7 +110,7 @@ func TestMultipleJSONFileImports(t *testing.T) {

// Let's drop a 5-line LINE-DELIMITED json.
// Write csv file to process into the 'process' directory.
f, err := os.Create(filepath.Join(processDirectory, testJsonFile))
f, err := os.Create(filepath.Join(processDirectory, testJSONFile))
require.NoError(t, err)
f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}\n{\"Name\": \"event2\",\"Speed\": 500,\"Length\": 1.4}\n{\"Name\": \"event3\",\"Speed\": 200,\"Length\": 10.23}\n{\"Name\": \"event4\",\"Speed\": 80,\"Length\": 250}\n{\"Name\": \"event5\",\"Speed\": 120.77,\"Length\": 25.97}")
f.Close()
Expand Down
4 changes: 2 additions & 2 deletions plugins/inputs/diskio/diskio_linux.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,8 @@ func (d *DiskIO) diskInfo(devName string) (map[string]string, error) {
// This allows us to also "poison" it during test scenarios
udevDataPath = ic.udevDataPath
} else {
major := unix.Major(uint64(stat.Rdev))
minor := unix.Minor(uint64(stat.Rdev))
major := unix.Major(uint64(stat.Rdev)) //nolint:unconvert // Conversion needed for some architectures
minor := unix.Minor(uint64(stat.Rdev)) //nolint:unconvert // Conversion needed for some architectures
udevDataPath = fmt.Sprintf("/run/udev/data/b%d:%d", major, minor)

_, err := os.Stat(udevDataPath)
Expand Down
12 changes: 6 additions & 6 deletions plugins/inputs/docker/docker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1136,22 +1136,22 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) {

var (
testDate = time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC)
metricCpuTotal = testutil.MustMetric(
metricCPUTotal = testutil.MustMetric(
"docker_container_cpu",
map[string]string{
"cpu": "cpu-total",
},
map[string]interface{}{},
testDate)

metricCpu0 = testutil.MustMetric(
metricCPU0 = testutil.MustMetric(
"docker_container_cpu",
map[string]string{
"cpu": "cpu0",
},
map[string]interface{}{},
testDate)
metricCpu1 = testutil.MustMetric(
metricCPU1 = testutil.MustMetric(
"docker_container_cpu",
map[string]string{
"cpu": "cpu1",
Expand Down Expand Up @@ -1218,7 +1218,7 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) {
totalInclude: containerMetricClasses,
},
expected: []telegraf.Metric{
metricCpuTotal, metricCpu0, metricCpu1,
metricCPUTotal, metricCPU0, metricCPU1,
metricNetworkTotal, metricNetworkEth0, metricNetworkEth1,
metricBlkioTotal, metricBlkio6_0, metricBlkio6_1,
},
Expand All @@ -1231,7 +1231,7 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) {
totalInclude: []string{},
},
expected: []telegraf.Metric{
metricCpu0, metricCpu1,
metricCPU0, metricCPU1,
metricNetworkEth0, metricNetworkEth1,
metricBlkio6_0, metricBlkio6_1,
},
Expand All @@ -1243,7 +1243,7 @@ func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) {
perDeviceInclude: []string{},
totalInclude: containerMetricClasses,
},
expected: []telegraf.Metric{metricCpuTotal, metricNetworkTotal, metricBlkioTotal},
expected: []telegraf.Metric{metricCPUTotal, metricNetworkTotal, metricBlkioTotal},
},
{
name: "Per device and total metrics disabled",
Expand Down
22 changes: 11 additions & 11 deletions plugins/inputs/filecount/filecount_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -213,20 +213,20 @@ func getFakeFileSystem(basePath string) fakeFileSystem {
var dmask uint32 = 0666

// set directory bit
dmask |= (1 << uint(32-1))
dmask |= 1 << uint(32-1)

// create a lookup map for getting "files" from the "filesystem"
fileList := map[string]fakeFileInfo{
basePath: {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true},
basePath + "/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime},
basePath + "/bar": {name: "bar", filemode: uint32(fmask), modtime: mtime},
basePath + "/baz": {name: "baz", filemode: uint32(fmask), modtime: olderMtime},
basePath + "/qux": {name: "qux", size: int64(400), filemode: uint32(fmask), modtime: mtime},
basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true},
basePath + "/subdir/quux": {name: "quux", filemode: uint32(fmask), modtime: mtime},
basePath + "/subdir/quuz": {name: "quuz", filemode: uint32(fmask), modtime: mtime},
basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: uint32(dmask), modtime: mtime, isdir: true},
basePath + "/subdir/nested2/qux": {name: "qux", filemode: uint32(fmask), modtime: mtime, size: int64(400)},
basePath: {name: "testdata", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true},
basePath + "/foo": {name: "foo", filemode: fmask, modtime: mtime},
basePath + "/bar": {name: "bar", filemode: fmask, modtime: mtime},
basePath + "/baz": {name: "baz", filemode: fmask, modtime: olderMtime},
basePath + "/qux": {name: "qux", size: int64(400), filemode: fmask, modtime: mtime},
basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true},
basePath + "/subdir/quux": {name: "quux", filemode: fmask, modtime: mtime},
basePath + "/subdir/quuz": {name: "quuz", filemode: fmask, modtime: mtime},
basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: dmask, modtime: mtime, isdir: true},
basePath + "/subdir/nested2/qux": {name: "qux", filemode: fmask, modtime: mtime, size: int64(400)},
}

return fakeFileSystem{files: fileList}
Expand Down
Loading

0 comments on commit 5de640b

Please sign in to comment.