diff --git a/.github/workflows/typos.yaml b/.github/workflows/typos.yaml new file mode 100644 index 000000000..eb3408878 --- /dev/null +++ b/.github/workflows/typos.yaml @@ -0,0 +1,16 @@ +name: Typos GitHub Action +on: [pull_request] + +jobs: + run: + name: Spell Check with Typos + runs-on: ubuntu-latest + steps: + - name: Checkout Actions Repository + uses: actions/checkout@v4 + + - name: Check spelling + uses: crate-ci/typos@v1.29.5 + with: + files: ./ + config: ./_typos.toml diff --git a/_typos.toml b/_typos.toml new file mode 100644 index 000000000..53dfc366a --- /dev/null +++ b/_typos.toml @@ -0,0 +1,12 @@ +[files] +extend-exclude = ["vendor/*", "*.mod", "*.sum", "build/"] + +[default] +binary = false + + +[default.extend-words] +teh = "teh" +ded = "ded" +eles = "eles" +TAGED = "TAGED" \ No newline at end of file diff --git a/cmd/csi-agent/main.go b/cmd/csi-agent/main.go index a43e38ede..0b2e3ebb9 100644 --- a/cmd/csi-agent/main.go +++ b/cmd/csi-agent/main.go @@ -77,7 +77,7 @@ func (s *FileGrpcServer) ServeOneRequest(method string) error { func main() { _, ok := os.LookupEnv("KUBELET_ROOT_DIR") if !ok { - // This is nessesary to disable the check of sub-directory in NodeServerWithValidator. + // This is necessary to disable the check of sub-directory in NodeServerWithValidator. // CSI-agent is not invoked by kubelet, so there is no usual kubelet directory structure. utils.KubeletRootDir = "/" } diff --git a/docs/nas-dynamic.md b/docs/nas-dynamic.md index ea584ae80..f25d47a34 100644 --- a/docs/nas-dynamic.md +++ b/docs/nas-dynamic.md @@ -25,7 +25,7 @@ Parameters: > > mode: Optional. Define the mount option in pv spec, which will be used after mount action. > -> modeType: Optional. Default non-recursive. Define the Mode action behavior, recursive: chmod with -R and chanage all files mode under the mounted directory. non-recursive: chmod without -R and only chanage the directory mode. +> modeType: Optional. Default non-recursive. 
Define the Mode action behavior, recursive: chmod with -R and change all files mode under the mounted directory. non-recursive: chmod without -R and only change the directory mode. > > archiveOnDelete: Optional. decide how to process removal path, if reclaimPolicy defined as delete. If set 'true', the removal path will be archived and not removed really, and if set 'false', the removal path will be removed when pv is deleted. diff --git a/pkg/agent/queryserver.go b/pkg/agent/queryserver.go index 8a898254a..c0f4755d6 100644 --- a/pkg/agent/queryserver.go +++ b/pkg/agent/queryserver.go @@ -65,7 +65,7 @@ func (ks *QueryServer) RunQueryServer() { // set router klog.Infof("Started Query Server with unix socket: %s", QueryServerSocket) http.HandleFunc("/api/v1/volumeinfo", ks.volumeInfoHandler) - // http.HandleFunc("/api/v1/podruntime", ks.podRunTimeHander) + // http.HandleFunc("/api/v1/podruntime", ks.podRunTimeHandler) http.HandleFunc("/api/v1/ping", ks.pingHandler) // Server Listen diff --git a/pkg/cloud/metadata/metadata.go b/pkg/cloud/metadata/metadata.go index 150bdd8c2..9f1d9c0e5 100644 --- a/pkg/cloud/metadata/metadata.go +++ b/pkg/cloud/metadata/metadata.go @@ -112,7 +112,7 @@ func (p *immutableProvider) Get(key MetadataKey) (string, error) { if err != nil { return "", err } - klog.V(2).InfoS("retrived metadata", "provider", p.name, "key", key, "value", v) + klog.V(2).InfoS("retrieved metadata", "provider", p.name, "key", key, "value", v) p.values[key] = v return v, nil } diff --git a/pkg/cloud/utils_test.go b/pkg/cloud/utils_test.go index 9cb819515..3116192d1 100644 --- a/pkg/cloud/utils_test.go +++ b/pkg/cloud/utils_test.go @@ -29,7 +29,7 @@ func (endpointMockClient) ProcessCommonRequest(request *requests.CommonRequest) }, "Endpoint": "ecs-openapi-share.cn-region-for-ut.aliyuncs.com", "Id": "cn-region-for-ut", - "SerivceCode": "ecs", + "ServiceCode": "ecs", "Namespace": "" } ] diff --git a/pkg/disk/attachdetach_slot.go b/pkg/disk/attachdetach_slot.go index 
7ca0de825..431d77a99 100644 --- a/pkg/disk/attachdetach_slot.go +++ b/pkg/disk/attachdetach_slot.go @@ -46,7 +46,7 @@ type adSlot interface { } type slot interface { - Aquire(ctx context.Context) error + Acquire(ctx context.Context) error Release() } @@ -63,7 +63,7 @@ type serialADSlot struct { type serialAD_DetachSlot struct{ *serialADSlot } type serialAD_AttachSlot struct{ *serialADSlot } -func (s serialAD_DetachSlot) Aquire(ctx context.Context) error { +func (s serialAD_DetachSlot) Acquire(ctx context.Context) error { if ctx.Err() != nil { return ctx.Err() } @@ -77,7 +77,7 @@ func (s serialAD_DetachSlot) Aquire(ctx context.Context) error { } } -func (s serialAD_AttachSlot) Aquire(ctx context.Context) error { +func (s serialAD_AttachSlot) Acquire(ctx context.Context) error { if ctx.Err() != nil { return ctx.Err() } @@ -112,7 +112,7 @@ func newMaxConcurrentSlot(maxConcurrency int) maxConcurrentSlot { } } -func (s maxConcurrentSlot) Aquire(ctx context.Context) error { +func (s maxConcurrentSlot) Acquire(ctx context.Context) error { if ctx.Err() != nil { return ctx.Err() } @@ -137,8 +137,8 @@ func (parallelSlot) Attach() slot { return noOpSlot{} } type noOpSlot struct{} -func (noOpSlot) Aquire(ctx context.Context) error { return ctx.Err() } -func (noOpSlot) Release() {} +func (noOpSlot) Acquire(ctx context.Context) error { return ctx.Err() } +func (noOpSlot) Release() {} type independentSlot struct { attach slot diff --git a/pkg/disk/attachdetach_slot_test.go b/pkg/disk/attachdetach_slot_test.go index e6012e267..ac7600ba0 100644 --- a/pkg/disk/attachdetach_slot_test.go +++ b/pkg/disk/attachdetach_slot_test.go @@ -19,7 +19,7 @@ func TestDetachPriority(t *testing.T) { for i := 0; i < 3; i++ { go func() { as := s.Attach() - if err := as.Aquire(context.Background()); err != nil { + if err := as.Acquire(context.Background()); err != nil { t.Error(err) return } @@ -32,7 +32,7 @@ func TestDetachPriority(t *testing.T) { } time.Sleep(100 * time.Millisecond) ds := s.Detach() 
- if err := ds.Aquire(context.Background()); err != nil { + if err := ds.Acquire(context.Background()); err != nil { t.Fatal(err) } if seq != 1 { @@ -70,10 +70,10 @@ func TestCancelWaiting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) errs := make(chan error) go func() { - errs <- s.Detach().Aquire(ctx) + errs <- s.Detach().Acquire(ctx) }() go func() { - errs <- s.Attach().Aquire(ctx) + errs <- s.Attach().Acquire(ctx) }() time.Sleep(100 * time.Millisecond) // ensure we enter waiting state cancel() @@ -90,7 +90,7 @@ func TestCancelWaiting(t *testing.T) { } t.Run("serial", func(t *testing.T) { s := NewSlots(1, 1).GetSlotFor("node1") - err := s.Detach().Aquire(context.Background()) // occupy the slot + err := s.Detach().Acquire(context.Background()) // occupy the slot assert.NoError(t, err) testSlot(t, s) }) @@ -99,8 +99,8 @@ func TestCancelWaiting(t *testing.T) { attach: newMaxConcurrentSlot(1), detach: newMaxConcurrentSlot(1), } - assert.NoError(t, s.attach.Aquire(context.Background())) - assert.NoError(t, s.detach.Aquire(context.Background())) + assert.NoError(t, s.attach.Acquire(context.Background())) + assert.NoError(t, s.detach.Acquire(context.Background())) testSlot(t, s) }) } @@ -111,10 +111,10 @@ func TestCancelNoOccupy(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - if err := s.Detach().Aquire(ctx); err != context.Canceled { + if err := s.Detach().Acquire(ctx); err != context.Canceled { t.Fatalf("Expected context.Canceled, got %v", err) } - if err := s.Attach().Aquire(ctx); err != context.Canceled { + if err := s.Attach().Acquire(ctx); err != context.Canceled { t.Fatalf("Expected context.Canceled, got %v", err) } } @@ -135,14 +135,14 @@ func TestSerialDetach(t *testing.T) { ctx := context.Background() as := s.Attach() - err := as.Aquire(ctx) + err := as.Acquire(ctx) if err != nil { t.Fatal(err) } // Attach should not block detach ds := s.Detach() - err = ds.Aquire(ctx) + err = ds.Acquire(ctx) if 
err != nil { t.Fatal(err) } @@ -160,7 +160,7 @@ func TestSerialDetach_NoRace(t *testing.T) { state := -1 for i := 0; i < 2; i++ { go func(i int) { - s.Aquire(ctx) + s.Acquire(ctx) state = i s.Release() wg.Done() diff --git a/pkg/disk/batcher/low_latency.go b/pkg/disk/batcher/low_latency.go index 659c3ab26..4b66c03b1 100644 --- a/pkg/disk/batcher/low_latency.go +++ b/pkg/disk/batcher/low_latency.go @@ -43,7 +43,7 @@ type LowLatency[T any] struct { // The average interval of outgoing batched request traffic is also perRequest, allowing some burst. // // If the batch is fully filled, the request is issued immediately, skipping any remaining wait period, and is not limited by perRequest. -// Concurrent outgoing requests are possible if there are too many incomming requests. +// Concurrent outgoing requests are possible if there are too many incoming requests. func NewLowLatency[T any](ecsClient desc.Client[T], clk clock.WithTicker, perRequest time.Duration, burst int) *LowLatency[T] { return &LowLatency[T]{ ecsClient: ecsClient, diff --git a/pkg/disk/bdf.go b/pkg/disk/bdf.go index 8d28bbe5b..0122493d6 100644 --- a/pkg/disk/bdf.go +++ b/pkg/disk/bdf.go @@ -71,7 +71,7 @@ func FindLines(reader io.Reader, keyword string) []string { return matched } -// IsNoSuchDeviceErr nd device error +// IsNoSuchDeviceErr reports whether err is a "no such device" error func IsNoSuchDeviceErr(err error) bool { if err == nil { return false } @@ -248,7 +248,7 @@ func bindBdfDisk(diskID string) (bdf string, err error) { data, err := os.Readlink(sysPrefix + "/sys/bus/pci/devices/" + bdf + "/driver") if err != nil { klog.Errorf("bindBdfDisk: Disk %s bdf %s Readlink with error: %v", diskID, bdf, err) - return bdf, errors.Wrapf(err, "read disk dirver, diskId=%s, bdf=%s", diskID, bdf) + return bdf, errors.Wrapf(err, "read disk driver, diskId=%s, bdf=%s", diskID, bdf) } driver := filepath.Base(data) klog.Infof("bindBdfDisk: Disk %s bdf %s, kernel driver in use: %s", diskID, bdf, driver) @@ -524,7 +524,7 @@ func (_type 
MachineType) BusRegex() (*regexp.Regexp, error) { } type Driver interface { - CurentDriver() (string, error) + CurrentDriver() (string, error) UnbindDriver() error BindDriver(targetDriver string) error GetDeviceNumber() string @@ -591,7 +591,7 @@ func NewDeviceDriver(volumeId, blockDevice, deviceNumber string, _type MachineTy for _, pciDriver := range []string{"--nvme", "--blk"} { output, err := utils.CommandOnNode("xdragon-bdf", pciDriver, fmt.Sprintf("--id=%s", volumeId)).CombinedOutput() if err != nil { - klog.ErrorS(err, "Failed to excute xdragon-bdf command", "volumeId", volumeId, "output", output) + klog.ErrorS(err, "Failed to execute xdragon-bdf command", "volumeId", volumeId, "output", output) continue } bdf := strings.TrimSpace(string(output)) @@ -618,10 +618,10 @@ func (d *driver) GetDeviceNumber() string { return d.deviceNumber } -func (d *driver) CurentDriver() (string, error) { +func (d *driver) CurrentDriver() (string, error) { data, err := os.Readlink(filepath.Join(sysPrefix, "sys/bus/", d.machineType.BusName(), "devices", d.deviceNumber, "driver")) if err != nil { - klog.Errorf("CurentDriver: read symlink err: %v", err) + klog.Errorf("CurrentDriver: read symlink err: %v", err) return "", err } driver := filepath.Base(data) diff --git a/pkg/disk/bdfcheck.go b/pkg/disk/bdfcheck.go index 321d5f7d0..ef3dfc32a 100644 --- a/pkg/disk/bdfcheck.go +++ b/pkg/disk/bdfcheck.go @@ -184,7 +184,7 @@ func getDiskUnUsedAndAddTag() ([]string, error) { // there are unUsedDevices in host; diskIDList, err := addDiskBdfTag(stillUnusedDevices) - return stillUnusedDevices, fmt.Errorf("UnUsedDisks: %v, Udpate Tags: %v", diskIDList, err) + return stillUnusedDevices, fmt.Errorf("UnUsedDisks: %v, Update Tags: %v", diskIDList, err) } // get device mounted as filesystem or block volume diff --git a/pkg/disk/cloud.go b/pkg/disk/cloud.go index 56448c2b1..c0cfdab36 100644 --- a/pkg/disk/cloud.go +++ b/pkg/disk/cloud.go @@ -90,7 +90,7 @@ func (ad *DiskAttachDetach) attachDisk(ctx 
context.Context, diskID, nodeID strin } slot := ad.slots.GetSlotFor(nodeID).Attach() - if err := slot.Aquire(ctx); err != nil { + if err := slot.Acquire(ctx); err != nil { return "", status.Errorf(codes.Aborted, "AttachDisk: get ad-slot for disk %s failed: %v", diskID, err) } defer slot.Release() @@ -405,7 +405,7 @@ func (ad *DiskAttachDetach) detachDisk(ctx context.Context, ecsClient *ecs.Clien } // NodeStageVolume/NodeUnstageVolume should be called by sequence slot := ad.slots.GetSlotFor(nodeID).Detach() - if err := slot.Aquire(ctx); err != nil { + if err := slot.Acquire(ctx); err != nil { return status.Errorf(codes.Aborted, "DetachDisk: get ad-slot for disk %s failed: %v", diskID, err) } defer slot.Release() @@ -905,12 +905,12 @@ func clientToken(name string) string { return "h:" + base64.RawStdEncoding.EncodeToString(hash.Sum(nil)) } -var vaildDiskNameRegexp = regexp.MustCompile(`^\pL[\pL0-9:_.-]{1,127}$`) +var validDiskNameRegexp = regexp.MustCompile(`^\pL[\pL0-9:_.-]{1,127}$`) // https://help.aliyun.com/zh/ecs/developer-reference/api-ecs-2014-05-26-createdisk // 长度为 2~128 个字符,支持 Unicode 中 letter 分类下的字符(其中包括英文、中文等),ASCII 数字(0-9)。可以包含半角冒号(:)、下划线(_)、半角句号(.)或者短划线(-)。必须以 Unicode 中 letter 分类下的字符开头。 func isValidDiskName(name string) bool { - return vaildDiskNameRegexp.MatchString(name) + return validDiskNameRegexp.MatchString(name) } // https://help.aliyun.com/zh/ecs/developer-reference/api-ecs-2014-05-26-createsnapshot @@ -921,7 +921,7 @@ func isValidSnapshotName(name string) bool { if strings.HasPrefix(name, "auto") { return false } - return vaildDiskNameRegexp.MatchString(name) + return validDiskNameRegexp.MatchString(name) } func createDisk(ecsClient cloud.ECSInterface, diskName, snapshotID string, diskVol *diskVolumeArgs, supportedTypes sets.Set[Category], selectedInstance string) (string, createAttempt, error) { @@ -1084,7 +1084,7 @@ func createDiskAttempt(req *ecs.CreateDiskRequest, attempt createAttempt, ecsCli type createAttempt struct { Category Category 
PerformanceLevel PerformanceLevel - // Instance is the ECS instance ID choosed. Only populated if Category.SingleInstance is true + // Instance is the ECS instance ID chosen. Only populated if Category.SingleInstance is true Instance string } diff --git a/pkg/disk/cloud_test.go b/pkg/disk/cloud_test.go index 2ebec2b06..e0637abe3 100644 --- a/pkg/disk/cloud_test.go +++ b/pkg/disk/cloud_test.go @@ -483,7 +483,7 @@ func TestCreateDisk_ParameterMismatch(t *testing.T) { } } -func TestCreateDisk_NoInfinitLoop(t *testing.T) { +func TestCreateDisk_NoInfiniteLoop(t *testing.T) { ctrl := gomock.NewController(t) client := cloud.NewMockECSInterface(ctrl) diff --git a/pkg/disk/constants.go b/pkg/disk/constants.go index cb8c699c2..1de788d13 100644 --- a/pkg/disk/constants.go +++ b/pkg/disk/constants.go @@ -114,8 +114,8 @@ const ( kubeNodeName = "KUBE_NODE_NAME" // describeResourceType ... describeResourceType = "DataDisk" - // NodeSchedueTag in annotations - NodeSchedueTag = "volume.kubernetes.io/selected-node" + // NodeScheduleTag in annotations + NodeScheduleTag = "volume.kubernetes.io/selected-node" // RetryMaxTimes ... 
RetryMaxTimes = 5 diff --git a/pkg/disk/controllerserver.go b/pkg/disk/controllerserver.go index 8766d81fb..7b1eae709 100644 --- a/pkg/disk/controllerserver.go +++ b/pkg/disk/controllerserver.go @@ -303,7 +303,7 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol newErrMsg := utils.FindSuggestionByErrorMessage(err.Error(), utils.DiskDelete) errMsg := fmt.Sprintf("DeleteVolume: Delete disk with error: %s", newErrMsg) if response != nil { - errMsg = fmt.Sprintf("DeleteVolume: Delete disk with error: %s, with RequstId: %s", newErrMsg, response.RequestId) + errMsg = fmt.Sprintf("DeleteVolume: Delete disk with error: %s, with RequestId: %s", newErrMsg, response.RequestId) } klog.Warningf(errMsg) if strings.Contains(err.Error(), DiskCreatingSnapshot) || strings.Contains(err.Error(), IncorrectDiskStatus) { @@ -442,7 +442,7 @@ func getVolumeSnapshotConfig(req *csi.CreateSnapshotRequest) (*createSnapshotPar vsName := req.Parameters[common.VolumeSnapshotNameKey] vsNameSpace := req.Parameters[common.VolumeSnapshotNamespaceKey] - // volumesnapshot not in parameters, just retrun + // volumesnapshot not in parameters, just return if vsName == "" || vsNameSpace == "" { return &ecsParams, nil } @@ -601,7 +601,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS func snapshotBeforeDelete(disk *ecs.Disk, ecsClient *ecs.Client) error { if !AllCategories[Category(disk.Category)].InstantAccessSnapshot { - klog.Infof("snapshotBeforeDelete: Instant Access snapshot required, but current disk.Catagory is: %s", disk.Category) + klog.Infof("snapshotBeforeDelete: Instant Access snapshot required, but current disk.Category is: %s", disk.Category) return nil } diff --git a/pkg/disk/device_manager.go b/pkg/disk/device_manager.go index 63f2edb0d..4d4a0b5b8 100644 --- a/pkg/disk/device_manager.go +++ b/pkg/disk/device_manager.go @@ -28,7 +28,7 @@ type DeviceManager struct { // Disable read serial from sysfs DisableSerial bool - // 
Support sole alreadly formatted disk partition + // Support sole already formatted disk partition EnableDiskPartition bool } @@ -243,7 +243,7 @@ func (m *DeviceManager) WriteSysfs(devicePath, name string, value []byte) error fileName := filepath.Clean(base + name) if !strings.HasPrefix(fileName, base) { // Note this cannot prevent user from accessing other devices through e.g. /sys/block/vda/subsystem/vdb - // But we cannot restrict symlink either because names like `bdi/read_ahead_kb` may be vaild, in which `bdi` is a symlink. + // But we cannot restrict symlink either because names like `bdi/read_ahead_kb` may be valid, in which `bdi` is a symlink. // Just reject obvious attacks like '../../../root/.ssh/id_rsa'. return fmt.Errorf("invalid relative path in sysConfig: %s", name) } diff --git a/pkg/disk/group_volume_snapshot_utils.go b/pkg/disk/group_volume_snapshot_utils.go index e40436858..94eb6d439 100644 --- a/pkg/disk/group_volume_snapshot_utils.go +++ b/pkg/disk/group_volume_snapshot_utils.go @@ -29,7 +29,7 @@ func getVolumeGroupSnapshotConfig(req *csi.CreateVolumeGroupSnapshotRequest) (*c vsName := req.Parameters[common.VolumeGroupSnapshotNameKey] vsNameSpace := req.Parameters[common.VolumeGroupSnapshotNamespaceKey] - // volumesnapshot not in parameters, just retrun + // volumesnapshot not in parameters, just return if vsName == "" || vsNameSpace == "" { return &ecsParams, nil } @@ -231,9 +231,9 @@ func checkSourceVolumes(sourceVolumeIds []string) error { func requestAndDeleteGroupSnapshot(groupSnapshotID string) (*ecs.DeleteSnapshotGroupResponse, error) { // Delete Snapshotgroup - deleteSnapshotGroupRequset := ecs.CreateDeleteSnapshotGroupRequest() - deleteSnapshotGroupRequset.SnapshotGroupId = groupSnapshotID - response, err := GlobalConfigVar.EcsClient.DeleteSnapshotGroup(deleteSnapshotGroupRequset) + deleteSnapshotGroupRequest := ecs.CreateDeleteSnapshotGroupRequest() + deleteSnapshotGroupRequest.SnapshotGroupId = groupSnapshotID + response, err := 
GlobalConfigVar.EcsClient.DeleteSnapshotGroup(deleteSnapshotGroupRequest) if err != nil { return response, err } diff --git a/pkg/disk/nodeserver.go b/pkg/disk/nodeserver.go index a04ac7cee..b9ff288ed 100644 --- a/pkg/disk/nodeserver.go +++ b/pkg/disk/nodeserver.go @@ -724,10 +724,10 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag fileInfo, err := os.Lstat(tmpPath) if err != nil { if strings.Contains(strings.ToLower(err.Error()), InputOutputErr) { - if err = isPathAvailiable(targetPath); err != nil { + if err = isPathAvailable(targetPath); err != nil { if err = ns.k8smounter.Unmount(targetPath); err != nil { klog.Errorf("NodeUnstageVolume: umount target %s(input/output error) with error: %v", targetPath, err) - return nil, status.Errorf(codes.InvalidArgument, "NodeUnstageVolume umount target %s with errror: %v", targetPath, err) + return nil, status.Errorf(codes.InvalidArgument, "NodeUnstageVolume umount target %s with error: %v", targetPath, err) } klog.Warningf("NodeUnstageVolume: target path %s show input/output error: %v, umount it.", targetPath, err) } @@ -1210,7 +1210,7 @@ func (ns *nodeServer) umountRunDVolumes(volumePath string) (bool, error) { } else { d, _ = NewDeviceDriver("", "", mountInfo.Source, BDF, nil) } - cDriver, err := d.CurentDriver() + cDriver, err := d.CurrentDriver() if err != nil { if IsNoSuchFileErr(err) { klog.Infof("driver has been removed, device: %s has empty driver", mountInfo.Source) @@ -1347,7 +1347,7 @@ func (ns *nodeServer) mountRunDVolumes(volumeId, pvName, sourcePath, targetPath, klog.Errorf("NodePublishVolume(rund3.0): can't get bdf number of volume: %s: err: %v", volumeId, err) return true, status.Error(codes.InvalidArgument, "NodePublishVolume: cannot get bdf number of volume: "+volumeId) } - cDriver, err := driver.CurentDriver() + cDriver, err := driver.CurrentDriver() if err != nil { return true, status.Errorf(codes.Internal, "NodePublishVolume(rund3.0): can't get current volume driver: 
%+v", err) } @@ -1414,7 +1414,7 @@ func (ns *nodeServer) mountRunDVolumes(volumeId, pvName, sourcePath, targetPath, err = directvolume.AddMountInfo(directvolume.EnsureVolumeAttributesFileDir(targetPath, isRawBlock), mountInfo) if err != nil { - klog.Errorf("NodePublishVolume(rund3.0): Adding runD mount infomation to DirectVolume failed: %v", err) + klog.Errorf("NodePublishVolume(rund3.0): Adding runD mount information to DirectVolume failed: %v", err) return true, err } @@ -1495,12 +1495,12 @@ func (ns *nodeServer) checkMountedOfRunvAndRund(volumeId, targetPath string) boo klog.ErrorS(err, "NodeStageVolume: Failed to get bdf number", "volumeId", volumeId) return false } - cDrvier, err := d.CurentDriver() + cDriver, err := d.CurrentDriver() if err != nil { klog.ErrorS(err, "NodeStageVolume: Failed to get current driver", "volumeId", volumeId) return false } - if vfioDrivers.Has(cDrvier) { + if vfioDrivers.Has(cDriver) { return true } return false diff --git a/pkg/disk/utils.go b/pkg/disk/utils.go index 5c8f65cc3..07f2b5871 100644 --- a/pkg/disk/utils.go +++ b/pkg/disk/utils.go @@ -336,7 +336,7 @@ func prepareMountInfos(req *csi.NodePublishVolumeRequest) ([]string, string) { return options, fsType } -// GetVolumeIDByDevice get volumeID by specific deivce name according to device meta-info +// GetVolumeIDByDevice get volumeID by specific device name according to device meta-info func GetVolumeIDByDevice(device string) (volumeID string, err error) { // get volume by serial number feature deviceName := device @@ -543,7 +543,7 @@ func getDiskVolumeOptions(req *csi.CreateVolumeRequest) (*diskVolumeArgs, error) diskVolArgs.RegionID = GlobalConfigVar.Region } - diskVolArgs.NodeSelected, _ = volOptions[NodeSchedueTag] + diskVolArgs.NodeSelected, _ = volOptions[NodeScheduleTag] // fstype // https://github.com/kubernetes-csi/external-provisioner/releases/tag/v1.0.1 @@ -825,7 +825,7 @@ func checkDeviceAvailable(mountinfoPath, devicePath, volumeID, targetPath string } if 
isDeviceMountedAt(mnts, devicePath, utils.KubeletRootDir) { - return fmt.Errorf("devicePath(%s) is used as DataDisk for kubelet, cannot used fo Volume", devicePath) + return fmt.Errorf("devicePath(%s) is used as DataDisk for kubelet, cannot used for Volume", devicePath) } return nil } @@ -845,8 +845,8 @@ func GetVolumeDeviceName(diskID string) (string, error) { return device, err } -// isPathAvailiable -func isPathAvailiable(path string) error { +// isPathAvailable +func isPathAvailable(path string) error { f, err := os.Open(path) if err != nil { return fmt.Errorf("Open Path (%s) with error: %v ", path, err) @@ -1117,7 +1117,7 @@ func getAvailableDiskCountFromAnnotation(node *v1.Node) (int, error) { } var typeInfo InstanceTypeInfo if err := json.Unmarshal([]byte(node.Annotations[instanceTypeInfoAnnotation]), &typeInfo); err != nil { - klog.Errorf("error unmarshaling instance type info annotation: %v", err) + klog.Errorf("error unmarshalling instance type info annotation: %v", err) return 0, err } return typeInfo.DiskQuantity, nil diff --git a/pkg/disk/utils_test.go b/pkg/disk/utils_test.go index 79ad3effd..b00a3a87c 100644 --- a/pkg/disk/utils_test.go +++ b/pkg/disk/utils_test.go @@ -411,7 +411,7 @@ func TestGetVolumeCountFromOpenAPI(t *testing.T) { dev.AddDisk(t, "node-for-testingdetachingdisk", []byte("d-testingdetachingdisk")) // manually attached disk has no xattr dev.AddDisk(t, "node-for-2zeh74nnxxrobxz49eug", nil) - // an arbirary error for getxattr, we should ignore it + // an arbitrary error for getxattr, we should ignore it dev.AddDisk(t, "node-for-testinglocaldisk", []byte("d-some-very-looooog-value-that-cause-getxattr-to-fail")) getNode := func() (*corev1.Node, error) { return testNode(), nil } diff --git a/pkg/ens/ens.go b/pkg/ens/ens.go index cee55ae44..d75714f43 100644 --- a/pkg/ens/ens.go +++ b/pkg/ens/ens.go @@ -31,8 +31,8 @@ const ( LOCAL_SSD = "local_ssd" ENS_DISK_AVAILABLE = "available" - // NodeSchedueTag in annotations - NodeSchedueTag = 
"volume.kubernetes.io/selected-node" + // NodeScheduleTag in annotations + NodeScheduleTag = "volume.kubernetes.io/selected-node" // DiskNotAvailable error DiskNotAvailable = "InvalidDataDiskCategory.NotSupported" diff --git a/pkg/ens/nodeserver.go b/pkg/ens/nodeserver.go index de0a81f8e..f0294d1f8 100644 --- a/pkg/ens/nodeserver.go +++ b/pkg/ens/nodeserver.go @@ -465,10 +465,10 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag fileInfo, err := os.Lstat(tmpPath) if err != nil { if strings.Contains(strings.ToLower(err.Error()), INPUT_OUTPUT_ERR) { - if err = utils.IsPathAvailiable(targetPath); err != nil { + if err = utils.IsPathAvailable(targetPath); err != nil { if err = ns.k8smounter.Unmount(targetPath); err != nil { klog.Errorf("NodeUnstageVolume: umount target %s(input/output error) with error: %v", targetPath, err) - return nil, status.Errorf(codes.InvalidArgument, "NodeUnstageVolume umount target %s with errror: %v", targetPath, err) + return nil, status.Errorf(codes.InvalidArgument, "NodeUnstageVolume umount target %s with error: %v", targetPath, err) } klog.Warningf("NodeUnstageVolume: target path %s show input/output error: %v, umount it.", targetPath, err) } diff --git a/pkg/ens/utils.go b/pkg/ens/utils.go index b80a7fefe..f03b41838 100644 --- a/pkg/ens/utils.go +++ b/pkg/ens/utils.go @@ -102,7 +102,7 @@ func ValidateCreateVolumeParams(params map[string]string) (*DiskParams, error) { DiskType: diskType, DiskTags: params["diskTags"], ResourceGroupID: params["resourceGroupId"], - NodeSelected: params[NodeSchedueTag], + NodeSelected: params[NodeScheduleTag], } return dp, nil } diff --git a/pkg/metric/utils.go b/pkg/metric/utils.go index 9c1c6c75a..e3fe98283 100644 --- a/pkg/metric/utils.go +++ b/pkg/metric/utils.go @@ -271,33 +271,33 @@ func almostEqualFloat64(a, b float64) bool { } func parseLantencyThreshold(s string, defaults float64) (float64, error) { - var thresholNum int - var threshodUnit string - _, err := 
fmt.Sscanf(s, "%d%s", &thresholNum, &threshodUnit) + var thresholdNum int + var thresholdUnit string + _, err := fmt.Sscanf(s, "%d%s", &thresholdNum, &thresholdUnit) if err != nil { klog.Errorf("Parse latency threshold %s is failed, err:%s", s, err) return defaults, err } - switch threshodUnit { + switch thresholdUnit { case "s", "second", "seconds": - return float64(thresholNum * 1000), nil + return float64(thresholdNum * 1000), nil case "ms", "millisecond", "milliseconds": - return float64(thresholNum), nil + return float64(thresholdNum), nil case "us", "microsecond", "microseconds": - return float64(thresholNum / 1000), nil + return float64(thresholdNum / 1000), nil default: return defaults, nil } } func parseCapacityThreshold(s string, defaults float64) (float64, error) { - var thresholNum float64 - _, err := fmt.Sscanf(s, "%f", &thresholNum) + var thresholdNum float64 + _, err := fmt.Sscanf(s, "%f", &thresholdNum) if err != nil { klog.Errorf("Parse threshold %s is failed, err:%s", s, err) return defaults, err } - return thresholNum, nil + return thresholdNum, nil } func getGlobalMountPathByDiskID(diskID string) string { diff --git a/pkg/mounter/fuse_pod_manager.go b/pkg/mounter/fuse_pod_manager.go index 6e7f3fc14..ba9ddd028 100644 --- a/pkg/mounter/fuse_pod_manager.go +++ b/pkg/mounter/fuse_pod_manager.go @@ -25,8 +25,8 @@ import ( ) const ( - fusePodManagerTimeout = time.Second * 30 - fuseServieAccountName = "csi-fuse-ossfs" + fusePodManagerTimeout = time.Second * 30 + fuseServiceAccountName = "csi-fuse-ossfs" // deprecated LegacyFusePodNamespace = "kube-system" ) diff --git a/pkg/mounter/helper.go b/pkg/mounter/helper.go index affd8165d..f6cc3cd56 100644 --- a/pkg/mounter/helper.go +++ b/pkg/mounter/helper.go @@ -87,7 +87,7 @@ func GetOIDCProvider(clusterId string) string { return fmt.Sprintf("ack-rrsa-%s", clusterId) } -// GetArn get rrsa config for fuse contianer's env setting +// GetArn get rrsa config for fuse container's env setting func 
GetArn(provider, accountId, roleName string) (oidcProviderArn, roleArn string) { if provider == "" || accountId == "" || roleName == "" { return diff --git a/pkg/mounter/ossfs.go b/pkg/mounter/ossfs.go index ef19686f2..b9a6f45e3 100644 --- a/pkg/mounter/ossfs.go +++ b/pkg/mounter/ossfs.go @@ -375,7 +375,7 @@ func GetOssfsAttachPath(volumeId string) string { return filepath.Join(OssfsAttachDir, hex.EncodeToString(volSha[:]), "globalmount") } -// keep consitent with RAM response +// keep consistent with RAM response var secretRefKeysToParse []string = []string{ "AccessKeyId", "AccessKeySecret", diff --git a/pkg/nas/cloud/nas_client_v2_test.go b/pkg/nas/cloud/nas_client_v2_test.go index bf50db208..84c3bf094 100644 --- a/pkg/nas/cloud/nas_client_v2_test.go +++ b/pkg/nas/cloud/nas_client_v2_test.go @@ -247,7 +247,7 @@ func TestCreateAccessPointError(t *testing.T) { }, &tea.SDKError{ Code: tea.String("OperationDenied.AccessPointCountsExceeded\n"), StatusCode: tea.Int(403), - Message: tea.String("The maxium number of access point has reached its limits.\t"), + Message: tea.String("The maximum number of access point has reached its limits.\t"), }) }) _, err := client.CreateAccesspoint(&nas.CreateAccessPointRequest{}) diff --git a/pkg/nas/nodeserver.go b/pkg/nas/nodeserver.go index 71743651a..b92f77bb3 100644 --- a/pkg/nas/nodeserver.go +++ b/pkg/nas/nodeserver.go @@ -520,8 +520,8 @@ func (ns *nodeServer) isLosetupUsed(lockFile string, opt *Options, volumeID stri if !utils.IsFileExisting(lockFile) { return false } - fileCotent := utils.GetFileContent(lockFile) - contentParts := strings.Split(fileCotent, ":") + fileContent := utils.GetFileContent(lockFile) + contentParts := strings.Split(fileContent, ":") if len(contentParts) != 2 || contentParts[0] == "" || contentParts[1] == "" { return true } diff --git a/pkg/oss/nodeserver.go b/pkg/oss/nodeserver.go index fa629f1e9..b70a2a6de 100644 --- a/pkg/oss/nodeserver.go +++ b/pkg/oss/nodeserver.go @@ -61,8 +61,8 @@ const ( 
metricsPathPrefix = "/host/var/run/ossfs/" // defaultMetricsTop defaultMetricsTop = "10" - // fuseServieAccountName - fuseServieAccountName = "csi-fuse-ossfs" + // fuseServiceAccountName + fuseServiceAccountName = "csi-fuse-ossfs" ) const ( diff --git a/pkg/oss/utils.go b/pkg/oss/utils.go index 4747e22c1..90df78f18 100644 --- a/pkg/oss/utils.go +++ b/pkg/oss/utils.go @@ -45,7 +45,7 @@ const ( SigV4 SigVersion = "v4" ) -// VolumeAs determines the mounting tagret path in OSS +// VolumeAs determines the mounting target path in OSS type VolumeAsType string const ( @@ -261,7 +261,7 @@ func checkRRSAParams(opt *Options) error { // getRRSAConfig get oidcProviderArn and roleArn func getRRSAConfig(opt *Options, m metadata.MetadataProvider) (rrsaCfg *mounter.RrsaConfig, err error) { - saName := fuseServieAccountName + saName := fuseServiceAccountName if opt.ServiceAccountName != "" { saName = opt.ServiceAccountName } diff --git a/pkg/oss/utils_test.go b/pkg/oss/utils_test.go index 3acb57dfa..d627909d8 100644 --- a/pkg/oss/utils_test.go +++ b/pkg/oss/utils_test.go @@ -254,17 +254,17 @@ func Test_getRRSAConfig(t *testing.T) { { "rolename", Options{RoleName: "test-role-name"}, - &mounter.RrsaConfig{OidcProviderArn: "acs:ram::112233445566:oidc-provider/ack-rrsa-c12345678", RoleArn: "acs:ram::112233445566:role/test-role-name", ServiceAccountName: fuseServieAccountName}, + &mounter.RrsaConfig{OidcProviderArn: "acs:ram::112233445566:oidc-provider/ack-rrsa-c12345678", RoleArn: "acs:ram::112233445566:role/test-role-name", ServiceAccountName: fuseServiceAccountName}, }, { "specified-arns", Options{RoleArn: "test-role-arn", OidcProviderArn: "test-oidc-provider-arn"}, - &mounter.RrsaConfig{OidcProviderArn: "test-oidc-provider-arn", RoleArn: "test-role-arn", ServiceAccountName: fuseServieAccountName}, + &mounter.RrsaConfig{OidcProviderArn: "test-oidc-provider-arn", RoleArn: "test-role-arn", ServiceAccountName: fuseServiceAccountName}, }, { "arns-first", Options{RoleName: 
"test-role-name", RoleArn: "test-role-arn", OidcProviderArn: "test-oidc-provider-arn"}, - &mounter.RrsaConfig{OidcProviderArn: "test-oidc-provider-arn", RoleArn: "test-role-arn", ServiceAccountName: fuseServieAccountName}, + &mounter.RrsaConfig{OidcProviderArn: "test-oidc-provider-arn", RoleArn: "test-role-arn", ServiceAccountName: fuseServiceAccountName}, }, { "serviceaccount", diff --git a/pkg/pov/cloud.go b/pkg/pov/cloud.go index a708bb43b..3b430a22f 100644 --- a/pkg/pov/cloud.go +++ b/pkg/pov/cloud.go @@ -25,7 +25,7 @@ type PovOptions struct { type Cloud interface { CreateVolume(ctx context.Context, volumeName string, diskOptions *PovOptions) (fsId, requestID string, err error) - DeleteVolume(ctx context.Context, volumeName string) (reuqestID string, err error) + DeleteVolume(ctx context.Context, volumeName string) (requestID string, err error) CreateVolumeMountPoint(ctx context.Context, filesystemID string) (mpId string, err error) AttachVscMountPoint(ctx context.Context, mpId, fsId, instanceID string) (requestID string, err error) DescribeVscMountPoints(ctx context.Context, fsId, mpId string) (dvmpr *dfs.DescribeVscMountPointsResponse, err error) @@ -89,7 +89,7 @@ func (c *cloud) CreateVolume(ctx context.Context, volumeName string, diskOptions return resp.FileSystemId, resp.RequestId, nil } -func (c *cloud) DeleteVolume(ctx context.Context, filesystemID string) (reqeustID string, err error) { +func (c *cloud) DeleteVolume(ctx context.Context, filesystemID string) (requestID string, err error) { cdfsr := dfs.CreateDeleteFileSystemRequest() cdfsr.FileSystemId = filesystemID diff --git a/pkg/pov/controller.go b/pkg/pov/controller.go index 868ecede6..2213b2b47 100644 --- a/pkg/pov/controller.go +++ b/pkg/pov/controller.go @@ -33,7 +33,7 @@ const ( FILESYSTEMID = "filesystemid" TopologyKey = "topology.kubernetes.io/region" - // volumeContext starting with labelAppendPrefix will automatically added to pv lables + // volumeContext starting with labelAppendPrefix 
will automatically be added to pv labels
NasFilesystemCreateErrors = map[string]*errorInfo{} -// NasFilesystemDeleteErrors are errors throwed by nas filesystem delete +// NasFilesystemDeleteErrors are errors thrown by nas filesystem delete var NasFilesystemDeleteErrors = map[string]*errorInfo{} -// NasMountTargetCreateErrors are errors throwed by nas mount target create +// NasMountTargetCreateErrors are errors thrown by nas mount target create var NasMountTargetCreateErrors = map[string]*errorInfo{} -// NasMountTargetDeleteErrors are errors throwed by nas mount target delete +// NasMountTargetDeleteErrors are errors thrown by nas mount target delete var NasMountTargetDeleteErrors = map[string]*errorInfo{} // FindSuggestionByErrorMessage get new error message by error type & error message diff --git a/pkg/utils/error_types_test.go b/pkg/utils/error_types_test.go index 25da690c7..6d6f78347 100644 --- a/pkg/utils/error_types_test.go +++ b/pkg/utils/error_types_test.go @@ -24,11 +24,11 @@ func TestFindSuggestionByErrorMessage(t *testing.T) { attachOrgMsg3: attachOrgMsg3 + "\n" + "faq: https://help.aliyun.com/document_detail/286495.htm#section-ihn-gds-9mm", } - for origMsg, wrapperedMsg := range errMsgProvisionSample { - assert.Equal(t, wrapperedMsg, FindSuggestionByErrorMessage(origMsg, DiskProvision)) + for origMsg, wrappedMsg := range errMsgProvisionSample { + assert.Equal(t, wrappedMsg, FindSuggestionByErrorMessage(origMsg, DiskProvision)) } - for origMsg, wrapperedMsg := range errMsgAttachSample { - assert.Equal(t, wrapperedMsg, FindSuggestionByErrorMessage(origMsg, DiskAttachDetach)) + for origMsg, wrappedMsg := range errMsgAttachSample { + assert.Equal(t, wrappedMsg, FindSuggestionByErrorMessage(origMsg, DiskAttachDetach)) } } diff --git a/pkg/utils/http/header.go b/pkg/utils/http/header.go index 5c8bcb147..0b215488f 100644 --- a/pkg/utils/http/header.go +++ b/pkg/utils/http/header.go @@ -25,7 +25,7 @@ func mustParseHeaderTo(envName string, header http.Header) { if hstr := os.Getenv(envName); hstr != 
"" { h, err := ParseHeaderConfig(hstr) if err != nil { - klog.Fatalf("Invaild %s: %v", envName, err) + klog.Fatalf("Invalid %s: %v", envName, err) } maps.Copy(header, h) } diff --git a/pkg/utils/util.go b/pkg/utils/util.go index 7d41e3fc4..91d1600c5 100644 --- a/pkg/utils/util.go +++ b/pkg/utils/util.go @@ -641,8 +641,8 @@ func LoadJSONData(dataFileName string) (map[string]string, error) { return data, nil } -// IsPathAvailiable -func IsPathAvailiable(path string) error { +// IsPathAvailable +func IsPathAvailable(path string) error { f, err := os.Open(path) if err != nil { return fmt.Errorf("Open Path (%s) with error: %v ", path, err) @@ -788,7 +788,7 @@ func FormatNewDisk(readOnly bool, source, fstype, target string, mkfsOptions, mo _, err := diskMounter.Exec.Command("mkfs."+fstype, args...).CombinedOutput() if err == nil { // the disk has been formatted successfully try to mount it again. - klog.Infof("Disk format successed, pvName: %s elapsedTime: %+v ms", pvName, time.Now().Sub(startT).Milliseconds()) + klog.Infof("Disk format succeeded, pvName: %s elapsedTime: %+v ms", pvName, time.Now().Sub(startT).Milliseconds()) return diskMounter.Interface.Mount(source, target, fstype, mountOptions) } klog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q) error:(%v)", source, fstype, target, args, err)