Skip to content

Commit

Permalink
feat: azure-pipelines allows to configure demandsToIgnore to ignore/accept demands but not require them for scaling (kedacore#5579)
Browse files Browse the repository at this point in the history

Signed-off-by: jan-mrm <[email protected]>
  • Loading branch information
jan-mrm committed May 4, 2024
1 parent a168022 commit c4482a8
Show file tree
Hide file tree
Showing 3 changed files with 81 additions and 1 deletion.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ Here is an overview of all new **experimental** features:
### Improvements

- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX))
- **Azure Pipelines Scaler**: New configuration parameter `demandsToIgnore` to ignore certain demands especially ones added automatically by pipeline tasks, useful when using `requireAllDemands: true` ([#5579](https://github.com/kedacore/keda/issues/5579))

### Fixes

Expand Down
27 changes: 26 additions & 1 deletion pkg/scalers/azure_pipelines_scaler.go
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,7 @@ type azurePipelinesMetadata struct {
authContext authContext
parent string
demands string
demandsToIgnore string
poolID int
targetPipelinesQueueLength int64
activationTargetPipelinesQueueLength int64
Expand Down Expand Up @@ -267,6 +268,12 @@ func parseAzurePipelinesMetadata(ctx context.Context, logger logr.Logger, config
meta.demands = ""
}

if val, ok := config.TriggerMetadata["demandsToIgnore"]; ok && val != "" {
meta.demandsToIgnore = config.TriggerMetadata["demandsToIgnore"]
} else {
meta.demandsToIgnore = ""
}

meta.jobsToFetch = 250
if val, ok := config.TriggerMetadata["jobsToFetch"]; ok && val != "" {
jobsToFetch, err := strconv.ParseInt(val, 10, 64)
Expand Down Expand Up @@ -480,10 +487,28 @@ func stripAgentVFromArray(array []string) []string {
return result
}

// stripValuesFromArray returns the elements of array that do not appear in
// valuesToStrip. When valuesToStrip is empty the input slice is returned
// as-is (no copy); when every element is stripped the result is nil.
func stripValuesFromArray(array []string, valuesToStrip []string) []string {
	if len(valuesToStrip) == 0 {
		return array
	}
	var kept []string
	for _, candidate := range array {
		stripped := false
		for _, unwanted := range valuesToStrip {
			if candidate == unwanted {
				stripped = true
				break
			}
		}
		if !stripped {
			kept = append(kept, candidate)
		}
	}
	return kept
}

// Determine if the scaledjob has the right demands to spin up
func getCanAgentDemandFulfilJob(jr JobRequest, metadata *azurePipelinesMetadata) bool {
countDemands := 0
demandsInJob := stripAgentVFromArray(jr.Demands)
demandsToIgnore := strings.Split(metadata.demandsToIgnore, ",")
demandsInJob := stripValuesFromArray(stripAgentVFromArray(jr.Demands), demandsToIgnore)
demandsInScaler := stripAgentVFromArray(strings.Split(metadata.demands, ","))

for _, demandInJob := range demandsInJob {
Expand Down
54 changes: 54 additions & 0 deletions pkg/scalers/azure_pipelines_scaler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -337,6 +337,60 @@ func TestAzurePipelinesNotMatchedPartialRequiredTriggerDemands(t *testing.T) {
}
}

// TestAzurePipelinesMatchedDemandAgentWithRequireAllDemandsAndOneIgnoredDemand verifies that a
// queued job still matches when requireAllDemands is set and a single demand ("java") is excluded
// from the comparison via demandsToIgnore.
func TestAzurePipelinesMatchedDemandAgentWithRequireAllDemandsAndOneIgnoredDemand(t *testing.T) {
	var apiStub = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write(buildLoadJSON())
	}))
	// Close the stub server so its listener and goroutines are released when the test ends.
	defer apiStub.Close()

	meta := getDemandJobMetaData(apiStub.URL)
	meta.requireAllDemands = true
	meta.demands = "dotnet60"
	meta.demandsToIgnore = "java"

	mockAzurePipelinesScaler := azurePipelinesScaler{
		metadata:   meta,
		httpClient: http.DefaultClient,
	}

	queuelen, err := mockAzurePipelinesScaler.GetAzurePipelinesQueueLength(context.TODO())
	// Fatalf (not Fail) stops here: queuelen is meaningless when err != nil.
	if err != nil {
		t.Fatalf("GetAzurePipelinesQueueLength returned an unexpected error: %v", err)
	}

	if queuelen < 1 {
		t.Fatalf("expected queue length >= 1 with one ignored demand, got %d", queuelen)
	}
}

// TestAzurePipelinesMatchedDemandAgentWithRequireAllDemandsAndTwoIgnoredDemand verifies that a
// queued job still matches when requireAllDemands is set and multiple comma-separated demands
// ("someOtherDemand,java") are excluded from the comparison via demandsToIgnore.
func TestAzurePipelinesMatchedDemandAgentWithRequireAllDemandsAndTwoIgnoredDemand(t *testing.T) {
	var apiStub = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write(buildLoadJSON())
	}))
	// Close the stub server so its listener and goroutines are released when the test ends.
	defer apiStub.Close()

	meta := getDemandJobMetaData(apiStub.URL)
	meta.requireAllDemands = true
	meta.demands = "dotnet60"
	meta.demandsToIgnore = "someOtherDemand,java"

	mockAzurePipelinesScaler := azurePipelinesScaler{
		metadata:   meta,
		httpClient: http.DefaultClient,
	}

	queuelen, err := mockAzurePipelinesScaler.GetAzurePipelinesQueueLength(context.TODO())
	// Fatalf (not Fail) stops here: queuelen is meaningless when err != nil.
	if err != nil {
		t.Fatalf("GetAzurePipelinesQueueLength returned an unexpected error: %v", err)
	}

	if queuelen < 1 {
		t.Fatalf("expected queue length >= 1 with two ignored demands, got %d", queuelen)
	}
}

func buildLoadJSON() []byte {
output := testJobRequestResponse[0 : len(testJobRequestResponse)-2]
for i := 1; i < loadCount; i++ {
Expand Down

0 comments on commit c4482a8

Please sign in to comment.