From 9352123040a933f5f9879f4dcdef44294bb31171 Mon Sep 17 00:00:00 2001
From: Tom Nabarro
Date: Wed, 22 Jan 2025 23:32:31 +0000
Subject: [PATCH 1/8] DAOS-6611 control: Enable dmg pool exclude,drain,reint on multiple ranks

Signed-off-by: Tom Nabarro
---
 src/common/tests_dmg_helpers.c | 5 +-
 src/control/cmd/dmg/json_test.go | 4 +-
 src/control/cmd/dmg/pool.go | 66 +-
 src/control/cmd/dmg/pool_test.go | 37 +-
 src/control/cmd/dmg/pretty/system.go | 8 +-
 src/control/cmd/dmg/pretty/system_test.go | 40 +-
 src/control/cmd/dmg/system.go | 9 +-
 src/control/common/proto/mgmt/mgmt.pb.go | 130 +--
 src/control/common/proto/mgmt/mgmt_grpc.pb.go | 21 +-
 src/control/common/proto/mgmt/pool.pb.go | 892 +++++++++---------
 src/control/common/proto/mgmt/system.pb.go | 277 +++---
 src/control/lib/control/pool.go | 164 +++-
 src/control/lib/control/pool_test.go | 55 +-
 src/control/lib/control/system.go | 15 +-
 src/control/lib/control/system_test.go | 30 +-
 src/control/server/faults.go | 5 +-
 src/control/server/mgmt_pool.go | 38 +-
 src/control/server/mgmt_pool_test.go | 87 +-
 src/control/server/mgmt_system.go | 142 ++-
 src/control/server/mgmt_system_test.go | 327 ++++---
 src/mgmt/pool.pb-c.c | 814 ++++++----------
 src/mgmt/pool.pb-c.h | 158 ++--
 src/mgmt/srv_drpc.c | 54 +-
 src/mgmt/tests/srv_drpc_tests.c | 6 +-
 src/proto/mgmt/mgmt.proto | 4 +-
 src/proto/mgmt/pool.proto | 37 +-
 src/proto/mgmt/system.proto | 10 +-
 src/tests/ftest/deployment/network_failure.py | 3 +-
 .../ftest/deployment/server_rank_failure.py | 3 +-
 src/tests/ftest/deployment/target_failure.py | 9 +-
 src/tests/ftest/util/dmg_utils.py | 25 +-
 src/tests/ftest/util/dmg_utils_base.py | 7 +-
 32 files changed, 1738 insertions(+), 1744 deletions(-)

diff --git a/src/common/tests_dmg_helpers.c b/src/common/tests_dmg_helpers.c
index 0cf9a50aef3..4aed95df659 100644
--- a/src/common/tests_dmg_helpers.c
+++ b/src/common/tests_dmg_helpers.c
@@ -1,5 +1,6 @@
 /**
  * (C) Copyright 2020-2024 Intel Corporation.
+ * (C) Copyright 2025 Hewlett Packard Enterprise Development LP
  *
  * SPDX-License-Identifier: BSD-2-Clause-Patent
  */
@@ -1063,7 +1064,9 @@ dmg_pool_target(const char *cmd, const char *dmg_config_file, const uuid_t uuid,
 		D_GOTO(out, rc = -DER_NOMEM);
 	}

-	args = cmd_push_arg(args, &argcount, "--rank=%d ", rank);
+	// Exclude, drain and reintegrate take a --ranks option whose value can be either a
+	// rank-list range or a single rank identifier.
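+	// For example, the resulting invocation resembles (rank and target values are
+	// illustrative only):
+	//   dmg pool exclude <pool UUID/label> --ranks=0-2 --target-idx=1,2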
+	args = cmd_push_arg(args, &argcount, "--ranks=%d ", rank);
 	if (args == NULL)
 		D_GOTO(out, rc = -DER_NOMEM);

diff --git a/src/control/cmd/dmg/json_test.go b/src/control/cmd/dmg/json_test.go
index 57576ed6966..81d47c8716d 100644
--- a/src/control/cmd/dmg/json_test.go
+++ b/src/control/cmd/dmg/json_test.go
@@ -96,10 +96,8 @@ func TestDmg_JsonOutput(t *testing.T) {
 			testArgs = append(testArgs, test.MockUUID(), "label:foo")
 		case "pool get-prop":
 			testArgs = append(testArgs, test.MockUUID(), "label")
-		case "pool extend":
+		case "pool extend", "pool exclude", "pool drain", "pool reintegrate":
 			testArgs = append(testArgs, test.MockUUID(), "--ranks", "0")
-		case "pool exclude", "pool drain", "pool reintegrate":
-			testArgs = append(testArgs, test.MockUUID(), "--rank", "0")
 		case "pool query-targets":
 			testArgs = append(testArgs, test.MockUUID(), "--rank", "0", "--target-idx", "1,3,5,7")
 		case "container set-owner":
diff --git a/src/control/cmd/dmg/pool.go b/src/control/cmd/dmg/pool.go
index 4a19b2c3479..e6eec0ea1a4 100644
--- a/src/control/cmd/dmg/pool.go
+++ b/src/control/cmd/dmg/pool.go
@@ -35,10 +35,10 @@ type PoolCmd struct {
 	Destroy poolDestroyCmd `command:"destroy" description:"Destroy a DAOS pool"`
 	Evict poolEvictCmd `command:"evict" description:"Evict all pool connections to a DAOS pool"`
 	List poolListCmd `command:"list" alias:"ls" description:"List DAOS pools"`
-	Extend poolExtendCmd `command:"extend" description:"Extend a DAOS pool to include new ranks."`
-	Exclude poolExcludeCmd `command:"exclude" description:"Exclude targets from a rank"`
-	Drain poolDrainCmd `command:"drain" description:"Drain targets from a rank"`
-	Reintegrate poolReintegrateCmd `command:"reintegrate" alias:"reint" description:"Reintegrate targets for a rank"`
+	Extend poolExtendCmd `command:"extend" description:"Extend a DAOS pool to include new ranks"`
+	Exclude poolExcludeCmd `command:"exclude" description:"Exclude targets from a set of ranks"`
+	Drain poolDrainCmd `command:"drain" description:"Drain targets from a set of ranks"`
+	Reintegrate poolReintegrateCmd `command:"reintegrate" alias:"reint" description:"Reintegrate targets for a set of ranks"`
 	Query poolQueryCmd `command:"query" description:"Query a DAOS pool"`
 	QueryTargets poolQueryTargetsCmd `command:"query-targets" description:"Query pool target info"`
 	GetACL poolGetACLCmd `command:"get-acl" description:"Get a DAOS pool's Access Control List"`
@@ -531,11 +531,17 @@ func (cmd *poolEvictCmd) Execute(args []string) error {
 	return err
 }

+// poolRanksCmd is an embedded command type enabling an operation to be applied to multiple
+// ranks of a single pool.
+type poolRanksCmd struct {
+	poolCmd
+	RankList ui.RankSetFlag `long:"ranks" required:"1" description:"Comma-separated list of rank-range strings to operate on for a single pool"`
+}
+
 // poolExcludeCmd is the struct representing the command to exclude a DAOS target.
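+// A minimal sketch of the rank-set expansion the embedded poolRanksCmd flag provides
+// (assumes ui.RankSetFlag implements the go-flags Unmarshaler, as used elsewhere in dmg):
+//
+//	var f ui.RankSetFlag
+//	_ = f.UnmarshalFlag("0-2,5") // parse a rank-range string; error handling elided
+//	fmt.Println(f.Ranks())       // expected: [0 1 2 5]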
type poolExcludeCmd struct { - poolCmd - Rank uint32 `long:"rank" required:"1" description:"Engine rank of the targets to be excluded"` - TargetIdx string `long:"target-idx" description:"Comma-separated list of target idx(s) to be excluded from the rank"` + poolRanksCmd + TargetIdx string `long:"target-idx" description:"Comma-separated list of target idx(s) to be excluded from each rank"` } // Execute is run when PoolExcludeCmd subcommand is activated @@ -547,7 +553,11 @@ func (cmd *poolExcludeCmd) Execute(args []string) error { return errors.WithMessage(err, "parsing target list") } - req := &control.PoolExcludeReq{ID: cmd.PoolID().String(), Rank: ranklist.Rank(cmd.Rank), TargetIdx: idxList} + req := &control.PoolExcludeReq{ + ID: cmd.PoolID().String(), + Ranks: cmd.RankList.Ranks(), + TargetIdx: idxList, + } err := control.PoolExclude(cmd.MustLogCtx(), cmd.ctlInvoker, req) if err != nil { @@ -561,41 +571,46 @@ func (cmd *poolExcludeCmd) Execute(args []string) error { // poolDrainCmd is the struct representing the command to Drain a DAOS target. type poolDrainCmd struct { - poolCmd - Rank uint32 `long:"rank" required:"1" description:"Engine rank of the targets to be drained"` - TargetIdx string `long:"target-idx" description:"Comma-separated list of target idx(s) to be drained on the rank"` + poolRanksCmd + TargetIdx string `long:"target-idx" description:"Comma-separated list of target idx(s) to be drained on each rank"` } // Execute is run when PoolDrainCmd subcommand is activated func (cmd *poolDrainCmd) Execute(args []string) error { - msg := "succeeded" - var idxList []uint32 if err := common.ParseNumberList(cmd.TargetIdx, &idxList); err != nil { - err = errors.WithMessage(err, "parsing target list") - return err + return errors.WithMessage(err, "parsing target list") } req := &control.PoolDrainReq{ ID: cmd.PoolID().String(), - Rank: ranklist.Rank(cmd.Rank), + Ranks: cmd.RankList.Ranks(), TargetIdx: idxList, } - err := control.PoolDrain(cmd.MustLogCtx(), cmd.ctlInvoker, req) + resp, err := control.PoolDrain(cmd.MustLogCtx(), cmd.ctlInvoker, req) + + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(resp, err) + } + + // Retrieve PoolRanksResults so we can pretty print output. + results, err := resp.GetResults(err) if err != nil { - msg = errors.WithMessage(err, "failed").Error() + cmd.Errorf(errors.WithMessage(err, "Pool drain failed").Error()) + return err } - cmd.Infof("Drain command %s\n", msg) + var out strings.Builder + pretty.PrintPoolRankResults(&out, "drain", results) + cmd.Info(out.String()) - return err + return resp.Errors() } // poolExtendCmd is the struct representing the command to Extend a DAOS pool. type poolExtendCmd struct { - poolCmd - RankList ui.RankSetFlag `long:"ranks" required:"1" description:"Comma-separated list of ranks to add to the pool"` + poolRanksCmd } // Execute is run when PoolExtendCmd subcommand is activated @@ -619,9 +634,8 @@ func (cmd *poolExtendCmd) Execute(args []string) error { // poolReintegrateCmd is the struct representing the command to Add a DAOS target. 
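+// poolReintegrateCmd below follows the same flow as poolDrainCmd.Execute above; a sketch
+// of the shared response-handling pattern, assuming reintegrate responses also provide
+// the GetResults and Errors helpers shown for drain:
+//
+//	resp, err := control.PoolReintegrate(cmd.MustLogCtx(), cmd.ctlInvoker, req)
+//	if cmd.JSONOutputEnabled() {
+//		return cmd.OutputJSON(resp, err)
+//	}
+//	results, err := resp.GetResults(err)
+//	if err != nil {
+//		return err
+//	}
+//	var out strings.Builder
+//	pretty.PrintPoolRankResults(&out, "reintegrate", results)
+//	cmd.Info(out.String())
+//	return resp.Errors()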
type poolReintegrateCmd struct { - poolCmd - Rank uint32 `long:"rank" required:"1" description:"Engine rank of the targets to be reintegrated"` - TargetIdx string `long:"target-idx" description:"Comma-separated list of target idx(s) to be reintegrated into the rank"` + poolRanksCmd + TargetIdx string `long:"target-idx" description:"Comma-separated list of target idx(s) to be reintegrated into each rank"` } // Execute is run when poolReintegrateCmd subcommand is activated @@ -636,7 +650,7 @@ func (cmd *poolReintegrateCmd) Execute(args []string) error { req := &control.PoolReintegrateReq{ ID: cmd.PoolID().String(), - Rank: ranklist.Rank(cmd.Rank), + Ranks: cmd.RankList.Ranks(), TargetIdx: idxList, } diff --git a/src/control/cmd/dmg/pool_test.go b/src/control/cmd/dmg/pool_test.go index 4681abd9f17..e1cca25926e 100644 --- a/src/control/cmd/dmg/pool_test.go +++ b/src/control/cmd/dmg/pool_test.go @@ -1,5 +1,6 @@ // // (C) Copyright 2019-2024 Intel Corporation. +// (C) Copyright 2025 Hewlett Packard Enterprise Development LP // // SPDX-License-Identifier: BSD-2-Clause-Patent // @@ -615,11 +616,11 @@ func TestPoolCommands(t *testing.T) { }, { "Exclude a target with single target idx", - "pool exclude 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --rank 0 --target-idx 1", + "pool exclude 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --ranks 0-2 --target-idx 1", strings.Join([]string{ printRequest(t, &control.PoolExcludeReq{ ID: "031bcaf8-f0f5-42ef-b3c5-ee048676dceb", - Rank: 0, + Ranks: []ranklist.Rank{0, 1, 2}, TargetIdx: []uint32{1}, }), }, " "), @@ -627,11 +628,11 @@ func TestPoolCommands(t *testing.T) { }, { "Exclude a target with multiple idx", - "pool exclude 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --rank 0 --target-idx 1,2,3", + "pool exclude 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --ranks 0-2 --target-idx 1,2,3", strings.Join([]string{ printRequest(t, &control.PoolExcludeReq{ ID: "031bcaf8-f0f5-42ef-b3c5-ee048676dceb", - Rank: 0, + Ranks: []ranklist.Rank{0, 1, 2}, TargetIdx: []uint32{1, 2, 3}, }), }, " "), @@ -639,11 +640,11 @@ func TestPoolCommands(t *testing.T) { }, { "Exclude a target with no idx given", - "pool exclude 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --rank 0", + "pool exclude 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --ranks 0-2", strings.Join([]string{ printRequest(t, &control.PoolExcludeReq{ ID: "031bcaf8-f0f5-42ef-b3c5-ee048676dceb", - Rank: 0, + Ranks: []ranklist.Rank{0, 1, 2}, TargetIdx: []uint32{}, }), }, " "), @@ -651,11 +652,11 @@ func TestPoolCommands(t *testing.T) { }, { "Drain a target with single target idx", - "pool drain 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --rank 0 --target-idx 1", + "pool drain 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --ranks 0-2 --target-idx 1", strings.Join([]string{ printRequest(t, &control.PoolDrainReq{ ID: "031bcaf8-f0f5-42ef-b3c5-ee048676dceb", - Rank: 0, + Ranks: []ranklist.Rank{0, 1, 2}, TargetIdx: []uint32{1}, }), }, " "), @@ -663,11 +664,11 @@ func TestPoolCommands(t *testing.T) { }, { "Drain a target with multiple idx", - "pool drain 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --rank 0 --target-idx 1,2,3", + "pool drain 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --ranks 0-2 --target-idx 1,2,3", strings.Join([]string{ printRequest(t, &control.PoolDrainReq{ ID: "031bcaf8-f0f5-42ef-b3c5-ee048676dceb", - Rank: 0, + Ranks: []ranklist.Rank{0, 1, 2}, TargetIdx: []uint32{1, 2, 3}, }), }, " "), @@ -675,11 +676,11 @@ func TestPoolCommands(t *testing.T) { }, { "Drain a target with no idx given", - "pool drain 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --rank 0", + "pool drain 
031bcaf8-f0f5-42ef-b3c5-ee048676dceb --ranks 0-2", strings.Join([]string{ printRequest(t, &control.PoolDrainReq{ ID: "031bcaf8-f0f5-42ef-b3c5-ee048676dceb", - Rank: 0, + Ranks: []ranklist.Rank{0, 1, 2}, TargetIdx: []uint32{}, }), }, " "), @@ -716,11 +717,11 @@ func TestPoolCommands(t *testing.T) { }, { "Reintegrate a target with single target idx", - "pool reintegrate 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --rank 0 --target-idx 1", + "pool reintegrate 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --ranks 0-1 --target-idx 1", strings.Join([]string{ printRequest(t, &control.PoolReintegrateReq{ ID: "031bcaf8-f0f5-42ef-b3c5-ee048676dceb", - Rank: 0, + Ranks: []ranklist.Rank{0, 1}, TargetIdx: []uint32{1}, }), }, " "), @@ -728,11 +729,11 @@ func TestPoolCommands(t *testing.T) { }, { "Reintegrate a target with multiple idx", - "pool reintegrate 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --rank 0 --target-idx 1,2,3", + "pool reintegrate 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --ranks 0-1 --target-idx 1,2,3", strings.Join([]string{ printRequest(t, &control.PoolReintegrateReq{ ID: "031bcaf8-f0f5-42ef-b3c5-ee048676dceb", - Rank: 0, + Ranks: []ranklist.Rank{0, 1}, TargetIdx: []uint32{1, 2, 3}, }), }, " "), @@ -740,11 +741,11 @@ func TestPoolCommands(t *testing.T) { }, { "Reintegrate a target with no idx given", - "pool reintegrate 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --rank 0", + "pool reintegrate 031bcaf8-f0f5-42ef-b3c5-ee048676dceb --ranks 0-1", strings.Join([]string{ printRequest(t, &control.PoolReintegrateReq{ ID: "031bcaf8-f0f5-42ef-b3c5-ee048676dceb", - Rank: 0, + Ranks: []ranklist.Rank{0, 1}, TargetIdx: []uint32{}, }), }, " "), diff --git a/src/control/cmd/dmg/pretty/system.go b/src/control/cmd/dmg/pretty/system.go index f5c082d7d0e..98e06d52494 100644 --- a/src/control/cmd/dmg/pretty/system.go +++ b/src/control/cmd/dmg/pretty/system.go @@ -225,7 +225,7 @@ func PrintSystemCleanupResponse(out io.Writer, resp *control.SystemCleanupResp, // PrintPoolRankResults generates a table showing results of operations on pool ranks. Each row will // indicate a result for a group of ranks on a pool. 
-func PrintPoolRankResults(out io.Writer, results []*control.PoolRankResult) {
+func PrintPoolRankResults(out io.Writer, opStr string, results []*control.PoolRanksResult) {
 	if len(results) == 0 {
 		fmt.Fprintln(out, "No pool ranks processed")
 		return
@@ -236,14 +236,14 @@

 	var table []txtfmt.TableRow
 	for _, r := range results {
-		result := "OK"
+		result := fmt.Sprintf("%s OK", opStr)
 		reason := "-"
 		if r.Status != 0 {
-			result = "FAIL"
+			result = fmt.Sprintf("%s FAIL", opStr)
 			reason = r.Msg
 		}
 		row := txtfmt.TableRow{
-			"Pool": r.PoolID,
+			"Pool": r.ID,
 			"Ranks": r.Ranks,
 			"Result": result,
 			"Reason": reason,
diff --git a/src/control/cmd/dmg/pretty/system_test.go b/src/control/cmd/dmg/pretty/system_test.go
index e7d84a4fc5d..f1c6bfb8ce9 100644
--- a/src/control/cmd/dmg/pretty/system_test.go
+++ b/src/control/cmd/dmg/pretty/system_test.go
@@ -615,37 +615,41 @@ Unknown 3 hosts: foo[7-9]

 func TestPretty_PrintPoolRankResults(t *testing.T) {
 	for name, tc := range map[string]struct {
-		results []*control.PoolRankResult
+		op      string
+		results []*control.PoolRanksResult
 		expOut  string
 	}{
 		"normal response": {
-			results: []*control.PoolRankResult{
+			op: "drain",
+			results: []*control.PoolRanksResult{
 				{PoolID: test.MockUUID(1), Ranks: "0-3"},
 				{PoolID: test.MockUUID(2), Ranks: "1-4"},
 			},
 			expOut: `
-Pool Ranks Result Reason
----- ----- ------ ------
-00000001-0001-0001-0001-000000000001 0-3 OK -
-00000002-0002-0002-0002-000000000002 1-4 OK -
+Pool Ranks Result Reason
+---- ----- ------ ------
+00000001-0001-0001-0001-000000000001 0-3 drain OK -
+00000002-0002-0002-0002-000000000002 1-4 drain OK -
`,
 		},
 		"normal response; use labels": {
-			results: []*control.PoolRankResult{
+			op: "drain",
+			results: []*control.PoolRanksResult{
 				{PoolID: "label1", Ranks: "0-3"},
 				{PoolID: "label2", Ranks: "1-4"},
 			},
 			expOut: `
-Pool Ranks Result Reason
----- ----- ------ ------
-label1 0-3 OK -
-label2 1-4 OK -
+Pool Ranks Result Reason
+---- ----- ------ ------
+label1 0-3 drain OK -
+label2 1-4 drain OK -
`,
 		},
 		"response with failures": {
-			results: []*control.PoolRankResult{
+			op: "reintegrate",
+			results: []*control.PoolRanksResult{
 				{PoolID: test.MockUUID(1), Ranks: "1-2"},
 				{PoolID: test.MockUUID(2), Ranks: "0"},
 				{
 					PoolID: test.MockUUID(2), Ranks: "1-2",
 					Status: -1, Msg: "fail1",
 				},
 			},
 			expOut: `
-Pool Ranks Result Reason
----- ----- ------ ------
-00000001-0001-0001-0001-000000000001 1-2 OK -
-00000002-0002-0002-0002-000000000002 0 OK -
-00000002-0002-0002-0002-000000000002 1-2 FAIL fail1
+Pool Ranks Result Reason
+---- ----- ------ ------
+00000001-0001-0001-0001-000000000001 1-2 reintegrate OK -
+00000002-0002-0002-0002-000000000002 0 reintegrate OK -
+00000002-0002-0002-0002-000000000002 1-2 reintegrate FAIL fail1
`,
 		},
 	} {
 		t.Run(name, func(t *testing.T) {
 			var out strings.Builder

-			PrintPoolRankResults(&out, tc.results)
+			PrintPoolRankResults(&out, tc.op, tc.results)

 			if diff := cmp.Diff(strings.TrimLeft(tc.expOut, "\n"), out.String()); diff != "" {
 				t.Fatalf("unexpected stdout (-want, +got):\n%s\n", diff)
diff --git a/src/control/cmd/dmg/system.go b/src/control/cmd/dmg/system.go
index 852244899c5..39b4e8fd14d 100644
--- a/src/control/cmd/dmg/system.go
+++ b/src/control/cmd/dmg/system.go
@@ -311,12 +311,12 @@ type systemDrainCmd struct {
 }

 func (cmd *systemDrainCmd) execute(reint bool) (errOut error) {
+	opStr := "drain"
+	if reint {
+		opStr = "reintegrate"
+	}
 	defer func() {
-		op := "drain"
-		if reint {
-			op = "reintegrate"
-		}
-		errOut = errors.Wrapf(errOut, "system %s failed", op)
+	
errOut = errors.Wrapf(errOut, "system %s failed", opStr) }() if err := cmd.validateHostsRanks(); err != nil { @@ -342,7 +343,7 @@ func (cmd *systemDrainCmd) execute(reint bool) (errOut error) { } var out strings.Builder - pretty.PrintPoolRankResults(&out, resp.Results) + pretty.PrintPoolRankResults(&out, opStr, resp.Results) cmd.Info(out.String()) return resp.Errors() diff --git a/src/control/common/proto/mgmt/mgmt.pb.go b/src/control/common/proto/mgmt/mgmt.pb.go index d9bf5c0fc63..8b6926d26db 100644 --- a/src/control/common/proto/mgmt/mgmt.pb.go +++ b/src/control/common/proto/mgmt/mgmt.pb.go @@ -1,5 +1,6 @@ // // (C) Copyright 2019-2024 Intel Corporation. +// (C) Copyright 2025 Hewlett Packard Enterprise Development LP // // SPDX-License-Identifier: BSD-2-Clause-Patent // @@ -70,7 +71,7 @@ var file_mgmt_mgmt_proto_rawDesc = []byte{ 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x09, 0x50, 0x6f, 0x6f, 0x6c, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x12, 0x12, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x13, 0x2e, 0x6d, 0x67, - 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x0a, 0x50, 0x6f, 0x6f, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x13, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, @@ -78,7 +79,7 @@ var file_mgmt_mgmt_proto_rawDesc = []byte{ 0x0f, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x12, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x13, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, - 0x52, 0x65, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x09, 0x50, + 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x09, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x12, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x1a, 0x13, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, @@ -271,33 +272,32 @@ var file_mgmt_mgmt_proto_goTypes = []interface{}{ (*PoolDestroyResp)(nil), // 47: mgmt.PoolDestroyResp (*PoolEvictResp)(nil), // 48: mgmt.PoolEvictResp (*PoolExcludeResp)(nil), // 49: mgmt.PoolExcludeResp - (*PoolDrainResp)(nil), // 50: mgmt.PoolDrainResp + (*PoolRanksResp)(nil), // 50: mgmt.PoolRanksResp (*PoolExtendResp)(nil), // 51: mgmt.PoolExtendResp - (*PoolReintResp)(nil), // 52: mgmt.PoolReintResp - (*PoolQueryResp)(nil), // 53: mgmt.PoolQueryResp - (*PoolQueryTargetResp)(nil), // 54: mgmt.PoolQueryTargetResp - (*PoolSetPropResp)(nil), // 55: mgmt.PoolSetPropResp - (*PoolGetPropResp)(nil), // 56: mgmt.PoolGetPropResp - (*ACLResp)(nil), // 57: mgmt.ACLResp - (*GetAttachInfoResp)(nil), // 58: mgmt.GetAttachInfoResp - (*ListPoolsResp)(nil), // 59: mgmt.ListPoolsResp - (*ListContResp)(nil), // 60: mgmt.ListContResp - (*DaosResp)(nil), // 61: mgmt.DaosResp - (*SystemQueryResp)(nil), // 62: mgmt.SystemQueryResp - (*SystemStopResp)(nil), // 63: mgmt.SystemStopResp - (*SystemStartResp)(nil), // 64: mgmt.SystemStartResp - (*SystemExcludeResp)(nil), // 65: 
mgmt.SystemExcludeResp - (*SystemDrainResp)(nil), // 66: mgmt.SystemDrainResp - (*SystemEraseResp)(nil), // 67: mgmt.SystemEraseResp - (*SystemCleanupResp)(nil), // 68: mgmt.SystemCleanupResp - (*CheckStartResp)(nil), // 69: mgmt.CheckStartResp - (*CheckStopResp)(nil), // 70: mgmt.CheckStopResp - (*CheckQueryResp)(nil), // 71: mgmt.CheckQueryResp - (*CheckGetPolicyResp)(nil), // 72: mgmt.CheckGetPolicyResp - (*CheckActResp)(nil), // 73: mgmt.CheckActResp - (*PoolUpgradeResp)(nil), // 74: mgmt.PoolUpgradeResp - (*SystemGetAttrResp)(nil), // 75: mgmt.SystemGetAttrResp - (*SystemGetPropResp)(nil), // 76: mgmt.SystemGetPropResp + (*PoolQueryResp)(nil), // 52: mgmt.PoolQueryResp + (*PoolQueryTargetResp)(nil), // 53: mgmt.PoolQueryTargetResp + (*PoolSetPropResp)(nil), // 54: mgmt.PoolSetPropResp + (*PoolGetPropResp)(nil), // 55: mgmt.PoolGetPropResp + (*ACLResp)(nil), // 56: mgmt.ACLResp + (*GetAttachInfoResp)(nil), // 57: mgmt.GetAttachInfoResp + (*ListPoolsResp)(nil), // 58: mgmt.ListPoolsResp + (*ListContResp)(nil), // 59: mgmt.ListContResp + (*DaosResp)(nil), // 60: mgmt.DaosResp + (*SystemQueryResp)(nil), // 61: mgmt.SystemQueryResp + (*SystemStopResp)(nil), // 62: mgmt.SystemStopResp + (*SystemStartResp)(nil), // 63: mgmt.SystemStartResp + (*SystemExcludeResp)(nil), // 64: mgmt.SystemExcludeResp + (*SystemDrainResp)(nil), // 65: mgmt.SystemDrainResp + (*SystemEraseResp)(nil), // 66: mgmt.SystemEraseResp + (*SystemCleanupResp)(nil), // 67: mgmt.SystemCleanupResp + (*CheckStartResp)(nil), // 68: mgmt.CheckStartResp + (*CheckStopResp)(nil), // 69: mgmt.CheckStopResp + (*CheckQueryResp)(nil), // 70: mgmt.CheckQueryResp + (*CheckGetPolicyResp)(nil), // 71: mgmt.CheckGetPolicyResp + (*CheckActResp)(nil), // 72: mgmt.CheckActResp + (*PoolUpgradeResp)(nil), // 73: mgmt.PoolUpgradeResp + (*SystemGetAttrResp)(nil), // 74: mgmt.SystemGetAttrResp + (*SystemGetPropResp)(nil), // 75: mgmt.SystemGetPropResp } var file_mgmt_mgmt_proto_depIdxs = []int32{ 0, // 0: mgmt.MgmtSvc.Join:input_type -> mgmt.JoinReq @@ -352,44 +352,44 @@ var file_mgmt_mgmt_proto_depIdxs = []int32{ 47, // 49: mgmt.MgmtSvc.PoolDestroy:output_type -> mgmt.PoolDestroyResp 48, // 50: mgmt.MgmtSvc.PoolEvict:output_type -> mgmt.PoolEvictResp 49, // 51: mgmt.MgmtSvc.PoolExclude:output_type -> mgmt.PoolExcludeResp - 50, // 52: mgmt.MgmtSvc.PoolDrain:output_type -> mgmt.PoolDrainResp + 50, // 52: mgmt.MgmtSvc.PoolDrain:output_type -> mgmt.PoolRanksResp 51, // 53: mgmt.MgmtSvc.PoolExtend:output_type -> mgmt.PoolExtendResp - 52, // 54: mgmt.MgmtSvc.PoolReintegrate:output_type -> mgmt.PoolReintResp - 53, // 55: mgmt.MgmtSvc.PoolQuery:output_type -> mgmt.PoolQueryResp - 54, // 56: mgmt.MgmtSvc.PoolQueryTarget:output_type -> mgmt.PoolQueryTargetResp - 55, // 57: mgmt.MgmtSvc.PoolSetProp:output_type -> mgmt.PoolSetPropResp - 56, // 58: mgmt.MgmtSvc.PoolGetProp:output_type -> mgmt.PoolGetPropResp - 57, // 59: mgmt.MgmtSvc.PoolGetACL:output_type -> mgmt.ACLResp - 57, // 60: mgmt.MgmtSvc.PoolOverwriteACL:output_type -> mgmt.ACLResp - 57, // 61: mgmt.MgmtSvc.PoolUpdateACL:output_type -> mgmt.ACLResp - 57, // 62: mgmt.MgmtSvc.PoolDeleteACL:output_type -> mgmt.ACLResp - 58, // 63: mgmt.MgmtSvc.GetAttachInfo:output_type -> mgmt.GetAttachInfoResp - 59, // 64: mgmt.MgmtSvc.ListPools:output_type -> mgmt.ListPoolsResp - 60, // 65: mgmt.MgmtSvc.ListContainers:output_type -> mgmt.ListContResp - 61, // 66: mgmt.MgmtSvc.ContSetOwner:output_type -> mgmt.DaosResp - 62, // 67: mgmt.MgmtSvc.SystemQuery:output_type -> mgmt.SystemQueryResp - 63, // 68: 
mgmt.MgmtSvc.SystemStop:output_type -> mgmt.SystemStopResp - 64, // 69: mgmt.MgmtSvc.SystemStart:output_type -> mgmt.SystemStartResp - 65, // 70: mgmt.MgmtSvc.SystemExclude:output_type -> mgmt.SystemExcludeResp - 66, // 71: mgmt.MgmtSvc.SystemDrain:output_type -> mgmt.SystemDrainResp - 67, // 72: mgmt.MgmtSvc.SystemErase:output_type -> mgmt.SystemEraseResp - 68, // 73: mgmt.MgmtSvc.SystemCleanup:output_type -> mgmt.SystemCleanupResp - 61, // 74: mgmt.MgmtSvc.SystemCheckEnable:output_type -> mgmt.DaosResp - 61, // 75: mgmt.MgmtSvc.SystemCheckDisable:output_type -> mgmt.DaosResp - 69, // 76: mgmt.MgmtSvc.SystemCheckStart:output_type -> mgmt.CheckStartResp - 70, // 77: mgmt.MgmtSvc.SystemCheckStop:output_type -> mgmt.CheckStopResp - 71, // 78: mgmt.MgmtSvc.SystemCheckQuery:output_type -> mgmt.CheckQueryResp - 61, // 79: mgmt.MgmtSvc.SystemCheckSetPolicy:output_type -> mgmt.DaosResp - 72, // 80: mgmt.MgmtSvc.SystemCheckGetPolicy:output_type -> mgmt.CheckGetPolicyResp - 73, // 81: mgmt.MgmtSvc.SystemCheckRepair:output_type -> mgmt.CheckActResp - 74, // 82: mgmt.MgmtSvc.PoolUpgrade:output_type -> mgmt.PoolUpgradeResp - 61, // 83: mgmt.MgmtSvc.SystemSetAttr:output_type -> mgmt.DaosResp - 75, // 84: mgmt.MgmtSvc.SystemGetAttr:output_type -> mgmt.SystemGetAttrResp - 61, // 85: mgmt.MgmtSvc.SystemSetProp:output_type -> mgmt.DaosResp - 76, // 86: mgmt.MgmtSvc.SystemGetProp:output_type -> mgmt.SystemGetPropResp - 61, // 87: mgmt.MgmtSvc.FaultInjectReport:output_type -> mgmt.DaosResp - 61, // 88: mgmt.MgmtSvc.FaultInjectPoolFault:output_type -> mgmt.DaosResp - 61, // 89: mgmt.MgmtSvc.FaultInjectMgmtPoolFault:output_type -> mgmt.DaosResp + 50, // 54: mgmt.MgmtSvc.PoolReintegrate:output_type -> mgmt.PoolRanksResp + 52, // 55: mgmt.MgmtSvc.PoolQuery:output_type -> mgmt.PoolQueryResp + 53, // 56: mgmt.MgmtSvc.PoolQueryTarget:output_type -> mgmt.PoolQueryTargetResp + 54, // 57: mgmt.MgmtSvc.PoolSetProp:output_type -> mgmt.PoolSetPropResp + 55, // 58: mgmt.MgmtSvc.PoolGetProp:output_type -> mgmt.PoolGetPropResp + 56, // 59: mgmt.MgmtSvc.PoolGetACL:output_type -> mgmt.ACLResp + 56, // 60: mgmt.MgmtSvc.PoolOverwriteACL:output_type -> mgmt.ACLResp + 56, // 61: mgmt.MgmtSvc.PoolUpdateACL:output_type -> mgmt.ACLResp + 56, // 62: mgmt.MgmtSvc.PoolDeleteACL:output_type -> mgmt.ACLResp + 57, // 63: mgmt.MgmtSvc.GetAttachInfo:output_type -> mgmt.GetAttachInfoResp + 58, // 64: mgmt.MgmtSvc.ListPools:output_type -> mgmt.ListPoolsResp + 59, // 65: mgmt.MgmtSvc.ListContainers:output_type -> mgmt.ListContResp + 60, // 66: mgmt.MgmtSvc.ContSetOwner:output_type -> mgmt.DaosResp + 61, // 67: mgmt.MgmtSvc.SystemQuery:output_type -> mgmt.SystemQueryResp + 62, // 68: mgmt.MgmtSvc.SystemStop:output_type -> mgmt.SystemStopResp + 63, // 69: mgmt.MgmtSvc.SystemStart:output_type -> mgmt.SystemStartResp + 64, // 70: mgmt.MgmtSvc.SystemExclude:output_type -> mgmt.SystemExcludeResp + 65, // 71: mgmt.MgmtSvc.SystemDrain:output_type -> mgmt.SystemDrainResp + 66, // 72: mgmt.MgmtSvc.SystemErase:output_type -> mgmt.SystemEraseResp + 67, // 73: mgmt.MgmtSvc.SystemCleanup:output_type -> mgmt.SystemCleanupResp + 60, // 74: mgmt.MgmtSvc.SystemCheckEnable:output_type -> mgmt.DaosResp + 60, // 75: mgmt.MgmtSvc.SystemCheckDisable:output_type -> mgmt.DaosResp + 68, // 76: mgmt.MgmtSvc.SystemCheckStart:output_type -> mgmt.CheckStartResp + 69, // 77: mgmt.MgmtSvc.SystemCheckStop:output_type -> mgmt.CheckStopResp + 70, // 78: mgmt.MgmtSvc.SystemCheckQuery:output_type -> mgmt.CheckQueryResp + 60, // 79: mgmt.MgmtSvc.SystemCheckSetPolicy:output_type -> 
mgmt.DaosResp + 71, // 80: mgmt.MgmtSvc.SystemCheckGetPolicy:output_type -> mgmt.CheckGetPolicyResp + 72, // 81: mgmt.MgmtSvc.SystemCheckRepair:output_type -> mgmt.CheckActResp + 73, // 82: mgmt.MgmtSvc.PoolUpgrade:output_type -> mgmt.PoolUpgradeResp + 60, // 83: mgmt.MgmtSvc.SystemSetAttr:output_type -> mgmt.DaosResp + 74, // 84: mgmt.MgmtSvc.SystemGetAttr:output_type -> mgmt.SystemGetAttrResp + 60, // 85: mgmt.MgmtSvc.SystemSetProp:output_type -> mgmt.DaosResp + 75, // 86: mgmt.MgmtSvc.SystemGetProp:output_type -> mgmt.SystemGetPropResp + 60, // 87: mgmt.MgmtSvc.FaultInjectReport:output_type -> mgmt.DaosResp + 60, // 88: mgmt.MgmtSvc.FaultInjectPoolFault:output_type -> mgmt.DaosResp + 60, // 89: mgmt.MgmtSvc.FaultInjectMgmtPoolFault:output_type -> mgmt.DaosResp 45, // [45:90] is the sub-list for method output_type 0, // [0:45] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name diff --git a/src/control/common/proto/mgmt/mgmt_grpc.pb.go b/src/control/common/proto/mgmt/mgmt_grpc.pb.go index 8a4d7ebc6c5..5221eb8ff4a 100644 --- a/src/control/common/proto/mgmt/mgmt_grpc.pb.go +++ b/src/control/common/proto/mgmt/mgmt_grpc.pb.go @@ -1,5 +1,6 @@ // // (C) Copyright 2019-2024 Intel Corporation. +// (C) Copyright 2025 Hewlett Packard Enterprise Development LP // // SPDX-License-Identifier: BSD-2-Clause-Patent // @@ -94,11 +95,11 @@ type MgmtSvcClient interface { // Exclude a pool target. PoolExclude(ctx context.Context, in *PoolExcludeReq, opts ...grpc.CallOption) (*PoolExcludeResp, error) // Drain a pool target. - PoolDrain(ctx context.Context, in *PoolDrainReq, opts ...grpc.CallOption) (*PoolDrainResp, error) + PoolDrain(ctx context.Context, in *PoolDrainReq, opts ...grpc.CallOption) (*PoolRanksResp, error) // Extend a pool. PoolExtend(ctx context.Context, in *PoolExtendReq, opts ...grpc.CallOption) (*PoolExtendResp, error) // Reintegrate a pool target. - PoolReintegrate(ctx context.Context, in *PoolReintReq, opts ...grpc.CallOption) (*PoolReintResp, error) + PoolReintegrate(ctx context.Context, in *PoolReintReq, opts ...grpc.CallOption) (*PoolRanksResp, error) // PoolQuery queries a DAOS pool. PoolQuery(ctx context.Context, in *PoolQueryReq, opts ...grpc.CallOption) (*PoolQueryResp, error) // PoolQueryTarget queries a DAOS storage target. @@ -242,8 +243,8 @@ func (c *mgmtSvcClient) PoolExclude(ctx context.Context, in *PoolExcludeReq, opt return out, nil } -func (c *mgmtSvcClient) PoolDrain(ctx context.Context, in *PoolDrainReq, opts ...grpc.CallOption) (*PoolDrainResp, error) { - out := new(PoolDrainResp) +func (c *mgmtSvcClient) PoolDrain(ctx context.Context, in *PoolDrainReq, opts ...grpc.CallOption) (*PoolRanksResp, error) { + out := new(PoolRanksResp) err := c.cc.Invoke(ctx, MgmtSvc_PoolDrain_FullMethodName, in, out, opts...) if err != nil { return nil, err @@ -260,8 +261,8 @@ func (c *mgmtSvcClient) PoolExtend(ctx context.Context, in *PoolExtendReq, opts return out, nil } -func (c *mgmtSvcClient) PoolReintegrate(ctx context.Context, in *PoolReintReq, opts ...grpc.CallOption) (*PoolReintResp, error) { - out := new(PoolReintResp) +func (c *mgmtSvcClient) PoolReintegrate(ctx context.Context, in *PoolReintReq, opts ...grpc.CallOption) (*PoolRanksResp, error) { + out := new(PoolRanksResp) err := c.cc.Invoke(ctx, MgmtSvc_PoolReintegrate_FullMethodName, in, out, opts...) if err != nil { return nil, err @@ -604,11 +605,11 @@ type MgmtSvcServer interface { // Exclude a pool target. 
PoolExclude(context.Context, *PoolExcludeReq) (*PoolExcludeResp, error) // Drain a pool target. - PoolDrain(context.Context, *PoolDrainReq) (*PoolDrainResp, error) + PoolDrain(context.Context, *PoolDrainReq) (*PoolRanksResp, error) // Extend a pool. PoolExtend(context.Context, *PoolExtendReq) (*PoolExtendResp, error) // Reintegrate a pool target. - PoolReintegrate(context.Context, *PoolReintReq) (*PoolReintResp, error) + PoolReintegrate(context.Context, *PoolReintReq) (*PoolRanksResp, error) // PoolQuery queries a DAOS pool. PoolQuery(context.Context, *PoolQueryReq) (*PoolQueryResp, error) // PoolQueryTarget queries a DAOS storage target. @@ -707,13 +708,13 @@ func (UnimplementedMgmtSvcServer) PoolEvict(context.Context, *PoolEvictReq) (*Po func (UnimplementedMgmtSvcServer) PoolExclude(context.Context, *PoolExcludeReq) (*PoolExcludeResp, error) { return nil, status.Errorf(codes.Unimplemented, "method PoolExclude not implemented") } -func (UnimplementedMgmtSvcServer) PoolDrain(context.Context, *PoolDrainReq) (*PoolDrainResp, error) { +func (UnimplementedMgmtSvcServer) PoolDrain(context.Context, *PoolDrainReq) (*PoolRanksResp, error) { return nil, status.Errorf(codes.Unimplemented, "method PoolDrain not implemented") } func (UnimplementedMgmtSvcServer) PoolExtend(context.Context, *PoolExtendReq) (*PoolExtendResp, error) { return nil, status.Errorf(codes.Unimplemented, "method PoolExtend not implemented") } -func (UnimplementedMgmtSvcServer) PoolReintegrate(context.Context, *PoolReintReq) (*PoolReintResp, error) { +func (UnimplementedMgmtSvcServer) PoolReintegrate(context.Context, *PoolReintReq) (*PoolRanksResp, error) { return nil, status.Errorf(codes.Unimplemented, "method PoolReintegrate not implemented") } func (UnimplementedMgmtSvcServer) PoolQuery(context.Context, *PoolQueryReq) (*PoolQueryResp, error) { diff --git a/src/control/common/proto/mgmt/pool.pb.go b/src/control/common/proto/mgmt/pool.pb.go index c8be8fc1978..ac775140a6f 100644 --- a/src/control/common/proto/mgmt/pool.pb.go +++ b/src/control/common/proto/mgmt/pool.pb.go @@ -1,5 +1,6 @@ // // (C) Copyright 2019-2024 Intel Corporation. +// (C) Copyright 2025 Hewlett Packard Enterprise Development LP // // SPDX-License-Identifier: BSD-2-Clause-Patent // @@ -173,7 +174,7 @@ func (x PoolRebuildStatus_State) Number() protoreflect.EnumNumber { // Deprecated: Use PoolRebuildStatus_State.Descriptor instead. func (PoolRebuildStatus_State) EnumDescriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{20, 0} + return file_mgmt_pool_proto_rawDescGZIP(), []int{19, 0} } type PoolQueryTargetInfo_TargetType int32 @@ -228,7 +229,7 @@ func (x PoolQueryTargetInfo_TargetType) Number() protoreflect.EnumNumber { // Deprecated: Use PoolQueryTargetInfo_TargetType.Descriptor instead. func (PoolQueryTargetInfo_TargetType) EnumDescriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{31, 0} + return file_mgmt_pool_proto_rawDescGZIP(), []int{30, 0} } type PoolQueryTargetInfo_TargetState int32 @@ -289,7 +290,7 @@ func (x PoolQueryTargetInfo_TargetState) Number() protoreflect.EnumNumber { // Deprecated: Use PoolQueryTargetInfo_TargetState.Descriptor instead. func (PoolQueryTargetInfo_TargetState) EnumDescriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{31, 1} + return file_mgmt_pool_proto_rawDescGZIP(), []int{30, 1} } // PoolCreateReq supplies new pool parameters. 
@@ -823,9 +824,9 @@ type PoolExcludeReq struct { unknownFields protoimpl.UnknownFields Sys string `protobuf:"bytes,1,opt,name=sys,proto3" json:"sys,omitempty"` // DAOS system identifier - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` // uuid or label of pool to exclude some targets - Rank uint32 `protobuf:"varint,3,opt,name=rank,proto3" json:"rank,omitempty"` // target to move to the down state - TargetIdx []uint32 `protobuf:"varint,4,rep,packed,name=target_idx,json=targetIdx,proto3" json:"target_idx,omitempty"` // target ranks + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` // uuid or label of pool to exclude some targets on each selected rank + Ranks []uint32 `protobuf:"varint,3,rep,packed,name=ranks,proto3" json:"ranks,omitempty"` // Ranks to operate on + TargetIdx []uint32 `protobuf:"varint,4,rep,packed,name=target_idx,json=targetIdx,proto3" json:"target_idx,omitempty"` // Targets to move to the down state on each selected rank SvcRanks []uint32 `protobuf:"varint,5,rep,packed,name=svc_ranks,json=svcRanks,proto3" json:"svc_ranks,omitempty"` // List of pool service ranks } @@ -875,11 +876,11 @@ func (x *PoolExcludeReq) GetId() string { return "" } -func (x *PoolExcludeReq) GetRank() uint32 { +func (x *PoolExcludeReq) GetRanks() []uint32 { if x != nil { - return x.Rank + return x.Ranks } - return 0 + return nil } func (x *PoolExcludeReq) GetTargetIdx() []uint32 { @@ -951,9 +952,9 @@ type PoolDrainReq struct { unknownFields protoimpl.UnknownFields Sys string `protobuf:"bytes,1,opt,name=sys,proto3" json:"sys,omitempty"` // DAOS system identifier - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` // uuid or label of pool to drain some targets - Rank uint32 `protobuf:"varint,3,opt,name=rank,proto3" json:"rank,omitempty"` // rank to move to the down state - TargetIdx []uint32 `protobuf:"varint,4,rep,packed,name=target_idx,json=targetIdx,proto3" json:"target_idx,omitempty"` // rank targets + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` // uuid or label of pool to drain some targets on each selected rank + Ranks []uint32 `protobuf:"varint,3,rep,packed,name=ranks,proto3" json:"ranks,omitempty"` // Ranks to operate on + TargetIdx []uint32 `protobuf:"varint,4,rep,packed,name=target_idx,json=targetIdx,proto3" json:"target_idx,omitempty"` // Targets to move to the drain state on each selected rank SvcRanks []uint32 `protobuf:"varint,5,rep,packed,name=svc_ranks,json=svcRanks,proto3" json:"svc_ranks,omitempty"` // List of pool service ranks } @@ -1003,11 +1004,11 @@ func (x *PoolDrainReq) GetId() string { return "" } -func (x *PoolDrainReq) GetRank() uint32 { +func (x *PoolDrainReq) GetRanks() []uint32 { if x != nil { - return x.Rank + return x.Ranks } - return 0 + return nil } func (x *PoolDrainReq) GetTargetIdx() []uint32 { @@ -1024,17 +1025,19 @@ func (x *PoolDrainReq) GetSvcRanks() []uint32 { return nil } -// PoolDrainResp returns resultant state of Drain operation. -type PoolDrainResp struct { +// PoolRanksResp returns response from operation on multiple pool-ranks. 
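+// For example (illustrative values only), draining ranks 0-3 of a pool where rank 3
+// fails might yield a response where:
+//
+//	resp.GetSuccessRanks() // -> [0 1 2]
+//	resp.GetFailedRank()   // -> 3
+//	resp.GetStatus()       // -> nonzero DAOS error code for the failed rank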
+type PoolRanksResp struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` // DAOS error code + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` // DAOS error code for failed rank attempt + FailedRank uint32 `protobuf:"varint,2,opt,name=failed_rank,json=failedRank,proto3" json:"failed_rank,omitempty"` // Rank ID that failed operation + SuccessRanks []uint32 `protobuf:"varint,3,rep,packed,name=success_ranks,json=successRanks,proto3" json:"success_ranks,omitempty"` // Pool-ranks that were successfully operated on } -func (x *PoolDrainResp) Reset() { - *x = PoolDrainResp{} +func (x *PoolRanksResp) Reset() { + *x = PoolRanksResp{} if protoimpl.UnsafeEnabled { mi := &file_mgmt_pool_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1042,13 +1045,13 @@ func (x *PoolDrainResp) Reset() { } } -func (x *PoolDrainResp) String() string { +func (x *PoolRanksResp) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PoolDrainResp) ProtoMessage() {} +func (*PoolRanksResp) ProtoMessage() {} -func (x *PoolDrainResp) ProtoReflect() protoreflect.Message { +func (x *PoolRanksResp) ProtoReflect() protoreflect.Message { mi := &file_mgmt_pool_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1060,18 +1063,32 @@ func (x *PoolDrainResp) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PoolDrainResp.ProtoReflect.Descriptor instead. -func (*PoolDrainResp) Descriptor() ([]byte, []int) { +// Deprecated: Use PoolRanksResp.ProtoReflect.Descriptor instead. +func (*PoolRanksResp) Descriptor() ([]byte, []int) { return file_mgmt_pool_proto_rawDescGZIP(), []int{9} } -func (x *PoolDrainResp) GetStatus() int32 { +func (x *PoolRanksResp) GetStatus() int32 { if x != nil { return x.Status } return 0 } +func (x *PoolRanksResp) GetFailedRank() uint32 { + if x != nil { + return x.FailedRank + } + return 0 +} + +func (x *PoolRanksResp) GetSuccessRanks() []uint32 { + if x != nil { + return x.SuccessRanks + } + return nil +} + // PoolExtendReq supplies pool identifier and rank list. 
type PoolExtendReq struct { state protoimpl.MessageState @@ -1080,7 +1097,7 @@ type PoolExtendReq struct { Sys string `protobuf:"bytes,1,opt,name=sys,proto3" json:"sys,omitempty"` // DAOS system identifier Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` // uuid or label of pool to add target up to - Ranks []uint32 `protobuf:"varint,3,rep,packed,name=ranks,proto3" json:"ranks,omitempty"` // ranks + Ranks []uint32 `protobuf:"varint,3,rep,packed,name=ranks,proto3" json:"ranks,omitempty"` // Ranks to operate on SvcRanks []uint32 `protobuf:"varint,4,rep,packed,name=svc_ranks,json=svcRanks,proto3" json:"svc_ranks,omitempty"` // List of pool service ranks TierBytes []uint64 `protobuf:"varint,5,rep,packed,name=tier_bytes,json=tierBytes,proto3" json:"tier_bytes,omitempty"` // Size in bytes of storage tiers FaultDomains []uint32 `protobuf:"varint,6,rep,packed,name=fault_domains,json=faultDomains,proto3" json:"fault_domains,omitempty"` // fault domain tree, minimal format @@ -1231,9 +1248,9 @@ type PoolReintReq struct { unknownFields protoimpl.UnknownFields Sys string `protobuf:"bytes,1,opt,name=sys,proto3" json:"sys,omitempty"` // DAOS system identifier - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` // uuid or label of pool to add target up to - Rank uint32 `protobuf:"varint,3,opt,name=rank,proto3" json:"rank,omitempty"` // target to move to the up state - TargetIdx []uint32 `protobuf:"varint,4,rep,packed,name=target_idx,json=targetIdx,proto3" json:"target_idx,omitempty"` // target ranks + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` // uuid or label of pool to reintegrate some targets on each selected rank + Ranks []uint32 `protobuf:"varint,3,rep,packed,name=ranks,proto3" json:"ranks,omitempty"` // Ranks to operate on + TargetIdx []uint32 `protobuf:"varint,4,rep,packed,name=target_idx,json=targetIdx,proto3" json:"target_idx,omitempty"` // Targets to move to the reintegrate state on each rank SvcRanks []uint32 `protobuf:"varint,5,rep,packed,name=svc_ranks,json=svcRanks,proto3" json:"svc_ranks,omitempty"` // List of pool service ranks TierBytes []uint64 `protobuf:"varint,6,rep,packed,name=tier_bytes,json=tierBytes,proto3" json:"tier_bytes,omitempty"` // Size in bytes of storage tiers MemRatio float32 `protobuf:"fixed32,7,opt,name=mem_ratio,json=memRatio,proto3" json:"mem_ratio,omitempty"` // Fraction of meta-blob-sz to use as mem-file-sz @@ -1285,11 +1302,11 @@ func (x *PoolReintReq) GetId() string { return "" } -func (x *PoolReintReq) GetRank() uint32 { +func (x *PoolReintReq) GetRanks() []uint32 { if x != nil { - return x.Rank + return x.Ranks } - return 0 + return nil } func (x *PoolReintReq) GetTargetIdx() []uint32 { @@ -1320,54 +1337,6 @@ func (x *PoolReintReq) GetMemRatio() float32 { return 0 } -// PoolReintResp returns resultant state of reintegrate operation. 
-type PoolReintResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` // DAOS error code -} - -func (x *PoolReintResp) Reset() { - *x = PoolReintResp{} - if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PoolReintResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PoolReintResp) ProtoMessage() {} - -func (x *PoolReintResp) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PoolReintResp.ProtoReflect.Descriptor instead. -func (*PoolReintResp) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{13} -} - -func (x *PoolReintResp) GetStatus() int32 { - if x != nil { - return x.Status - } - return 0 -} - // ListPoolsReq represents a request to list pools on a given DAOS system. type ListPoolsReq struct { state protoimpl.MessageState @@ -1380,7 +1349,7 @@ type ListPoolsReq struct { func (x *ListPoolsReq) Reset() { *x = ListPoolsReq{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[14] + mi := &file_mgmt_pool_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1393,7 +1362,7 @@ func (x *ListPoolsReq) String() string { func (*ListPoolsReq) ProtoMessage() {} func (x *ListPoolsReq) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[14] + mi := &file_mgmt_pool_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1406,7 +1375,7 @@ func (x *ListPoolsReq) ProtoReflect() protoreflect.Message { // Deprecated: Use ListPoolsReq.ProtoReflect.Descriptor instead. func (*ListPoolsReq) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{14} + return file_mgmt_pool_proto_rawDescGZIP(), []int{13} } func (x *ListPoolsReq) GetSys() string { @@ -1430,7 +1399,7 @@ type ListPoolsResp struct { func (x *ListPoolsResp) Reset() { *x = ListPoolsResp{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[15] + mi := &file_mgmt_pool_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1443,7 +1412,7 @@ func (x *ListPoolsResp) String() string { func (*ListPoolsResp) ProtoMessage() {} func (x *ListPoolsResp) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[15] + mi := &file_mgmt_pool_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1456,7 +1425,7 @@ func (x *ListPoolsResp) ProtoReflect() protoreflect.Message { // Deprecated: Use ListPoolsResp.ProtoReflect.Descriptor instead. 
func (*ListPoolsResp) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{15} + return file_mgmt_pool_proto_rawDescGZIP(), []int{14} } func (x *ListPoolsResp) GetStatus() int32 { @@ -1496,7 +1465,7 @@ type ListContReq struct { func (x *ListContReq) Reset() { *x = ListContReq{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[16] + mi := &file_mgmt_pool_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1509,7 +1478,7 @@ func (x *ListContReq) String() string { func (*ListContReq) ProtoMessage() {} func (x *ListContReq) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[16] + mi := &file_mgmt_pool_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1522,7 +1491,7 @@ func (x *ListContReq) ProtoReflect() protoreflect.Message { // Deprecated: Use ListContReq.ProtoReflect.Descriptor instead. func (*ListContReq) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{16} + return file_mgmt_pool_proto_rawDescGZIP(), []int{15} } func (x *ListContReq) GetSys() string { @@ -1558,7 +1527,7 @@ type ListContResp struct { func (x *ListContResp) Reset() { *x = ListContResp{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[17] + mi := &file_mgmt_pool_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1571,7 +1540,7 @@ func (x *ListContResp) String() string { func (*ListContResp) ProtoMessage() {} func (x *ListContResp) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[17] + mi := &file_mgmt_pool_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1584,7 +1553,7 @@ func (x *ListContResp) ProtoReflect() protoreflect.Message { // Deprecated: Use ListContResp.ProtoReflect.Descriptor instead. func (*ListContResp) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{17} + return file_mgmt_pool_proto_rawDescGZIP(), []int{16} } func (x *ListContResp) GetStatus() int32 { @@ -1616,7 +1585,7 @@ type PoolQueryReq struct { func (x *PoolQueryReq) Reset() { *x = PoolQueryReq{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[18] + mi := &file_mgmt_pool_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1629,7 +1598,7 @@ func (x *PoolQueryReq) String() string { func (*PoolQueryReq) ProtoMessage() {} func (x *PoolQueryReq) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[18] + mi := &file_mgmt_pool_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1642,7 +1611,7 @@ func (x *PoolQueryReq) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolQueryReq.ProtoReflect.Descriptor instead. 
func (*PoolQueryReq) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{18} + return file_mgmt_pool_proto_rawDescGZIP(), []int{17} } func (x *PoolQueryReq) GetSys() string { @@ -1690,7 +1659,7 @@ type StorageUsageStats struct { func (x *StorageUsageStats) Reset() { *x = StorageUsageStats{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[19] + mi := &file_mgmt_pool_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1703,7 +1672,7 @@ func (x *StorageUsageStats) String() string { func (*StorageUsageStats) ProtoMessage() {} func (x *StorageUsageStats) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[19] + mi := &file_mgmt_pool_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1716,7 +1685,7 @@ func (x *StorageUsageStats) ProtoReflect() protoreflect.Message { // Deprecated: Use StorageUsageStats.ProtoReflect.Descriptor instead. func (*StorageUsageStats) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{19} + return file_mgmt_pool_proto_rawDescGZIP(), []int{18} } func (x *StorageUsageStats) GetTotal() uint64 { @@ -1776,7 +1745,7 @@ type PoolRebuildStatus struct { func (x *PoolRebuildStatus) Reset() { *x = PoolRebuildStatus{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[20] + mi := &file_mgmt_pool_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1789,7 +1758,7 @@ func (x *PoolRebuildStatus) String() string { func (*PoolRebuildStatus) ProtoMessage() {} func (x *PoolRebuildStatus) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[20] + mi := &file_mgmt_pool_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1802,7 +1771,7 @@ func (x *PoolRebuildStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolRebuildStatus.ProtoReflect.Descriptor instead. func (*PoolRebuildStatus) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{20} + return file_mgmt_pool_proto_rawDescGZIP(), []int{19} } func (x *PoolRebuildStatus) GetStatus() int32 { @@ -1865,7 +1834,7 @@ type PoolQueryResp struct { func (x *PoolQueryResp) Reset() { *x = PoolQueryResp{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[21] + mi := &file_mgmt_pool_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1878,7 +1847,7 @@ func (x *PoolQueryResp) String() string { func (*PoolQueryResp) ProtoMessage() {} func (x *PoolQueryResp) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[21] + mi := &file_mgmt_pool_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1891,7 +1860,7 @@ func (x *PoolQueryResp) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolQueryResp.ProtoReflect.Descriptor instead. 
func (*PoolQueryResp) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{21} + return file_mgmt_pool_proto_rawDescGZIP(), []int{20} } func (x *PoolQueryResp) GetStatus() int32 { @@ -2057,7 +2026,7 @@ type PoolProperty struct { func (x *PoolProperty) Reset() { *x = PoolProperty{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[22] + mi := &file_mgmt_pool_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2070,7 +2039,7 @@ func (x *PoolProperty) String() string { func (*PoolProperty) ProtoMessage() {} func (x *PoolProperty) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[22] + mi := &file_mgmt_pool_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2083,7 +2052,7 @@ func (x *PoolProperty) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolProperty.ProtoReflect.Descriptor instead. func (*PoolProperty) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{22} + return file_mgmt_pool_proto_rawDescGZIP(), []int{21} } func (x *PoolProperty) GetNumber() uint32 { @@ -2145,7 +2114,7 @@ type PoolSetPropReq struct { func (x *PoolSetPropReq) Reset() { *x = PoolSetPropReq{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[23] + mi := &file_mgmt_pool_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2158,7 +2127,7 @@ func (x *PoolSetPropReq) String() string { func (*PoolSetPropReq) ProtoMessage() {} func (x *PoolSetPropReq) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[23] + mi := &file_mgmt_pool_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2171,7 +2140,7 @@ func (x *PoolSetPropReq) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolSetPropReq.ProtoReflect.Descriptor instead. func (*PoolSetPropReq) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{23} + return file_mgmt_pool_proto_rawDescGZIP(), []int{22} } func (x *PoolSetPropReq) GetSys() string { @@ -2214,7 +2183,7 @@ type PoolSetPropResp struct { func (x *PoolSetPropResp) Reset() { *x = PoolSetPropResp{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[24] + mi := &file_mgmt_pool_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2227,7 +2196,7 @@ func (x *PoolSetPropResp) String() string { func (*PoolSetPropResp) ProtoMessage() {} func (x *PoolSetPropResp) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[24] + mi := &file_mgmt_pool_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2240,7 +2209,7 @@ func (x *PoolSetPropResp) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolSetPropResp.ProtoReflect.Descriptor instead. 
func (*PoolSetPropResp) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{24} + return file_mgmt_pool_proto_rawDescGZIP(), []int{23} } func (x *PoolSetPropResp) GetStatus() int32 { @@ -2265,7 +2234,7 @@ type PoolGetPropReq struct { func (x *PoolGetPropReq) Reset() { *x = PoolGetPropReq{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[25] + mi := &file_mgmt_pool_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2278,7 +2247,7 @@ func (x *PoolGetPropReq) String() string { func (*PoolGetPropReq) ProtoMessage() {} func (x *PoolGetPropReq) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[25] + mi := &file_mgmt_pool_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2291,7 +2260,7 @@ func (x *PoolGetPropReq) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolGetPropReq.ProtoReflect.Descriptor instead. func (*PoolGetPropReq) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{25} + return file_mgmt_pool_proto_rawDescGZIP(), []int{24} } func (x *PoolGetPropReq) GetSys() string { @@ -2335,7 +2304,7 @@ type PoolGetPropResp struct { func (x *PoolGetPropResp) Reset() { *x = PoolGetPropResp{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[26] + mi := &file_mgmt_pool_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2348,7 +2317,7 @@ func (x *PoolGetPropResp) String() string { func (*PoolGetPropResp) ProtoMessage() {} func (x *PoolGetPropResp) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[26] + mi := &file_mgmt_pool_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2361,7 +2330,7 @@ func (x *PoolGetPropResp) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolGetPropResp.ProtoReflect.Descriptor instead. func (*PoolGetPropResp) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{26} + return file_mgmt_pool_proto_rawDescGZIP(), []int{25} } func (x *PoolGetPropResp) GetStatus() int32 { @@ -2393,7 +2362,7 @@ type PoolUpgradeReq struct { func (x *PoolUpgradeReq) Reset() { *x = PoolUpgradeReq{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[27] + mi := &file_mgmt_pool_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2406,7 +2375,7 @@ func (x *PoolUpgradeReq) String() string { func (*PoolUpgradeReq) ProtoMessage() {} func (x *PoolUpgradeReq) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[27] + mi := &file_mgmt_pool_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2419,7 +2388,7 @@ func (x *PoolUpgradeReq) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolUpgradeReq.ProtoReflect.Descriptor instead. 
func (*PoolUpgradeReq) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{27} + return file_mgmt_pool_proto_rawDescGZIP(), []int{26} } func (x *PoolUpgradeReq) GetSys() string { @@ -2455,7 +2424,7 @@ type PoolUpgradeResp struct { func (x *PoolUpgradeResp) Reset() { *x = PoolUpgradeResp{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[28] + mi := &file_mgmt_pool_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2468,7 +2437,7 @@ func (x *PoolUpgradeResp) String() string { func (*PoolUpgradeResp) ProtoMessage() {} func (x *PoolUpgradeResp) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[28] + mi := &file_mgmt_pool_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2481,7 +2450,7 @@ func (x *PoolUpgradeResp) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolUpgradeResp.ProtoReflect.Descriptor instead. func (*PoolUpgradeResp) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{28} + return file_mgmt_pool_proto_rawDescGZIP(), []int{27} } func (x *PoolUpgradeResp) GetStatus() int32 { @@ -2507,7 +2476,7 @@ type PoolQueryTargetReq struct { func (x *PoolQueryTargetReq) Reset() { *x = PoolQueryTargetReq{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[29] + mi := &file_mgmt_pool_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2520,7 +2489,7 @@ func (x *PoolQueryTargetReq) String() string { func (*PoolQueryTargetReq) ProtoMessage() {} func (x *PoolQueryTargetReq) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[29] + mi := &file_mgmt_pool_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2533,7 +2502,7 @@ func (x *PoolQueryTargetReq) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolQueryTargetReq.ProtoReflect.Descriptor instead. func (*PoolQueryTargetReq) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{29} + return file_mgmt_pool_proto_rawDescGZIP(), []int{28} } func (x *PoolQueryTargetReq) GetSys() string { @@ -2585,7 +2554,7 @@ type StorageTargetUsage struct { func (x *StorageTargetUsage) Reset() { *x = StorageTargetUsage{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[30] + mi := &file_mgmt_pool_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2598,7 +2567,7 @@ func (x *StorageTargetUsage) String() string { func (*StorageTargetUsage) ProtoMessage() {} func (x *StorageTargetUsage) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[30] + mi := &file_mgmt_pool_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2611,7 +2580,7 @@ func (x *StorageTargetUsage) ProtoReflect() protoreflect.Message { // Deprecated: Use StorageTargetUsage.ProtoReflect.Descriptor instead. 
func (*StorageTargetUsage) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{30} + return file_mgmt_pool_proto_rawDescGZIP(), []int{29} } func (x *StorageTargetUsage) GetTotal() uint64 { @@ -2652,7 +2621,7 @@ type PoolQueryTargetInfo struct { func (x *PoolQueryTargetInfo) Reset() { *x = PoolQueryTargetInfo{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[31] + mi := &file_mgmt_pool_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2665,7 +2634,7 @@ func (x *PoolQueryTargetInfo) String() string { func (*PoolQueryTargetInfo) ProtoMessage() {} func (x *PoolQueryTargetInfo) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[31] + mi := &file_mgmt_pool_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2678,7 +2647,7 @@ func (x *PoolQueryTargetInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolQueryTargetInfo.ProtoReflect.Descriptor instead. func (*PoolQueryTargetInfo) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{31} + return file_mgmt_pool_proto_rawDescGZIP(), []int{30} } func (x *PoolQueryTargetInfo) GetType() PoolQueryTargetInfo_TargetType { @@ -2722,7 +2691,7 @@ type PoolQueryTargetResp struct { func (x *PoolQueryTargetResp) Reset() { *x = PoolQueryTargetResp{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[32] + mi := &file_mgmt_pool_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2735,7 +2704,7 @@ func (x *PoolQueryTargetResp) String() string { func (*PoolQueryTargetResp) ProtoMessage() {} func (x *PoolQueryTargetResp) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[32] + mi := &file_mgmt_pool_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2748,7 +2717,7 @@ func (x *PoolQueryTargetResp) ProtoReflect() protoreflect.Message { // Deprecated: Use PoolQueryTargetResp.ProtoReflect.Descriptor instead. func (*PoolQueryTargetResp) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{32} + return file_mgmt_pool_proto_rawDescGZIP(), []int{31} } func (x *PoolQueryTargetResp) GetStatus() int32 { @@ -2780,7 +2749,7 @@ type ListPoolsResp_Pool struct { func (x *ListPoolsResp_Pool) Reset() { *x = ListPoolsResp_Pool{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[33] + mi := &file_mgmt_pool_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2793,7 +2762,7 @@ func (x *ListPoolsResp_Pool) String() string { func (*ListPoolsResp_Pool) ProtoMessage() {} func (x *ListPoolsResp_Pool) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[33] + mi := &file_mgmt_pool_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2806,7 +2775,7 @@ func (x *ListPoolsResp_Pool) ProtoReflect() protoreflect.Message { // Deprecated: Use ListPoolsResp_Pool.ProtoReflect.Descriptor instead. 
func (*ListPoolsResp_Pool) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{15, 0} + return file_mgmt_pool_proto_rawDescGZIP(), []int{14, 0} } func (x *ListPoolsResp_Pool) GetUuid() string { @@ -2855,7 +2824,7 @@ type ListContResp_Cont struct { func (x *ListContResp_Cont) Reset() { *x = ListContResp_Cont{} if protoimpl.UnsafeEnabled { - mi := &file_mgmt_pool_proto_msgTypes[34] + mi := &file_mgmt_pool_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2868,7 +2837,7 @@ func (x *ListContResp_Cont) String() string { func (*ListContResp_Cont) ProtoMessage() {} func (x *ListContResp_Cont) ProtoReflect() protoreflect.Message { - mi := &file_mgmt_pool_proto_msgTypes[34] + mi := &file_mgmt_pool_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2881,7 +2850,7 @@ func (x *ListContResp_Cont) ProtoReflect() protoreflect.Message { // Deprecated: Use ListContResp_Cont.ProtoReflect.Descriptor instead. func (*ListContResp_Cont) Descriptor() ([]byte, []int) { - return file_mgmt_pool_proto_rawDescGZIP(), []int{17, 0} + return file_mgmt_pool_proto_rawDescGZIP(), []int{16, 0} } func (x *ListContResp_Cont) GetUuid() string { @@ -2961,267 +2930,269 @@ var file_mgmt_pool_proto_rawDesc = []byte{ 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, 0x6c, 0x45, 0x78, 0x63, 0x6c, 0x75, + 0x6e, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, 0x6c, 0x45, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x78, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, - 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x76, - 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, - 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x29, 0x0a, 0x0f, 0x50, 0x6f, 0x6f, 0x6c, 0x45, - 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x1d, 0x0a, + 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x78, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0d, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x78, 0x12, 0x1b, 0x0a, 0x09, + 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x29, 0x0a, 0x0f, 0x50, 0x6f, 0x6f, + 0x6c, 0x45, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 
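// The added descriptor bytes that follow encode the reshaped PoolDrainReq,
// whose scalar "uint32 rank = 3" becomes "repeated uint32 ranks = 3", and the
// new PoolRanksResp message that replaces both PoolDrainResp and PoolReintResp
// (a reading of the raw bytes in this hunk, not of the .proto source).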
0x22, 0x82, 0x01, 0x0a, 0x0c, 0x50, 0x6f, 0x6f, 0x6c, 0x44, 0x72, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x1d, 0x0a, + 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x78, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0d, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x78, 0x12, 0x1b, 0x0a, 0x09, + 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x6d, 0x0a, 0x0d, 0x50, 0x6f, 0x6f, + 0x6c, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x22, 0x80, 0x01, 0x0a, 0x0c, 0x50, 0x6f, 0x6f, 0x6c, 0x44, 0x72, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x78, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x09, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, - 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, - 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x27, 0x0a, 0x0d, 0x50, 0x6f, 0x6f, 0x6c, 0x44, 0x72, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xc5, - 0x01, 0x0a, 0x0d, 0x50, 0x6f, 0x6f, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, - 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, - 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0d, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, - 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, - 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69, 0x65, 0x72, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x65, 0x72, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x64, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x65, 0x6d, - 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x6d, 0x65, - 0x6d, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, 0x47, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, 0x6c, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 
0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, - 0xbc, 0x01, 0x0a, 0x0c, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, - 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x5f, 0x69, 0x64, 0x78, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x49, 0x64, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, - 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, - 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x65, 0x6d, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x6d, 0x65, 0x6d, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, 0x27, - 0x0a, 0x0d, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, + 0x75, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, + 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, + 0x61, 0x6e, 0x6b, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, + 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x0d, 0x50, 0x6f, 0x6f, + 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x61, 0x6e, + 0x6b, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x74, 0x69, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x65, 0x6d, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x6d, 0x65, 0x6d, 0x52, 0x61, 0x74, 0x69, 0x6f, + 0x22, 0x47, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x69, + 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 
0x28, 0x04, 0x52, 0x09, + 0x74, 0x69, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0c, 0x50, 0x6f, + 0x6f, 0x6c, 0x52, 0x65, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x61, 0x6e, + 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x78, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, + 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x1d, + 0x0a, 0x0a, 0x74, 0x69, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x65, 0x6d, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, + 0x52, 0x08, 0x6d, 0x65, 0x6d, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, 0x20, 0x0a, 0x0c, 0x4c, 0x69, + 0x73, 0x74, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x22, 0x83, 0x02, 0x0a, + 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x05, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x52, + 0x05, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, + 0x74, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x86, 0x01, 0x0a, 0x04, 0x50, 0x6f, + 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x08, + 0x73, 0x76, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, + 0x73, 0x76, 0x63, 0x52, 0x65, 0x70, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x22, 0x4c, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x52, 0x65, + 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, + 
0x22, 0x7b, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, + 0x67, 0x6d, 0x74, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x73, 0x1a, 0x1a, 0x0a, 0x04, 0x43, 0x6f, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0x6c, 0x0a, + 0x0c, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, + 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xac, 0x01, 0x0a, 0x11, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, + 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, + 0x03, 0x6d, 0x61, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, + 0x12, 0x0a, 0x04, 0x6d, 0x65, 0x61, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x6d, + 0x65, 0x61, 0x6e, 0x12, 0x35, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x11, 0x50, + 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, + 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x22, 0x25, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x44, + 0x4c, 0x45, 0x10, 0x00, 
0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x08, + 0x0a, 0x04, 0x42, 0x55, 0x53, 0x59, 0x10, 0x02, 0x22, 0x85, 0x06, 0x0a, 0x0d, 0x50, 0x6f, 0x6f, + 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x23, 0x0a, 0x0d, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x73, 0x12, 0x31, 0x0a, 0x07, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, + 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x72, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x36, 0x0a, 0x0a, 0x74, 0x69, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x67, 0x6d, + 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x09, 0x74, 0x69, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x6b, + 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x6c, 0x61, 0x79, 0x6f, 0x75, 0x74, + 0x5f, 0x76, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x70, 0x6f, 0x6f, 0x6c, + 0x4c, 0x61, 0x79, 0x6f, 0x75, 0x74, 0x56, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x5f, 0x6c, 0x61, 0x79, 0x6f, 0x75, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x4c, 0x61, + 0x79, 0x6f, 0x75, 0x74, 0x56, 0x65, 0x72, 0x12, 
0x2c, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, + 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x76, 0x63, 0x5f, 0x6c, 0x64, 0x72, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x76, 0x63, 0x4c, 0x64, 0x72, 0x12, 0x19, + 0x0a, 0x08, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0d, + 0x52, 0x07, 0x73, 0x76, 0x63, 0x52, 0x65, 0x70, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x65, 0x6d, 0x5f, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, + 0x0a, 0x0a, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x4a, 0x04, 0x08, + 0x09, 0x10, 0x0a, 0x52, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, + 0x22, 0x63, 0x0a, 0x0c, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x72, 0x76, + 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x76, 0x61, 0x6c, 0x42, 0x07, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x83, 0x01, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x65, + 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x32, 0x0a, 0x0a, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1b, + 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x29, 0x0a, 0x0f, 0x50, + 0x6f, 0x6f, 0x6c, 0x53, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, 0x6c, 0x47, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x32, 0x0a, 0x0a, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 
0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x5d, 0x0a, 0x0f, + 0x50, 0x6f, 0x6f, 0x6c, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x67, + 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x52, + 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x0e, 0x50, + 0x6f, 0x6f, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, + 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x29, 0x0a, 0x0f, + 0x50, 0x6f, 0x6f, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x20, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x50, - 0x6f, 0x6f, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x22, 0x83, 0x02, 0x0a, 0x0d, 0x4c, 0x69, - 0x73, 0x74, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x05, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6f, - 0x6f, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x05, 0x70, 0x6f, - 0x6f, 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x86, 0x01, 0x0a, 0x04, 0x50, 0x6f, 0x6f, 0x6c, 0x12, - 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, - 0x75, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x76, 0x63, - 0x5f, 0x72, 0x65, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x76, 0x63, - 0x52, 0x65, 0x70, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, - 0x4c, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x10, + 
0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x12, 0x50, 0x6f, 0x6f, 0x6c, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x7b, 0x0a, - 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x67, 0x6d, 0x74, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6f, - 0x6e, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x1a, 0x1a, - 0x0a, 0x04, 0x43, 0x6f, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0x6c, 0x0a, 0x0c, 0x50, 0x6f, - 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, - 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xac, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x14, - 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, - 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x12, 0x0a, 0x04, - 0x6d, 0x65, 0x61, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x6d, 0x65, 0x61, 0x6e, - 0x12, 0x35, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x11, 0x50, 0x6f, 0x6f, 0x6c, - 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, - 0x52, 0x65, 0x62, 0x75, 0x69, 
0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x25, - 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x44, 0x4c, 0x45, 0x10, - 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x42, - 0x55, 0x53, 0x59, 0x10, 0x02, 0x22, 0x85, 0x06, 0x0a, 0x0d, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, - 0x75, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, - 0x61, 0x6c, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, - 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, - 0x12, 0x31, 0x0a, 0x07, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x65, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x07, 0x72, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x12, 0x36, 0x0a, 0x0a, 0x74, 0x69, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x52, 0x09, 0x74, 0x69, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x23, 0x0a, - 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x61, 0x6e, - 0x6b, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, - 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, - 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x45, 0x6e, 
0x67, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x26, - 0x0a, 0x0f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x6c, 0x61, 0x79, 0x6f, 0x75, 0x74, 0x5f, 0x76, 0x65, - 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x70, 0x6f, 0x6f, 0x6c, 0x4c, 0x61, 0x79, - 0x6f, 0x75, 0x74, 0x56, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x5f, 0x6c, 0x61, 0x79, 0x6f, 0x75, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x10, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x4c, 0x61, 0x79, 0x6f, 0x75, - 0x74, 0x56, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x76, 0x63, 0x5f, 0x6c, 0x64, 0x72, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x76, 0x63, 0x4c, 0x64, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x73, - 0x76, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x73, - 0x76, 0x63, 0x52, 0x65, 0x70, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, - 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x65, 0x6d, 0x5f, 0x66, 0x69, 0x6c, - 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, - 0x65, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x64, - 0x65, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x64, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, - 0x52, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x63, 0x0a, - 0x0c, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x16, 0x0a, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x76, 0x61, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x72, 0x76, 0x61, 0x6c, 0x12, - 0x18, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, - 0x00, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x76, 0x61, 0x6c, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x83, 0x01, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x65, 0x74, 0x50, 0x72, - 0x6f, 0x70, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x32, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x67, - 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x52, - 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, - 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, - 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x29, 0x0a, 0x0f, 0x50, 0x6f, 0x6f, 0x6c, - 0x53, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 
0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, 0x6c, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x32, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, - 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, - 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, - 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, - 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, - 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x5d, 0x0a, 0x0f, 0x50, 0x6f, 0x6f, - 0x6c, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, + 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, + 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x1b, + 0x0a, 0x09, 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0d, 0x52, 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x75, 0x0a, 0x12, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x35, 0x0a, 0x0a, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x16, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x65, + 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, + 0x70, 0x65, 0x22, 0x80, 0x03, 0x0a, 0x13, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x38, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, + 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x65, 0x6d, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x46, 0x69, + 0x6c, 
0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3b, 0x0a, 0x0a, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x44, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x53, + 0x53, 0x44, 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, 0x50, 0x4d, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, + 0x56, 0x4d, 0x10, 0x04, 0x22, 0x5f, 0x0a, 0x0b, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x4f, 0x57, 0x4e, 0x5f, 0x4f, + 0x55, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, 0x06, + 0x0a, 0x02, 0x55, 0x50, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x50, 0x5f, 0x49, 0x4e, 0x10, + 0x04, 0x12, 0x07, 0x0a, 0x03, 0x4e, 0x45, 0x57, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x52, + 0x41, 0x49, 0x4e, 0x10, 0x06, 0x22, 0x5e, 0x0a, 0x13, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, - 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x52, 0x0a, 0x70, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, 0x6c, - 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x73, 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, - 0x08, 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x29, 0x0a, 0x0f, 0x50, 0x6f, 0x6f, - 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x12, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, - 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, - 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x72, 0x61, 0x6e, - 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0d, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, - 0x76, 0x63, 0x5f, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, - 0x73, 0x76, 0x63, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x75, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x04, 0x66, 
0x72, 0x65, 0x65, 0x12, 0x35, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, - 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, - 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x64, 0x69, 0x61, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x22, - 0x80, 0x03, 0x0a, 0x13, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x38, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, - 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x25, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2e, - 0x0a, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x24, - 0x0a, 0x0e, 0x6d, 0x65, 0x6d, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x22, 0x3b, 0x0a, 0x0a, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x07, 0x0a, 0x03, 0x48, 0x44, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x44, 0x10, - 0x02, 0x12, 0x06, 0x0a, 0x02, 0x50, 0x4d, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x4d, 0x10, - 0x04, 0x22, 0x5f, 0x0a, 0x0b, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x4f, 0x57, 0x4e, 0x5f, 0x4f, 0x55, 0x54, 0x10, - 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, 0x55, - 0x50, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x50, 0x5f, 0x49, 0x4e, 0x10, 0x04, 0x12, 0x07, - 0x0a, 0x03, 0x4e, 0x45, 0x57, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x52, 0x41, 0x49, 0x4e, - 0x10, 0x06, 0x22, 0x5e, 0x0a, 0x13, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x2f, 0x0a, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x69, 0x6e, 0x66, - 0x6f, 0x73, 0x2a, 0x25, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x64, - 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x43, 0x4d, 0x10, 0x00, 0x12, - 0x08, 0x0a, 0x04, 0x4e, 0x56, 0x4d, 0x45, 0x10, 0x01, 
0x2a, 0x56, 0x0a, 0x10, 0x50, 0x6f, 0x6f, - 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, - 0x08, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x52, - 0x65, 0x61, 0x64, 0x79, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, - 0x79, 0x69, 0x6e, 0x67, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x65, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x64, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, - 0x04, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x64, 0x61, 0x6f, 0x73, 0x2d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2f, 0x64, 0x61, 0x6f, 0x73, 0x2f, - 0x73, 0x72, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x67, 0x6d, 0x74, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x2f, 0x0a, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, + 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x2a, 0x25, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x43, 0x4d, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x56, 0x4d, 0x45, 0x10, 0x01, 0x2a, 0x56, 0x0a, 0x10, + 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x65, 0x73, + 0x74, 0x72, 0x6f, 0x79, 0x69, 0x6e, 0x67, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x65, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x64, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x10, 0x04, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x64, 0x61, 0x6f, 0x73, 0x2d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2f, 0x64, 0x61, + 0x6f, 0x73, 0x2f, 0x73, 0x72, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x67, 0x6d, 0x74, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3237,7 +3208,7 @@ func file_mgmt_pool_proto_rawDescGZIP() []byte { } var file_mgmt_pool_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_mgmt_pool_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_mgmt_pool_proto_msgTypes = make([]protoimpl.MessageInfo, 34) var file_mgmt_pool_proto_goTypes = []interface{}{ (StorageMediaType)(0), // 0: mgmt.StorageMediaType (PoolServiceState)(0), // 1: mgmt.PoolServiceState @@ -3253,50 +3224,49 @@ var file_mgmt_pool_proto_goTypes = []interface{}{ (*PoolExcludeReq)(nil), // 11: mgmt.PoolExcludeReq (*PoolExcludeResp)(nil), // 12: mgmt.PoolExcludeResp (*PoolDrainReq)(nil), // 13: mgmt.PoolDrainReq - (*PoolDrainResp)(nil), // 14: mgmt.PoolDrainResp + (*PoolRanksResp)(nil), // 14: mgmt.PoolRanksResp (*PoolExtendReq)(nil), // 15: mgmt.PoolExtendReq (*PoolExtendResp)(nil), // 16: mgmt.PoolExtendResp (*PoolReintReq)(nil), // 17: mgmt.PoolReintReq - (*PoolReintResp)(nil), // 18: mgmt.PoolReintResp - (*ListPoolsReq)(nil), // 19: mgmt.ListPoolsReq - (*ListPoolsResp)(nil), // 20: mgmt.ListPoolsResp - 
(*ListContReq)(nil), // 21: mgmt.ListContReq - (*ListContResp)(nil), // 22: mgmt.ListContResp - (*PoolQueryReq)(nil), // 23: mgmt.PoolQueryReq - (*StorageUsageStats)(nil), // 24: mgmt.StorageUsageStats - (*PoolRebuildStatus)(nil), // 25: mgmt.PoolRebuildStatus - (*PoolQueryResp)(nil), // 26: mgmt.PoolQueryResp - (*PoolProperty)(nil), // 27: mgmt.PoolProperty - (*PoolSetPropReq)(nil), // 28: mgmt.PoolSetPropReq - (*PoolSetPropResp)(nil), // 29: mgmt.PoolSetPropResp - (*PoolGetPropReq)(nil), // 30: mgmt.PoolGetPropReq - (*PoolGetPropResp)(nil), // 31: mgmt.PoolGetPropResp - (*PoolUpgradeReq)(nil), // 32: mgmt.PoolUpgradeReq - (*PoolUpgradeResp)(nil), // 33: mgmt.PoolUpgradeResp - (*PoolQueryTargetReq)(nil), // 34: mgmt.PoolQueryTargetReq - (*StorageTargetUsage)(nil), // 35: mgmt.StorageTargetUsage - (*PoolQueryTargetInfo)(nil), // 36: mgmt.PoolQueryTargetInfo - (*PoolQueryTargetResp)(nil), // 37: mgmt.PoolQueryTargetResp - (*ListPoolsResp_Pool)(nil), // 38: mgmt.ListPoolsResp.Pool - (*ListContResp_Cont)(nil), // 39: mgmt.ListContResp.Cont + (*ListPoolsReq)(nil), // 18: mgmt.ListPoolsReq + (*ListPoolsResp)(nil), // 19: mgmt.ListPoolsResp + (*ListContReq)(nil), // 20: mgmt.ListContReq + (*ListContResp)(nil), // 21: mgmt.ListContResp + (*PoolQueryReq)(nil), // 22: mgmt.PoolQueryReq + (*StorageUsageStats)(nil), // 23: mgmt.StorageUsageStats + (*PoolRebuildStatus)(nil), // 24: mgmt.PoolRebuildStatus + (*PoolQueryResp)(nil), // 25: mgmt.PoolQueryResp + (*PoolProperty)(nil), // 26: mgmt.PoolProperty + (*PoolSetPropReq)(nil), // 27: mgmt.PoolSetPropReq + (*PoolSetPropResp)(nil), // 28: mgmt.PoolSetPropResp + (*PoolGetPropReq)(nil), // 29: mgmt.PoolGetPropReq + (*PoolGetPropResp)(nil), // 30: mgmt.PoolGetPropResp + (*PoolUpgradeReq)(nil), // 31: mgmt.PoolUpgradeReq + (*PoolUpgradeResp)(nil), // 32: mgmt.PoolUpgradeResp + (*PoolQueryTargetReq)(nil), // 33: mgmt.PoolQueryTargetReq + (*StorageTargetUsage)(nil), // 34: mgmt.StorageTargetUsage + (*PoolQueryTargetInfo)(nil), // 35: mgmt.PoolQueryTargetInfo + (*PoolQueryTargetResp)(nil), // 36: mgmt.PoolQueryTargetResp + (*ListPoolsResp_Pool)(nil), // 37: mgmt.ListPoolsResp.Pool + (*ListContResp_Cont)(nil), // 38: mgmt.ListContResp.Cont } var file_mgmt_pool_proto_depIdxs = []int32{ - 27, // 0: mgmt.PoolCreateReq.properties:type_name -> mgmt.PoolProperty - 38, // 1: mgmt.ListPoolsResp.pools:type_name -> mgmt.ListPoolsResp.Pool - 39, // 2: mgmt.ListContResp.containers:type_name -> mgmt.ListContResp.Cont + 26, // 0: mgmt.PoolCreateReq.properties:type_name -> mgmt.PoolProperty + 37, // 1: mgmt.ListPoolsResp.pools:type_name -> mgmt.ListPoolsResp.Pool + 38, // 2: mgmt.ListContResp.containers:type_name -> mgmt.ListContResp.Cont 0, // 3: mgmt.StorageUsageStats.media_type:type_name -> mgmt.StorageMediaType 2, // 4: mgmt.PoolRebuildStatus.state:type_name -> mgmt.PoolRebuildStatus.State - 25, // 5: mgmt.PoolQueryResp.rebuild:type_name -> mgmt.PoolRebuildStatus - 24, // 6: mgmt.PoolQueryResp.tier_stats:type_name -> mgmt.StorageUsageStats + 24, // 5: mgmt.PoolQueryResp.rebuild:type_name -> mgmt.PoolRebuildStatus + 23, // 6: mgmt.PoolQueryResp.tier_stats:type_name -> mgmt.StorageUsageStats 1, // 7: mgmt.PoolQueryResp.state:type_name -> mgmt.PoolServiceState - 27, // 8: mgmt.PoolSetPropReq.properties:type_name -> mgmt.PoolProperty - 27, // 9: mgmt.PoolGetPropReq.properties:type_name -> mgmt.PoolProperty - 27, // 10: mgmt.PoolGetPropResp.properties:type_name -> mgmt.PoolProperty + 26, // 8: mgmt.PoolSetPropReq.properties:type_name -> mgmt.PoolProperty + 26, // 9: 
mgmt.PoolGetPropReq.properties:type_name -> mgmt.PoolProperty + 26, // 10: mgmt.PoolGetPropResp.properties:type_name -> mgmt.PoolProperty 0, // 11: mgmt.StorageTargetUsage.media_type:type_name -> mgmt.StorageMediaType 3, // 12: mgmt.PoolQueryTargetInfo.type:type_name -> mgmt.PoolQueryTargetInfo.TargetType 4, // 13: mgmt.PoolQueryTargetInfo.state:type_name -> mgmt.PoolQueryTargetInfo.TargetState - 35, // 14: mgmt.PoolQueryTargetInfo.space:type_name -> mgmt.StorageTargetUsage - 36, // 15: mgmt.PoolQueryTargetResp.infos:type_name -> mgmt.PoolQueryTargetInfo + 34, // 14: mgmt.PoolQueryTargetInfo.space:type_name -> mgmt.StorageTargetUsage + 35, // 15: mgmt.PoolQueryTargetResp.infos:type_name -> mgmt.PoolQueryTargetInfo 16, // [16:16] is the sub-list for method output_type 16, // [16:16] is the sub-list for method input_type 16, // [16:16] is the sub-list for extension type_name @@ -3419,7 +3389,7 @@ func file_mgmt_pool_proto_init() { } } file_mgmt_pool_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PoolDrainResp); i { + switch v := v.(*PoolRanksResp); i { case 0: return &v.state case 1: @@ -3467,18 +3437,6 @@ func file_mgmt_pool_proto_init() { } } file_mgmt_pool_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PoolReintResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mgmt_pool_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListPoolsReq); i { case 0: return &v.state @@ -3490,7 +3448,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListPoolsResp); i { case 0: return &v.state @@ -3502,7 +3460,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListContReq); i { case 0: return &v.state @@ -3514,7 +3472,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListContResp); i { case 0: return &v.state @@ -3526,7 +3484,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolQueryReq); i { case 0: return &v.state @@ -3538,7 +3496,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StorageUsageStats); i { case 0: return &v.state @@ -3550,7 +3508,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolRebuildStatus); i { case 0: return &v.state @@ -3562,7 +3520,7 @@ func file_mgmt_pool_proto_init() { return nil } } - 
file_mgmt_pool_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolQueryResp); i { case 0: return &v.state @@ -3574,7 +3532,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolProperty); i { case 0: return &v.state @@ -3586,7 +3544,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolSetPropReq); i { case 0: return &v.state @@ -3598,7 +3556,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolSetPropResp); i { case 0: return &v.state @@ -3610,7 +3568,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolGetPropReq); i { case 0: return &v.state @@ -3622,7 +3580,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolGetPropResp); i { case 0: return &v.state @@ -3634,7 +3592,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolUpgradeReq); i { case 0: return &v.state @@ -3646,7 +3604,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolUpgradeResp); i { case 0: return &v.state @@ -3658,7 +3616,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolQueryTargetReq); i { case 0: return &v.state @@ -3670,7 +3628,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StorageTargetUsage); i { case 0: return &v.state @@ -3682,7 +3640,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PoolQueryTargetInfo); i { case 0: return &v.state @@ -3694,7 +3652,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[31].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*PoolQueryTargetResp); i { case 0: return &v.state @@ -3706,7 +3664,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListPoolsResp_Pool); i { case 0: return &v.state @@ -3718,7 +3676,7 @@ func file_mgmt_pool_proto_init() { return nil } } - file_mgmt_pool_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_mgmt_pool_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListContResp_Cont); i { case 0: return &v.state @@ -3731,7 +3689,7 @@ func file_mgmt_pool_proto_init() { } } } - file_mgmt_pool_proto_msgTypes[22].OneofWrappers = []interface{}{ + file_mgmt_pool_proto_msgTypes[21].OneofWrappers = []interface{}{ (*PoolProperty_Strval)(nil), (*PoolProperty_Numval)(nil), } @@ -3741,7 +3699,7 @@ func file_mgmt_pool_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_mgmt_pool_proto_rawDesc, NumEnums: 5, - NumMessages: 35, + NumMessages: 34, NumExtensions: 0, NumServices: 0, }, diff --git a/src/control/common/proto/mgmt/system.pb.go b/src/control/common/proto/mgmt/system.pb.go index 1857d9fc74b..5f3f293b8cb 100644 --- a/src/control/common/proto/mgmt/system.pb.go +++ b/src/control/common/proto/mgmt/system.pb.go @@ -1,5 +1,6 @@ // // (C) Copyright 2019-2024 Intel Corporation. +// (C) Copyright 2025 Hewlett Packard Enterprise Development LP // // SPDX-License-Identifier: BSD-2-Clause-Patent // @@ -567,20 +568,20 @@ func (x *SystemExcludeResp) GetResults() []*shared.RankResult { return nil } -// Results for system OSA calls on multiple pool-ranks. -type PoolRankResult struct { +// Results for operations on multiple pool-ranks. 
+type PoolRanksResult struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` // Status of the OSA operation on a specific pool - Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` // Error message if status indicates an error - PoolId string `protobuf:"bytes,3,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` // Label or uuid of pool - Ranks string `protobuf:"bytes,4,opt,name=ranks,proto3" json:"ranks,omitempty"` // Rank-set that has encountered this result + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` // Status of the OSA operation on a specific pool + Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` // Error message if status indicates an error + Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` // Label or uuid of pool + Ranks string `protobuf:"bytes,4,opt,name=ranks,proto3" json:"ranks,omitempty"` // rankset that has encountered this result } -func (x *PoolRankResult) Reset() { - *x = PoolRankResult{} +func (x *PoolRanksResult) Reset() { + *x = PoolRanksResult{} if protoimpl.UnsafeEnabled { mi := &file_mgmt_system_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -588,13 +589,13 @@ func (x *PoolRankResult) Reset() { } } -func (x *PoolRankResult) String() string { +func (x *PoolRanksResult) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PoolRankResult) ProtoMessage() {} +func (*PoolRanksResult) ProtoMessage() {} -func (x *PoolRankResult) ProtoReflect() protoreflect.Message { +func (x *PoolRanksResult) ProtoReflect() protoreflect.Message { mi := &file_mgmt_system_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -606,33 +607,33 @@ func (x *PoolRankResult) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PoolRankResult.ProtoReflect.Descriptor instead. -func (*PoolRankResult) Descriptor() ([]byte, []int) { +// Deprecated: Use PoolRanksResult.ProtoReflect.Descriptor instead. +func (*PoolRanksResult) Descriptor() ([]byte, []int) { return file_mgmt_system_proto_rawDescGZIP(), []int{7} } -func (x *PoolRankResult) GetStatus() int32 { +func (x *PoolRanksResult) GetStatus() int32 { if x != nil { return x.Status } return 0 } -func (x *PoolRankResult) GetMsg() string { +func (x *PoolRanksResult) GetMsg() string { if x != nil { return x.Msg } return "" } -func (x *PoolRankResult) GetPoolId() string { +func (x *PoolRanksResult) GetId() string { if x != nil { - return x.PoolId + return x.Id } return "" } -func (x *PoolRankResult) GetRanks() string { +func (x *PoolRanksResult) GetRanks() string { if x != nil { return x.Ranks } @@ -717,8 +718,8 @@ type SystemDrainResp struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Reint bool `protobuf:"varint,1,opt,name=reint,proto3" json:"reint,omitempty"` // Flag to indicate if results are for drain or reint. - Results []*PoolRankResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` // Results for drain or reint calls on pool-ranks. + Reint bool `protobuf:"varint,1,opt,name=reint,proto3" json:"reint,omitempty"` // Flag to indicate if results are for drain or reint. + Results []*PoolRanksResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` // Results for drain or reint calls on pool-ranks. 
} func (x *SystemDrainResp) Reset() { @@ -760,7 +761,7 @@ func (x *SystemDrainResp) GetReint() bool { return false } -func (x *SystemDrainResp) GetResults() []*PoolRankResult { +func (x *SystemDrainResp) GetResults() []*PoolRanksResult { if x != nil { return x.Results } @@ -1581,120 +1582,120 @@ var file_mgmt_system_proto_rawDesc = []byte{ 0x45, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x52, 0x61, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x69, 0x0a, 0x0e, 0x50, 0x6f, 0x6f, - 0x6c, 0x52, 0x61, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x14, - 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, - 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x64, 0x0a, 0x0e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x44, 0x72, - 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x6b, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x14, - 0x0a, 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x68, - 0x6f, 0x73, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x69, 0x6e, 0x74, 0x22, 0x57, 0x0a, 0x0f, 0x53, 0x79, - 0x73, 0x74, 0x65, 0x6d, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x14, 0x0a, - 0x05, 0x72, 0x65, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, - 0x69, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, - 0x52, 0x61, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x22, 0x6d, 0x0a, 0x0e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x68, 0x6f, - 0x73, 0x74, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, - 0x73, 0x6b, 0x22, 0xc4, 0x01, 0x0a, 0x0f, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, - 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x07, 
0x6d, 0x65, 0x6d, - 0x62, 0x65, 0x72, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x61, - 0x6e, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x62, 0x73, 0x65, 0x6e, - 0x74, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, - 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x62, 0x73, - 0x65, 0x6e, 0x74, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x64, 0x61, 0x74, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x70, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0e, 0x53, 0x79, 0x73, - 0x74, 0x65, 0x6d, 0x45, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, - 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x22, 0x3f, 0x0a, - 0x0f, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x45, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x2c, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x52, 0x61, 0x6e, 0x6b, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3e, - 0x0a, 0x10, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, - 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x73, 0x79, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0xbe, - 0x01, 0x0a, 0x11, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x12, 0x3f, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x79, 0x73, - 0x74, 0x65, 0x6d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, - 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x68, 0x0a, 0x0d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, - 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, - 0x12, 0x17, 0x0a, 0x07, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6f, 0x6c, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, - 0xab, 0x01, 0x0a, 0x10, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, 0x74, 0x41, 0x74, 0x74, - 0x72, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x46, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x67, 0x6d, - 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, 0x74, 0x41, 0x74, 0x74, 0x72, 0x52, - 0x65, 
0x71, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, - 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x38, 0x0a, - 0x10, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x47, 0x65, 0x74, 0x41, 0x74, 0x74, 0x72, 0x52, 0x65, - 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x73, 0x79, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x11, 0x53, 0x79, 0x73, 0x74, - 0x65, 0x6d, 0x47, 0x65, 0x74, 0x41, 0x74, 0x74, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x47, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x47, - 0x65, 0x74, 0x41, 0x74, 0x74, 0x72, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xab, 0x01, 0x0a, 0x10, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, - 0x53, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x46, 0x0a, 0x0a, - 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, - 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x38, 0x0a, 0x10, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x47, 0x65, 0x74, - 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x9b, 0x01, - 0x0a, 0x11, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, - 0x79, 0x73, 0x74, 0x65, 0x6d, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, - 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, - 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x3a, 0x5a, 0x38, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x61, 0x6f, 0x73, 0x2d, 0x73, - 0x74, 0x61, 0x63, 0x6b, 0x2f, 0x64, 0x61, 0x6f, 0x73, 0x2f, 0x73, 0x72, 0x63, 0x2f, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x6d, 0x67, 0x6d, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x61, 0x0a, 0x0f, 0x50, 0x6f, 0x6f, + 0x6c, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x22, 0x64, 0x0a, 0x0e, + 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x10, + 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x72, 0x65, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x69, + 0x6e, 0x74, 0x22, 0x58, 0x0a, 0x0f, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x44, 0x72, 0x61, 0x69, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x69, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x69, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x67, 0x6d, 0x74, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x52, 0x61, 0x6e, 0x6b, 0x73, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x6d, 0x0a, 0x0e, + 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x12, 0x10, + 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 
0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xc4, 0x01, 0x0a, 0x0f, + 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x12, + 0x2c, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, + 0x6d, 0x62, 0x65, 0x72, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x61, 0x6e, 0x6b, 0x73, 0x12, + 0x20, 0x0a, 0x0b, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x68, 0x6f, 0x73, 0x74, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x45, 0x72, 0x61, 0x73, + 0x65, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x22, 0x3f, 0x0a, 0x0f, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x45, 0x72, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x73, 0x68, 0x61, + 0x72, 0x65, 0x64, 0x2e, 0x52, 0x61, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3e, 0x0a, 0x10, 0x53, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, + 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x11, 0x53, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3f, 0x0a, + 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x43, 0x6c, 0x65, 0x61, + 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x68, + 0x0a, 0x0d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x6f, 0x6f, + 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6f, 0x6c, + 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xab, 0x01, 0x0a, 0x10, 0x53, 
0x79, 0x73, + 0x74, 0x65, 0x6d, 0x53, 0x65, 0x74, 0x41, 0x74, 0x74, 0x72, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, + 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, + 0x46, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x53, 0x65, 0x74, 0x41, 0x74, 0x74, 0x72, 0x52, 0x65, 0x71, 0x2e, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x38, 0x0a, 0x10, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x47, 0x65, 0x74, 0x41, 0x74, 0x74, 0x72, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x12, 0x0a, 0x04, + 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, + 0x22, 0x9b, 0x01, 0x0a, 0x11, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x47, 0x65, 0x74, 0x41, 0x74, + 0x74, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x67, 0x6d, + 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x47, 0x65, 0x74, 0x41, 0x74, 0x74, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, + 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xab, + 0x01, 0x0a, 0x10, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, + 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x67, 0x6d, 0x74, + 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, + 0x71, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, + 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x38, 0x0a, 0x10, + 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x71, + 0x12, 0x10, 
0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, + 0x79, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x11, 0x53, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x12, 0x47, 0x0a, 0x0a, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x6d, 0x67, 0x6d, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x47, 0x65, + 0x74, 0x50, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x64, 0x61, 0x6f, 0x73, 0x2d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2f, 0x64, 0x61, + 0x6f, 0x73, 0x2f, 0x73, 0x72, 0x63, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x67, 0x6d, 0x74, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1718,7 +1719,7 @@ var file_mgmt_system_proto_goTypes = []interface{}{ (*SystemStartResp)(nil), // 4: mgmt.SystemStartResp (*SystemExcludeReq)(nil), // 5: mgmt.SystemExcludeReq (*SystemExcludeResp)(nil), // 6: mgmt.SystemExcludeResp - (*PoolRankResult)(nil), // 7: mgmt.PoolRankResult + (*PoolRanksResult)(nil), // 7: mgmt.PoolRanksResult (*SystemDrainReq)(nil), // 8: mgmt.SystemDrainReq (*SystemDrainResp)(nil), // 9: mgmt.SystemDrainResp (*SystemQueryReq)(nil), // 10: mgmt.SystemQueryReq @@ -1744,7 +1745,7 @@ var file_mgmt_system_proto_depIdxs = []int32{ 27, // 0: mgmt.SystemStopResp.results:type_name -> shared.RankResult 27, // 1: mgmt.SystemStartResp.results:type_name -> shared.RankResult 27, // 2: mgmt.SystemExcludeResp.results:type_name -> shared.RankResult - 7, // 3: mgmt.SystemDrainResp.results:type_name -> mgmt.PoolRankResult + 7, // 3: mgmt.SystemDrainResp.results:type_name -> mgmt.PoolRanksResult 0, // 4: mgmt.SystemQueryResp.members:type_name -> mgmt.SystemMember 27, // 5: mgmt.SystemEraseResp.results:type_name -> shared.RankResult 22, // 6: mgmt.SystemCleanupResp.results:type_name -> mgmt.SystemCleanupResp.CleanupResult @@ -1850,7 +1851,7 @@ func file_mgmt_system_proto_init() { } } file_mgmt_system_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PoolRankResult); i { + switch v := v.(*PoolRanksResult); i { case 0: return &v.state case 1: diff --git a/src/control/lib/control/pool.go b/src/control/lib/control/pool.go index 15dfb639922..50ad757fa66 100644 --- a/src/control/lib/control/pool.go +++ b/src/control/lib/control/pool.go @@ -1,5 +1,5 @@ // -// (C) Copyright 2020-2024 Intel Corporation. 
+// (C) Copyright 2025 Hewlett Packard Enterprise Development LP
 //
 // SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -743,11 +743,99 @@ func PoolGetProp(ctx context.Context, rpcClient UnaryInvoker, req *PoolGetPropRe
 	return resp, nil
 }
 
+// PoolRanksResult describes the result of an OSA operation on a pool's ranks.
+type PoolRanksResult struct {
+	Status int32  `json:"status"` // Status returned from a specific OSA dRPC call
+	Msg    string `json:"msg"`    // Error message if Status is not Success
+	ID     string `json:"id"`     // Unique identifier for pool
+	Ranks  string `json:"ranks"`  // RankSet of ranks that should be operated on
+}
+
+// PoolRanksResults is an alias for a PoolRanksResult slice.
+type PoolRanksResults []*PoolRanksResult
+
+// PoolRanksReq struct contains request for operation on multiple pool-ranks.
+type PoolRanksReq struct {
+	poolRequest
+	ID        string          `json:"id"`
+	Ranks     []ranklist.Rank `json:"ranks"`
+	TargetIdx []uint32        `json:"target_idx"`
+}
+
+// PoolRanksResp struct contains response from operation on multiple pool-ranks.
+type PoolRanksResp struct {
+	Status         int32           `json:"status"`
+	ID             string          `json:"id"`
+	FailedRank     ranklist.Rank   `json:"failed_rank"`
+	SuccessRanks   []ranklist.Rank `json:"success_ranks"`
+	InitialRankset string          `json:"initial_rankset"`
+}
+
+func (resp *PoolRanksResp) Errors() error {
+	if resp.Status == int32(daos.Success) {
+		return nil
+	}
+	err := daos.Status(resp.Status)
+
+	if resp.FailedRank == ranklist.NilRank {
+		return errors.Wrapf(err, "pool %s ranks %s", resp.ID, resp.InitialRankset)
+	}
+
+	return errors.Wrapf(err, "pool %s rank %d", resp.ID, resp.FailedRank)
+}
+
+// GetResults returns a slice of results from the response and input error.
+func (resp *PoolRanksResp) GetResults(errIn error) ([]*PoolRanksResult, error) {
+	results := []*PoolRanksResult{}
+
+	if errIn != nil {
+		// Return root cause so rank results can be aggregated if required.
+		msgErr := errIn.Error()
+		if f, ok := errors.Cause(errIn).(*fault.Fault); ok {
+			msgErr = f.Error()
+		}
+		results = append(results, &PoolRanksResult{
+			ID:     resp.ID,
+			Ranks:  resp.InitialRankset,
+			Status: int32(daos.MiscError),
+			Msg:    msgErr,
+		})
+
+		return results, nil
+	}
+
+	if resp.Status != int32(daos.Success) {
+		if resp.FailedRank == ranklist.NilRank {
+			return nil, errors.New("invalid rank returned with non-zero status")
+		}
+		// Add one result for failed rank.
+		results = append(results, &PoolRanksResult{
+			ID:     resp.ID,
+			Ranks:  fmt.Sprintf("%d", resp.FailedRank),
+			Status: resp.Status,
+			Msg:    daos.Status(resp.Status).Error(),
+		})
+	} else if len(resp.SuccessRanks) == 0 {
+		// Expected that at least one result will be generated for each pool.
+		return nil, errors.Errorf("no ranks were operated on for pool %s", resp.ID)
+	}
+
+	if len(resp.SuccessRanks) != 0 {
+		rsSuccess := ranklist.RankSetFromRanks(resp.SuccessRanks)
+		results = append(results, &PoolRanksResult{
+			ID:    resp.ID,
+			Ranks: rsSuccess.String(),
+		})
+	}
+
+	return results, nil
+}
+
 // PoolExcludeReq struct contains request
 type PoolExcludeReq struct {
 	poolRequest
 	ID        string
-	Rank      ranklist.Rank
+	Ranks     []ranklist.Rank
 	TargetIdx []uint32
 }
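
[Editor's note: the following is an illustrative sketch, not part of the patch. It shows how a caller might drive the multi-rank drain API added above. The pool label "tank" and the drainRanks wrapper are assumptions for the example; PoolDrain, PoolRanksReq, UnaryInvoker and Errors are the names introduced or reused by this change.]

package example // sketch only

import (
	"context"

	"github.com/daos-stack/daos/src/control/lib/control"
	"github.com/daos-stack/daos/src/control/lib/ranklist"
)

// drainRanks drains several ranks of one pool in a single request.
func drainRanks(ctx context.Context, rpcClient control.UnaryInvoker) error {
	req := &control.PoolRanksReq{
		ID:    "tank", // pool label or UUID
		Ranks: []ranklist.Rank{0, 1, 2, 3},
	}

	resp, err := control.PoolDrain(ctx, rpcClient, req)
	if err != nil {
		return err // transport- or MS-level failure
	}

	// Errors() folds a non-zero Status into a wrapped daos.Status error,
	// naming either the failed rank or the whole requested rank-set.
	return resp.Errors()
}

@@ -757,12 +845,11 @@
 // This should automatically start the rebuildiing process.
 // Returns an error (including any DER code from DAOS).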
 func PoolExclude(ctx context.Context, rpcClient UnaryInvoker, req *PoolExcludeReq) error {
-	pbReq := &mgmtpb.PoolExcludeReq{
-		Sys:       req.getSystem(rpcClient),
-		Id:        req.ID,
-		Rank:      req.Rank.Uint32(),
-		TargetIdx: req.TargetIdx,
+	pbReq := new(mgmtpb.PoolExcludeReq)
+	if err := convert.Types(req, pbReq); err != nil {
+		return errors.Wrapf(err, "convert %T->%T", req, pbReq)
 	}
+
 	req.setRPC(func(ctx context.Context, conn *grpc.ClientConn) (proto.Message, error) {
 		return mgmtpb.NewMgmtSvcClient(conn).PoolExclude(ctx, pbReq)
 	})
@@ -776,25 +863,24 @@
 	return errors.Wrap(ur.getMSError(), "pool exclude failed")
 }
 
-// PoolDrainReq struct contains request
-type PoolDrainReq struct {
-	poolRequest
-	ID        string
-	Rank      ranklist.Rank
-	TargetIdx []uint32
-}
-
-// DrainResp has no other parameters other than success/failure for now.
+//// PoolDrainReq struct contains a request.
+//type PoolDrainReq struct {
+//	PoolRanksReq
+//}
+//
+//// PoolDrainResp struct contains a response.
+//type PoolDrainResp struct {
+//	PoolRanksResp
+//}
 
 // PoolDrain will set a pool target for a specific rank in to the drain state which should
 // automatically start the rebuildiing process. Returns an error (including any DER code from DAOS).
-func PoolDrain(ctx context.Context, rpcClient UnaryInvoker, req *PoolDrainReq) error {
-	pbReq := &mgmtpb.PoolDrainReq{
-		Sys:       req.getSystem(rpcClient),
-		Id:        req.ID,
-		Rank:      req.Rank.Uint32(),
-		TargetIdx: req.TargetIdx,
+func PoolDrain(ctx context.Context, rpcClient UnaryInvoker, req *PoolRanksReq) (*PoolRanksResp, error) {
+	pbReq := new(mgmtpb.PoolDrainReq)
+	if err := convert.Types(req, pbReq); err != nil {
+		return nil, errors.Wrapf(err, "convert %T->%T", req, pbReq)
 	}
+
 	req.setRPC(func(ctx context.Context, conn *grpc.ClientConn) (proto.Message, error) {
 		return mgmtpb.NewMgmtSvcClient(conn).PoolDrain(ctx, pbReq)
 	})
@@ -802,19 +888,18 @@
 	rpcClient.Debugf("Drain DAOS pool target request: %s\n", pbUtil.Debug(pbReq))
 	ur, err := rpcClient.InvokeUnaryRPC(ctx, req)
 	if err != nil {
-		return err
+		return nil, err
+	}
+	if msErr := ur.getMSError(); msErr != nil {
+		return nil, errors.Wrap(msErr, "pool drain failed")
 	}
 
-	return errors.Wrap(ur.getMSError(), "pool drain failed")
-}
-
-func genPoolExtendRequest(in *PoolExtendReq) (out *mgmtpb.PoolExtendReq, err error) {
-	out = new(mgmtpb.PoolExtendReq)
-	if err = convert.Types(in, out); err != nil {
+	resp := new(PoolRanksResp)
+	if err := convertMSResponse(ur, resp); err != nil {
 		return nil, err
 	}
 
-	return
+	return resp, nil
 }
 
 // PoolExtendReq struct contains request
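
[Editor's note: another illustrative sketch, not part of the patch. It traces how a partial failure surfaces through the new response type, mirroring the "partial failure" case added to TestControl_PoolDrain further down. The "tank" label is hypothetical; imports are as in the previous sketch.]

// Drain of ranks 1-3 where rank 2 fails: the response reports the succeeded
// ranks and the first failed rank (rank 3 appears in neither set here).
resp := &control.PoolRanksResp{
	ID:           "tank",
	Status:       -1, // status reported for the failed rank
	FailedRank:   2,
	SuccessRanks: []ranklist.Rank{1},
}

// GetResults flattens the response into one entry per outcome:
//   results[0]: {ID: "tank", Ranks: "2", Status: -1, Msg: <status text>}
//   results[1]: {ID: "tank", Ranks: "1", Status: 0}
results, err := resp.GetResults(nil)

@@ -828,9 +913,9 @@ type PoolExtendReq struct {
 // PoolExtend will add new ranks to the existing pool.
 // This should automatically start the rebalance process.
 // Returns an error (including any DER code from DAOS).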
func PoolExtend(ctx context.Context, rpcClient UnaryInvoker, req *PoolExtendReq) error { - pbReq, err := genPoolExtendRequest(req) - if err != nil { - return errors.Wrap(err, "failed to generate PoolExtend request") + pbReq := new(mgmtpb.PoolExtendReq) + if err := convert.Types(req, pbReq); err != nil { + return errors.Wrapf(err, "convert %T->%T", req, pbReq) } pbReq.Sys = req.getSystem(rpcClient) @@ -849,21 +934,16 @@ func PoolExtend(ctx context.Context, rpcClient UnaryInvoker, req *PoolExtendReq) // PoolReintegrateReq struct contains request type PoolReintegrateReq struct { - poolRequest - ID string - Rank ranklist.Rank - TargetIdx []uint32 + PoolRanksReq } // PoolReintegrate will set a pool target for a specific rank back to up. // This should automatically start the reintegration process. // Returns an error (including any DER code from DAOS). func PoolReintegrate(ctx context.Context, rpcClient UnaryInvoker, req *PoolReintegrateReq) error { - pbReq := &mgmtpb.PoolReintReq{ - Sys: req.getSystem(rpcClient), - Id: req.ID, - Rank: req.Rank.Uint32(), - TargetIdx: req.TargetIdx, + pbReq := new(mgmtpb.PoolReintReq) + if err := convert.Types(req, pbReq); err != nil { + return errors.Wrapf(err, "convert %T->%T", req, pbReq) } req.setRPC(func(ctx context.Context, conn *grpc.ClientConn) (proto.Message, error) { diff --git a/src/control/lib/control/pool_test.go b/src/control/lib/control/pool_test.go index 8e019bee0ba..5be4835d049 100644 --- a/src/control/lib/control/pool_test.go +++ b/src/control/lib/control/pool_test.go @@ -1,5 +1,6 @@ // // (C) Copyright 2020-2024 Intel Corporation. +// (C) Copyright 2025 Hewlett Packard Enterprise Development LP // // SPDX-License-Identifier: BSD-2-Clause-Patent // @@ -236,14 +237,15 @@ func TestControl_PoolUpgrade(t *testing.T) { func TestControl_PoolDrain(t *testing.T) { for name, tc := range map[string]struct { - mic *MockInvokerConfig - req *PoolDrainReq - expErr error + mic *MockInvokerConfig + req *PoolRanksReq + expErr error + expResp *PoolRanksResp }{ "local failure": { - req: &PoolDrainReq{ + req: &PoolRanksReq{ ID: test.MockUUID(), - Rank: 2, + Ranks: []ranklist.Rank{2}, TargetIdx: []uint32{1, 2, 3}, }, mic: &MockInvokerConfig{ @@ -252,9 +254,9 @@ func TestControl_PoolDrain(t *testing.T) { expErr: errors.New("local failed"), }, "remote failure": { - req: &PoolDrainReq{ + req: &PoolRanksReq{ ID: test.MockUUID(), - Rank: 2, + Ranks: []ranklist.Rank{2}, TargetIdx: []uint32{1, 2, 3}, }, mic: &MockInvokerConfig{ @@ -263,16 +265,42 @@ func TestControl_PoolDrain(t *testing.T) { expErr: errors.New("remote failed"), }, "success": { - req: &PoolDrainReq{ + req: &PoolRanksReq{ ID: test.MockUUID(), - Rank: 2, + Ranks: []ranklist.Rank{1, 2, 3}, TargetIdx: []uint32{1, 2, 3}, }, mic: &MockInvokerConfig{ UnaryResponse: MockMSResponse("host1", nil, - &mgmtpb.PoolDrainResp{}, + &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{1, 2, 3}, + }, ), }, + expResp: &PoolRanksResp{ + SuccessRanks: []ranklist.Rank{1, 2, 3}, + }, + }, + "partial failure": { + req: &PoolRanksReq{ + ID: test.MockUUID(), + Ranks: []ranklist.Rank{1, 2, 3}, + TargetIdx: []uint32{1, 2, 3}, + }, + mic: &MockInvokerConfig{ + UnaryResponse: MockMSResponse("host1", nil, + &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{1}, + FailedRank: 2, + Status: -1, + }, + ), + }, + expResp: &PoolRanksResp{ + SuccessRanks: []ranklist.Rank{1}, + FailedRank: ranklist.Rank(2), + Status: -1, + }, }, } { t.Run(name, func(t *testing.T) { @@ -287,11 +315,16 @@ func TestControl_PoolDrain(t *testing.T) { ctx := test.Context(t) 
mi := NewMockInvoker(log, mic) - gotErr := PoolDrain(ctx, mi, tc.req) + resp, gotErr := PoolDrain(ctx, mi, tc.req) test.CmpErr(t, tc.expErr, gotErr) if tc.expErr != nil { return } + + cmpOpt := cmpopts.IgnoreUnexported(mgmtpb.PoolRanksResp{}) + if diff := cmp.Diff(tc.expResp, resp, cmpOpt); diff != "" { + t.Fatalf("Unexpected response (-want, +got):\n%s\n", diff) + } }) } } diff --git a/src/control/lib/control/system.go b/src/control/lib/control/system.go index 73601696f6b..d0d54ba3e14 100644 --- a/src/control/lib/control/system.go +++ b/src/control/lib/control/system.go @@ -567,17 +567,6 @@ func SystemExclude(ctx context.Context, rpcClient UnaryInvoker, req *SystemExclu return resp, convertMSResponse(ur, resp) } -// PoolRankResult describes the result of an OSA operation on a pool's ranks. -type PoolRankResult struct { - Status int32 `json:"status"` // Status returned from a specific OSA dRPC call - Msg string `json:"msg"` // Error message if Status is not Success - PoolID string `json:"pool_id"` // Unique identifier for pool - Ranks string `json:"ranks"` // RankSet of ranks that should be operated on -} - -// PoolRankResults is an alias for a PoolRankResult slice. -type PoolRankResults []*PoolRankResult - // SystemDrainReq contains the inputs for the system drain request. type SystemDrainReq struct { unaryRequest @@ -591,7 +580,7 @@ type SystemDrainReq struct { // in the response so decoding is not required. type SystemDrainResp struct { sysResponse `json:"-"` - Results PoolRankResults `json:"results"` + Results PoolRanksResults `json:"results"` } // Errors returns a single error combining all error messages associated with pool-rank results. @@ -600,7 +589,7 @@ func (resp *SystemDrainResp) Errors() (err error) { for _, r := range resp.Results { if r.Status != int32(daos.Success) { err = concatErrs(err, - errors.Errorf("pool %s ranks %s: %s", r.PoolID, r.Ranks, r.Msg)) + errors.Errorf("pool %s ranks %s: %s", r.ID, r.Ranks, r.Msg)) } } return diff --git a/src/control/lib/control/system_test.go b/src/control/lib/control/system_test.go index 15c9578769f..54303a27e4d 100644 --- a/src/control/lib/control/system_test.go +++ b/src/control/lib/control/system_test.go @@ -1090,40 +1090,44 @@ func TestControl_SystemDrain(t *testing.T) { "dual pools; single rank": { req: new(SystemDrainReq), uResp: MockMSResponse("10.0.0.1:10001", nil, &mgmtpb.SystemDrainResp{ - Results: []*mgmtpb.PoolRankResult{ - {PoolId: test.MockUUID(1), Ranks: "1"}, - {PoolId: test.MockUUID(2), Ranks: "1"}, + Results: []*mgmtpb.PoolRanksResult{ + {Id: test.MockUUID(1), Ranks: "1"}, + {Id: test.MockUUID(2), Ranks: "1"}, }, }), expResp: &SystemDrainResp{ - Results: []*PoolRankResult{ - {PoolID: test.MockUUID(1), Ranks: "1"}, - {PoolID: test.MockUUID(2), Ranks: "1"}, + Results: []*PoolRanksResult{ + {ID: test.MockUUID(1), Ranks: "1"}, + {ID: test.MockUUID(2), Ranks: "1"}, }, }, }, - "dual pools; single rank; with errors": { + "dual pools; multiple ranks; with errors": { req: new(SystemDrainReq), uResp: MockMSResponse("10.0.0.1:10001", nil, &mgmtpb.SystemDrainResp{ - Results: []*mgmtpb.PoolRankResult{ + Results: []*mgmtpb.PoolRanksResult{ + {Id: test.MockUUID(1), Ranks: "0"}, { - PoolId: test.MockUUID(1), Ranks: "1", + Id: test.MockUUID(1), Ranks: "1", Status: -1, Msg: "fail1", }, + {Id: test.MockUUID(2), Ranks: "0"}, { - PoolId: test.MockUUID(2), Ranks: "1", + Id: test.MockUUID(2), Ranks: "1", Status: -1, Msg: "fail2", }, }, }), expResp: &SystemDrainResp{ - Results: []*PoolRankResult{ + Results: []*PoolRanksResult{ + {ID: 
test.MockUUID(1), Ranks: "0"}, { - PoolID: test.MockUUID(1), Ranks: "1", + ID: test.MockUUID(1), Ranks: "1", Status: -1, Msg: "fail1", }, + {ID: test.MockUUID(2), Ranks: "0"}, { - PoolID: test.MockUUID(2), Ranks: "1", + ID: test.MockUUID(2), Ranks: "1", Status: -1, Msg: "fail2", }, }, diff --git a/src/control/server/faults.go b/src/control/server/faults.go index e32d2e6c728..721d516a06a 100644 --- a/src/control/server/faults.go +++ b/src/control/server/faults.go @@ -1,5 +1,6 @@ // // (C) Copyright 2020-2024 Intel Corporation. +// (C) Copyright 2025 Hewlett Packard Enterprise Development LP // // SPDX-License-Identifier: BSD-2-Clause-Patent // @@ -110,8 +111,8 @@ func FaultPoolInvalidRanks(invalid []ranklist.Rank) *fault.Fault { return serverFault( code.ServerPoolInvalidRanks, - fmt.Sprintf("pool request contains invalid ranks: %s", strings.Join(rs, ",")), - "retry the request with a valid set of ranks", + fmt.Sprintf("pool request contains ranks with invalid state: %s", strings.Join(rs, ",")), + "retry the request with a set of ranks that have valid states", ) } diff --git a/src/control/server/mgmt_pool.go b/src/control/server/mgmt_pool.go index c4941c20606..94d27511c35 100644 --- a/src/control/server/mgmt_pool.go +++ b/src/control/server/mgmt_pool.go @@ -851,7 +851,7 @@ func (svc *mgmtSvc) PoolExclude(ctx context.Context, req *mgmtpb.PoolExcludeReq) } // PoolDrain implements the method defined for the Management Service. -func (svc *mgmtSvc) PoolDrain(ctx context.Context, req *mgmtpb.PoolDrainReq) (*mgmtpb.PoolDrainResp, error) { +func (svc *mgmtSvc) PoolDrain(ctx context.Context, req *mgmtpb.PoolDrainReq) (*mgmtpb.PoolRanksResp, error) { if err := svc.checkLeaderRequest(req); err != nil { return nil, err } @@ -861,7 +861,7 @@ func (svc *mgmtSvc) PoolDrain(ctx context.Context, req *mgmtpb.PoolDrainReq) (*m return nil, err } - resp := &mgmtpb.PoolDrainResp{} + resp := &mgmtpb.PoolRanksResp{} if err := svc.unmarshalPB(dResp.Body, resp); err != nil { return nil, err } @@ -907,30 +907,34 @@ func (svc *mgmtSvc) PoolExtend(ctx context.Context, req *mgmtpb.PoolExtendReq) ( } // PoolReintegrate implements the method defined for the Management Service. -func (svc *mgmtSvc) PoolReintegrate(ctx context.Context, req *mgmtpb.PoolReintReq) (*mgmtpb.PoolReintResp, error) { +func (svc *mgmtSvc) PoolReintegrate(ctx context.Context, req *mgmtpb.PoolReintReq) (*mgmtpb.PoolRanksResp, error) { if err := svc.checkLeaderRequest(req); err != nil { return nil, err } - // Look up the pool service record to find the storage allocations - // used at creation. - ps, err := svc.getPoolService(req.GetId()) - if err != nil { - return nil, err - } + // Refuse call if any requested rank is not in a valid state. + invalid := []ranklist.Rank{} + for _, rank := range req.Ranks { + r := ranklist.Rank(rank) - r := ranklist.Rank(req.Rank) + m, err := svc.membership.Get(r) + if err != nil { + return nil, err + } - m, err := svc.membership.Get(r) - if err != nil { - return nil, err + if m.State&system.AvailableMemberFilter == 0 { + invalid = append(invalid, r) + } } - - if m.State&system.AvailableMemberFilter == 0 { - invalid := []ranklist.Rank{r} + if len(invalid) != 0 { return nil, FaultPoolInvalidRanks(invalid) } + // Look up the pool service record to find the storage allocations used at creation. 
+ ps, err := svc.getPoolService(req.GetId()) + if err != nil { + return nil, err + } req.TierBytes = ps.Storage.PerRankTierStorage req.MemRatio = ps.Storage.MemRatio @@ -939,7 +943,7 @@ func (svc *mgmtSvc) PoolReintegrate(ctx context.Context, req *mgmtpb.PoolReintRe return nil, err } - resp := &mgmtpb.PoolReintResp{} + resp := &mgmtpb.PoolRanksResp{} if err := svc.unmarshalPB(dResp.Body, resp); err != nil { return nil, err } diff --git a/src/control/server/mgmt_pool_test.go b/src/control/server/mgmt_pool_test.go index a16955830f6..6979a5b9202 100644 --- a/src/control/server/mgmt_pool_test.go +++ b/src/control/server/mgmt_pool_test.go @@ -1495,8 +1495,9 @@ func TestServer_MgmtSvc_PoolReintegrate(t *testing.T) { nilReq bool getMockDrpc func(error) *mockDrpcClient mgmtSvc *mgmtSvc + members system.Members reqIn *mgmtpb.PoolReintReq - drpcResp *mgmtpb.PoolReintResp + drpcResp *mgmtpb.PoolRanksResp expDrpcReq *mgmtpb.PoolReintReq expErr error }{ @@ -1529,17 +1530,26 @@ func TestServer_MgmtSvc_PoolReintegrate(t *testing.T) { expErr: errors.New("unmarshal"), }, "missing uuid": { - reqIn: &mgmtpb.PoolReintReq{Rank: 1}, + reqIn: &mgmtpb.PoolReintReq{Ranks: []uint32{1}}, expErr: errors.New("empty pool id"), }, - "successfully extended": { - drpcResp: &mgmtpb.PoolReintResp{}, + "invalid rank": { + members: system.Members{ + mockMember(t, 1, 2, "excluded"), + mockMember(t, 2, 2, "joined"), + mockMember(t, 3, 1, "joined"), + }, + expErr: errors.New("ranks with invalid state: 1"), + }, + "successfully reintegrated": { + drpcResp: &mgmtpb.PoolRanksResp{}, // Expect that the last request contains updated params from ps entry. expDrpcReq: &mgmtpb.PoolReintReq{ Sys: build.DefaultSystemName, SvcRanks: mockSvcRanks, Id: mockUUID, - Rank: 1, + Ranks: []uint32{1, 2, 3}, + TargetIdx: []uint32{1, 2}, TierBytes: mockTierBytes, MemRatio: mockMemRatio, }, @@ -1550,10 +1560,27 @@ func TestServer_MgmtSvc_PoolReintegrate(t *testing.T) { defer test.ShowBufferOnFailure(t, buf) if tc.reqIn == nil && !tc.nilReq { - tc.reqIn = &mgmtpb.PoolReintReq{Id: mockUUID, Rank: 1} + tc.reqIn = &mgmtpb.PoolReintReq{ + Id: mockUUID, + Ranks: []uint32{1, 2, 3}, + TargetIdx: []uint32{1, 2}, + } + } + if tc.members == nil { + tc.members = system.Members{ + mockMember(t, 1, 2, "joined"), + mockMember(t, 2, 2, "joined"), + mockMember(t, 3, 1, "joined"), + } } if tc.mgmtSvc == nil { - tc.mgmtSvc = newTestMgmtSvc(t, log) + tc.mgmtSvc = mgmtSystemTestSetup(t, log, tc.members, nil) + } else { + for _, m := range tc.members { + if _, err := tc.mgmtSvc.membership.Add(m); err != nil { + t.Fatal(err) + } + } } addTestPoolService(t, tc.mgmtSvc.sysdb, mockPoolService) @@ -1569,12 +1596,6 @@ func TestServer_MgmtSvc_PoolReintegrate(t *testing.T) { tc.reqIn.Sys = build.DefaultSystemName } - _, err := tc.mgmtSvc.membership.Add(system.MockMember(t, 1, - system.MemberStateJoined)) - if err != nil { - t.Fatal(err) - } - gotResp, gotErr := tc.mgmtSvc.PoolReintegrate(test.Context(t), tc.reqIn) test.CmpErr(t, tc.expErr, gotErr) if tc.expErr != nil { @@ -1612,11 +1633,13 @@ func TestServer_MgmtSvc_PoolExclude(t *testing.T) { for name, tc := range map[string]struct { mgmtSvc *mgmtSvc setupMockDrpc func(_ *mgmtSvc, _ error) + nilReq bool req *mgmtpb.PoolExcludeReq expResp *mgmtpb.PoolExcludeResp expErr error }{ "nil request": { + nilReq: true, expErr: errors.New("nil request"), }, "wrong system": { @@ -1625,24 +1648,19 @@ func TestServer_MgmtSvc_PoolExclude(t *testing.T) { }, "missing superblock": { mgmtSvc: missingSB, - req: &mgmtpb.PoolExcludeReq{Id: mockUUID, 
Rank: 2, TargetIdx: []uint32{1, 2}}, expErr: errNotReplica, }, "not MS replica": { mgmtSvc: notAP, - req: &mgmtpb.PoolExcludeReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, expErr: errNotReplica, }, "dRPC send fails": { - req: &mgmtpb.PoolExcludeReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, expErr: errors.New("send failure"), }, "zero target count": { - req: &mgmtpb.PoolExcludeReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, expErr: errors.New("zero target count"), }, "garbage resp": { - req: &mgmtpb.PoolExcludeReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, setupMockDrpc: func(svc *mgmtSvc, err error) { // dRPC call returns junk in the message body badBytes := makeBadBytes(42) @@ -1652,11 +1670,10 @@ func TestServer_MgmtSvc_PoolExclude(t *testing.T) { expErr: errors.New("unmarshal"), }, "missing uuid": { - req: &mgmtpb.PoolExcludeReq{Rank: 2, TargetIdx: []uint32{1, 2}}, + req: &mgmtpb.PoolExcludeReq{Ranks: []uint32{2}, TargetIdx: []uint32{1, 2}}, expErr: errors.New("empty pool id"), }, "successful drained": { - req: &mgmtpb.PoolExcludeReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, expResp: &mgmtpb.PoolExcludeResp{}, }, } { @@ -1664,6 +1681,14 @@ func TestServer_MgmtSvc_PoolExclude(t *testing.T) { buf.Reset() defer test.ShowBufferOnFailure(t, buf) + if tc.req == nil && tc.nilReq == false { + tc.req = &mgmtpb.PoolExcludeReq{ + Id: mockUUID, + Ranks: []uint32{1, 2, 3}, + TargetIdx: []uint32{1, 2}, + } + } + if tc.mgmtSvc == nil { tc.mgmtSvc = newTestMgmtSvc(t, log) } @@ -1708,11 +1733,13 @@ func TestServer_MgmtSvc_PoolDrain(t *testing.T) { for name, tc := range map[string]struct { mgmtSvc *mgmtSvc setupMockDrpc func(_ *mgmtSvc, _ error) + nilReq bool req *mgmtpb.PoolDrainReq - expResp *mgmtpb.PoolDrainResp + expResp *mgmtpb.PoolRanksResp expErr error }{ "nil request": { + nilReq: true, expErr: errors.New("nil request"), }, "wrong system": { @@ -1721,24 +1748,19 @@ func TestServer_MgmtSvc_PoolDrain(t *testing.T) { }, "missing superblock": { mgmtSvc: missingSB, - req: &mgmtpb.PoolDrainReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, expErr: errNotReplica, }, "not MS replica": { mgmtSvc: notAP, - req: &mgmtpb.PoolDrainReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, expErr: errNotReplica, }, "dRPC send fails": { - req: &mgmtpb.PoolDrainReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, expErr: errors.New("send failure"), }, "zero target count": { - req: &mgmtpb.PoolDrainReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, expErr: errors.New("zero target count"), }, "garbage resp": { - req: &mgmtpb.PoolDrainReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, setupMockDrpc: func(svc *mgmtSvc, err error) { // dRPC call returns junk in the message body badBytes := makeBadBytes(42) @@ -1748,18 +1770,25 @@ func TestServer_MgmtSvc_PoolDrain(t *testing.T) { expErr: errors.New("unmarshal"), }, "missing uuid": { - req: &mgmtpb.PoolDrainReq{Rank: 2, TargetIdx: []uint32{1, 2}}, + req: &mgmtpb.PoolDrainReq{Ranks: []uint32{2}, TargetIdx: []uint32{1, 2}}, expErr: errors.New("empty pool id"), }, "successful drained": { - req: &mgmtpb.PoolDrainReq{Id: mockUUID, Rank: 2, TargetIdx: []uint32{1, 2}}, - expResp: &mgmtpb.PoolDrainResp{}, + expResp: &mgmtpb.PoolRanksResp{}, }, } { t.Run(name, func(t *testing.T) { buf.Reset() defer test.ShowBufferOnFailure(t, buf) + if tc.req == nil && tc.nilReq == false { + tc.req = &mgmtpb.PoolDrainReq{ + Id: mockUUID, + Ranks: []uint32{1, 2, 3}, + TargetIdx: []uint32{1, 2}, + } + } + if tc.mgmtSvc == nil { tc.mgmtSvc = 
newTestMgmtSvc(t, log) } diff --git a/src/control/server/mgmt_system.go b/src/control/server/mgmt_system.go index 4c8c0ef4c11..79b66919ef9 100644 --- a/src/control/server/mgmt_system.go +++ b/src/control/server/mgmt_system.go @@ -32,8 +32,6 @@ import ( sharedpb "github.com/daos-stack/daos/src/control/common/proto/shared" "github.com/daos-stack/daos/src/control/drpc" "github.com/daos-stack/daos/src/control/events" - "github.com/daos-stack/daos/src/control/fault" - "github.com/daos-stack/daos/src/control/fault/code" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/daos" "github.com/daos-stack/daos/src/control/lib/hostlist" @@ -50,8 +48,6 @@ const ( groupUpdatePauseProp = "group_update_paused" domainLabelsProp = "domain_labels" domainLabelsSep = "=" // invalid in a label name - - msgInvalidRank = "invalid ranks: check rank status" ) // GetAttachInfo handles a request to retrieve a map of ranks to fabric URIs, in addition @@ -1147,44 +1143,13 @@ func (svc *mgmtSvc) getPoolsRanks(ranks *ranklist.RankSet) ([]string, poolRanksM return poolIDs, poolRanks, nil } -func resultsFromPoolRanks(id string, succeeded *ranklist.RankSet, failed poolRanksMap) []*mgmtpb.PoolRankResult { - results := []*mgmtpb.PoolRankResult{} - - // Single result generated for all ranks operated on successfully. - if succeeded.Count() > 0 { - results = append(results, &mgmtpb.PoolRankResult{ - PoolId: id, - Ranks: succeeded.String(), - }) - } - - var msgs []string - for msg := range failed { - msgs = append(msgs, msg) - } - sort.Strings(msgs) - - // Result generated for each failure message rank-group. - for _, msg := range msgs { - results = append(results, &mgmtpb.PoolRankResult{ - // Status already included in error message. - Status: int32(daos.MiscError), - Msg: msg, - PoolId: id, - Ranks: failed[msg].String(), - }) - } - - return results -} - type poolRanksMap map[string]*ranklist.RankSet -type poolRankOpSig func(*mgmtSvc, context.Context, string, string, ranklist.Rank) (int32, error) +type poolRankOpSig func(*mgmtSvc, context.Context, *control.PoolRanksReq) (*control.PoolRanksResp, error) // Generate operation results by iterating through pool's ranks and calling supplied fn on each. -func (svc *mgmtSvc) getPoolRankResults(ctx context.Context, sys string, poolIDs []string, poolRanks poolRanksMap, drpcCall poolRankOpSig) ([]*mgmtpb.PoolRankResult, error) { - results := []*mgmtpb.PoolRankResult{} +func (svc *mgmtSvc) getPoolRankResults(ctx context.Context, sys string, poolIDs []string, poolRanks poolRanksMap, drpcCall poolRankOpSig) ([]*mgmtpb.PoolRanksResult, error) { + results := []*mgmtpb.PoolRanksResult{} for _, id := range poolIDs { rs := poolRanks[id] @@ -1193,81 +1158,84 @@ func (svc *mgmtSvc) getPoolRankResults(ctx context.Context, sys string, poolIDs } svc.log.Tracef("operating on ranks %v on pool %s", rs, id) - succeeded := ranklist.MustCreateRankSet("") - failed := make(poolRanksMap) - - // TODO DAOS-6611: Operate on multiple pool-ranks per call when - // drpc.MethodPool{Drain|Reint} API supports it. - for _, r := range rs.Ranks() { - status, err := drpcCall(svc, ctx, sys, id, r) - - if status == int32(daos.Success) { - succeeded.Add(r) - continue - } + req := &control.PoolRanksReq{ + ID: id, + Ranks: rs.Ranks(), + } + req.Sys = sys - msgErr := err.Error() + resp, err := drpcCall(svc, ctx, req) - // Check fault code to aggregate invalid rank results. 
- f, ok := errors.Cause(err).(*fault.Fault)
- if ok && f.Code == code.ServerPoolInvalidRanks {
- msgErr = msgInvalidRank
- }
+ newResults, err := resp.GetResults(err)
+ if err != nil {
+ return nil, err
+ }
- // Each rank-drain failure message will produce a single result.
- if _, exists := failed[msgErr]; !exists {
- failed[msgErr] = ranklist.MustCreateRankSet("")
- }
- failed[msgErr].Add(r)
+ pbResults := []*mgmtpb.PoolRanksResult{}
+ if err := convert.Types(newResults, &pbResults); err != nil {
+ return nil, errors.Wrapf(err, "convert %T->%T", newResults, pbResults)
}
- results = append(results, resultsFromPoolRanks(id, succeeded, failed)...)
- svc.log.Tracef("results %+v", results)
+ results = append(results, pbResults...)
}
+ svc.log.Tracef("pool-rank results %+v", results)
+
return results, nil
}
-// Drain rank on a pool by calling over dRPC. Function signature satisfies poolRankOpSig type.
-func drainPoolRank(svc *mgmtSvc, ctx context.Context, sys, id string, rank ranklist.Rank) (int32, error) {
- pbReq := &mgmtpb.PoolDrainReq{
- Sys: sys,
- Rank: rank.Uint32(),
- Id: id,
+// Drain ranks on a pool by calling over dRPC. Function signature satisfies poolRankOpSig type.
+func drainPoolRanks(svc *mgmtSvc, ctx context.Context, req *control.PoolRanksReq) (*control.PoolRanksResp, error) {
+ pbReq := &mgmtpb.PoolDrainReq{}
+ if err := convert.Types(req, pbReq); err != nil {
+ return nil, errors.Wrapf(err, "convert %T->%T", req, pbReq)
+ }
+
+ resp := &control.PoolRanksResp{
+ ID: req.ID,
+ InitialRankset: ranklist.RankSetFromRanks(req.Ranks).String(),
}
pbResp, err := svc.PoolDrain(ctx, pbReq)
if err != nil {
- return int32(daos.MiscError), err
- }
- if pbResp.Status != int32(daos.Success) {
- return pbResp.Status, daos.Status(pbResp.Status)
+ return resp, err
}
- svc.log.Tracef("pool-drain triggered from system-drain: %+v (req: %+v)", pbResp, pbReq)
+ svc.log.Tracef("pool-drain triggered from system-drain: (%T: %+v) (%T: %+v)", pbReq,
+ pbReq, pbResp, pbResp)
- return int32(daos.Success), nil
+ if err := convert.Types(pbResp, resp); err != nil {
+ return nil, errors.Wrapf(err, "convert %T->%T", pbResp, resp)
+ }
+
+ return resp, nil
}
-// Reint rank on a pool by calling over dRPC. Function signature satisfies poolRankOpSig type.
-func reintPoolRank(svc *mgmtSvc, ctx context.Context, sys, id string, rank ranklist.Rank) (int32, error) {
- pbReq := &mgmtpb.PoolReintReq{
- Sys: sys,
- Rank: rank.Uint32(),
- Id: id,
+// Reintegrate ranks on a pool by calling over dRPC. Function signature satisfies poolRankOpSig type.
+func reintPoolRanks(svc *mgmtSvc, ctx context.Context, req *control.PoolRanksReq) (*control.PoolRanksResp, error) {
+ pbReq := &mgmtpb.PoolReintReq{}
+ if err := convert.Types(req, pbReq); err != nil {
+ return nil, errors.Wrapf(err, "convert %T->%T", req, pbReq)
+ }
+
+ resp := &control.PoolRanksResp{
+ ID: req.ID,
+ InitialRankset: ranklist.RankSetFromRanks(req.Ranks).String(),
}
pbResp, err := svc.PoolReintegrate(ctx, pbReq)
if err != nil {
- return int32(daos.MiscError), err
- }
- if pbResp.Status != int32(daos.Success) {
- return pbResp.Status, daos.Status(pbResp.Status)
+ return resp, err
}
- svc.log.Tracef("pool-reint triggered from system-reint: %+v (req: %+v)", pbResp, pbReq)
+ svc.log.Tracef("pool-reint triggered from system-reint: (%T: %+v) (%T: %+v)", pbReq,
+ pbReq, pbResp, pbResp)
+
+ if err := convert.Types(pbResp, resp); err != nil {
+ return nil, errors.Wrapf(err, "convert %T->%T", pbResp, resp)
+ }
- return int32(daos.Success), nil
+ return resp, nil
}
// SystemDrain marks specified ranks on all pools as being in a drain state.
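
With the batched poolRankOpSig above, a single dRPC round trip now covers a
pool's whole rank set: a PoolRanksResp reports the ranks operated on
successfully plus the one rank, and its status, on which the operation failed.
Below is a minimal standalone Go sketch of folding such a response into
per-pool results, mirroring the shape the tests in this patch expect; the
types are simplified local stand-ins, not the generated mgmtpb code.

package main

import (
	"fmt"
	"sort"
)

// Local stand-ins for the patch's PoolRanksResp/PoolRanksResult messages.
type poolRanksResp struct {
	Status       int32    // DAOS error code for the failed rank attempt
	FailedRank   uint32   // rank on which the batched operation stopped
	SuccessRanks []uint32 // ranks operated on successfully
}

type poolRanksResult struct {
	ID     string   // pool identifier
	Ranks  []uint32 // ranks covered by this result
	Status int32
	Msg    string
}

// resultsFromResp folds one per-pool batched response into at most two
// results: one covering the ranks that succeeded, one for the failed rank.
func resultsFromResp(id string, resp poolRanksResp) []poolRanksResult {
	var results []poolRanksResult
	if len(resp.SuccessRanks) > 0 {
		ranks := append([]uint32(nil), resp.SuccessRanks...)
		// Normalize ordering so the rank set prints deterministically.
		sort.Slice(ranks, func(i, j int) bool { return ranks[i] < ranks[j] })
		results = append(results, poolRanksResult{ID: id, Ranks: ranks})
	}
	if resp.Status != 0 {
		results = append(results, poolRanksResult{
			ID:     id,
			Ranks:  []uint32{resp.FailedRank},
			Status: resp.Status,
			Msg:    fmt.Sprintf("rank %d failed with status %d", resp.FailedRank, resp.Status),
		})
	}
	return results
}

func main() {
	// A drain of ranks 0-2 where rank 2 failed yields two results.
	resp := poolRanksResp{Status: -1, FailedRank: 2, SuccessRanks: []uint32{0, 1}}
	for _, res := range resultsFromResp("00000001", resp) {
		fmt.Printf("%+v\n", res)
	}
}

This matches the test expectations later in the patch: a failed batch produces
one result naming only the failed rank, with any successfully drained ranks
reported separately under a zero status.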
@@ -1294,9 +1262,9 @@ func (svc *mgmtSvc) SystemDrain(ctx context.Context, req *mgmtpb.SystemDrainReq) } // Generate results from dRPC calls. - var opCall poolRankOpSig = drainPoolRank + var opCall poolRankOpSig = drainPoolRanks if req.Reint { - opCall = reintPoolRank + opCall = reintPoolRanks } results, err := svc.getPoolRankResults(ctx, req.Sys, poolIDs, poolRanks, opCall) if err != nil { diff --git a/src/control/server/mgmt_system_test.go b/src/control/server/mgmt_system_test.go index 244dea72437..7278a3e97fa 100644 --- a/src/control/server/mgmt_system_test.go +++ b/src/control/server/mgmt_system_test.go @@ -1832,19 +1832,19 @@ func TestServer_MgmtSvc_SystemExclude(t *testing.T) { } func TestServer_MgmtSvc_SystemDrain(t *testing.T) { - dReq := func(id, rank int) *mgmtpb.PoolDrainReq { + dReq := func(id int32, ranks ...uint32) *mgmtpb.PoolDrainReq { return &mgmtpb.PoolDrainReq{ Sys: "daos_server", - Id: test.MockUUID(int32(id)), - Rank: uint32(rank), + Id: test.MockUUID(id), + Ranks: ranks, SvcRanks: []uint32{0}, } } - rReq := func(id, rank int) *mgmtpb.PoolReintReq { + rReq := func(id int32, ranks ...uint32) *mgmtpb.PoolReintReq { return &mgmtpb.PoolReintReq{ Sys: "daos_server", - Id: test.MockUUID(int32(id)), - Rank: uint32(rank), + Id: test.MockUUID(id), + Ranks: ranks, SvcRanks: []uint32{0}, } } @@ -1852,14 +1852,13 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) { for name, tc := range map[string]struct { members system.Members req *mgmtpb.SystemDrainReq - expDrainReqs []*mgmt.PoolDrainReq - expReintReqs []*mgmt.PoolReintReq - drpcResp proto.Message - drpcErr error poolRanks map[string]string useLabels bool - expResp *mgmtpb.SystemDrainResp + expDrainReqs []*mgmt.PoolDrainReq + expReintReqs []*mgmt.PoolReintReq + drpcResps []*mockDrpcResponse // Sequential list of dRPC responses. 
expErr error + expResp *mgmtpb.SystemDrainResp }{ "nil req": { req: (*mgmtpb.SystemDrainReq)(nil), @@ -1896,7 +1895,7 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) { test.MockUUID(1): "2-5", }, expResp: &mgmtpb.SystemDrainResp{ - Results: []*mgmtpb.PoolRankResult{}, + Results: []*mgmtpb.PoolRanksResult{}, }, }, "matching ranks; multiple pools; no drpc response": { @@ -1905,19 +1904,77 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) { test.MockUUID(1): "0-4", test.MockUUID(2): "1-7", }, + expDrainReqs: []*mgmtpb.PoolDrainReq{ + dReq(1, 0, 1), dReq(2, 1), + }, + expResp: &mgmtpb.SystemDrainResp{ + Results: []*mgmtpb.PoolRanksResult{ + { + Id: test.MockUUID(1), + Ranks: "0-1", + Status: -1025, + Msg: "validate response: dRPC returned no response", + }, + { + Id: test.MockUUID(2), + Ranks: "1", + Status: -1025, + Msg: "validate response: dRPC returned no response", + }, + }, + }, + }, + "matching ranks; multiple pools; empty drpc response": { + req: &mgmtpb.SystemDrainReq{Ranks: "0,1"}, + poolRanks: map[string]string{ + test.MockUUID(1): "0-4", + test.MockUUID(2): "1-7", + }, + drpcResps: []*mockDrpcResponse{ + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{}, + }, + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{}, + }, + }, + expDrainReqs: []*mgmtpb.PoolDrainReq{ + dReq(1, 0, 1), + }, + expErr: errors.New("no ranks were operated on"), + }, + "matching ranks; multiple pools; errored drpc response": { + req: &mgmtpb.SystemDrainReq{Ranks: "0,1"}, + poolRanks: map[string]string{ + test.MockUUID(1): "0-4", + test.MockUUID(2): "1-7", + }, + drpcResps: []*mockDrpcResponse{ + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{}, + Error: errNotReplica, + }, + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{}, + Error: errNotReplica, + }, + }, + expDrainReqs: []*mgmtpb.PoolDrainReq{ + dReq(1, 0, 1), dReq(2, 1), + }, expResp: &mgmtpb.SystemDrainResp{ - Results: []*mgmtpb.PoolRankResult{ + Results: []*mgmtpb.PoolRanksResult{ { - PoolId: test.MockUUID(1), + Id: test.MockUUID(1), Ranks: "0-1", Status: -1025, - Msg: FaultDataPlaneNotStarted.Error(), + Msg: errNotReplica.Error(), }, { - PoolId: test.MockUUID(2), + Id: test.MockUUID(2), Ranks: "1", Status: -1025, - Msg: FaultDataPlaneNotStarted.Error(), + Msg: errNotReplica.Error(), }, }, }, @@ -1928,14 +1985,25 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) { test.MockUUID(1): "0-4", test.MockUUID(2): "1-7", }, - drpcResp: &mgmtpb.PoolDrainResp{}, + drpcResps: []*mockDrpcResponse{ + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{0, 1}, + }, + }, + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{1}, + }, + }, + }, expDrainReqs: []*mgmtpb.PoolDrainReq{ - dReq(1, 0), dReq(1, 1), dReq(2, 1), + dReq(1, 0, 1), dReq(2, 1), }, expResp: &mgmtpb.SystemDrainResp{ - Results: []*mgmtpb.PoolRankResult{ - {PoolId: test.MockUUID(1), Ranks: "0-1"}, - {PoolId: test.MockUUID(2), Ranks: "1"}, + Results: []*mgmtpb.PoolRanksResult{ + {Id: test.MockUUID(1), Ranks: "0-1"}, + {Id: test.MockUUID(2), Ranks: "1"}, }, }, }, @@ -1949,15 +2017,26 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) { test.MockUUID(1): "0-4", test.MockUUID(2): "1-7", }, - drpcResp: &mgmtpb.PoolDrainResp{}, + drpcResps: []*mockDrpcResponse{ + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{0, 1, 2, 3}, + }, + }, + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{1, 2, 3}, + }, + }, + }, expDrainReqs: []*mgmtpb.PoolDrainReq{ - dReq(1, 0), dReq(1, 1), 
dReq(1, 2), dReq(1, 3), - dReq(2, 1), dReq(2, 2), dReq(2, 3), + dReq(1, 0, 1, 2, 3), + dReq(2, 1, 2, 3), }, expResp: &mgmtpb.SystemDrainResp{ - Results: []*mgmtpb.PoolRankResult{ - {PoolId: test.MockUUID(1), Ranks: "0-3"}, - {PoolId: test.MockUUID(2), Ranks: "1-3"}, + Results: []*mgmtpb.PoolRanksResult{ + {Id: test.MockUUID(1), Ranks: "0-3"}, + {Id: test.MockUUID(2), Ranks: "1-3"}, }, }, }, @@ -1972,44 +2051,68 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) { test.MockUUID(2): "1-7", }, useLabels: true, - drpcResp: &mgmtpb.PoolDrainResp{}, + drpcResps: []*mockDrpcResponse{ + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{0, 1, 2, 3}, + }, + }, + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{1, 2, 3}, + }, + }, + }, expDrainReqs: []*mgmtpb.PoolDrainReq{ - dReq(1, 0), dReq(1, 1), dReq(1, 2), dReq(1, 3), - dReq(2, 1), dReq(2, 2), dReq(2, 3), + dReq(1, 0, 1, 2, 3), dReq(2, 1, 2, 3), }, expResp: &mgmtpb.SystemDrainResp{ - Results: []*mgmtpb.PoolRankResult{ - {PoolId: "00000001", Ranks: "0-3"}, - {PoolId: "00000002", Ranks: "1-3"}, + Results: []*mgmtpb.PoolRanksResult{ + {Id: "00000001", Ranks: "0-3"}, + {Id: "00000002", Ranks: "1-3"}, }, }, }, - "matching ranks; variable states; drpc fails": { - members: system.Members{ - mockMember(t, 2, 0, "errored"), - mockMember(t, 1, 0, "excluded"), + "matching ranks; drpc resp missing failed rank id": { + req: &mgmtpb.SystemDrainReq{Ranks: "1-2"}, + poolRanks: map[string]string{ + test.MockUUID(1): "0-4", }, + drpcResps: []*mockDrpcResponse{ + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + Status: -1, + FailedRank: uint32(ranklist.NilRank), + }, + }, + }, + expDrainReqs: []*mgmtpb.PoolDrainReq{ + dReq(1, 1, 2), + }, + expErr: errors.New("invalid rank returned"), + }, + "matching ranks; drpc fails": { req: &mgmtpb.SystemDrainReq{Ranks: "1-2"}, poolRanks: map[string]string{ test.MockUUID(1): "0-4", - test.MockUUID(2): "1-7", }, - drpcResp: &mgmtpb.PoolDrainResp{Status: -1}, + drpcResps: []*mockDrpcResponse{ + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + Status: -1, + FailedRank: 1, + }, + }, + }, expDrainReqs: []*mgmtpb.PoolDrainReq{ - dReq(1, 1), dReq(1, 2), dReq(2, 1), dReq(2, 2), + dReq(1, 1, 2), }, expResp: &mgmtpb.SystemDrainResp{ - Results: []*mgmtpb.PoolRankResult{ + Results: []*mgmtpb.PoolRanksResult{ { - PoolId: test.MockUUID(1), - Ranks: "1-2", - Status: -1025, - Msg: "DER_UNKNOWN(-1): Unknown error code -1", - }, - { - PoolId: test.MockUUID(2), - Ranks: "1-2", - Status: -1025, + Id: test.MockUUID(1), + Ranks: "1", + Status: -1, Msg: "DER_UNKNOWN(-1): Unknown error code -1", }, }, @@ -2024,44 +2127,65 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) { test.MockUUID(1): "0-4", test.MockUUID(2): "1-7", }, - drpcResp: &mgmtpb.PoolReintResp{}, + drpcResps: []*mockDrpcResponse{ + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{0, 1}, + }, + }, + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{1}, + }, + }, + }, expReintReqs: []*mgmtpb.PoolReintReq{ - rReq(1, 0), rReq(1, 1), rReq(2, 1), + rReq(1, 0, 1), rReq(2, 1), }, expResp: &mgmtpb.SystemDrainResp{ Reint: true, - Results: []*mgmtpb.PoolRankResult{ - {PoolId: test.MockUUID(1), Ranks: "0-1"}, - {PoolId: test.MockUUID(2), Ranks: "1"}, + Results: []*mgmtpb.PoolRanksResult{ + {Id: test.MockUUID(1), Ranks: "0-1"}, + {Id: test.MockUUID(2), Ranks: "1"}, }, }, }, "reintegrate; matching hosts; multiple pools; pool labels": { req: &mgmtpb.SystemDrainReq{ + Reint: 
true, // Resolves to ranks 0-3. Hosts: fmt.Sprintf("%s,%s", test.MockHostAddr(1), test.MockHostAddr(2)), - Reint: true, }, poolRanks: map[string]string{ test.MockUUID(1): "0-4", test.MockUUID(2): "1-7", }, useLabels: true, - drpcResp: &mgmtpb.PoolReintResp{}, + drpcResps: []*mockDrpcResponse{ + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{0, 1, 2, 3}, + }, + }, + &mockDrpcResponse{ + Message: &mgmtpb.PoolRanksResp{ + SuccessRanks: []uint32{1, 2, 3}, + }, + }, + }, expReintReqs: []*mgmtpb.PoolReintReq{ - rReq(1, 0), rReq(1, 1), rReq(1, 2), rReq(1, 3), - rReq(2, 1), rReq(2, 2), rReq(2, 3), + rReq(1, 0, 1, 2, 3), rReq(2, 1, 2, 3), }, expResp: &mgmtpb.SystemDrainResp{ Reint: true, - Results: []*mgmtpb.PoolRankResult{ - {PoolId: "00000001", Ranks: "0-3"}, - {PoolId: "00000002", Ranks: "1-3"}, + Results: []*mgmtpb.PoolRanksResult{ + {Id: "00000001", Ranks: "0-3"}, + {Id: "00000002", Ranks: "1-3"}, }, }, }, - "reintegrate; matching ranks; variable states; drpc failed": { + "reintegrate; matching ranks; some invalid states; drpc not made": { members: system.Members{ // Only ranks in joined states can be reintegrated. mockMember(t, 4, 0, "adminexcluded"), @@ -2077,37 +2201,22 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) { test.MockUUID(1): "0-4", test.MockUUID(2): "1-7", }, - drpcResp: &mgmtpb.PoolReintResp{Status: -1}, - expReintReqs: []*mgmtpb.PoolReintReq{ - // dRPC only called for joined rank - rReq(1, 3), rReq(2, 3), - }, + // Reint dRPC not made because of ranks with invalid state in request. + expReintReqs: []*mgmtpb.PoolReintReq{}, expResp: &mgmtpb.SystemDrainResp{ Reint: true, - Results: []*mgmtpb.PoolRankResult{ + Results: []*mgmtpb.PoolRanksResult{ { - PoolId: test.MockUUID(1), - Ranks: "3", + Id: test.MockUUID(1), + Ranks: "1-4", Status: -1025, - Msg: "DER_UNKNOWN(-1): Unknown error code -1", + Msg: "ranks with invalid state: 1,2,4", }, { - PoolId: test.MockUUID(1), - Ranks: "1-2,4", + Id: test.MockUUID(2), + Ranks: "1-4", Status: -1025, - Msg: msgInvalidRank, - }, - { - PoolId: test.MockUUID(2), - Ranks: "3", - Status: -1025, - Msg: "DER_UNKNOWN(-1): Unknown error code -1", - }, - { - PoolId: test.MockUUID(2), - Ranks: "1-2,4", - Status: -1025, - Msg: msgInvalidRank, + Msg: "ranks with invalid state: 1,2,4", }, }, }, @@ -2147,11 +2256,10 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) { }) } - var mockDrpc *mockDrpcClient - if tc.drpcResp != nil { - mockDrpc = getMockDrpcClient(tc.drpcResp, tc.drpcErr) - setupSvcDrpcClient(svc, 0, mockDrpc) - } + dcc := new(mockDrpcClientConfig) + dcc.setSendMsgResponseList(t, tc.drpcResps...) 
+ mdc := newMockDrpcClient(dcc)
+ setupSvcDrpcClient(svc, 0, mdc)
if tc.req != nil && tc.req.Sys == "" {
tc.req.Sys = build.DefaultSystemName
@@ -2159,23 +2267,28 @@ func TestServer_MgmtSvc_SystemDrain(t *testing.T) {
gotResp, gotErr := svc.SystemDrain(test.MustLogContext(t, log), tc.req)
test.CmpErr(t, tc.expErr, gotErr)
- if tc.expErr != nil {
- return
- }
-
- cmpOpts := []cmp.Option{
- cmpopts.IgnoreUnexported(mgmtpb.SystemDrainResp{},
- mgmtpb.PoolRankResult{}),
- }
- if diff := cmp.Diff(tc.expResp, gotResp, cmpOpts...); diff != "" {
- t.Fatalf("unexpected response (-want, +got):\n%s\n", diff)
- }
- if mockDrpc == nil {
- return
+ if tc.expErr == nil {
+ cmpOpts := []cmp.Option{
+ cmpopts.IgnoreUnexported(mgmtpb.SystemDrainResp{},
+ mgmtpb.PoolRanksResult{}),
+ cmpopts.IgnoreFields(mgmtpb.PoolRanksResult{},
+ "Msg"),
+ }
+ if diff := cmp.Diff(tc.expResp, gotResp, cmpOpts...); diff != "" {
+ t.Fatalf("unexpected response (-want, +got):\n%s\n", diff)
+ }
+ // Error comparisons are approximate; only the message text is matched.
+ for i, r := range tc.expResp.Results {
+ if r.Msg != "" {
+ e1 := errors.New(r.Msg)
+ e2 := errors.New(gotResp.Results[i].Msg)
+ test.CmpErr(t, e1, e2)
+ }
+ }
}
- gotDrpcCalls := mockDrpc.calls.get()
+ gotDrpcCalls := mdc.calls.get()
nrDrpcCalls := len(gotDrpcCalls)
nrDrainReqs := len(tc.expDrainReqs)
diff --git a/src/mgmt/pool.pb-c.c b/src/mgmt/pool.pb-c.c
index d63497fbee4..37e7b302bef 100644
--- a/src/mgmt/pool.pb-c.c
+++ b/src/mgmt/pool.pb-c.c
@@ -412,49 +412,42 @@ void mgmt__pool_drain_req__free_unpacked
assert(message->base.descriptor == &mgmt__pool_drain_req__descriptor);
protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator);
}
-void mgmt__pool_drain_resp__init
- (Mgmt__PoolDrainResp *message)
+void
+mgmt__pool_ranks_resp__init(Mgmt__PoolRanksResp *message)
{
- static const Mgmt__PoolDrainResp init_value = MGMT__POOL_DRAIN_RESP__INIT;
- *message = init_value;
+ static const Mgmt__PoolRanksResp init_value = MGMT__POOL_RANKS_RESP__INIT;
+ *message = init_value;
}
-size_t mgmt__pool_drain_resp__get_packed_size
- (const Mgmt__PoolDrainResp *message)
+size_t
+mgmt__pool_ranks_resp__get_packed_size(const Mgmt__PoolRanksResp *message)
{
- assert(message->base.descriptor == &mgmt__pool_drain_resp__descriptor);
- return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message));
+ assert(message->base.descriptor == &mgmt__pool_ranks_resp__descriptor);
+ return protobuf_c_message_get_packed_size((const ProtobufCMessage *)(message));
}
-size_t mgmt__pool_drain_resp__pack
- (const Mgmt__PoolDrainResp *message,
- uint8_t *out)
+size_t
+mgmt__pool_ranks_resp__pack(const Mgmt__PoolRanksResp *message, uint8_t *out)
{
- assert(message->base.descriptor == &mgmt__pool_drain_resp__descriptor);
- return protobuf_c_message_pack ((const ProtobufCMessage*)message, out);
+ assert(message->base.descriptor == &mgmt__pool_ranks_resp__descriptor);
+ return protobuf_c_message_pack((const ProtobufCMessage *)message, out);
}
-size_t mgmt__pool_drain_resp__pack_to_buffer
- (const Mgmt__PoolDrainResp *message,
- ProtobufCBuffer *buffer)
+size_t
+mgmt__pool_ranks_resp__pack_to_buffer(const Mgmt__PoolRanksResp *message, ProtobufCBuffer *buffer)
{
- assert(message->base.descriptor == &mgmt__pool_drain_resp__descriptor);
- return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer);
+ assert(message->base.descriptor == &mgmt__pool_ranks_resp__descriptor);
+ return protobuf_c_message_pack_to_buffer((const ProtobufCMessage *)message, buffer);
}
-Mgmt__PoolDrainResp *
- 
mgmt__pool_drain_resp__unpack - (ProtobufCAllocator *allocator, - size_t len, - const uint8_t *data) +Mgmt__PoolRanksResp * +mgmt__pool_ranks_resp__unpack(ProtobufCAllocator *allocator, size_t len, const uint8_t *data) { - return (Mgmt__PoolDrainResp *) - protobuf_c_message_unpack (&mgmt__pool_drain_resp__descriptor, - allocator, len, data); + return (Mgmt__PoolRanksResp *)protobuf_c_message_unpack(&mgmt__pool_ranks_resp__descriptor, + allocator, len, data); } -void mgmt__pool_drain_resp__free_unpacked - (Mgmt__PoolDrainResp *message, - ProtobufCAllocator *allocator) +void +mgmt__pool_ranks_resp__free_unpacked(Mgmt__PoolRanksResp *message, ProtobufCAllocator *allocator) { if(!message) return; - assert(message->base.descriptor == &mgmt__pool_drain_resp__descriptor); + assert(message->base.descriptor == &mgmt__pool_ranks_resp__descriptor); protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); } void mgmt__pool_extend_req__init @@ -585,44 +578,6 @@ mgmt__pool_reint_req__free_unpacked(Mgmt__PoolReintReq *message, ProtobufCAlloca assert(message->base.descriptor == &mgmt__pool_reint_req__descriptor); protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); } -void -mgmt__pool_reint_resp__init(Mgmt__PoolReintResp *message) -{ - static const Mgmt__PoolReintResp init_value = MGMT__POOL_REINT_RESP__INIT; - *message = init_value; -} -size_t -mgmt__pool_reint_resp__get_packed_size(const Mgmt__PoolReintResp *message) -{ - assert(message->base.descriptor == &mgmt__pool_reint_resp__descriptor); - return protobuf_c_message_get_packed_size((const ProtobufCMessage *)(message)); -} -size_t -mgmt__pool_reint_resp__pack(const Mgmt__PoolReintResp *message, uint8_t *out) -{ - assert(message->base.descriptor == &mgmt__pool_reint_resp__descriptor); - return protobuf_c_message_pack((const ProtobufCMessage *)message, out); -} -size_t -mgmt__pool_reint_resp__pack_to_buffer(const Mgmt__PoolReintResp *message, ProtobufCBuffer *buffer) -{ - assert(message->base.descriptor == &mgmt__pool_reint_resp__descriptor); - return protobuf_c_message_pack_to_buffer((const ProtobufCMessage *)message, buffer); -} -Mgmt__PoolReintResp * -mgmt__pool_reint_resp__unpack(ProtobufCAllocator *allocator, size_t len, const uint8_t *data) -{ - return (Mgmt__PoolReintResp *)protobuf_c_message_unpack(&mgmt__pool_reint_resp__descriptor, - allocator, len, data); -} -void -mgmt__pool_reint_resp__free_unpacked(Mgmt__PoolReintResp *message, ProtobufCAllocator *allocator) -{ - if(!message) - return; - assert(message->base.descriptor == &mgmt__pool_reint_resp__descriptor); - protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); -} void mgmt__list_pools_req__init (Mgmt__ListPoolsReq *message) { @@ -2095,75 +2050,42 @@ const ProtobufCMessageDescriptor mgmt__pool_evict_resp__descriptor = (ProtobufCMessageInit) mgmt__pool_evict_resp__init, NULL,NULL,NULL /* reserved[123] */ }; -static const ProtobufCFieldDescriptor mgmt__pool_exclude_req__field_descriptors[5] = -{ - { - "sys", - 1, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__PoolExcludeReq, sys), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "id", - 2, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__PoolExcludeReq, id), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "rank", - 3, - PROTOBUF_C_LABEL_NONE, - 
PROTOBUF_C_TYPE_UINT32, - 0, /* quantifier_offset */ - offsetof(Mgmt__PoolExcludeReq, rank), - NULL, - NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "target_idx", - 4, - PROTOBUF_C_LABEL_REPEATED, - PROTOBUF_C_TYPE_UINT32, - offsetof(Mgmt__PoolExcludeReq, n_target_idx), - offsetof(Mgmt__PoolExcludeReq, target_idx), - NULL, - NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "svc_ranks", - 5, - PROTOBUF_C_LABEL_REPEATED, - PROTOBUF_C_TYPE_UINT32, - offsetof(Mgmt__PoolExcludeReq, n_svc_ranks), - offsetof(Mgmt__PoolExcludeReq, svc_ranks), - NULL, - NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, +static const ProtobufCFieldDescriptor mgmt__pool_exclude_req__field_descriptors[5] = { + { + "sys", 1, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__PoolExcludeReq, sys), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "id", 2, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__PoolExcludeReq, id), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "ranks", 3, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__PoolExcludeReq, n_ranks), offsetof(Mgmt__PoolExcludeReq, ranks), NULL, NULL, + 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "target_idx", 4, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__PoolExcludeReq, n_target_idx), offsetof(Mgmt__PoolExcludeReq, target_idx), + NULL, NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "svc_ranks", 5, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__PoolExcludeReq, n_svc_ranks), offsetof(Mgmt__PoolExcludeReq, svc_ranks), + NULL, NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, }; static const unsigned mgmt__pool_exclude_req__field_indices_by_name[] = { - 1, /* field[1] = id */ - 2, /* field[2] = rank */ - 4, /* field[4] = svc_ranks */ - 0, /* field[0] = sys */ - 3, /* field[3] = target_idx */ + 1, /* field[1] = id */ + 2, /* field[2] = ranks */ + 4, /* field[4] = svc_ranks */ + 0, /* field[0] = sys */ + 3, /* field[3] = target_idx */ }; static const ProtobufCIntRange mgmt__pool_exclude_req__number_ranges[1 + 1] = { @@ -2223,75 +2145,42 @@ const ProtobufCMessageDescriptor mgmt__pool_exclude_resp__descriptor = (ProtobufCMessageInit) mgmt__pool_exclude_resp__init, NULL,NULL,NULL /* reserved[123] */ }; -static const ProtobufCFieldDescriptor mgmt__pool_drain_req__field_descriptors[5] = -{ - { - "sys", - 1, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__PoolDrainReq, sys), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "id", - 2, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__PoolDrainReq, id), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "rank", - 3, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_UINT32, - 0, /* quantifier_offset */ - offsetof(Mgmt__PoolDrainReq, rank), - NULL, - NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "target_idx", - 4, - PROTOBUF_C_LABEL_REPEATED, - PROTOBUF_C_TYPE_UINT32, - offsetof(Mgmt__PoolDrainReq, n_target_idx), - offsetof(Mgmt__PoolDrainReq, target_idx), - NULL, - NULL, - 
0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "svc_ranks", - 5, - PROTOBUF_C_LABEL_REPEATED, - PROTOBUF_C_TYPE_UINT32, - offsetof(Mgmt__PoolDrainReq, n_svc_ranks), - offsetof(Mgmt__PoolDrainReq, svc_ranks), - NULL, - NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, +static const ProtobufCFieldDescriptor mgmt__pool_drain_req__field_descriptors[5] = { + { + "sys", 1, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__PoolDrainReq, sys), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "id", 2, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__PoolDrainReq, id), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "ranks", 3, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__PoolDrainReq, n_ranks), offsetof(Mgmt__PoolDrainReq, ranks), NULL, NULL, + 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "target_idx", 4, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__PoolDrainReq, n_target_idx), offsetof(Mgmt__PoolDrainReq, target_idx), NULL, + NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "svc_ranks", 5, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__PoolDrainReq, n_svc_ranks), offsetof(Mgmt__PoolDrainReq, svc_ranks), NULL, + NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, }; static const unsigned mgmt__pool_drain_req__field_indices_by_name[] = { - 1, /* field[1] = id */ - 2, /* field[2] = rank */ - 4, /* field[4] = svc_ranks */ - 0, /* field[0] = sys */ - 3, /* field[3] = target_idx */ + 1, /* field[1] = id */ + 2, /* field[2] = ranks */ + 4, /* field[4] = svc_ranks */ + 0, /* field[0] = sys */ + 3, /* field[3] = target_idx */ }; static const ProtobufCIntRange mgmt__pool_drain_req__number_ranges[1 + 1] = { @@ -2313,43 +2202,46 @@ const ProtobufCMessageDescriptor mgmt__pool_drain_req__descriptor = (ProtobufCMessageInit) mgmt__pool_drain_req__init, NULL,NULL,NULL /* reserved[123] */ }; -static const ProtobufCFieldDescriptor mgmt__pool_drain_resp__field_descriptors[1] = -{ - { - "status", +static const ProtobufCFieldDescriptor mgmt__pool_ranks_resp__field_descriptors[3] = { + { + "status", 1, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_INT32, 0, /* quantifier_offset */ + offsetof(Mgmt__PoolRanksResp, status), NULL, NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "failed_rank", 2, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_UINT32, 0, /* quantifier_offset */ + offsetof(Mgmt__PoolRanksResp, failed_rank), NULL, NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "success_ranks", 3, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__PoolRanksResp, n_success_ranks), + offsetof(Mgmt__PoolRanksResp, success_ranks), NULL, NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, +}; +static const unsigned mgmt__pool_ranks_resp__field_indices_by_name[] = { + 1, /* field[1] = failed_rank */ + 0, /* field[0] = status */ + 2, /* field[2] = success_ranks */ +}; +static const ProtobufCIntRange mgmt__pool_ranks_resp__number_ranges[1 + 1] = {{1, 0}, {0, 3}}; +const ProtobufCMessageDescriptor mgmt__pool_ranks_resp__descriptor = { + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "mgmt.PoolRanksResp", + "PoolRanksResp", + "Mgmt__PoolRanksResp", + "mgmt", + 
sizeof(Mgmt__PoolRanksResp), + 3, + mgmt__pool_ranks_resp__field_descriptors, + mgmt__pool_ranks_resp__field_indices_by_name, 1, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_INT32, - 0, /* quantifier_offset */ - offsetof(Mgmt__PoolDrainResp, status), + mgmt__pool_ranks_resp__number_ranges, + (ProtobufCMessageInit)mgmt__pool_ranks_resp__init, NULL, NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, -}; -static const unsigned mgmt__pool_drain_resp__field_indices_by_name[] = { - 0, /* field[0] = status */ -}; -static const ProtobufCIntRange mgmt__pool_drain_resp__number_ranges[1 + 1] = -{ - { 1, 0 }, - { 0, 1 } -}; -const ProtobufCMessageDescriptor mgmt__pool_drain_resp__descriptor = -{ - PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, - "mgmt.PoolDrainResp", - "PoolDrainResp", - "Mgmt__PoolDrainResp", - "mgmt", - sizeof(Mgmt__PoolDrainResp), - 1, - mgmt__pool_drain_resp__field_descriptors, - mgmt__pool_drain_resp__field_indices_by_name, - 1, mgmt__pool_drain_resp__number_ranges, - (ProtobufCMessageInit) mgmt__pool_drain_resp__init, - NULL,NULL,NULL /* reserved[123] */ + NULL /* reserved[123] */ }; static const ProtobufCFieldDescriptor mgmt__pool_extend_req__field_descriptors[7] = { @@ -2503,20 +2395,22 @@ static const ProtobufCIntRange mgmt__pool_extend_resp__number_ranges[1 + 1] = { 1, 0 }, { 0, 2 } }; -const ProtobufCMessageDescriptor mgmt__pool_extend_resp__descriptor = -{ - PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, - "mgmt.PoolExtendResp", - "PoolExtendResp", - "Mgmt__PoolExtendResp", - "mgmt", - sizeof(Mgmt__PoolExtendResp), - 2, - mgmt__pool_extend_resp__field_descriptors, - mgmt__pool_extend_resp__field_indices_by_name, - 1, mgmt__pool_extend_resp__number_ranges, - (ProtobufCMessageInit) mgmt__pool_extend_resp__init, - NULL,NULL,NULL /* reserved[123] */ +const ProtobufCMessageDescriptor mgmt__pool_extend_resp__descriptor = { + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "mgmt.PoolExtendResp", + "PoolExtendResp", + "Mgmt__PoolExtendResp", + "mgmt", + sizeof(Mgmt__PoolExtendResp), + 2, + mgmt__pool_extend_resp__field_descriptors, + mgmt__pool_extend_resp__field_indices_by_name, + 1, + mgmt__pool_extend_resp__number_ranges, + (ProtobufCMessageInit)mgmt__pool_extend_resp__init, + NULL, + NULL, + NULL /* reserved[123] */ }; static const ProtobufCFieldDescriptor mgmt__pool_reint_req__field_descriptors[7] = { { @@ -2530,9 +2424,10 @@ static const ProtobufCFieldDescriptor mgmt__pool_reint_req__field_descriptors[7] 0, NULL, NULL /* reserved1,reserved2, etc */ }, { - "rank", 3, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_UINT32, 0, /* quantifier_offset */ - offsetof(Mgmt__PoolReintReq, rank), NULL, NULL, 0, /* flags */ - 0, NULL, NULL /* reserved1,reserved2, etc */ + "ranks", 3, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__PoolReintReq, n_ranks), offsetof(Mgmt__PoolReintReq, ranks), NULL, NULL, + 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ }, { "target_idx", 4, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, @@ -2561,7 +2456,7 @@ static const ProtobufCFieldDescriptor mgmt__pool_reint_req__field_descriptors[7] static const unsigned mgmt__pool_reint_req__field_indices_by_name[] = { 1, /* field[1] = id */ 6, /* field[6] = mem_ratio */ - 2, /* field[2] = rank */ + 2, /* field[2] = ranks */ 4, /* field[4] = svc_ranks */ 0, /* field[0] = sys */ 3, /* field[3] = target_idx */ @@ -2585,304 +2480,177 @@ const ProtobufCMessageDescriptor mgmt__pool_reint_req__descriptor = { NULL, NULL /* reserved[123] */ }; -static const ProtobufCFieldDescriptor 
mgmt__pool_reint_resp__field_descriptors[1] = { +static const ProtobufCFieldDescriptor mgmt__list_pools_req__field_descriptors[1] = { { - "status", 1, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_INT32, 0, /* quantifier_offset */ - offsetof(Mgmt__PoolReintResp, status), NULL, NULL, 0, /* flags */ - 0, NULL, NULL /* reserved1,reserved2, etc */ + "sys", 1, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__ListPoolsReq, sys), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ }, }; -static const unsigned mgmt__pool_reint_resp__field_indices_by_name[] = { - 0, /* field[0] = status */ +static const unsigned mgmt__list_pools_req__field_indices_by_name[] = { + 0, /* field[0] = sys */ }; -static const ProtobufCIntRange mgmt__pool_reint_resp__number_ranges[1 + 1] = {{1, 0}, {0, 1}}; -const ProtobufCMessageDescriptor mgmt__pool_reint_resp__descriptor = { +static const ProtobufCIntRange mgmt__list_pools_req__number_ranges[1 + 1] = {{1, 0}, {0, 1}}; +const ProtobufCMessageDescriptor mgmt__list_pools_req__descriptor = { PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, - "mgmt.PoolReintResp", - "PoolReintResp", - "Mgmt__PoolReintResp", + "mgmt.ListPoolsReq", + "ListPoolsReq", + "Mgmt__ListPoolsReq", "mgmt", - sizeof(Mgmt__PoolReintResp), + sizeof(Mgmt__ListPoolsReq), 1, - mgmt__pool_reint_resp__field_descriptors, - mgmt__pool_reint_resp__field_indices_by_name, + mgmt__list_pools_req__field_descriptors, + mgmt__list_pools_req__field_indices_by_name, 1, - mgmt__pool_reint_resp__number_ranges, - (ProtobufCMessageInit)mgmt__pool_reint_resp__init, + mgmt__list_pools_req__number_ranges, + (ProtobufCMessageInit)mgmt__list_pools_req__init, NULL, NULL, NULL /* reserved[123] */ }; -static const ProtobufCFieldDescriptor mgmt__list_pools_req__field_descriptors[1] = -{ - { - "sys", - 1, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListPoolsReq, sys), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, -}; -static const unsigned mgmt__list_pools_req__field_indices_by_name[] = { - 0, /* field[0] = sys */ -}; -static const ProtobufCIntRange mgmt__list_pools_req__number_ranges[1 + 1] = -{ - { 1, 0 }, - { 0, 1 } -}; -const ProtobufCMessageDescriptor mgmt__list_pools_req__descriptor = -{ - PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, - "mgmt.ListPoolsReq", - "ListPoolsReq", - "Mgmt__ListPoolsReq", - "mgmt", - sizeof(Mgmt__ListPoolsReq), - 1, - mgmt__list_pools_req__field_descriptors, - mgmt__list_pools_req__field_indices_by_name, - 1, mgmt__list_pools_req__number_ranges, - (ProtobufCMessageInit) mgmt__list_pools_req__init, - NULL,NULL,NULL /* reserved[123] */ +static const ProtobufCFieldDescriptor mgmt__list_pools_resp__pool__field_descriptors[5] = { + { + "uuid", 1, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__ListPoolsResp__Pool, uuid), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "label", 2, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__ListPoolsResp__Pool, label), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "svc_reps", 3, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__ListPoolsResp__Pool, n_svc_reps), + offsetof(Mgmt__ListPoolsResp__Pool, svc_reps), NULL, NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + 
{ + "state", 4, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__ListPoolsResp__Pool, state), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "rebuild_state", 5, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, + 0, /* quantifier_offset */ + offsetof(Mgmt__ListPoolsResp__Pool, rebuild_state), NULL, &protobuf_c_empty_string, + 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, }; -static const ProtobufCFieldDescriptor mgmt__list_pools_resp__pool__field_descriptors[5] = -{ - { - "uuid", +static const unsigned mgmt__list_pools_resp__pool__field_indices_by_name[] = { + 1, /* field[1] = label */ + 4, /* field[4] = rebuild_state */ + 3, /* field[3] = state */ + 2, /* field[2] = svc_reps */ + 0, /* field[0] = uuid */ +}; +static const ProtobufCIntRange mgmt__list_pools_resp__pool__number_ranges[1 + 1] = {{1, 0}, {0, 5}}; +const ProtobufCMessageDescriptor mgmt__list_pools_resp__pool__descriptor = { + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "mgmt.ListPoolsResp.Pool", + "Pool", + "Mgmt__ListPoolsResp__Pool", + "mgmt", + sizeof(Mgmt__ListPoolsResp__Pool), + 5, + mgmt__list_pools_resp__pool__field_descriptors, + mgmt__list_pools_resp__pool__field_indices_by_name, 1, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListPoolsResp__Pool, uuid), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "label", - 2, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListPoolsResp__Pool, label), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "svc_reps", - 3, - PROTOBUF_C_LABEL_REPEATED, - PROTOBUF_C_TYPE_UINT32, - offsetof(Mgmt__ListPoolsResp__Pool, n_svc_reps), - offsetof(Mgmt__ListPoolsResp__Pool, svc_reps), + mgmt__list_pools_resp__pool__number_ranges, + (ProtobufCMessageInit)mgmt__list_pools_resp__pool__init, NULL, NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "state", - 4, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListPoolsResp__Pool, state), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "rebuild_state", - 5, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListPoolsResp__Pool, rebuild_state), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, -}; -static const unsigned mgmt__list_pools_resp__pool__field_indices_by_name[] = { - 1, /* field[1] = label */ - 4, /* field[4] = rebuild_state */ - 3, /* field[3] = state */ - 2, /* field[2] = svc_reps */ - 0, /* field[0] = uuid */ + NULL /* reserved[123] */ }; -static const ProtobufCIntRange mgmt__list_pools_resp__pool__number_ranges[1 + 1] = -{ - { 1, 0 }, - { 0, 5 } +static const ProtobufCFieldDescriptor mgmt__list_pools_resp__field_descriptors[3] = { + { + "status", 1, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_INT32, 0, /* quantifier_offset */ + offsetof(Mgmt__ListPoolsResp, status), NULL, NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "pools", 2, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_MESSAGE, + offsetof(Mgmt__ListPoolsResp, n_pools), offsetof(Mgmt__ListPoolsResp, pools), + &mgmt__list_pools_resp__pool__descriptor, NULL, 0, /* flags */ + 0, NULL, NULL /* 
reserved1,reserved2, etc */ + }, + { + "data_version", 3, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_UINT64, 0, /* quantifier_offset */ + offsetof(Mgmt__ListPoolsResp, data_version), NULL, NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, }; -const ProtobufCMessageDescriptor mgmt__list_pools_resp__pool__descriptor = -{ - PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, - "mgmt.ListPoolsResp.Pool", - "Pool", - "Mgmt__ListPoolsResp__Pool", - "mgmt", - sizeof(Mgmt__ListPoolsResp__Pool), - 5, - mgmt__list_pools_resp__pool__field_descriptors, - mgmt__list_pools_resp__pool__field_indices_by_name, - 1, mgmt__list_pools_resp__pool__number_ranges, - (ProtobufCMessageInit) mgmt__list_pools_resp__pool__init, - NULL,NULL,NULL /* reserved[123] */ +static const unsigned mgmt__list_pools_resp__field_indices_by_name[] = { + 2, /* field[2] = data_version */ + 1, /* field[1] = pools */ + 0, /* field[0] = status */ }; -static const ProtobufCFieldDescriptor mgmt__list_pools_resp__field_descriptors[3] = -{ - { - "status", - 1, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_INT32, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListPoolsResp, status), - NULL, - NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "pools", - 2, - PROTOBUF_C_LABEL_REPEATED, - PROTOBUF_C_TYPE_MESSAGE, - offsetof(Mgmt__ListPoolsResp, n_pools), - offsetof(Mgmt__ListPoolsResp, pools), - &mgmt__list_pools_resp__pool__descriptor, - NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "data_version", +static const ProtobufCIntRange mgmt__list_pools_resp__number_ranges[1 + 1] = {{1, 0}, {0, 3}}; +const ProtobufCMessageDescriptor mgmt__list_pools_resp__descriptor = { + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "mgmt.ListPoolsResp", + "ListPoolsResp", + "Mgmt__ListPoolsResp", + "mgmt", + sizeof(Mgmt__ListPoolsResp), 3, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_UINT64, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListPoolsResp, data_version), + mgmt__list_pools_resp__field_descriptors, + mgmt__list_pools_resp__field_indices_by_name, + 1, + mgmt__list_pools_resp__number_ranges, + (ProtobufCMessageInit)mgmt__list_pools_resp__init, NULL, NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, -}; -static const unsigned mgmt__list_pools_resp__field_indices_by_name[] = { - 2, /* field[2] = data_version */ - 1, /* field[1] = pools */ - 0, /* field[0] = status */ + NULL /* reserved[123] */ }; -static const ProtobufCIntRange mgmt__list_pools_resp__number_ranges[1 + 1] = -{ - { 1, 0 }, - { 0, 3 } +static const ProtobufCFieldDescriptor mgmt__list_cont_req__field_descriptors[3] = { + { + "sys", 1, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__ListContReq, sys), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "id", 2, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__ListContReq, id), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, + { + "svc_ranks", 3, PROTOBUF_C_LABEL_REPEATED, PROTOBUF_C_TYPE_UINT32, + offsetof(Mgmt__ListContReq, n_svc_ranks), offsetof(Mgmt__ListContReq, svc_ranks), NULL, + NULL, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, }; -const ProtobufCMessageDescriptor mgmt__list_pools_resp__descriptor = -{ - PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, - "mgmt.ListPoolsResp", - "ListPoolsResp", - "Mgmt__ListPoolsResp", - "mgmt", - sizeof(Mgmt__ListPoolsResp), - 3, - 
mgmt__list_pools_resp__field_descriptors, - mgmt__list_pools_resp__field_indices_by_name, - 1, mgmt__list_pools_resp__number_ranges, - (ProtobufCMessageInit) mgmt__list_pools_resp__init, - NULL,NULL,NULL /* reserved[123] */ +static const unsigned mgmt__list_cont_req__field_indices_by_name[] = { + 1, /* field[1] = id */ + 2, /* field[2] = svc_ranks */ + 0, /* field[0] = sys */ }; -static const ProtobufCFieldDescriptor mgmt__list_cont_req__field_descriptors[3] = -{ - { - "sys", - 1, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListContReq, sys), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "id", - 2, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListContReq, id), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, - { - "svc_ranks", +static const ProtobufCIntRange mgmt__list_cont_req__number_ranges[1 + 1] = {{1, 0}, {0, 3}}; +const ProtobufCMessageDescriptor mgmt__list_cont_req__descriptor = { + PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, + "mgmt.ListContReq", + "ListContReq", + "Mgmt__ListContReq", + "mgmt", + sizeof(Mgmt__ListContReq), 3, - PROTOBUF_C_LABEL_REPEATED, - PROTOBUF_C_TYPE_UINT32, - offsetof(Mgmt__ListContReq, n_svc_ranks), - offsetof(Mgmt__ListContReq, svc_ranks), + mgmt__list_cont_req__field_descriptors, + mgmt__list_cont_req__field_indices_by_name, + 1, + mgmt__list_cont_req__number_ranges, + (ProtobufCMessageInit)mgmt__list_cont_req__init, NULL, NULL, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, -}; -static const unsigned mgmt__list_cont_req__field_indices_by_name[] = { - 1, /* field[1] = id */ - 2, /* field[2] = svc_ranks */ - 0, /* field[0] = sys */ -}; -static const ProtobufCIntRange mgmt__list_cont_req__number_ranges[1 + 1] = -{ - { 1, 0 }, - { 0, 3 } -}; -const ProtobufCMessageDescriptor mgmt__list_cont_req__descriptor = -{ - PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, - "mgmt.ListContReq", - "ListContReq", - "Mgmt__ListContReq", - "mgmt", - sizeof(Mgmt__ListContReq), - 3, - mgmt__list_cont_req__field_descriptors, - mgmt__list_cont_req__field_indices_by_name, - 1, mgmt__list_cont_req__number_ranges, - (ProtobufCMessageInit) mgmt__list_cont_req__init, - NULL,NULL,NULL /* reserved[123] */ + NULL /* reserved[123] */ }; -static const ProtobufCFieldDescriptor mgmt__list_cont_resp__cont__field_descriptors[1] = -{ - { - "uuid", - 1, - PROTOBUF_C_LABEL_NONE, - PROTOBUF_C_TYPE_STRING, - 0, /* quantifier_offset */ - offsetof(Mgmt__ListContResp__Cont, uuid), - NULL, - &protobuf_c_empty_string, - 0, /* flags */ - 0,NULL,NULL /* reserved1,reserved2, etc */ - }, +static const ProtobufCFieldDescriptor mgmt__list_cont_resp__cont__field_descriptors[1] = { + { + "uuid", 1, PROTOBUF_C_LABEL_NONE, PROTOBUF_C_TYPE_STRING, 0, /* quantifier_offset */ + offsetof(Mgmt__ListContResp__Cont, uuid), NULL, &protobuf_c_empty_string, 0, /* flags */ + 0, NULL, NULL /* reserved1,reserved2, etc */ + }, }; static const unsigned mgmt__list_cont_resp__cont__field_indices_by_name[] = { 0, /* field[0] = uuid */ diff --git a/src/mgmt/pool.pb-c.h b/src/mgmt/pool.pb-c.h index edd5c37ac31..0501f6ff8f1 100644 --- a/src/mgmt/pool.pb-c.h +++ b/src/mgmt/pool.pb-c.h @@ -24,11 +24,10 @@ typedef struct _Mgmt__PoolEvictResp Mgmt__PoolEvictResp; typedef struct _Mgmt__PoolExcludeReq Mgmt__PoolExcludeReq; typedef struct _Mgmt__PoolExcludeResp Mgmt__PoolExcludeResp; typedef struct _Mgmt__PoolDrainReq 
Mgmt__PoolDrainReq; -typedef struct _Mgmt__PoolDrainResp Mgmt__PoolDrainResp; +typedef struct _Mgmt__PoolRanksResp Mgmt__PoolRanksResp; typedef struct _Mgmt__PoolExtendReq Mgmt__PoolExtendReq; typedef struct _Mgmt__PoolExtendResp Mgmt__PoolExtendResp; typedef struct _Mgmt__PoolReintReq Mgmt__PoolReintReq; -typedef struct _Mgmt__PoolReintResp Mgmt__PoolReintResp; typedef struct _Mgmt__ListPoolsReq Mgmt__ListPoolsReq; typedef struct _Mgmt__ListPoolsResp Mgmt__ListPoolsResp; typedef struct _Mgmt__ListPoolsResp__Pool Mgmt__ListPoolsResp__Pool; @@ -380,15 +379,16 @@ struct _Mgmt__PoolExcludeReq */ char *sys; /* - * uuid or label of pool to exclude some targets + * uuid or label of pool to exclude some targets on each selected rank */ - char *id; + char *id; /* - * target to move to the down state + * Ranks to operate on */ - uint32_t rank; + size_t n_ranks; + uint32_t *ranks; /* - * target ranks + * Targets to move to the down state on each selected rank */ size_t n_target_idx; uint32_t *target_idx; @@ -398,10 +398,16 @@ struct _Mgmt__PoolExcludeReq size_t n_svc_ranks; uint32_t *svc_ranks; }; -#define MGMT__POOL_EXCLUDE_REQ__INIT \ - { PROTOBUF_C_MESSAGE_INIT (&mgmt__pool_exclude_req__descriptor) \ - , (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string, 0, 0,NULL, 0,NULL } - +#define MGMT__POOL_EXCLUDE_REQ__INIT \ + {PROTOBUF_C_MESSAGE_INIT(&mgmt__pool_exclude_req__descriptor), \ + (char *)protobuf_c_empty_string, \ + (char *)protobuf_c_empty_string, \ + 0, \ + NULL, \ + 0, \ + NULL, \ + 0, \ + NULL} /* * PoolExcludeResp returns resultant state of Exclude operation. @@ -430,15 +436,16 @@ struct _Mgmt__PoolDrainReq */ char *sys; /* - * uuid or label of pool to drain some targets + * uuid or label of pool to drain some targets on each selected rank */ char *id; /* - * rank to move to the down state + * Ranks to operate on */ - uint32_t rank; + size_t n_ranks; + uint32_t *ranks; /* - * rank targets + * Targets to move to the drain state on each selected rank */ size_t n_target_idx; uint32_t *target_idx; @@ -448,26 +455,38 @@ struct _Mgmt__PoolDrainReq size_t n_svc_ranks; uint32_t *svc_ranks; }; -#define MGMT__POOL_DRAIN_REQ__INIT \ - { PROTOBUF_C_MESSAGE_INIT (&mgmt__pool_drain_req__descriptor) \ - , (char *)protobuf_c_empty_string, (char *)protobuf_c_empty_string, 0, 0,NULL, 0,NULL } - +#define MGMT__POOL_DRAIN_REQ__INIT \ + {PROTOBUF_C_MESSAGE_INIT(&mgmt__pool_drain_req__descriptor), \ + (char *)protobuf_c_empty_string, \ + (char *)protobuf_c_empty_string, \ + 0, \ + NULL, \ + 0, \ + NULL, \ + 0, \ + NULL} /* - * PoolDrainResp returns resultant state of Drain operation. + * PoolRanksResp returns response from operation on multiple pool-ranks. */ -struct _Mgmt__PoolDrainResp -{ - ProtobufCMessage base; - /* - * DAOS error code - */ - int32_t status; +struct _Mgmt__PoolRanksResp { + ProtobufCMessage base; + /* + * DAOS error code for failed rank attempt + */ + int32_t status; + /* + * Rank ID that failed operation + */ + uint32_t failed_rank; + /* + * Pool-ranks that were successfully operated on + */ + size_t n_success_ranks; + uint32_t *success_ranks; }; -#define MGMT__POOL_DRAIN_RESP__INIT \ - { PROTOBUF_C_MESSAGE_INIT (&mgmt__pool_drain_resp__descriptor) \ - , 0 } - +#define MGMT__POOL_RANKS_RESP__INIT \ + {PROTOBUF_C_MESSAGE_INIT(&mgmt__pool_ranks_resp__descriptor), 0, 0, 0, NULL} /* * PoolExtendReq supplies pool identifier and rank list. 
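The Mgmt__PoolRanksResp message above encodes partial failure for multi-rank operations: the server walks the requested ranks in order and stops at the first error, so status carries the DAOS error code of the failing attempt, failed_rank names the rank that failed (CRT_NO_RANK, i.e. UINT32_MAX, when every rank succeeded), and success_ranks lists only the ranks completed before the failure. A minimal Python sketch of how a caller might interpret these three fields (the constant and function below are illustrative stand-ins, not generated bindings):

    CRT_NO_RANK = 0xFFFFFFFF  # nil-rank sentinel, mirrors UINT32_MAX in the C code

    def summarize_pool_ranks_resp(status, failed_rank, success_ranks):
        """Render a PoolRanksResp-style (status, failed_rank, success_ranks) result."""
        if status == 0 and failed_rank == CRT_NO_RANK:
            return f"all ranks processed: {sorted(success_ranks)}"
        # Ranks after failed_rank were never attempted, so success_ranks
        # holds only the ranks completed before the failure.
        return (f"rank {failed_rank} failed with DAOS status {status}; "
                f"completed ranks: {sorted(success_ranks)}")

    # Example: a drain of ranks 1-3 where rank 2 fails after rank 1 succeeds
    # (-1 stands in for a real DAOS error code).
    print(summarize_pool_ranks_resp(-1, 2, [1]))

The same response shape is reused by both PoolDrain and PoolReintegrate, which is why the per-operation PoolDrainResp/PoolReintResp messages are removed below.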
@@ -484,7 +503,7 @@ struct _Mgmt__PoolExtendReq */ char *id; /* - * ranks + * Ranks to operate on */ size_t n_ranks; uint32_t *ranks; @@ -543,15 +562,16 @@ struct _Mgmt__PoolReintReq { */ char *sys; /* - * uuid or label of pool to add target up to + * uuid or label of pool to reintegrate some targets on each selected rank */ char *id; /* - * target to move to the up state + * Ranks to operate on */ - uint32_t rank; + size_t n_ranks; + uint32_t *ranks; /* - * target ranks + * Targets to move to the reintegrate state on each rank */ size_t n_target_idx; uint32_t *target_idx; @@ -575,6 +595,7 @@ struct _Mgmt__PoolReintReq { (char *)protobuf_c_empty_string, \ (char *)protobuf_c_empty_string, \ 0, \ + NULL, \ 0, \ NULL, \ 0, \ @@ -583,18 +604,6 @@ struct _Mgmt__PoolReintReq { NULL, \ 0} -/* - * PoolReintResp returns resultant state of reintegrate operation. - */ -struct _Mgmt__PoolReintResp { - ProtobufCMessage base; - /* - * DAOS error code - */ - int32_t status; -}; -#define MGMT__POOL_REINT_RESP__INIT {PROTOBUF_C_MESSAGE_INIT(&mgmt__pool_reint_resp__descriptor), 0} - /* * ListPoolsReq represents a request to list pools on a given DAOS system. */ @@ -1326,25 +1335,19 @@ Mgmt__PoolDrainReq * void mgmt__pool_drain_req__free_unpacked (Mgmt__PoolDrainReq *message, ProtobufCAllocator *allocator); -/* Mgmt__PoolDrainResp methods */ -void mgmt__pool_drain_resp__init - (Mgmt__PoolDrainResp *message); -size_t mgmt__pool_drain_resp__get_packed_size - (const Mgmt__PoolDrainResp *message); -size_t mgmt__pool_drain_resp__pack - (const Mgmt__PoolDrainResp *message, - uint8_t *out); -size_t mgmt__pool_drain_resp__pack_to_buffer - (const Mgmt__PoolDrainResp *message, - ProtobufCBuffer *buffer); -Mgmt__PoolDrainResp * - mgmt__pool_drain_resp__unpack - (ProtobufCAllocator *allocator, - size_t len, - const uint8_t *data); -void mgmt__pool_drain_resp__free_unpacked - (Mgmt__PoolDrainResp *message, - ProtobufCAllocator *allocator); +/* Mgmt__PoolRanksResp methods */ +void +mgmt__pool_ranks_resp__init(Mgmt__PoolRanksResp *message); +size_t +mgmt__pool_ranks_resp__get_packed_size(const Mgmt__PoolRanksResp *message); +size_t +mgmt__pool_ranks_resp__pack(const Mgmt__PoolRanksResp *message, uint8_t *out); +size_t +mgmt__pool_ranks_resp__pack_to_buffer(const Mgmt__PoolRanksResp *message, ProtobufCBuffer *buffer); +Mgmt__PoolRanksResp * +mgmt__pool_ranks_resp__unpack(ProtobufCAllocator *allocator, size_t len, const uint8_t *data); +void +mgmt__pool_ranks_resp__free_unpacked(Mgmt__PoolRanksResp *message, ProtobufCAllocator *allocator); /* Mgmt__PoolExtendReq methods */ void mgmt__pool_extend_req__init (Mgmt__PoolExtendReq *message); @@ -1396,19 +1399,6 @@ Mgmt__PoolReintReq * mgmt__pool_reint_req__unpack(ProtobufCAllocator *allocator, size_t len, const uint8_t *data); void mgmt__pool_reint_req__free_unpacked(Mgmt__PoolReintReq *message, ProtobufCAllocator *allocator); -/* Mgmt__PoolReintResp methods */ -void -mgmt__pool_reint_resp__init(Mgmt__PoolReintResp *message); -size_t -mgmt__pool_reint_resp__get_packed_size(const Mgmt__PoolReintResp *message); -size_t -mgmt__pool_reint_resp__pack(const Mgmt__PoolReintResp *message, uint8_t *out); -size_t -mgmt__pool_reint_resp__pack_to_buffer(const Mgmt__PoolReintResp *message, ProtobufCBuffer *buffer); -Mgmt__PoolReintResp * -mgmt__pool_reint_resp__unpack(ProtobufCAllocator *allocator, size_t len, const uint8_t *data); -void -mgmt__pool_reint_resp__free_unpacked(Mgmt__PoolReintResp *message, ProtobufCAllocator *allocator); /* Mgmt__ListPoolsReq methods */ void 
 mgmt__list_pools_req__init (Mgmt__ListPoolsReq *message);
@@ -1805,9 +1795,7 @@ typedef void (*Mgmt__PoolExcludeResp_Closure)
 typedef void (*Mgmt__PoolDrainReq_Closure)
                  (const Mgmt__PoolDrainReq *message,
                   void *closure_data);
-typedef void (*Mgmt__PoolDrainResp_Closure)
-                 (const Mgmt__PoolDrainResp *message,
-                  void *closure_data);
+typedef void (*Mgmt__PoolRanksResp_Closure)(const Mgmt__PoolRanksResp *message, void *closure_data);
 typedef void (*Mgmt__PoolExtendReq_Closure)
                  (const Mgmt__PoolExtendReq *message,
                   void *closure_data);
@@ -1815,7 +1803,6 @@ typedef void (*Mgmt__PoolExtendResp_Closure)
                  (const Mgmt__PoolExtendResp *message,
                   void *closure_data);
 typedef void (*Mgmt__PoolReintReq_Closure)(const Mgmt__PoolReintReq *message, void *closure_data);
-typedef void (*Mgmt__PoolReintResp_Closure)(const Mgmt__PoolReintResp *message, void *closure_data);
 typedef void (*Mgmt__ListPoolsReq_Closure)
                  (const Mgmt__ListPoolsReq *message,
                   void *closure_data);
@@ -1896,11 +1883,10 @@ extern const ProtobufCMessageDescriptor mgmt__pool_evict_resp__descriptor;
 extern const ProtobufCMessageDescriptor mgmt__pool_exclude_req__descriptor;
 extern const ProtobufCMessageDescriptor mgmt__pool_exclude_resp__descriptor;
 extern const ProtobufCMessageDescriptor mgmt__pool_drain_req__descriptor;
-extern const ProtobufCMessageDescriptor mgmt__pool_drain_resp__descriptor;
+extern const ProtobufCMessageDescriptor mgmt__pool_ranks_resp__descriptor;
 extern const ProtobufCMessageDescriptor mgmt__pool_extend_req__descriptor;
 extern const ProtobufCMessageDescriptor mgmt__pool_extend_resp__descriptor;
 extern const ProtobufCMessageDescriptor mgmt__pool_reint_req__descriptor;
-extern const ProtobufCMessageDescriptor mgmt__pool_reint_resp__descriptor;
 extern const ProtobufCMessageDescriptor mgmt__list_pools_req__descriptor;
 extern const ProtobufCMessageDescriptor mgmt__list_pools_resp__descriptor;
 extern const ProtobufCMessageDescriptor mgmt__list_pools_resp__pool__descriptor;
diff --git a/src/mgmt/srv_drpc.c b/src/mgmt/srv_drpc.c
index 5918d45dc78..9aed7ed51b9 100644
--- a/src/mgmt/srv_drpc.c
+++ b/src/mgmt/srv_drpc.c
@@ -778,7 +778,7 @@ ds_mgmt_drpc_pool_exclude(Drpc__Call *drpc_req, Drpc__Response *drpc_resp)
 		D_GOTO(out, rc = -DER_NOMEM);
 
 	rc = pool_change_target_state(req->id, svc_ranks, req->n_target_idx, req->target_idx,
-				      req->rank, PO_COMP_ST_DOWN, 0 /* scm_size */,
+				      req->ranks[0], PO_COMP_ST_DOWN, 0 /* scm_size */,
 				      0 /* nvme_size */, 0 /* meta_size */);
 
 	d_rank_list_free(svc_ranks);
@@ -801,16 +801,24 @@ ds_mgmt_drpc_pool_exclude(Drpc__Call *drpc_req, Drpc__Response *drpc_resp)
 void
 ds_mgmt_drpc_pool_drain(Drpc__Call *drpc_req, Drpc__Response *drpc_resp)
 {
-	struct drpc_alloc   alloc = PROTO_ALLOCATOR_INIT(alloc);
+	struct drpc_alloc    alloc = PROTO_ALLOCATOR_INIT(alloc);
 	Mgmt__PoolDrainReq *req = NULL;
-	Mgmt__PoolDrainResp resp;
+	Mgmt__PoolRanksResp  resp;
+	d_rank_list_t       *tgt_ranks = NULL;
 	d_rank_list_t      *svc_ranks = NULL;
+	d_rank_list_t       *drained_ranks = NULL;
 	uint8_t            *body;
-	size_t              len;
-	int                 rc;
+	size_t               len;
+	int                  rc;
+	int                  rc2;
+	int                  i;
+	int                  j = 0;
 
-	mgmt__pool_drain_resp__init(&resp);
+	mgmt__pool_ranks_resp__init(&resp);
 
+	/* CRT_NO_RANK (UINT32_MAX) indicates that no rank has failed */
+	resp.failed_rank = CRT_NO_RANK;
+
 	/* Unpack the inner request from the drpc call body */
 	req = mgmt__pool_drain_req__unpack(&alloc.alloc,
 					   drpc_req->body.len,
@@ -822,16 +830,45 @@ ds_mgmt_drpc_pool_drain(Drpc__Call *drpc_req, Drpc__Response *drpc_resp)
 		return;
 	}
 
+	tgt_ranks = uint32_array_to_rank_list(req->ranks, req->n_ranks);
+	if (tgt_ranks == NULL)
+		D_GOTO(out, rc = -DER_NOMEM);
+
 	svc_ranks = uint32_array_to_rank_list(req->svc_ranks, req->n_svc_ranks);
 	if (svc_ranks == NULL)
-		D_GOTO(out, rc = -DER_NOMEM);
+		D_GOTO(out_tgt, rc = -DER_NOMEM);
 
-	rc = pool_change_target_state(req->id, svc_ranks, req->n_target_idx, req->target_idx,
-				      req->rank, PO_COMP_ST_DRAIN, 0 /* scm_size */,
-				      0 /* nvme_size */, 0 /* meta_size */);
+	drained_ranks = d_rank_list_alloc(req->n_ranks);
+	if (drained_ranks == NULL)
+		D_GOTO(out_svc, rc = -DER_NOMEM);
 
-	d_rank_list_free(svc_ranks);
+	/* Drain the requested ranks one at a time; stop at the first failure. */
+	for (i = 0; i < req->n_ranks; i++) {
+		rc = pool_change_target_state(
+		    req->id, svc_ranks, req->n_target_idx, req->target_idx, req->ranks[i],
+		    PO_COMP_ST_DRAIN, 0 /* scm_size */, 0 /* nvme_size */, 0 /* meta_size */);
+		if (rc != 0) {
+			resp.failed_rank = req->ranks[i];
+			goto out_list;
+		}
+
+		D_ASSERT(j < drained_ranks->rl_nr);
+		drained_ranks->rl_ranks[j++] = req->ranks[i];
+	}
+out_list:
+	/* Report only the ranks that were actually drained. */
+	drained_ranks->rl_nr = j;
+	rc2 = rank_list_to_uint32_array(drained_ranks, &resp.success_ranks, &resp.n_success_ranks);
+	if (rc2 != 0) {
+		D_ERROR("Failed to convert drained rank list: rc=%d\n", rc2);
+		if (rc == 0)
+			rc = rc2;
+	}
+	d_rank_list_free(drained_ranks);
+out_svc:
+	d_rank_list_free(svc_ranks);
+out_tgt:
+	d_rank_list_free(tgt_ranks);
 out:
 	resp.status = rc;
-	len = mgmt__pool_drain_resp__get_packed_size(&resp);
+	len = mgmt__pool_ranks_resp__get_packed_size(&resp);
@@ -974,7 +1011,7 @@ ds_mgmt_drpc_pool_reintegrate(Drpc__Call *drpc_req, Drpc__Response *drpc_resp)
 		D_GOTO(out, rc = -DER_NOMEM);
 
 	rc = pool_change_target_state(req->id, svc_ranks, req->n_target_idx, req->target_idx,
-				      req->rank, PO_COMP_ST_UP, scm_bytes, nvme_bytes,
+				      req->ranks[0], PO_COMP_ST_UP, scm_bytes, nvme_bytes,
 				      req->tier_bytes[DAOS_MEDIA_SCM] /* meta_size */);
 
 	d_rank_list_free(svc_ranks);
diff --git a/src/mgmt/tests/srv_drpc_tests.c b/src/mgmt/tests/srv_drpc_tests.c
index 02a99a08b5c..81509b2e735 100644
--- a/src/mgmt/tests/srv_drpc_tests.c
+++ b/src/mgmt/tests/srv_drpc_tests.c
@@ -1789,7 +1789,8 @@ setup_exclude_drpc_call(Drpc__Call *call, char *uuid, uint32_t rank)
 
 	req.id           = uuid;
 	req.n_target_idx = 3;
-	req.rank         = rank;
+	req.n_ranks      = 3;
+	req.ranks        = TEST_RANKS;
 	req.target_idx   = TEST_IDXS;
 	pack_pool_exclude_req(call, &req);
 }
@@ -1894,7 +1895,8 @@ setup_drain_drpc_call(Drpc__Call *call, char *uuid, uint32_t rank)
 
 	req.id           = uuid;
 	req.n_target_idx = 3;
-	req.rank         = rank;
+	req.n_ranks      = 3;
+	req.ranks        = TEST_RANKS;
 	req.target_idx   = TEST_IDXS;
 	pack_pool_drain_req(call, &req);
 }
diff --git a/src/proto/mgmt/mgmt.proto b/src/proto/mgmt/mgmt.proto
index 7cf6bcd0c52..1d9040f31b9 100644
--- a/src/proto/mgmt/mgmt.proto
+++ b/src/proto/mgmt/mgmt.proto
@@ -43,11 +43,11 @@ service MgmtSvc {
 	// Exclude a pool target.
 	rpc PoolExclude(PoolExcludeReq) returns (PoolExcludeResp) {}
 	// Drain a pool target.
-	rpc PoolDrain(PoolDrainReq) returns (PoolDrainResp) {}
+	rpc PoolDrain(PoolDrainReq) returns (PoolRanksResp) {}
 	// Extend a pool.
 	rpc PoolExtend(PoolExtendReq) returns (PoolExtendResp) {}
 	// Reintegrate a pool target.
-	rpc PoolReintegrate(PoolReintReq) returns (PoolReintResp) {}
+	rpc PoolReintegrate(PoolReintReq) returns (PoolRanksResp) {}
 	// PoolQuery queries a DAOS pool.
 	rpc PoolQuery(PoolQueryReq) returns (PoolQueryResp) {}
 	// PoolQueryTarget queries a DAOS storage target.
diff --git a/src/proto/mgmt/pool.proto b/src/proto/mgmt/pool.proto
index df92588499b..72d0a509b28 100644
--- a/src/proto/mgmt/pool.proto
+++ b/src/proto/mgmt/pool.proto
@@ -80,9 +80,9 @@ message PoolEvictResp {
 
 // PoolExcludeReq supplies pool identifier, rank, and target_idxs.
message PoolExcludeReq { string sys = 1; // DAOS system identifier - string id = 2; // uuid or label of pool to exclude some targets - uint32 rank = 3; // target to move to the down state - repeated uint32 target_idx = 4; // target ranks + string id = 2; // uuid or label of pool to exclude some targets on each selected rank + repeated uint32 ranks = 3; // Ranks to operate on + repeated uint32 target_idx = 4; // Targets to move to the down state on each selected rank repeated uint32 svc_ranks = 5; // List of pool service ranks } @@ -94,22 +94,25 @@ message PoolExcludeResp { // PoolDrainReq supplies pool identifier, rank, and target_idxs. message PoolDrainReq { string sys = 1; // DAOS system identifier - string id = 2; // uuid or label of pool to drain some targets - uint32 rank = 3; // rank to move to the down state - repeated uint32 target_idx = 4; // rank targets + string id = 2; // uuid or label of pool to drain some targets on each selected rank + repeated uint32 ranks = 3; // Ranks to operate on + repeated uint32 target_idx = 4; // Targets to move to the drain state on each selected rank repeated uint32 svc_ranks = 5; // List of pool service ranks } -// PoolDrainResp returns resultant state of Drain operation. -message PoolDrainResp { - int32 status = 1; // DAOS error code +// PoolRanksResp returns response from operation on multiple pool-ranks. +message PoolRanksResp +{ + int32 status = 1; // DAOS error code for failed rank attempt + uint32 failed_rank = 2; // Rank ID that failed operation + repeated uint32 success_ranks = 3; // Pool-ranks that were successfully operated on } // PoolExtendReq supplies pool identifier and rank list. message PoolExtendReq { string sys = 1; // DAOS system identifier string id = 2; // uuid or label of pool to add target up to - repeated uint32 ranks = 3; // ranks + repeated uint32 ranks = 3; // Ranks to operate on repeated uint32 svc_ranks = 4; // List of pool service ranks repeated uint64 tier_bytes = 5; // Size in bytes of storage tiers repeated uint32 fault_domains = 6; // fault domain tree, minimal format @@ -123,21 +126,17 @@ message PoolExtendResp { } // PoolReintReq supplies pool identifier, rank, and target_idxs. -message PoolReintReq { +message PoolReintReq +{ string sys = 1; // DAOS system identifier - string id = 2; // uuid or label of pool to add target up to - uint32 rank = 3; // target to move to the up state - repeated uint32 target_idx = 4; // target ranks + string id = 2; // uuid or label of pool to reintegrate some targets on each selected rank + repeated uint32 ranks = 3; // Ranks to operate on + repeated uint32 target_idx = 4; // Targets to move to the reintegrate state on each rank repeated uint32 svc_ranks = 5; // List of pool service ranks repeated uint64 tier_bytes = 6; // Size in bytes of storage tiers float mem_ratio = 7; // Fraction of meta-blob-sz to use as mem-file-sz } -// PoolReintResp returns resultant state of reintegrate operation. -message PoolReintResp { - int32 status = 1; // DAOS error code -} - // ListPoolsReq represents a request to list pools on a given DAOS system. message ListPoolsReq { string sys = 1; // DAOS system identifier diff --git a/src/proto/mgmt/system.proto b/src/proto/mgmt/system.proto index f5954975b6c..442972aac68 100644 --- a/src/proto/mgmt/system.proto +++ b/src/proto/mgmt/system.proto @@ -79,13 +79,13 @@ message SystemExcludeResp { repeated shared.RankResult results = 1; } -// Results for system OSA calls on multiple pool-ranks. 
-message PoolRankResult +// Results for operations on multiple pool-ranks. +message PoolRanksResult { int32 status = 1; // Status of the OSA operation on a specific pool string msg = 2; // Error message if status indicates an error - string pool_id = 3; // Label or uuid of pool - string ranks = 4; // Rank-set that has encountered this result + string id = 3; // Label or uuid of pool + string ranks = 4; // rankset that has encountered this result } // SystemDrainReq supplies system-drain parameters. @@ -101,7 +101,7 @@ message SystemDrainReq message SystemDrainResp { bool reint = 1; // Flag to indicate if results are for drain or reint. - repeated PoolRankResult results = 2; // Results for drain or reint calls on pool-ranks. + repeated PoolRanksResult results = 2; // Results for drain or reint calls on pool-ranks. } // SystemQueryReq supplies system query parameters. diff --git a/src/tests/ftest/deployment/network_failure.py b/src/tests/ftest/deployment/network_failure.py index 84c7b259370..9bce449dc5a 100644 --- a/src/tests/ftest/deployment/network_failure.py +++ b/src/tests/ftest/deployment/network_failure.py @@ -1,5 +1,6 @@ """ (C) Copyright 2022-2024 Intel Corporation. + (C) Copyright 2025 Hewlett Packard Enterprise Development LP SPDX-License-Identifier: BSD-2-Clause-Patent """ @@ -234,7 +235,7 @@ def verify_network_failure(self, ior_namespace, container_namespace): # 7. Call dmg pool reintegrate one rank at a time to enable all ranks. self.log_step("Reintegrate one rank at a time to enable all ranks.") for disabled_rank in disabled_ranks: - self.pool.reintegrate(rank=disabled_rank) + self.pool.reintegrate(ranks=disabled_rank) self.pool.wait_for_rebuild_to_start(interval=5) self.pool.wait_for_rebuild_to_end(interval=10) diff --git a/src/tests/ftest/deployment/server_rank_failure.py b/src/tests/ftest/deployment/server_rank_failure.py index d1637fe54eb..a1216c686e3 100644 --- a/src/tests/ftest/deployment/server_rank_failure.py +++ b/src/tests/ftest/deployment/server_rank_failure.py @@ -1,5 +1,6 @@ """ (C) Copyright 2022-2024 Intel Corporation. + (C) Copyright 2025 Hewlett Packard Enterprise Development LP SPDX-License-Identifier: BSD-2-Clause-Patent """ @@ -191,7 +192,7 @@ def verify_rank_failure(self, ior_namespace): for disabled_rank in disabled_ranks: while True: try: - self.pool.reintegrate(rank=disabled_rank) + self.pool.reintegrate(ranks=disabled_rank) break except CommandFailure as error: self.log.debug("## pool reintegrate error: %s", error) diff --git a/src/tests/ftest/deployment/target_failure.py b/src/tests/ftest/deployment/target_failure.py index 77419b4608a..f4d01217763 100644 --- a/src/tests/ftest/deployment/target_failure.py +++ b/src/tests/ftest/deployment/target_failure.py @@ -1,5 +1,6 @@ """ (C) Copyright 2022-2024 Intel Corporation. + (C) Copyright 2025 Hewlett Packard Enterprise Development LP SPDX-License-Identifier: BSD-2-Clause-Patent """ @@ -134,10 +135,10 @@ def verify_failure_with_protection(self, ior_namespace): self.log.info("Reintegrate rank 1 target 1") # Reintegrate one target and wait for rebuild to finish before reintegrating the # next one. 
-        self.pool.reintegrate(rank="1", tgt_idx="1")
+        self.pool.reintegrate(ranks="1", tgt_idx="1")
         self.pool.measure_rebuild_time(operation="Reintegrate rank 1 -> target 1", interval=5)
 
         self.log.info("Reintegrate rank 0 target 1")
-        self.pool.reintegrate(rank="0", tgt_idx="1")
+        self.pool.reintegrate(ranks="0", tgt_idx="1")
         self.pool.measure_rebuild_time(operation="Reintegrate rank 0 -> target 1", interval=5)
 
         self.container.set_prop(prop='status', value="healthy")
@@ -242,7 +243,7 @@ def test_target_failure_wo_rf(self):
             errors.append("Container health isn't UNCLEAN after first IOR!")
 
         # 6. Reintegrate the excluded target.
-        self.pool.reintegrate(rank="1", tgt_idx="0")
+        self.pool.reintegrate(ranks="1", tgt_idx="0")
         self.pool.measure_rebuild_time(operation="Reintegrate 1 target", interval=5)
 
         self.container.set_prop(prop='status', value="healthy")
@@ -385,7 +386,7 @@ def test_target_failure_parallel(self):
 
         # 7. Reintegrate the excluded target.
         self.log.info("Reintegrate target")
-        self.pool[excluded_pool_num].reintegrate(rank="1", tgt_idx="0")
+        self.pool[excluded_pool_num].reintegrate(ranks="1", tgt_idx="0")
         self.pool[excluded_pool_num].measure_rebuild_time(
             operation="Reintegrate 1 target", interval=5)
 
diff --git a/src/tests/ftest/util/dmg_utils.py b/src/tests/ftest/util/dmg_utils.py
index cbca403895a..cf7e3515dc1 100644
--- a/src/tests/ftest/util/dmg_utils.py
+++ b/src/tests/ftest/util/dmg_utils.py
@@ -1,5 +1,6 @@
 """
   (C) Copyright 2018-2024 Intel Corporation.
+  (C) Copyright 2025 Hewlett Packard Enterprise Development LP
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """
@@ -888,13 +889,13 @@ def pool_get_prop(self, pool, name=None):
         """
         return self._get_json_result(("pool", "get-prop"), pool=pool, name=name)
 
-    def pool_exclude(self, pool, rank, tgt_idx=None):
+    def pool_exclude(self, pool, ranks, tgt_idx=None):
         """Exclude a daos_server from the pool.
 
         Args:
             pool (str): Pool uuid.
-            rank (int): Rank of the daos_server to exclude
-            tgt_idx (int): target to be excluded from the pool
+            ranks (str): Ranks of the daos_server to exclude (comma-separated, ranges allowed)
+            tgt_idx (str): targets to be excluded on each selected rank
 
         Returns:
             CmdResult: Object that contains exit status, stdout, and other
@@ -905,7 +906,7 @@ def pool_exclude(self, pool, rank, tgt_idx=None):
 
         """
         return self._get_result(
-            ("pool", "exclude"), pool=pool, rank=rank, tgt_idx=tgt_idx)
+            ("pool", "exclude"), pool=pool, ranks=ranks, tgt_idx=tgt_idx)
 
     def pool_extend(self, pool, ranks):
         """Extend the daos_server pool.
@@ -925,13 +926,13 @@ def pool_extend(self, pool, ranks):
         return self._get_result(
             ("pool", "extend"), pool=pool, ranks=ranks)
 
-    def pool_drain(self, pool, rank, tgt_idx=None):
+    def pool_drain(self, pool, ranks, tgt_idx=None):
         """Drain a daos_server from the pool.
 
         Args:
             pool (str): Pool uuid.
-            rank (int): Rank of the daos_server to drain
-            tgt_idx (int): target to be excluded from the pool
+            ranks (str): Ranks of the daos_server to drain (comma-separated, ranges allowed)
+            tgt_idx (str): targets to be drained on each selected rank
 
         Returns:
             CmdResult: Object that contains exit status, stdout, and other
@@ -942,15 +943,15 @@ def pool_drain(self, pool, rank, tgt_idx=None):
 
         """
         return self._get_result(
-            ("pool", "drain"), pool=pool, rank=rank, tgt_idx=tgt_idx)
+            ("pool", "drain"), pool=pool, ranks=ranks, tgt_idx=tgt_idx)
 
-    def pool_reintegrate(self, pool, rank, tgt_idx=None):
+    def pool_reintegrate(self, pool, ranks, tgt_idx=None):
         """Reintegrate a daos_server to the pool.
 
         Args:
             pool (str): Pool uuid.
-            rank (int): Rank of the daos_server to reintegrate
-            tgt_idx (int): target to be reintegrated to the pool
+            ranks (str): Ranks of the daos_server to reintegrate (comma-separated, ranges allowed)
+            tgt_idx (str): targets to be reintegrated on each selected rank
 
         Returns:
             CmdResult: Object that contains exit status, stdout, and other
@@ -961,7 +962,7 @@ def pool_reintegrate(self, pool, rank, tgt_idx=None):
 
         """
         return self._get_result(
-            ("pool", "reintegrate"), pool=pool, rank=rank, tgt_idx=tgt_idx)
+            ("pool", "reintegrate"), pool=pool, ranks=ranks, tgt_idx=tgt_idx)
 
     def cont_set_owner(self, pool, cont, user=None, group=None):
         """Dmg container set-owner to the specified new user/group.
diff --git a/src/tests/ftest/util/dmg_utils_base.py b/src/tests/ftest/util/dmg_utils_base.py
index 4787c05d393..8d8e97f8fe5 100644
--- a/src/tests/ftest/util/dmg_utils_base.py
+++ b/src/tests/ftest/util/dmg_utils_base.py
@@ -1,5 +1,6 @@
 """
   (C) Copyright 2020-2024 Intel Corporation.
+  (C) Copyright 2025 Hewlett Packard Enterprise Development LP
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """
@@ -457,7 +458,7 @@ def __init__(self):
             """Create a dmg pool drain command object."""
             super().__init__("/run/dmg/pool/drain/*", "drain")
             self.pool = BasicParameter(None, position=1)
-            self.rank = FormattedParameter("--rank={}", None)
+            self.ranks = FormattedParameter("--ranks={}", None)
             self.tgt_idx = FormattedParameter("--target-idx={}", None)
 
     class EvictSubCommand(CommandWithParameters):
@@ -475,7 +476,7 @@ def __init__(self):
             """Create a dmg pool exclude command object."""
             super().__init__("/run/dmg/pool/exclude/*", "exclude")
             self.pool = BasicParameter(None, position=1)
-            self.rank = FormattedParameter("--rank={}", None)
+            self.ranks = FormattedParameter("--ranks={}", None)
             self.tgt_idx = FormattedParameter("--target-idx={}", None)
 
     class ExtendSubCommand(CommandWithParameters):
@@ -552,7 +553,7 @@ def __init__(self):
             """Create a dmg pool reintegrate command object."""
             super().__init__("/run/dmg/pool/reintegrate/*", "reintegrate")
             self.pool = BasicParameter(None, position=1)
-            self.rank = FormattedParameter("--rank={}", None)
+            self.ranks = FormattedParameter("--ranks={}", None)
             self.tgt_idx = FormattedParameter("--target-idx={}", None)
 
     class SetPropSubCommand(CommandWithParameters):

From 577695ed9196396910199a7189c0a67067706bd9 Mon Sep 17 00:00:00 2001
From: rpadma2
Date: Fri, 24 Jan 2025 14:31:57 -0500
Subject: [PATCH 2/8] DAOS-4753 test: Initial tests for validating multiple ranks.
Test-tag: test_osa_online_drain_with_multiple_ranks test_osa_offline_drain_with_multiple_ranks
Skip-unit-tests: true
Summary:
- Add new OSA tests to validate multiple ranks support via dmg commands.
- Updates are done for drain and reintegration tests (offline/online) Signed-off-by: rpadma2 --- src/tests/ftest/osa/offline_drain.py | 542 ++++++----- src/tests/ftest/osa/offline_drain.yaml | 217 ++--- src/tests/ftest/osa/online_drain.py | 17 + src/tests/ftest/osa/online_drain.yaml | 3 + src/tests/ftest/util/osa_utils.py | 890 +++++++++--------- utils/cq/check_update_copyright.sh | 2 +- utils/githooks/find_base.sh | 3 +- utils/githooks/git-version.sh | 7 +- utils/githooks/hook_base.sh | 7 +- utils/githooks/pre-commit | 5 +- .../pre-commit.d/10-update-copyright.sh | 3 +- utils/githooks/pre-commit.d/20-codespell.sh | 3 +- utils/githooks/pre-commit.d/30-Jenkinsfile.sh | 3 +- utils/githooks/pre-commit.d/40-yamllint.sh | 3 +- .../githooks/pre-commit.d/50-clang-format.sh | 9 +- utils/githooks/pre-commit.d/60-gofmt.sh | 3 +- utils/githooks/pre-commit.d/70-isort.sh | 13 +- utils/githooks/pre-commit.d/71-flake.sh | 3 +- utils/githooks/pre-commit.d/72-pylint.sh | 3 +- utils/githooks/pre-commit.d/73-ftest.sh | 3 +- utils/githooks/prepare-commit-msg | 5 +- utils/rpms/packaging/get_release_branch | 16 +- 22 files changed, 913 insertions(+), 847 deletions(-) diff --git a/src/tests/ftest/osa/offline_drain.py b/src/tests/ftest/osa/offline_drain.py index f24ea91016a..9a5cde4bfec 100644 --- a/src/tests/ftest/osa/offline_drain.py +++ b/src/tests/ftest/osa/offline_drain.py @@ -1,260 +1,282 @@ -""" - (C) Copyright 2020-2023 Intel Corporation. - - SPDX-License-Identifier: BSD-2-Clause-Patent -""" -import random - -from nvme_utils import ServerFillUp -from osa_utils import OSAUtils -from test_utils_pool import add_pool -from write_host_file import write_host_file - - -class OSAOfflineDrain(OSAUtils, ServerFillUp): - # pylint: disable=too-many-ancestors - """ - Test Class Description: This test runs - daos_server offline drain test cases. - - :avocado: recursive - """ - - def setUp(self): - """Set up for test case.""" - super().setUp() - self.dmg_command = self.get_dmg_command() - self.ranks = self.params.get("rank_list", '/run/test_ranks/*') - self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*') - self.ior_test_sequence = self.params.get( - "ior_test_sequence", '/run/ior/iorflags/*') - # Recreate the client hostfile without slots defined - self.hostfile_clients = write_host_file(self.hostlist_clients, self.workdir) - - def run_offline_drain_test(self, num_pool, data=False, oclass=None, pool_fillup=0): - """Run the offline drain without data. - - Args: - num_pool (int) : total pools to create for testing purposes. - data (bool) : whether pool has no data or to create some data in pool. - Defaults to False. - oclass (str): DAOS object class (eg: RP_2G1,etc) - """ - # Create a pool - pool = {} - target_list = [] - - if oclass is None: - oclass = self.ior_cmd.dfs_oclass.value - - # Exclude target : random two targets (target idx : 0-7) - exc = random.randint(0, 6) # nosec - target_list.append(exc) - target_list.append(exc + 1) - t_string = "{},{}".format(target_list[0], target_list[1]) - - for val in range(0, num_pool): - pool[val] = add_pool(self, connect=False) - self.pool = pool[val] - self.pool.set_property("reclaim", "disabled") - test_seq = self.ior_test_sequence[0] - - if data: - # if pool_fillup is greater than 0, then - # use start_ior_load method from nvme_utils.py. - # Otherwise, use the osa_utils.py run_ior_thread - # method. 
- if pool_fillup > 0: - self.ior_cmd.dfs_oclass.update(oclass) - self.ior_cmd.dfs_dir_oclass.update(oclass) - self.ior_default_flags = self.ior_w_flags - self.log.info(self.pool.pool_percentage_used()) - self.start_ior_load(storage='NVMe', operation="Auto_Write", percent=pool_fillup) - self.log.info(self.pool.pool_percentage_used()) - else: - self.run_ior_thread("Write", oclass, test_seq) - self.run_mdtest_thread(oclass) - if self.test_with_snapshot is True: - # Create a snapshot of the container - # after IOR job completes. - self.container.create_snap() - self.log.info("Created container snapshot: %s", self.container.epoch) - if self.test_during_aggregation is True: - self.run_ior_thread("Write", oclass, test_seq) - - # Drain ranks and targets - for val in range(0, num_pool): - # Drain ranks provided in YAML file - for index, rank in enumerate(self.ranks): - self.pool = pool[val] - # If we are testing using multiple pools, reintegrate - # the rank back and then drain. - self.pool.display_pool_daos_space("Pool space: Beginning") - # Get initial total free space (scm+nvme) - initial_total_space = self.pool.get_total_space(refresh=True) - pver_begin = self.pool.get_version(True) - self.log.info("Pool Version at the beginning %s", pver_begin) - if self.test_during_aggregation is True and index == 0: - self.pool.set_property("reclaim", "time") - self.delete_extra_container(self.pool) - self.simple_osa_reintegrate_loop(rank=rank, action="drain") - if (self.test_during_rebuild is True and val == 0): - # Exclude rank 3 - self.pool.exclude([3]) - self.pool.wait_for_rebuild_to_start() - # If the pool is filled up just drain only a single rank. - if pool_fillup > 0 and index > 0: - continue - output = self.pool.drain(rank, t_string) - self.print_and_assert_on_rebuild_failure(output) - total_space_after_drain = self.pool.get_total_space(refresh=True) - - pver_drain = self.pool.get_version(True) - self.log.info("Pool Version after drain %d", pver_drain) - # Check pool version incremented after pool drain - self.assertGreater(pver_drain, (pver_begin + 1), - "Pool Version Error: After drain") - if self.test_during_aggregation is False: - self.assertGreater(initial_total_space, total_space_after_drain, - "Expected total space after drain is more than initial") - if num_pool > 1: - output = self.pool.reintegrate(rank, t_string) - self.print_and_assert_on_rebuild_failure(output) - total_space_after_reintegration = self.pool.get_total_space(refresh=True) - self.assertGreater( - total_space_after_reintegration, total_space_after_drain, - "Expected total space after reintegration is less than drain") - if (self.test_during_rebuild is True and val == 0): - # Reintegrate rank 3 - output = self.pool.reintegrate("3") - self.print_and_assert_on_rebuild_failure(output) - total_space_after_reintegration = self.pool.get_total_space(refresh=True) - self.assertGreater( - total_space_after_reintegration, total_space_after_drain, - "Expected total space after reintegration is less than drain") - - for val in range(0, num_pool): - display_string = "Pool{} space at the End".format(val) - pool[val].display_pool_daos_space(display_string) - if data: - if pool_fillup > 0: - self.start_ior_load(storage='NVMe', operation='Auto_Read', percent=pool_fillup) - else: - self.run_ior_thread("Read", oclass, test_seq) - self.run_mdtest_thread(oclass) - self.container = self.pool_cont_dict[self.pool][0] - self.container.daos.env['UCX_LOG_LEVEL'] = 'error' - self.container.check() - - def test_osa_offline_drain(self): - """JIRA ID: 
DAOS-4750. - - Test Description: Validate Offline Drain - - :avocado: tags=all,pr,daily_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,checksum,ior - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain - """ - self.log.info("Offline Drain : Basic Drain") - self.run_offline_drain_test(1, True) - - def test_osa_offline_drain_without_checksum(self): - """Test ID: DAOS-7159. - - Test Description: Validate Offline Drain without enabling checksum in container properties. - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_without_checksum - """ - self.test_with_checksum = self.params.get("test_with_checksum", "/run/checksum/*") - self.log.info("Offline Drain : Without Checksum") - self.run_offline_drain_test(1, data=True) - - def test_osa_offline_drain_during_aggregation(self): - """Test ID: DAOS-7159. - - Test Description: Validate Offline Drain during aggregation - - :avocado: tags=all,daily_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,checksum - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_during_aggregation - """ - self.test_during_aggregation = self.params.get( - "test_with_aggregation", "/run/aggregation/*") - self.log.info("Offline Drain : During Aggregation") - self.run_offline_drain_test(1, data=True) - - def test_osa_offline_drain_oclass(self): - """Test ID: DAOS-7159. - - Test Description: Validate Offline Drain with different object class - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_oclass - """ - self.test_with_checksum = self.params.get("test_with_checksum", "/run/checksum/*") - self.log.info("Offline Drain : Oclass") - for oclass in self.test_oclass: - self.run_offline_drain_test(1, data=True, oclass=oclass) - - def test_osa_offline_drain_multiple_pools(self): - """Test ID: DAOS-7159. - - Test Description: Validate Offline Drain with multiple pools - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_multiple_pools - """ - self.log.info("Offline Drain : Multiple Pools") - self.run_offline_drain_test(2, data=True) - - def test_osa_offline_drain_during_rebuild(self): - """Test ID: DAOS-7159. - - Test Description: Validate Offline Drain during rebuild - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,rebuild - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_during_rebuild - """ - self.test_during_rebuild = self.params.get("test_with_rebuild", "/run/rebuild/*") - self.log.info("Offline Drain : During Rebuild") - self.run_offline_drain_test(1, data=True) - - def test_osa_offline_drain_after_snapshot(self): - """Test ID: DAOS-8057. - - Test Description: Validate Offline Drain after taking snapshot. - - :avocado: tags=all,daily_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,checksum - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_after_snapshot - """ - self.test_with_snapshot = self.params.get("test_with_snapshot", "/run/snapshot/*") - self.log.info("Offline Drain : After taking snapshot") - self.run_offline_drain_test(1, data=True) - - def test_osa_offline_drain_with_less_pool_space(self): - """Test ID: DAOS-7160. 
- - Test Description: Drain rank after with less pool space. - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,offline_drain_full - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_with_less_pool_space - """ - self.log.info("Offline Drain : Test with less pool space") - oclass = self.params.get("pool_test_oclass", '/run/pool_capacity/*') - pool_fillup = self.params.get("pool_fillup", '/run/pool_capacity/*') - self.run_offline_drain_test(1, data=True, oclass=oclass, pool_fillup=pool_fillup) +""" + (C) Copyright 2020-2023 Intel Corporation. + (C) Copyright 2025 Hewlett Packard Enterprise Development LP + + SPDX-License-Identifier: BSD-2-Clause-Patent +""" +import random + +from nvme_utils import ServerFillUp +from osa_utils import OSAUtils +from test_utils_pool import add_pool +from write_host_file import write_host_file + + +class OSAOfflineDrain(OSAUtils, ServerFillUp): + # pylint: disable=too-many-ancestors + """ + Test Class Description: This test runs + daos_server offline drain test cases. + + :avocado: recursive + """ + + def setUp(self): + """Set up for test case.""" + super().setUp() + self.dmg_command = self.get_dmg_command() + self.ranks = self.params.get("rank_list", '/run/test_ranks/*') + self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*') + self.ior_test_sequence = self.params.get( + "ior_test_sequence", '/run/ior/iorflags/*') + # Recreate the client hostfile without slots defined + self.hostfile_clients = write_host_file(self.hostlist_clients, self.workdir) + self.multiple_ranks = None + + def run_offline_drain_test(self, num_pool, data=False, oclass=None, pool_fillup=0): + """Run the offline drain without data. + + Args: + num_pool (int) : total pools to create for testing purposes. + data (bool) : whether pool has no data or to create some data in pool. + Defaults to False. + oclass (str): DAOS object class (eg: RP_2G1,etc) + """ + # Create a pool + pool = {} + target_list = [] + + if oclass is None: + oclass = self.ior_cmd.dfs_oclass.value + + # For testing with multiple ranks as dmg parameters, use a list of ranks. + if self.test_with_multiple_ranks is True: + self.ranks = self.multiple_ranks + + # Exclude target : random two targets (target idx : 0-7) + exc = random.randint(0, 6) # nosec + target_list.append(exc) + target_list.append(exc + 1) + t_string = "{},{}".format(target_list[0], target_list[1]) + + for val in range(0, num_pool): + pool[val] = add_pool(self, connect=False) + self.pool = pool[val] + self.pool.set_property("reclaim", "disabled") + test_seq = self.ior_test_sequence[0] + + if data: + # if pool_fillup is greater than 0, then + # use start_ior_load method from nvme_utils.py. + # Otherwise, use the osa_utils.py run_ior_thread + # method. + if pool_fillup > 0: + self.ior_cmd.dfs_oclass.update(oclass) + self.ior_cmd.dfs_dir_oclass.update(oclass) + self.ior_default_flags = self.ior_w_flags + self.log.info(self.pool.pool_percentage_used()) + self.start_ior_load(storage='NVMe', operation="Auto_Write", percent=pool_fillup) + self.log.info(self.pool.pool_percentage_used()) + else: + self.run_ior_thread("Write", oclass, test_seq) + self.run_mdtest_thread(oclass) + if self.test_with_snapshot is True: + # Create a snapshot of the container + # after IOR job completes. 
+ self.container.create_snap() + self.log.info("Created container snapshot: %s", self.container.epoch) + if self.test_during_aggregation is True: + self.run_ior_thread("Write", oclass, test_seq) + + # Drain ranks and targets + for val in range(0, num_pool): + # Drain ranks provided in YAML file + for index, rank in enumerate(self.ranks): + self.pool = pool[val] + # If we are testing using multiple pools, reintegrate + # the rank back and then drain. + self.pool.display_pool_daos_space("Pool space: Beginning") + # Get initial total free space (scm+nvme) + initial_total_space = self.pool.get_total_space(refresh=True) + pver_begin = self.pool.get_version(True) + self.log.info("Pool Version at the beginning %s", pver_begin) + if self.test_during_aggregation is True and index == 0: + self.pool.set_property("reclaim", "time") + self.delete_extra_container(self.pool) + self.simple_osa_reintegrate_loop(rank=rank, action="drain") + if (self.test_during_rebuild is True and val == 0): + # Exclude rank 3 + self.pool.exclude([3]) + self.pool.wait_for_rebuild_to_start() + # If the pool is filled up just drain only a single rank. + if pool_fillup > 0 and index > 0: + continue + output = self.pool.drain(rank, t_string) + self.print_and_assert_on_rebuild_failure(output) + total_space_after_drain = self.pool.get_total_space(refresh=True) + + pver_drain = self.pool.get_version(True) + self.log.info("Pool Version after drain %d", pver_drain) + # Check pool version incremented after pool drain + self.assertGreater(pver_drain, (pver_begin + 1), + "Pool Version Error: After drain") + if self.test_during_aggregation is False: + self.assertGreater(initial_total_space, total_space_after_drain, + "Expected total space after drain is more than initial") + if num_pool > 1: + output = self.pool.reintegrate(rank, t_string) + self.print_and_assert_on_rebuild_failure(output) + total_space_after_reintegration = self.pool.get_total_space(refresh=True) + self.assertGreater( + total_space_after_reintegration, total_space_after_drain, + "Expected total space after reintegration is less than drain") + if (self.test_during_rebuild is True and val == 0): + # Reintegrate rank 3 + output = self.pool.reintegrate("3") + self.print_and_assert_on_rebuild_failure(output) + total_space_after_reintegration = self.pool.get_total_space(refresh=True) + self.assertGreater( + total_space_after_reintegration, total_space_after_drain, + "Expected total space after reintegration is less than drain") + + for val in range(0, num_pool): + display_string = "Pool{} space at the End".format(val) + pool[val].display_pool_daos_space(display_string) + if data: + if pool_fillup > 0: + self.start_ior_load(storage='NVMe', operation='Auto_Read', percent=pool_fillup) + else: + self.run_ior_thread("Read", oclass, test_seq) + self.run_mdtest_thread(oclass) + self.container = self.pool_cont_dict[self.pool][0] + self.container.daos.env['UCX_LOG_LEVEL'] = 'error' + self.container.check() + + def test_osa_offline_drain(self): + """JIRA ID: DAOS-4750. + + Test Description: Validate Offline Drain + + :avocado: tags=all,pr,daily_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,checksum,ior + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain + """ + self.log.info("Offline Drain : Basic Drain") + self.run_offline_drain_test(1, True) + + def test_osa_offline_drain_without_checksum(self): + """Test ID: DAOS-7159. + + Test Description: Validate Offline Drain without enabling checksum in container properties. 
+ + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_without_checksum + """ + self.test_with_checksum = self.params.get("test_with_checksum", "/run/checksum/*") + self.log.info("Offline Drain : Without Checksum") + self.run_offline_drain_test(1, data=True) + + def test_osa_offline_drain_during_aggregation(self): + """Test ID: DAOS-7159. + + Test Description: Validate Offline Drain during aggregation + + :avocado: tags=all,daily_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,checksum + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_during_aggregation + """ + self.test_during_aggregation = self.params.get( + "test_with_aggregation", "/run/aggregation/*") + self.log.info("Offline Drain : During Aggregation") + self.run_offline_drain_test(1, data=True) + + def test_osa_offline_drain_oclass(self): + """Test ID: DAOS-7159. + + Test Description: Validate Offline Drain with different object class + + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_oclass + """ + self.test_with_checksum = self.params.get("test_with_checksum", "/run/checksum/*") + self.log.info("Offline Drain : Oclass") + for oclass in self.test_oclass: + self.run_offline_drain_test(1, data=True, oclass=oclass) + + def test_osa_offline_drain_multiple_pools(self): + """Test ID: DAOS-7159. + + Test Description: Validate Offline Drain with multiple pools + + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_multiple_pools + """ + self.log.info("Offline Drain : Multiple Pools") + self.run_offline_drain_test(2, data=True) + + def test_osa_offline_drain_during_rebuild(self): + """Test ID: DAOS-7159. + + Test Description: Validate Offline Drain during rebuild + + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,rebuild + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_during_rebuild + """ + self.test_during_rebuild = self.params.get("test_with_rebuild", "/run/rebuild/*") + self.log.info("Offline Drain : During Rebuild") + self.run_offline_drain_test(1, data=True) + + def test_osa_offline_drain_after_snapshot(self): + """Test ID: DAOS-8057. + + Test Description: Validate Offline Drain after taking snapshot. + + :avocado: tags=all,daily_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,checksum + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_after_snapshot + """ + self.test_with_snapshot = self.params.get("test_with_snapshot", "/run/snapshot/*") + self.log.info("Offline Drain : After taking snapshot") + self.run_offline_drain_test(1, data=True) + + def test_osa_offline_drain_with_less_pool_space(self): + """Test ID: DAOS-7160. + + Test Description: Drain rank after with less pool space. 
+
+        :avocado: tags=all,full_regression
+        :avocado: tags=hw,medium
+        :avocado: tags=osa,osa_drain,offline_drain,offline_drain_full
+        :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_with_less_pool_space
+        """
+        self.log.info("Offline Drain : Test with less pool space")
+        oclass = self.params.get("pool_test_oclass", '/run/pool_capacity/*')
+        pool_fillup = self.params.get("pool_fillup", '/run/pool_capacity/*')
+        self.run_offline_drain_test(1, data=True, oclass=oclass, pool_fillup=pool_fillup)
+
+    def test_osa_offline_drain_with_multiple_ranks(self):
+        """Test ID: DAOS-4753.
+
+        Test Description: Drain multiple ranks at the same time.
+
+        :avocado: tags=all,full_regression
+        :avocado: tags=hw,medium
+        :avocado: tags=osa,osa_drain,offline_drain,offline_drain_full
+        :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_with_multiple_ranks
+        """
+        self.log.info("Offline Drain : Test with multiple ranks")
+        self.test_with_multiple_ranks = self.params.get("test_with_multiple_ranks",
+                                                        '/run/multiple_ranks/*')
+        self.multiple_ranks = self.params.get("rank_list", '/run/multiple_ranks/*')
+        self.run_offline_drain_test(1, data=True)
diff --git a/src/tests/ftest/osa/offline_drain.yaml b/src/tests/ftest/osa/offline_drain.yaml
index a8776edd4a4..76182b8e1ff 100644
--- a/src/tests/ftest/osa/offline_drain.yaml
+++ b/src/tests/ftest/osa/offline_drain.yaml
@@ -1,107 +1,110 @@
-hosts:
-  test_servers: 3
-  test_clients: 1
-timeout: 2400
-setup:
-  start_servers_once: false
-skip_add_log_msg: true
-server_config:
-  name: daos_server
-  engines_per_host: 2
-  engines:
-    0:
-      pinned_numa_node: 0
-      nr_xs_helpers: 1
-      fabric_iface: ib0
-      fabric_iface_port: 31416
-      log_file: daos_server0.log
-      log_mask: INFO,MEM=ERR
-      env_vars:
-        - DD_MASK=mgmt,md
-      storage: auto
-    1:
-      pinned_numa_node: 1
-      nr_xs_helpers: 1
-      fabric_iface: ib1
-      fabric_iface_port: 31516
-      log_file: daos_server1.log
-      log_mask: INFO,MEM=ERR
-      env_vars:
-        - DD_MASK=mgmt,md
-      storage: auto
-pool:
-  scm_size: 12000000000
-  nvme_size: 108000000000
-  svcn: 4
-  rebuild_timeout: 240
-  properties: scrub:timed
-container:
-  type: POSIX
-  control_method: daos
-  oclass: RP_3G6
-  properties: cksum:crc64,cksum_size:16384,srv_cksum:on,rd_fac:2
-dkeys:
-  single:
-    no_of_dkeys:
-      - 50
-akeys:
-  single:
-    no_of_akeys:
-      - 10
-record:
-  1KB:
-    length:
-      - 1024
-ior:
-  clientslots:
-    slots: 48
-  test_file: /testFile
-  repetitions: 1
-  dfs_destroy: false
-  iorflags:
-    write_flags: "-w -F -k -G 1"
-    read_flags: "-F -r -R -k -G 1"
-    api: DFS
-    dfs_oclass: RP_3G6
-    dfs_dir_oclass: RP_3G6
-    ior_test_sequence:
-      # - [scmsize, nvmesize, transfersize, blocksize]
-      # The values are set to be in the multiples of 10.
-      # Values are appx GB.
-      - [12000000000, 108000000000, 500000, 500000000]
-mdtest:
-  api: DFS
-  client_processes:
-    np: 30
-  num_of_files_dirs: 4067    # creating total of 120K files
-  test_dir: "/"
-  iteration: 1
-  dfs_destroy: false
-  dfs_oclass: RP_3G6
-  dfs_dir_oclass: RP_3G6
-  manager: "MPICH"
-  flags: "-u"
-  wr_size:
-    32K:
-      write_bytes: 32768
-      read_bytes: 32768
-  verbosity_value: 1
-  depth: 0
-test_obj_class:
-  oclass:
-    - RP_2G8
-    - RP_4G1
-    - EC_2P1G1
-aggregation:
-  test_with_aggregation: true
-rebuild:
-  test_with_rebuild: true
-checksum:
-  test_with_checksum: false
-snapshot:
-  test_with_snapshot: true
-test_ranks:
-  rank_list: ["2", "5"]
-pool_capacity:
-  pool_fillup: 10
-  pool_test_oclass: RP_2GX
+hosts:
+  test_servers: 3
+  test_clients: 1
+timeout: 2400
+setup:
+  start_servers_once: false
+skip_add_log_msg: true
+server_config:
+  name: daos_server
+  engines_per_host: 2
+  engines:
+    0:
+      pinned_numa_node: 0
+      nr_xs_helpers: 1
+      fabric_iface: ib0
+      fabric_iface_port: 31416
+      log_file: daos_server0.log
+      log_mask: INFO,MEM=ERR
+      env_vars:
+        - DD_MASK=mgmt,md
+      storage: auto
+    1:
+      pinned_numa_node: 1
+      nr_xs_helpers: 1
+      fabric_iface: ib1
+      fabric_iface_port: 31516
+      log_file: daos_server1.log
+      log_mask: INFO,MEM=ERR
+      env_vars:
+        - DD_MASK=mgmt,md
+      storage: auto
+pool:
+  scm_size: 12000000000
+  nvme_size: 108000000000
+  svcn: 4
+  rebuild_timeout: 240
+  properties: scrub:timed
+container:
+  type: POSIX
+  control_method: daos
+  oclass: RP_3G6
+  properties: cksum:crc64,cksum_size:16384,srv_cksum:on,rd_fac:2
+dkeys:
+  single:
+    no_of_dkeys:
+      - 50
+akeys:
+  single:
+    no_of_akeys:
+      - 10
+record:
+  1KB:
+    length:
+      - 1024
+ior:
+  clientslots:
+    slots: 48
+  test_file: /testFile
+  repetitions: 1
+  dfs_destroy: false
+  iorflags:
+    write_flags: "-w -F -k -G 1"
+    read_flags: "-F -r -R -k -G 1"
+    api: DFS
+    dfs_oclass: RP_3G6
+    dfs_dir_oclass: RP_3G6
+    ior_test_sequence:
+      # - [scmsize, nvmesize, transfersize, blocksize]
+      # The values are set to be in the multiples of 10.
+      # Values are appx GB.
+      - [12000000000, 108000000000, 500000, 500000000]
+mdtest:
+  api: DFS
+  client_processes:
+    np: 30
+  num_of_files_dirs: 4067    # creating total of 120K files
+  test_dir: "/"
+  iteration: 1
+  dfs_destroy: false
+  dfs_oclass: RP_3G6
+  dfs_dir_oclass: RP_3G6
+  manager: "MPICH"
+  flags: "-u"
+  wr_size:
+    32K:
+      write_bytes: 32768
+      read_bytes: 32768
+  verbosity_value: 1
+  depth: 0
+test_obj_class:
+  oclass:
+    - RP_2G8
+    - RP_4G1
+    - EC_2P1G1
+aggregation:
+  test_with_aggregation: true
+rebuild:
+  test_with_rebuild: true
+checksum:
+  test_with_checksum: false
+snapshot:
+  test_with_snapshot: true
+multiple_ranks:
+  test_with_multiple_ranks: true
+  rank_list: ["1,2"]
+test_ranks:
+  rank_list: ["2", "5"]
+pool_capacity:
+  pool_fillup: 10
+  pool_test_oclass: RP_2GX
diff --git a/src/tests/ftest/osa/online_drain.py b/src/tests/ftest/osa/online_drain.py
index 3ee4f436256..07090d3d788 100644
--- a/src/tests/ftest/osa/online_drain.py
+++ b/src/tests/ftest/osa/online_drain.py
@@ -1,5 +1,6 @@
 """
   (C) Copyright 2020-2024 Intel Corporation.
+  (C) Copyright 2025 Hewlett Packard Enterprise Development LP
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """
@@ -186,3 +187,19 @@ def test_osa_online_drain_mdtest(self):
         """
         self.log.info("Online Drain : With Mdtest")
         self.run_online_drain_test(1, app_name="mdtest")
+
+    def test_osa_online_drain_with_multiple_ranks(self):
+        """Test ID: DAOS-4753.
+
+        Test Description: Drain multiple ranks at the same time.
+
+        :avocado: tags=all,daily_regression
+        :avocado: tags=hw,medium
+        :avocado: tags=osa,osa_drain,online_drain
+        :avocado: tags=OSAOnlineDrain,test_osa_online_drain_with_multiple_ranks
+        """
+        self.log.info("Online Drain : Test with multiple ranks")
+        self.test_with_multiple_ranks = self.params.get("test_with_multiple_ranks",
+                                                        '/run/multiple_ranks/*')
+        self.multiple_ranks = self.params.get("rank_list", '/run/multiple_ranks/*')
+        self.run_online_drain_test(1)
diff --git a/src/tests/ftest/osa/online_drain.yaml b/src/tests/ftest/osa/online_drain.yaml
index 738683694b1..ed2a9f2ee49 100644
--- a/src/tests/ftest/osa/online_drain.yaml
+++ b/src/tests/ftest/osa/online_drain.yaml
@@ -90,3 +90,6 @@ rebuild:
   test_with_rebuild: true
 checksum:
   test_with_checksum: false
+multiple_ranks:
+  test_with_multiple_ranks: true
+  rank_list: ["1,2"]
diff --git a/src/tests/ftest/util/osa_utils.py b/src/tests/ftest/util/osa_utils.py
index 410b6ce46a2..54d526509fa 100644
--- a/src/tests/ftest/util/osa_utils.py
+++ b/src/tests/ftest/util/osa_utils.py
@@ -1,444 +1,446 @@
-"""
-  (C) Copyright 2020-2024 Intel Corporation.
-
-  SPDX-License-Identifier: BSD-2-Clause-Patent
-"""
-import queue
-import re
-import threading
-import time
-
-from avocado import fail_on
-from exception_utils import CommandFailure
-from general_utils import run_command
-from ior_test_base import IorTestBase
-from mdtest_test_base import MdtestBase
-
-
-class OSAUtils(MdtestBase, IorTestBase):
-    """Test Class Description: This test runs daos_server offline drain test cases.
-
-    :avocado: recursive
-    """
-
-    def setUp(self):
-        """Set up for test case."""
-        super().setUp()
-        self.pool_cont_dict = {}
-        self.container = None
-        self.obj = None
-        self.ioreq = None
-        self.dmg_command = self.get_dmg_command()
-        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*',
-                                           default=[0])[0]
-        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*',
-                                           default=[0])[0]
-        self.record_length = self.params.get("length", '/run/record/*',
-                                             default=[0])[0]
-        self.ior_w_flags = self.params.get("write_flags", '/run/ior/iorflags/*',
-                                           default="")
-        self.ior_r_flags = self.params.get("read_flags", '/run/ior/iorflags/*')
-        self.server_count = len(self.hostlist_servers)
-        self.engine_count = self.server_managers[0].get_config_value(
-            "engines_per_host")
-        self.out_queue = queue.Queue()
-        self.dmg_command.exit_status_exception = False
-        self.test_during_aggregation = False
-        self.test_during_rebuild = False
-        self.test_with_checksum = True
-        # By default, test_with_rf is set to False.
-        # It is up to individual test to enable it.
-        self.test_with_rf = False
-        self.test_with_blank_node = False
-        self.test_with_snapshot = False
-
-    @fail_on(CommandFailure)
-    def assert_on_rebuild_failure(self):
-        """If the rebuild is not successful, raise assert."""
-        rebuild_status = self.pool.get_rebuild_status(True)
-        self.log.info("Rebuild Status: %s", rebuild_status)
-        if rebuild_status in ["failed", "scanning", "aborted", "busy"]:
-            self.fail("Rebuild failed")
-
-    @fail_on(CommandFailure)
-    def print_and_assert_on_rebuild_failure(self, out, timeout=3):
-        """Print the out value (daos, dmg, etc) and check for rebuild completion.
-
-        If rebuild does not complete, raise an assertion.
- """ - self.log.info(out) - self.pool.wait_for_rebuild_to_start() - self.pool.wait_for_rebuild_to_end(timeout) - self.assert_on_rebuild_failure() - - @fail_on(CommandFailure) - def get_ipaddr_for_rank(self, rank=None): - """Obtain the IPAddress and port number for a particular server rank. - - Args: - rank (int): daos_engine rank. Defaults to None. - - Returns: - ip_addr (str) : IPAddress for the rank. - port_num (str) : Port number for the rank. - """ - output = self.dmg_command.system_query() - members_length = self.server_count * self.engine_count - for index in range(0, members_length): - if rank == int(output["response"]["members"][index]["rank"]): - temp = output["response"]["members"][index]["addr"] - ip_addr = temp.split(":") - temp = output["response"]["members"][index]["fabric_uri"] - port_num = temp.split(":") - return ip_addr[0], port_num[2] - return None, None - - @fail_on(CommandFailure) - def remove_pool_dir(self, ip_addr=None, port_num=None): - """Remove the /mnt/daos[x]//vos-* directory. - - Args: - ip_addr (str): IP address of the daos server. Defaults to None. - port_number (str) : Port number the daos server. - """ - # Create the expected port list - # expected_ports = [port0] - Single engine/server - # expected_ports = [port0, port1] - Two engine/server - expected_ports = [engine_param.get_value("fabric_iface_port") - for engine_param in self.server_managers[-1]. - manager.job.yaml.engine_params] - self.log.info("Expected ports : %s", expected_ports) - if ip_addr is None or port_num is None: - self.log.info("ip_addr : %s port_number: %s", ip_addr, port_num) - self.fail("No IP Address or Port number provided") - else: - if self.engine_count == 1: - self.log.info("Single Engine per Server") - cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \ - sudo rm -rf /mnt/daos/{}/vos-*". \ - format(ip_addr, self.pool.uuid) - elif self.engine_count == 2: - if port_num == str(expected_ports[0]): - port_val = 0 - elif port_num == str(expected_ports[1]): - port_val = 1 - else: - port_val = None # To appease pylint - self.log.info("port_number: %s", port_num) - self.fail("Invalid port number") - cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \ - sudo rm -rf /mnt/daos{}/{}/vos-*". \ - format(ip_addr, port_val, self.pool.uuid) - else: - cmd = None # To appease pylint - self.fail("Not supported engine per server configuration") - run_command(cmd) - - def set_container(self, container): - """Set the OSA utils container object. - - Args: - container (TestContainer): Container object to be used within OSA utils. - """ - self.container = container - - def simple_osa_reintegrate_loop(self, rank, action="exclude", loop_time=100): - """Exclude or drain and reintegrate a rank for a certain amount of time. - - Args: - rank (int): daos server rank. - action (str, optional): "exclude" or "drain". Defaults to "exclude" - loop_time (int, optional): Total time to perform drain/reintegrate operation in a loop. - Defaults to 100. - """ - start_time = 0 - finish_time = 0 - start_time = time.time() - while int(finish_time - start_time) < loop_time: - if action == "exclude": - output = self.pool.exclude(rank) - else: - output = self.pool.drain(rank) - self.print_and_assert_on_rebuild_failure(output) - output = self.pool.reintegrate(rank) - self.print_and_assert_on_rebuild_failure(output) - finish_time = time.time() - - def prepare_cont_ior_write_read(self, oclass, flags): - """Prepare the containers for IOR write and read invocations. 
- - To enable aggregation: - - Create two containers and read always from first container - Normal usage (use only a single container): - - Create a single container and use the same. - - Args: - oclass (str): IOR object class - flags (str): IOR flags - """ - self.log.info(self.pool_cont_dict) - # If pool is not in the dictionary, - # initialize its container list to None - # {pool : [None, None], [None, None]} - if self.pool not in self.pool_cont_dict: - self.pool_cont_dict[self.pool] = [None] * 4 - # Create container if the pool doesn't have one. - # Otherwise, use the existing container in the pool. - # pool_cont_dict {pool A: [containerA, Updated, - # containerB, Updated], - # pool B : containerA, Updated, - # containerB, None]} - if self.pool_cont_dict[self.pool][0] is None: - self.add_container(self.pool, create=False) - self.set_cont_class_properties(oclass) - if self.test_with_checksum is False: - tmp = self.get_object_replica_value(oclass) - rf_value = "rd_fac:{}".format(tmp - 1) - self.update_cont_properties(rf_value) - self.container.create() - self.pool_cont_dict[self.pool][0] = self.container - self.pool_cont_dict[self.pool][1] = "Updated" - else: - if ((self.test_during_aggregation is True) - and (self.pool_cont_dict[self.pool][1] == "Updated") - and (self.pool_cont_dict[self.pool][3] is None) - and ("-w" in flags)): - # Write to the second container - self.add_container(self.pool, create=False) - self.set_cont_class_properties(oclass) - if self.test_with_checksum is False: - tmp = self.get_object_replica_value(oclass) - rf_value = "rd_fac:{}".format(tmp - 1) - self.update_cont_properties(rf_value) - self.container.create() - self.pool_cont_dict[self.pool][2] = self.container - self.pool_cont_dict[self.pool][3] = "Updated" - else: - self.container = self.pool_cont_dict[self.pool][0] - - def delete_extra_container(self, pool): - """Delete the extra container in the pool. - - Refer prepare_cont_ior_write_read. This method should be called when OSA tests intend to - enable aggregation. - - Args: - pool (TestPool): pool object - """ - self.pool.set_property("reclaim", "time") - extra_container = self.pool_cont_dict[pool][2] - extra_container.destroy() - self.pool_cont_dict[pool][3] = None - - def get_object_replica_value(self, oclass): - """Get the object replica value for an object class. - - Args: - oclass (str): Object Class (eg: RP_2G1,etc) - - Returns: - int: Object replica value - - """ - value = 0 - if "_" in oclass: - replica_list = oclass.split("_") - value = replica_list[1][0] - else: - self.log.info("Wrong Object Class. Cannot split") - return int(value) - - def update_cont_properties(self, cont_prop): - """Update the existing container properties. - - Args: - cont_prop (str): Replace existing container properties with new value - """ - self.container.properties.value = cont_prop - - def set_cont_class_properties(self, oclass="S1"): - """Update the container class to match the IOR/Mdtest object class. - - Fix the rf factor based on object replica value. - Also, remove the redundancy factor for S type object class. - - Args: - oclass (str, optional): Container object class to be set. Defaults to "S1". - """ - self.container.oclass.value = oclass - # Set the container properties properly for S!, S2 class. - # rf should not be set to 1 for S type object class. 
- match = re.search("^S\\d$", oclass) - prop = self.container.properties.value - if match is not None: - prop = prop.replace("rd_fac:1", "rd_fac:0") - else: - tmp = self.get_object_replica_value(oclass) - rf_value = "rd_fac:{}".format(tmp - 1) - prop = prop.replace("rd_fac:1", rf_value) - self.container.properties.value = prop - # Over-write oclass settings if using redundancy factor - # and self.test_with_rf is True. - # This has to be done so that container created doesn't - # use the object class. - if self.test_with_rf is True and \ - "rf" in self.container.properties.value: - self.log.info( - "Detected container redundancy factor: %s", - self.container.properties.value) - self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass") - self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass") - self.container.oclass.update(None) - - def assert_on_exception(self, out_queue=None): - """Assert on exception while executing an application. - - Args: - out_queue (queue): Check whether the queue is empty. If empty, app (ior, mdtest) didn't - encounter error. - """ - if out_queue is None: - out_queue = self.out_queue - if out_queue.empty(): - pass - else: - exc = out_queue.get(block=False) - out_queue.put(exc) - raise CommandFailure(exc) - - def cleanup_queue(self, out_queue=None): - """Cleanup the existing thread queue. - - Args: - out_queue (queue): Queue to cleanup. - """ - if out_queue is None: - out_queue = self.out_queue - while not out_queue.empty(): - out_queue.get(block=True) - - def run_ior_thread(self, action, oclass, test, single_cont_read=True, fail_on_warning=True, - pool=None): - """Start the IOR thread for either writing or reading data to/from a container. - - Args: - action (str): Start the IOR thread with Read or Write - oclass (str): IOR object class - test (list): IOR test sequence - flags (str): IOR flags - single_cont_read (bool, optional): Always read from the 1st container. Defaults to True. - fail_on_warning (bool, optional): Test terminates for IOR warnings. Defaults to True. - pool (TestPool, optional): Pool to run ior on. Defaults to None. - - """ - # Intermediate (between correct and hack) implementation for allowing a - # pool to be passed in. Needs to be fixed by making the pool argument - # required. - if pool is None: - pool = self.pool - - self.cleanup_queue() - if action == "Write": - flags = self.ior_w_flags - else: - flags = self.ior_r_flags - - # Add a thread for these IOR arguments - process = threading.Thread(target=self.ior_thread, - kwargs={"pool": pool, - "oclass": oclass, - "test": test, - "flags": flags, - "single_cont_read": - single_cont_read, - "fail_on_warning": - fail_on_warning}) - # Launch the IOR thread - process.start() - # Wait for the thread to finish - process.join() - if fail_on_warning and not self.out_queue.empty(): - self.assert_on_exception() - - def ior_thread(self, pool, oclass, test, flags, single_cont_read=True, fail_on_warning=True): - """Start an IOR thread. - - Args: - pool (object): pool handle - oclass (str): IOR object class, container class. - test (list): IOR test sequence - flags (str): IOR flags - single_cont_read (bool, optional): Always read from the 1st container. Defaults to True. - fail_on_warning (bool, optional): Test terminates for IOR warnings. Defaults to True. 
- """ - self.cleanup_queue() - self.pool = pool - self.ior_cmd.get_params(self) - self.ior_cmd.set_daos_params(self.pool, None) - self.log.info("Redundancy Factor : %s", self.test_with_rf) - self.ior_cmd.dfs_oclass.update(oclass) - self.ior_cmd.dfs_dir_oclass.update(oclass) - if single_cont_read is True: - # Prepare the containers created and use in a specific - # way defined in prepare_cont_ior_write. - self.prepare_cont_ior_write_read(oclass, flags) - elif single_cont_read is False and self.container is not None: - # Here self.container is having actual value. Just use it. - self.log.info(self.container) - else: - self.fail("Not supported option on ior_thread") - try: - job_manager = self.get_ior_job_manager_command() - except CommandFailure as err_msg: - self.out_queue.put(err_msg) - self.assert_on_exception() - job_manager.job.dfs_cont.update(self.container.identifier) - self.ior_cmd.transfer_size.update(test[2]) - self.ior_cmd.block_size.update(test[3]) - self.ior_cmd.flags.update(flags) - # Update oclass settings if using redundancy factor - # and self.test_with_rf is True. - if self.test_with_rf is True and "rf" in self.container.properties.value: - self.log.info( - "Detected container redundancy factor: %s", self.container.properties.value) - self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass") - self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass") - # Run run_ior_with_pool without invoking the pool query method for - # displaying pool space information (display_space=False) - self.run_ior_with_pool(create_pool=False, create_cont=False, - fail_on_warning=fail_on_warning, - display_space=False, - out_queue=self.out_queue) - if fail_on_warning and not self.out_queue.empty(): - self.assert_on_exception() - - def run_mdtest_thread(self, oclass="RP_2G1"): - """Start mdtest thread and wait until thread completes. - - Args: - oclass (str): IOR object class, container class. - """ - # Create container only - self.mdtest_cmd.dfs_destroy.update(False) - create_container = 0 - if self.container is None: - self.add_container(self.pool, create=False) - create_container = 1 - self.mdtest_cmd.dfs_oclass.update(oclass) - self.set_cont_class_properties(oclass) - if self.test_with_checksum is False: - tmp = self.get_object_replica_value(oclass) - rf_value = "rd_fac:{}".format(tmp - 1) - self.update_cont_properties(rf_value) - if create_container == 1: - self.container.create() - job_manager = self.get_mdtest_job_manager_command(self.manager) - job_manager.job.dfs_cont.update(self.container.identifier) - # Add a thread for these IOR arguments - process = threading.Thread(target=self.execute_mdtest) - # Launch the MDtest thread - process.start() - # Wait for the thread to finish - process.join() - if not self.out_queue.empty(): - self.assert_on_exception() +""" + (C) Copyright 2020-2024 Intel Corporation. + (C) Copyright 2025 Hewlett Packard Enterprise Development LP + + SPDX-License-Identifier: BSD-2-Clause-Patent +""" +import queue +import re +import threading +import time + +from avocado import fail_on +from exception_utils import CommandFailure +from general_utils import run_command +from ior_test_base import IorTestBase +from mdtest_test_base import MdtestBase + + +class OSAUtils(MdtestBase, IorTestBase): + """Test Class Description: This test runs daos_server offline drain test cases. 
+ + :avocado: recursive + """ + + def setUp(self): + """Set up for test case.""" + super().setUp() + self.pool_cont_dict = {} + self.container = None + self.obj = None + self.ioreq = None + self.dmg_command = self.get_dmg_command() + self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*', + default=[0])[0] + self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*', + default=[0])[0] + self.record_length = self.params.get("length", '/run/record/*', + default=[0])[0] + self.ior_w_flags = self.params.get("write_flags", '/run/ior/iorflags/*', + default="") + self.ior_r_flags = self.params.get("read_flags", '/run/ior/iorflags/*') + self.server_count = len(self.hostlist_servers) + self.engine_count = self.server_managers[0].get_config_value( + "engines_per_host") + self.out_queue = queue.Queue() + self.dmg_command.exit_status_exception = False + self.test_during_aggregation = False + self.test_during_rebuild = False + self.test_with_checksum = True + # By default, test_with_rf is set to False. + # It is up to individual test to enable it. + self.test_with_rf = False + self.test_with_blank_node = False + self.test_with_snapshot = False + self.test_with_multiple_ranks = False + + @fail_on(CommandFailure) + def assert_on_rebuild_failure(self): + """If the rebuild is not successful, raise assert.""" + rebuild_status = self.pool.get_rebuild_status(True) + self.log.info("Rebuild Status: %s", rebuild_status) + if rebuild_status in ["failed", "scanning", "aborted", "busy"]: + self.fail("Rebuild failed") + + @fail_on(CommandFailure) + def print_and_assert_on_rebuild_failure(self, out, timeout=3): + """Print the out value (daos, dmg, etc) and check for rebuild completion. + + If rebuild does not complete, raise an assertion. + """ + self.log.info(out) + self.pool.wait_for_rebuild_to_start() + self.pool.wait_for_rebuild_to_end(timeout) + self.assert_on_rebuild_failure() + + @fail_on(CommandFailure) + def get_ipaddr_for_rank(self, rank=None): + """Obtain the IPAddress and port number for a particular server rank. + + Args: + rank (int): daos_engine rank. Defaults to None. + + Returns: + ip_addr (str) : IPAddress for the rank. + port_num (str) : Port number for the rank. + """ + output = self.dmg_command.system_query() + members_length = self.server_count * self.engine_count + for index in range(0, members_length): + if rank == int(output["response"]["members"][index]["rank"]): + temp = output["response"]["members"][index]["addr"] + ip_addr = temp.split(":") + temp = output["response"]["members"][index]["fabric_uri"] + port_num = temp.split(":") + return ip_addr[0], port_num[2] + return None, None + + @fail_on(CommandFailure) + def remove_pool_dir(self, ip_addr=None, port_num=None): + """Remove the /mnt/daos[x]//vos-* directory. + + Args: + ip_addr (str): IP address of the daos server. Defaults to None. + port_number (str) : Port number the daos server. + """ + # Create the expected port list + # expected_ports = [port0] - Single engine/server + # expected_ports = [port0, port1] - Two engine/server + expected_ports = [engine_param.get_value("fabric_iface_port") + for engine_param in self.server_managers[-1]. 
+ manager.job.yaml.engine_params] + self.log.info("Expected ports : %s", expected_ports) + if ip_addr is None or port_num is None: + self.log.info("ip_addr : %s port_number: %s", ip_addr, port_num) + self.fail("No IP Address or Port number provided") + else: + if self.engine_count == 1: + self.log.info("Single Engine per Server") + cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \ + sudo rm -rf /mnt/daos/{}/vos-*". \ + format(ip_addr, self.pool.uuid) + elif self.engine_count == 2: + if port_num == str(expected_ports[0]): + port_val = 0 + elif port_num == str(expected_ports[1]): + port_val = 1 + else: + port_val = None # To appease pylint + self.log.info("port_number: %s", port_num) + self.fail("Invalid port number") + cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \ + sudo rm -rf /mnt/daos{}/{}/vos-*". \ + format(ip_addr, port_val, self.pool.uuid) + else: + cmd = None # To appease pylint + self.fail("Not supported engine per server configuration") + run_command(cmd) + + def set_container(self, container): + """Set the OSA utils container object. + + Args: + container (TestContainer): Container object to be used within OSA utils. + """ + self.container = container + + def simple_osa_reintegrate_loop(self, rank, action="exclude", loop_time=100): + """Exclude or drain and reintegrate a rank for a certain amount of time. + + Args: + rank (int): daos server rank. + action (str, optional): "exclude" or "drain". Defaults to "exclude" + loop_time (int, optional): Total time to perform drain/reintegrate operation in a loop. + Defaults to 100. + """ + start_time = 0 + finish_time = 0 + start_time = time.time() + while int(finish_time - start_time) < loop_time: + if action == "exclude": + output = self.pool.exclude(rank) + else: + output = self.pool.drain(rank) + self.print_and_assert_on_rebuild_failure(output) + output = self.pool.reintegrate(rank) + self.print_and_assert_on_rebuild_failure(output) + finish_time = time.time() + + def prepare_cont_ior_write_read(self, oclass, flags): + """Prepare the containers for IOR write and read invocations. + + To enable aggregation: + - Create two containers and read always from first container + Normal usage (use only a single container): + - Create a single container and use the same. + + Args: + oclass (str): IOR object class + flags (str): IOR flags + """ + self.log.info(self.pool_cont_dict) + # If pool is not in the dictionary, + # initialize its container list to None + # {pool : [None, None], [None, None]} + if self.pool not in self.pool_cont_dict: + self.pool_cont_dict[self.pool] = [None] * 4 + # Create container if the pool doesn't have one. + # Otherwise, use the existing container in the pool. 
+ # pool_cont_dict {pool A: [containerA, Updated, + # containerB, Updated], + # pool B : containerA, Updated, + # containerB, None]} + if self.pool_cont_dict[self.pool][0] is None: + self.add_container(self.pool, create=False) + self.set_cont_class_properties(oclass) + if self.test_with_checksum is False: + tmp = self.get_object_replica_value(oclass) + rf_value = "rd_fac:{}".format(tmp - 1) + self.update_cont_properties(rf_value) + self.container.create() + self.pool_cont_dict[self.pool][0] = self.container + self.pool_cont_dict[self.pool][1] = "Updated" + else: + if ((self.test_during_aggregation is True) + and (self.pool_cont_dict[self.pool][1] == "Updated") + and (self.pool_cont_dict[self.pool][3] is None) + and ("-w" in flags)): + # Write to the second container + self.add_container(self.pool, create=False) + self.set_cont_class_properties(oclass) + if self.test_with_checksum is False: + tmp = self.get_object_replica_value(oclass) + rf_value = "rd_fac:{}".format(tmp - 1) + self.update_cont_properties(rf_value) + self.container.create() + self.pool_cont_dict[self.pool][2] = self.container + self.pool_cont_dict[self.pool][3] = "Updated" + else: + self.container = self.pool_cont_dict[self.pool][0] + + def delete_extra_container(self, pool): + """Delete the extra container in the pool. + + Refer prepare_cont_ior_write_read. This method should be called when OSA tests intend to + enable aggregation. + + Args: + pool (TestPool): pool object + """ + self.pool.set_property("reclaim", "time") + extra_container = self.pool_cont_dict[pool][2] + extra_container.destroy() + self.pool_cont_dict[pool][3] = None + + def get_object_replica_value(self, oclass): + """Get the object replica value for an object class. + + Args: + oclass (str): Object Class (eg: RP_2G1,etc) + + Returns: + int: Object replica value + + """ + value = 0 + if "_" in oclass: + replica_list = oclass.split("_") + value = replica_list[1][0] + else: + self.log.info("Wrong Object Class. Cannot split") + return int(value) + + def update_cont_properties(self, cont_prop): + """Update the existing container properties. + + Args: + cont_prop (str): Replace existing container properties with new value + """ + self.container.properties.value = cont_prop + + def set_cont_class_properties(self, oclass="S1"): + """Update the container class to match the IOR/Mdtest object class. + + Fix the rf factor based on object replica value. + Also, remove the redundancy factor for S type object class. + + Args: + oclass (str, optional): Container object class to be set. Defaults to "S1". + """ + self.container.oclass.value = oclass + # Set the container properties properly for S!, S2 class. + # rf should not be set to 1 for S type object class. + match = re.search("^S\\d$", oclass) + prop = self.container.properties.value + if match is not None: + prop = prop.replace("rd_fac:1", "rd_fac:0") + else: + tmp = self.get_object_replica_value(oclass) + rf_value = "rd_fac:{}".format(tmp - 1) + prop = prop.replace("rd_fac:1", rf_value) + self.container.properties.value = prop + # Over-write oclass settings if using redundancy factor + # and self.test_with_rf is True. + # This has to be done so that container created doesn't + # use the object class. 
+ if self.test_with_rf is True and \ + "rf" in self.container.properties.value: + self.log.info( + "Detected container redundancy factor: %s", + self.container.properties.value) + self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass") + self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass") + self.container.oclass.update(None) + + def assert_on_exception(self, out_queue=None): + """Assert on exception while executing an application. + + Args: + out_queue (queue): Check whether the queue is empty. If empty, app (ior, mdtest) didn't + encounter error. + """ + if out_queue is None: + out_queue = self.out_queue + if out_queue.empty(): + pass + else: + exc = out_queue.get(block=False) + out_queue.put(exc) + raise CommandFailure(exc) + + def cleanup_queue(self, out_queue=None): + """Cleanup the existing thread queue. + + Args: + out_queue (queue): Queue to cleanup. + """ + if out_queue is None: + out_queue = self.out_queue + while not out_queue.empty(): + out_queue.get(block=True) + + def run_ior_thread(self, action, oclass, test, single_cont_read=True, fail_on_warning=True, + pool=None): + """Start the IOR thread for either writing or reading data to/from a container. + + Args: + action (str): Start the IOR thread with Read or Write + oclass (str): IOR object class + test (list): IOR test sequence + flags (str): IOR flags + single_cont_read (bool, optional): Always read from the 1st container. Defaults to True. + fail_on_warning (bool, optional): Test terminates for IOR warnings. Defaults to True. + pool (TestPool, optional): Pool to run ior on. Defaults to None. + + """ + # Intermediate (between correct and hack) implementation for allowing a + # pool to be passed in. Needs to be fixed by making the pool argument + # required. + if pool is None: + pool = self.pool + + self.cleanup_queue() + if action == "Write": + flags = self.ior_w_flags + else: + flags = self.ior_r_flags + + # Add a thread for these IOR arguments + process = threading.Thread(target=self.ior_thread, + kwargs={"pool": pool, + "oclass": oclass, + "test": test, + "flags": flags, + "single_cont_read": + single_cont_read, + "fail_on_warning": + fail_on_warning}) + # Launch the IOR thread + process.start() + # Wait for the thread to finish + process.join() + if fail_on_warning and not self.out_queue.empty(): + self.assert_on_exception() + + def ior_thread(self, pool, oclass, test, flags, single_cont_read=True, fail_on_warning=True): + """Start an IOR thread. + + Args: + pool (object): pool handle + oclass (str): IOR object class, container class. + test (list): IOR test sequence + flags (str): IOR flags + single_cont_read (bool, optional): Always read from the 1st container. Defaults to True. + fail_on_warning (bool, optional): Test terminates for IOR warnings. Defaults to True. + """ + self.cleanup_queue() + self.pool = pool + self.ior_cmd.get_params(self) + self.ior_cmd.set_daos_params(self.pool, None) + self.log.info("Redundancy Factor : %s", self.test_with_rf) + self.ior_cmd.dfs_oclass.update(oclass) + self.ior_cmd.dfs_dir_oclass.update(oclass) + if single_cont_read is True: + # Prepare the containers created and use in a specific + # way defined in prepare_cont_ior_write. + self.prepare_cont_ior_write_read(oclass, flags) + elif single_cont_read is False and self.container is not None: + # Here self.container is having actual value. Just use it. 
+ self.log.info(self.container) + else: + self.fail("Not supported option on ior_thread") + try: + job_manager = self.get_ior_job_manager_command() + except CommandFailure as err_msg: + self.out_queue.put(err_msg) + self.assert_on_exception() + job_manager.job.dfs_cont.update(self.container.identifier) + self.ior_cmd.transfer_size.update(test[2]) + self.ior_cmd.block_size.update(test[3]) + self.ior_cmd.flags.update(flags) + # Update oclass settings if using redundancy factor + # and self.test_with_rf is True. + if self.test_with_rf is True and "rf" in self.container.properties.value: + self.log.info( + "Detected container redundancy factor: %s", self.container.properties.value) + self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass") + self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass") + # Run run_ior_with_pool without invoking the pool query method for + # displaying pool space information (display_space=False) + self.run_ior_with_pool(create_pool=False, create_cont=False, + fail_on_warning=fail_on_warning, + display_space=False, + out_queue=self.out_queue) + if fail_on_warning and not self.out_queue.empty(): + self.assert_on_exception() + + def run_mdtest_thread(self, oclass="RP_2G1"): + """Start mdtest thread and wait until thread completes. + + Args: + oclass (str): IOR object class, container class. + """ + # Create container only + self.mdtest_cmd.dfs_destroy.update(False) + create_container = 0 + if self.container is None: + self.add_container(self.pool, create=False) + create_container = 1 + self.mdtest_cmd.dfs_oclass.update(oclass) + self.set_cont_class_properties(oclass) + if self.test_with_checksum is False: + tmp = self.get_object_replica_value(oclass) + rf_value = "rd_fac:{}".format(tmp - 1) + self.update_cont_properties(rf_value) + if create_container == 1: + self.container.create() + job_manager = self.get_mdtest_job_manager_command(self.manager) + job_manager.job.dfs_cont.update(self.container.identifier) + # Add a thread for these IOR arguments + process = threading.Thread(target=self.execute_mdtest) + # Launch the MDtest thread + process.start() + # Wait for the thread to finish + process.join() + if not self.out_queue.empty(): + self.assert_on_exception() diff --git a/utils/cq/check_update_copyright.sh b/utils/cq/check_update_copyright.sh index 7b89b47b41b..473045e3c45 100755 --- a/utils/cq/check_update_copyright.sh +++ b/utils/cq/check_update_copyright.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2024 Intel Corporation. # Copyright 2025 Hewlett Packard Enterprise Development LP diff --git a/utils/githooks/find_base.sh b/utils/githooks/find_base.sh index 292a02f66e7..b812d28a783 100644 --- a/utils/githooks/find_base.sh +++ b/utils/githooks/find_base.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # /* # * (C) Copyright 2024 Intel Corporation. 
+# * (C) Copyright 2025 Hewlett Packard Enterprise Development LP # * # * SPDX-License-Identifier: BSD-2-Clause-Patent # */ diff --git a/utils/githooks/git-version.sh b/utils/githooks/git-version.sh index 29e141f40c8..c99aa5b534d 100644 --- a/utils/githooks/git-version.sh +++ b/utils/githooks/git-version.sh @@ -1,4 +1,9 @@ -#!/bin/bash +#!/usr/bin/env bash +# +# Copyright 2025 Hewlett Packard Enterprise Development LP +# +# SPDX-License-Identifier: BSD-2-Clause-Patent +# VERSION=$(git --version | sed -ne 's/^[^0-9]*\([[0-9\.]*\).*/\1/p') if [ -z "$VERSION" ]; then diff --git a/utils/githooks/hook_base.sh b/utils/githooks/hook_base.sh index 7e3a61cc544..b6fa66d73d7 100755 --- a/utils/githooks/hook_base.sh +++ b/utils/githooks/hook_base.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2024 Intel Corporation. +# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # @@ -41,8 +42,8 @@ run-parts() { # don't run vim .swp files [ "${i%.sw?}" != "${i}" ] && continue skip_item=false - for skip in "${skip_list[@]}"; do - if [[ "${i}" =~ ${skip} ]]; then + for skip in "${skip_list[@]:-}"; do + if [[ -n "${skip}" ]] && [[ "${i}" =~ ${skip} ]]; then skip_item=true echo "Skipping ${i}" break diff --git a/utils/githooks/pre-commit b/utils/githooks/pre-commit index cab804423fc..78af2ba4c28 100755 --- a/utils/githooks/pre-commit +++ b/utils/githooks/pre-commit @@ -1,4 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash +# +# Copyright 2025 Hewlett Packard Enterprise Development LP +# set -eu . utils/githooks/hook_base.sh diff --git a/utils/githooks/pre-commit.d/10-update-copyright.sh b/utils/githooks/pre-commit.d/10-update-copyright.sh index b88cce8e634..e962e000168 100755 --- a/utils/githooks/pre-commit.d/10-update-copyright.sh +++ b/utils/githooks/pre-commit.d/10-update-copyright.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2022-2024 Intel Corporation. +# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # diff --git a/utils/githooks/pre-commit.d/20-codespell.sh b/utils/githooks/pre-commit.d/20-codespell.sh index eece684244e..dcc7bdb6380 100755 --- a/utils/githooks/pre-commit.d/20-codespell.sh +++ b/utils/githooks/pre-commit.d/20-codespell.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2024 Intel Corporation. +# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # diff --git a/utils/githooks/pre-commit.d/30-Jenkinsfile.sh b/utils/githooks/pre-commit.d/30-Jenkinsfile.sh index c69f08ffac5..f1f44d80989 100755 --- a/utils/githooks/pre-commit.d/30-Jenkinsfile.sh +++ b/utils/githooks/pre-commit.d/30-Jenkinsfile.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2023-2024 Intel Corporation. +# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # diff --git a/utils/githooks/pre-commit.d/40-yamllint.sh b/utils/githooks/pre-commit.d/40-yamllint.sh index 60b62fe86a8..c080edc7447 100755 --- a/utils/githooks/pre-commit.d/40-yamllint.sh +++ b/utils/githooks/pre-commit.d/40-yamllint.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2022-2024 Intel Corporation. 
+# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # diff --git a/utils/githooks/pre-commit.d/50-clang-format.sh b/utils/githooks/pre-commit.d/50-clang-format.sh index 82b725d2624..a20b215ba6c 100755 --- a/utils/githooks/pre-commit.d/50-clang-format.sh +++ b/utils/githooks/pre-commit.d/50-clang-format.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2022-2024 Intel Corporation. +# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # @@ -21,10 +22,10 @@ if ! command -v clang-format > /dev/null 2>&1; then exit 0 fi -echo "Formatting C files" - # Check version of clang-format, and print a helpful message if it's too old. If the right version # is not found then exit. -./site_scons/site_tools/extra/extra.py || exit 0 +./site_scons/site_tools/extra/extra.py > /dev/null || exit 0 + +echo "Formatting C files" git-clang-format --staged src diff --git a/utils/githooks/pre-commit.d/60-gofmt.sh b/utils/githooks/pre-commit.d/60-gofmt.sh index 0a702948786..c8cad14f530 100755 --- a/utils/githooks/pre-commit.d/60-gofmt.sh +++ b/utils/githooks/pre-commit.d/60-gofmt.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2022-2024 Intel Corporation. +# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # diff --git a/utils/githooks/pre-commit.d/70-isort.sh b/utils/githooks/pre-commit.d/70-isort.sh index 9b3d9fc445a..986e4642fa9 100755 --- a/utils/githooks/pre-commit.d/70-isort.sh +++ b/utils/githooks/pre-commit.d/70-isort.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2023-2024 Intel Corporation. +# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # @@ -20,17 +21,7 @@ fi if ! command -v isort > /dev/null 2>&1; then echo "isort not installed. Install isort command to improve pre-commit checks:" echo " python3 -m pip install -r ./utils/cq/requirements.txt" - . /etc/os-release - if [ "$ID" = "fedora" ]; then - echo " or" - echo " dnf install python3-isort" - fi - exit 0 fi echo "Checking if python imports are sorted" -if ! echo "$py_files" | xargs -r isort --check-only --jobs 8; then - echo " isort check failed, run 'isort --jobs 8 .' to fix." - exit 1 -fi diff --git a/utils/githooks/pre-commit.d/71-flake.sh b/utils/githooks/pre-commit.d/71-flake.sh index 082e0f863d8..cf00e8e6388 100755 --- a/utils/githooks/pre-commit.d/71-flake.sh +++ b/utils/githooks/pre-commit.d/71-flake.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2022-2024 Intel Corporation. +# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # diff --git a/utils/githooks/pre-commit.d/72-pylint.sh b/utils/githooks/pre-commit.d/72-pylint.sh index 4bff2456a66..047de5ba974 100755 --- a/utils/githooks/pre-commit.d/72-pylint.sh +++ b/utils/githooks/pre-commit.d/72-pylint.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2022-2024 Intel Corporation. +# Copyright 2025 Hewlett Packard Enterprise Development LP # # SPDX-License-Identifier: BSD-2-Clause-Patent # diff --git a/utils/githooks/pre-commit.d/73-ftest.sh b/utils/githooks/pre-commit.d/73-ftest.sh index 9eaf4e8253b..0fc7248f08e 100755 --- a/utils/githooks/pre-commit.d/73-ftest.sh +++ b/utils/githooks/pre-commit.d/73-ftest.sh @@ -1,6 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash # # Copyright 2024 Intel Corporation. 
+# Copyright 2025 Hewlett Packard Enterprise Development LP
 #
 # SPDX-License-Identifier: BSD-2-Clause-Patent
 #
diff --git a/utils/githooks/prepare-commit-msg b/utils/githooks/prepare-commit-msg
index 59aec7c8578..f050ebb9c34 100755
--- a/utils/githooks/prepare-commit-msg
+++ b/utils/githooks/prepare-commit-msg
@@ -1,3 +1,6 @@
-#!/bin/bash
+#!/usr/bin/env bash
+#
+# Copyright 2025 Hewlett Packard Enterprise Development LP
+#
 
 . utils/githooks/hook_base.sh
diff --git a/utils/rpms/packaging/get_release_branch b/utils/rpms/packaging/get_release_branch
index e017cca2071..6ce032d1d27 100755
--- a/utils/rpms/packaging/get_release_branch
+++ b/utils/rpms/packaging/get_release_branch
@@ -1,14 +1,20 @@
-#!/bin/bash
-
+#!/usr/bin/env bash
+#
+# Copyright 2025 Hewlett Packard Enterprise Development LP
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
 # find the base branch of the current branch
 # base branches can be master, release/2.4+, release/3+
 # or optionally branches passed into $1
 set -eu -o pipefail
 IFS=' ' read -r -a add_bases <<< "${1:-}"
 origin="${ORIGIN:-origin}"
-mapfile -t all_bases < <(echo "master"
-                         git branch -r | sed -ne "/^  $origin\\/release\\/\(2.[4-9]\|[3-9]\)/s/^  $origin\\///p")
-all_bases+=("${add_bases[@]}")
+all_bases=()
+while IFS= read -r base; do
+    all_bases+=("$base")
+done < <(echo "master"
+         git branch -r | sed -ne "/^  $origin\\/release\\/\(2.[4-9]\|[3-9]\)/s/^  $origin\\///p")
 TARGET="master"
 min_diff=-1
 for base in "${all_bases[@]}"; do

From f1c555ee7eb1efb3b58bb81787a579acd35376c1 Mon Sep 17 00:00:00 2001
From: rpadma2
Date: Fri, 24 Jan 2025 15:27:04 -0500
Subject: [PATCH 3/8] DAOS-4753 test: Updates to online_drain.py

Test-tag: test_osa_online_drain_with_multiple_ranks test_osa_offline_drain_with_multiple_ranks
Skip-unit-tests: true

Signed-off-by: rpadma2
---
 src/tests/ftest/osa/online_drain.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/tests/ftest/osa/online_drain.py b/src/tests/ftest/osa/online_drain.py
index 07090d3d788..1ee7a7e8a90 100644
--- a/src/tests/ftest/osa/online_drain.py
+++ b/src/tests/ftest/osa/online_drain.py
@@ -31,6 +31,7 @@ def setUp(self):
         self.hostfile_clients = write_host_file(self.hostlist_clients, self.workdir)
         self.dmg_command.exit_status_exception = True
         self.pool = None
+        self.multiple_ranks = None
 
     def run_online_drain_test(self, num_pool, oclass=None, app_name="ior"):
         """Run the Online drain without data.
@@ -52,6 +53,9 @@
 
         # Drain one of the ranks (or server)
         rank = self.random.choice(list(self.server_managers[0].ranks.keys()))
+        # For testing with multiple ranks as dmg parameters, use a list of ranks.
+        if self.test_with_multiple_ranks is True:
+            rank = self.multiple_ranks
 
         for val in range(0, num_pool):
             pool[val] = add_pool(self, connect=False)

From ea1457dc247fb4a394f84baa597b6d7f270e353e Mon Sep 17 00:00:00 2001
From: rpadma2
Date: Fri, 24 Jan 2025 15:33:12 -0500
Subject: [PATCH 4/8] DAOS-4753 test: Address end of line issue.
Test-tag: test_osa_online_drain_with_multiple_ranks test_osa_offline_drain_with_multiple_ranks Skip-unit-tests: true Signed-off-by: rpadma2 --- src/tests/ftest/osa/offline_drain.py | 2 +- src/tests/ftest/osa/offline_drain.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/tests/ftest/osa/offline_drain.py b/src/tests/ftest/osa/offline_drain.py index 9a5cde4bfec..73a60f80d11 100644 --- a/src/tests/ftest/osa/offline_drain.py +++ b/src/tests/ftest/osa/offline_drain.py @@ -49,7 +49,7 @@ def run_offline_drain_test(self, num_pool, data=False, oclass=None, pool_fillup= if oclass is None: oclass = self.ior_cmd.dfs_oclass.value - # For testing with multiple ranks as dmg parameters, use a list of ranks. + # For testing multiple ranks as dmg parameters, use a list of ranks. if self.test_with_multiple_ranks is True: self.ranks = self.multiple_ranks diff --git a/src/tests/ftest/osa/offline_drain.yaml b/src/tests/ftest/osa/offline_drain.yaml index 76182b8e1ff..03784a29608 100644 --- a/src/tests/ftest/osa/offline_drain.yaml +++ b/src/tests/ftest/osa/offline_drain.yaml @@ -101,6 +101,7 @@ checksum: snapshot: test_with_snapshot: true mutliple_ranks: + # For multiple ranks test_with_multiple_ranks: true rank_list: ["1, 2"] test_ranks: From 60302db6f0abf7f4f9685c5b871edd65c22f9440 Mon Sep 17 00:00:00 2001 From: rpadma2 Date: Fri, 24 Jan 2025 15:56:13 -0500 Subject: [PATCH 5/8] DAOS-4753 test: Pass multiple ranks as parameter Test-tag: test_osa_online_drain_with_multiple_ranks test_osa_offline_drain_with_multiple_ranks Skip-unit-tests: true Signed-off-by: rpadma2 --- src/tests/ftest/osa/offline_drain.py | 563 ++++++++++++------------- src/tests/ftest/osa/offline_drain.yaml | 220 +++++----- src/tests/ftest/osa/online_drain.py | 10 +- src/tests/ftest/osa/online_drain.yaml | 1 - src/tests/ftest/util/osa_utils.py | 1 - 5 files changed, 394 insertions(+), 401 deletions(-) diff --git a/src/tests/ftest/osa/offline_drain.py b/src/tests/ftest/osa/offline_drain.py index 73a60f80d11..5094fe18b7a 100644 --- a/src/tests/ftest/osa/offline_drain.py +++ b/src/tests/ftest/osa/offline_drain.py @@ -1,282 +1,281 @@ -""" - (C) Copyright 2020-2023 Intel Corporation. - (C) Copyright 2025 Hewlett Packard Enterprise Development LP - - SPDX-License-Identifier: BSD-2-Clause-Patent -""" -import random - -from nvme_utils import ServerFillUp -from osa_utils import OSAUtils -from test_utils_pool import add_pool -from write_host_file import write_host_file - - -class OSAOfflineDrain(OSAUtils, ServerFillUp): - # pylint: disable=too-many-ancestors - """ - Test Class Description: This test runs - daos_server offline drain test cases. - - :avocado: recursive - """ - - def setUp(self): - """Set up for test case.""" - super().setUp() - self.dmg_command = self.get_dmg_command() - self.ranks = self.params.get("rank_list", '/run/test_ranks/*') - self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*') - self.ior_test_sequence = self.params.get( - "ior_test_sequence", '/run/ior/iorflags/*') - # Recreate the client hostfile without slots defined - self.hostfile_clients = write_host_file(self.hostlist_clients, self.workdir) - self.multiple_ranks = None - - def run_offline_drain_test(self, num_pool, data=False, oclass=None, pool_fillup=0): - """Run the offline drain without data. - - Args: - num_pool (int) : total pools to create for testing purposes. - data (bool) : whether pool has no data or to create some data in pool. - Defaults to False. 
- oclass (str): DAOS object class (eg: RP_2G1,etc) - """ - # Create a pool - pool = {} - target_list = [] - - if oclass is None: - oclass = self.ior_cmd.dfs_oclass.value - - # For testing multiple ranks as dmg parameters, use a list of ranks. - if self.test_with_multiple_ranks is True: - self.ranks = self.multiple_ranks - - # Exclude target : random two targets (target idx : 0-7) - exc = random.randint(0, 6) # nosec - target_list.append(exc) - target_list.append(exc + 1) - t_string = "{},{}".format(target_list[0], target_list[1]) - - for val in range(0, num_pool): - pool[val] = add_pool(self, connect=False) - self.pool = pool[val] - self.pool.set_property("reclaim", "disabled") - test_seq = self.ior_test_sequence[0] - - if data: - # if pool_fillup is greater than 0, then - # use start_ior_load method from nvme_utils.py. - # Otherwise, use the osa_utils.py run_ior_thread - # method. - if pool_fillup > 0: - self.ior_cmd.dfs_oclass.update(oclass) - self.ior_cmd.dfs_dir_oclass.update(oclass) - self.ior_default_flags = self.ior_w_flags - self.log.info(self.pool.pool_percentage_used()) - self.start_ior_load(storage='NVMe', operation="Auto_Write", percent=pool_fillup) - self.log.info(self.pool.pool_percentage_used()) - else: - self.run_ior_thread("Write", oclass, test_seq) - self.run_mdtest_thread(oclass) - if self.test_with_snapshot is True: - # Create a snapshot of the container - # after IOR job completes. - self.container.create_snap() - self.log.info("Created container snapshot: %s", self.container.epoch) - if self.test_during_aggregation is True: - self.run_ior_thread("Write", oclass, test_seq) - - # Drain ranks and targets - for val in range(0, num_pool): - # Drain ranks provided in YAML file - for index, rank in enumerate(self.ranks): - self.pool = pool[val] - # If we are testing using multiple pools, reintegrate - # the rank back and then drain. - self.pool.display_pool_daos_space("Pool space: Beginning") - # Get initial total free space (scm+nvme) - initial_total_space = self.pool.get_total_space(refresh=True) - pver_begin = self.pool.get_version(True) - self.log.info("Pool Version at the beginning %s", pver_begin) - if self.test_during_aggregation is True and index == 0: - self.pool.set_property("reclaim", "time") - self.delete_extra_container(self.pool) - self.simple_osa_reintegrate_loop(rank=rank, action="drain") - if (self.test_during_rebuild is True and val == 0): - # Exclude rank 3 - self.pool.exclude([3]) - self.pool.wait_for_rebuild_to_start() - # If the pool is filled up just drain only a single rank. 
- if pool_fillup > 0 and index > 0: - continue - output = self.pool.drain(rank, t_string) - self.print_and_assert_on_rebuild_failure(output) - total_space_after_drain = self.pool.get_total_space(refresh=True) - - pver_drain = self.pool.get_version(True) - self.log.info("Pool Version after drain %d", pver_drain) - # Check pool version incremented after pool drain - self.assertGreater(pver_drain, (pver_begin + 1), - "Pool Version Error: After drain") - if self.test_during_aggregation is False: - self.assertGreater(initial_total_space, total_space_after_drain, - "Expected total space after drain is more than initial") - if num_pool > 1: - output = self.pool.reintegrate(rank, t_string) - self.print_and_assert_on_rebuild_failure(output) - total_space_after_reintegration = self.pool.get_total_space(refresh=True) - self.assertGreater( - total_space_after_reintegration, total_space_after_drain, - "Expected total space after reintegration is less than drain") - if (self.test_during_rebuild is True and val == 0): - # Reintegrate rank 3 - output = self.pool.reintegrate("3") - self.print_and_assert_on_rebuild_failure(output) - total_space_after_reintegration = self.pool.get_total_space(refresh=True) - self.assertGreater( - total_space_after_reintegration, total_space_after_drain, - "Expected total space after reintegration is less than drain") - - for val in range(0, num_pool): - display_string = "Pool{} space at the End".format(val) - pool[val].display_pool_daos_space(display_string) - if data: - if pool_fillup > 0: - self.start_ior_load(storage='NVMe', operation='Auto_Read', percent=pool_fillup) - else: - self.run_ior_thread("Read", oclass, test_seq) - self.run_mdtest_thread(oclass) - self.container = self.pool_cont_dict[self.pool][0] - self.container.daos.env['UCX_LOG_LEVEL'] = 'error' - self.container.check() - - def test_osa_offline_drain(self): - """JIRA ID: DAOS-4750. - - Test Description: Validate Offline Drain - - :avocado: tags=all,pr,daily_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,checksum,ior - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain - """ - self.log.info("Offline Drain : Basic Drain") - self.run_offline_drain_test(1, True) - - def test_osa_offline_drain_without_checksum(self): - """Test ID: DAOS-7159. - - Test Description: Validate Offline Drain without enabling checksum in container properties. - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_without_checksum - """ - self.test_with_checksum = self.params.get("test_with_checksum", "/run/checksum/*") - self.log.info("Offline Drain : Without Checksum") - self.run_offline_drain_test(1, data=True) - - def test_osa_offline_drain_during_aggregation(self): - """Test ID: DAOS-7159. - - Test Description: Validate Offline Drain during aggregation - - :avocado: tags=all,daily_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,checksum - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_during_aggregation - """ - self.test_during_aggregation = self.params.get( - "test_with_aggregation", "/run/aggregation/*") - self.log.info("Offline Drain : During Aggregation") - self.run_offline_drain_test(1, data=True) - - def test_osa_offline_drain_oclass(self): - """Test ID: DAOS-7159. 
- - Test Description: Validate Offline Drain with different object class - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_oclass - """ - self.test_with_checksum = self.params.get("test_with_checksum", "/run/checksum/*") - self.log.info("Offline Drain : Oclass") - for oclass in self.test_oclass: - self.run_offline_drain_test(1, data=True, oclass=oclass) - - def test_osa_offline_drain_multiple_pools(self): - """Test ID: DAOS-7159. - - Test Description: Validate Offline Drain with multiple pools - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_multiple_pools - """ - self.log.info("Offline Drain : Multiple Pools") - self.run_offline_drain_test(2, data=True) - - def test_osa_offline_drain_during_rebuild(self): - """Test ID: DAOS-7159. - - Test Description: Validate Offline Drain during rebuild - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,rebuild - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_during_rebuild - """ - self.test_during_rebuild = self.params.get("test_with_rebuild", "/run/rebuild/*") - self.log.info("Offline Drain : During Rebuild") - self.run_offline_drain_test(1, data=True) - - def test_osa_offline_drain_after_snapshot(self): - """Test ID: DAOS-8057. - - Test Description: Validate Offline Drain after taking snapshot. - - :avocado: tags=all,daily_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,checksum - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_after_snapshot - """ - self.test_with_snapshot = self.params.get("test_with_snapshot", "/run/snapshot/*") - self.log.info("Offline Drain : After taking snapshot") - self.run_offline_drain_test(1, data=True) - - def test_osa_offline_drain_with_less_pool_space(self): - """Test ID: DAOS-7160. - - Test Description: Drain rank after with less pool space. - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,offline_drain_full - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_with_less_pool_space - """ - self.log.info("Offline Drain : Test with less pool space") - oclass = self.params.get("pool_test_oclass", '/run/pool_capacity/*') - pool_fillup = self.params.get("pool_fillup", '/run/pool_capacity/*') - self.run_offline_drain_test(1, data=True, oclass=oclass, pool_fillup=pool_fillup) - - def test_osa_offline_drain_with_multiple_ranks(self): - """Test ID: DAOS-4753. - - Test Description: Drain multiple ranks at the same time. - - :avocado: tags=all,full_regression - :avocado: tags=hw,medium - :avocado: tags=osa,osa_drain,offline_drain,offline_drain_full - :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_with_multiple_ranks - """ - self.log.info("Offline Drain : Test with mutiple ranks") - self.test_with_multiple_ranks = self.params.get("test_with_multiple_ranks", - '/run/multiple_ranks/*') - self.multiple_ranks = self.params.get("rank_list", '/run/multiple_ranks/*') - self.run_offline_drain_test(1, data=True) +""" + (C) Copyright 2020-2023 Intel Corporation. 
+  (C) Copyright 2025 Hewlett Packard Enterprise Development LP
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+"""
+import random
+
+from nvme_utils import ServerFillUp
+from osa_utils import OSAUtils
+from test_utils_pool import add_pool
+from write_host_file import write_host_file
+
+
+class OSAOfflineDrain(OSAUtils, ServerFillUp):
+    # pylint: disable=too-many-ancestors
+    """
+    Test Class Description: This test runs
+    daos_server offline drain test cases.
+
+    :avocado: recursive
+    """
+
+    def setUp(self):
+        """Set up for test case."""
+        super().setUp()
+        self.dmg_command = self.get_dmg_command()
+        self.ranks = self.params.get("rank_list", '/run/test_ranks/*')
+        self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
+        self.ior_test_sequence = self.params.get(
+            "ior_test_sequence", '/run/ior/iorflags/*')
+        # Recreate the client hostfile without slots defined
+        self.hostfile_clients = write_host_file(self.hostlist_clients, self.workdir)
+
+    def run_offline_drain_test(self, num_pool, data=False, oclass=None, pool_fillup=0, multiple_ranks=False):
+        """Run the offline drain test.
+
+        Args:
+            num_pool (int) : total pools to create for testing purposes.
+            data (bool) : whether pool has no data or to create some data in pool.
+                Defaults to False.
+            oclass (str): DAOS object class (eg: RP_2G1,etc)
+            pool_fillup (int): Fill up the pool to a desired percent value. Defaults to 0.
+            multiple_ranks (bool): use the rank list from the YAML file so that several
+                ranks are drained per dmg call. Defaults to False.
+        """
+        # Create a pool
+        pool = {}
+        target_list = []
+
+        if oclass is None:
+            oclass = self.ior_cmd.dfs_oclass.value
+
+        # For testing multiple ranks as dmg parameters, use a list of ranks.
+        if multiple_ranks is True:
+            self.ranks = self.multiple_ranks
+
+        # Exclude target : random two targets (target idx : 0-7)
+        exc = random.randint(0, 6)  # nosec
+        target_list.append(exc)
+        target_list.append(exc + 1)
+        t_string = "{},{}".format(target_list[0], target_list[1])
+
+        for val in range(0, num_pool):
+            pool[val] = add_pool(self, connect=False)
+            self.pool = pool[val]
+            self.pool.set_property("reclaim", "disabled")
+        test_seq = self.ior_test_sequence[0]
+
+        if data:
+            # if pool_fillup is greater than 0, then
+            # use start_ior_load method from nvme_utils.py.
+            # Otherwise, use the osa_utils.py run_ior_thread
+            # method.
+            if pool_fillup > 0:
+                self.ior_cmd.dfs_oclass.update(oclass)
+                self.ior_cmd.dfs_dir_oclass.update(oclass)
+                self.ior_default_flags = self.ior_w_flags
+                self.log.info(self.pool.pool_percentage_used())
+                self.start_ior_load(storage='NVMe', operation="Auto_Write", percent=pool_fillup)
+                self.log.info(self.pool.pool_percentage_used())
+            else:
+                self.run_ior_thread("Write", oclass, test_seq)
+                self.run_mdtest_thread(oclass)
+            if self.test_with_snapshot is True:
+                # Create a snapshot of the container
+                # after IOR job completes.
+                self.container.create_snap()
+                self.log.info("Created container snapshot: %s", self.container.epoch)
+            if self.test_during_aggregation is True:
+                self.run_ior_thread("Write", oclass, test_seq)
+
+        # Drain ranks and targets
+        for val in range(0, num_pool):
+            # Drain ranks provided in YAML file
+            for index, rank in enumerate(self.ranks):
+                self.pool = pool[val]
+                # If we are testing using multiple pools, reintegrate
+                # the rank back and then drain.
+ self.pool.display_pool_daos_space("Pool space: Beginning") + # Get initial total free space (scm+nvme) + initial_total_space = self.pool.get_total_space(refresh=True) + pver_begin = self.pool.get_version(True) + self.log.info("Pool Version at the beginning %s", pver_begin) + if self.test_during_aggregation is True and index == 0: + self.pool.set_property("reclaim", "time") + self.delete_extra_container(self.pool) + self.simple_osa_reintegrate_loop(rank=rank, action="drain") + if (self.test_during_rebuild is True and val == 0): + # Exclude rank 3 + self.pool.exclude([3]) + self.pool.wait_for_rebuild_to_start() + # If the pool is filled up just drain only a single rank. + if pool_fillup > 0 and index > 0: + continue + output = self.pool.drain(rank, t_string) + self.print_and_assert_on_rebuild_failure(output) + total_space_after_drain = self.pool.get_total_space(refresh=True) + + pver_drain = self.pool.get_version(True) + self.log.info("Pool Version after drain %d", pver_drain) + # Check pool version incremented after pool drain + self.assertGreater(pver_drain, (pver_begin + 1), + "Pool Version Error: After drain") + if self.test_during_aggregation is False: + self.assertGreater(initial_total_space, total_space_after_drain, + "Expected total space after drain is more than initial") + if num_pool > 1: + output = self.pool.reintegrate(rank, t_string) + self.print_and_assert_on_rebuild_failure(output) + total_space_after_reintegration = self.pool.get_total_space(refresh=True) + self.assertGreater( + total_space_after_reintegration, total_space_after_drain, + "Expected total space after reintegration is less than drain") + if (self.test_during_rebuild is True and val == 0): + # Reintegrate rank 3 + output = self.pool.reintegrate("3") + self.print_and_assert_on_rebuild_failure(output) + total_space_after_reintegration = self.pool.get_total_space(refresh=True) + self.assertGreater( + total_space_after_reintegration, total_space_after_drain, + "Expected total space after reintegration is less than drain") + + for val in range(0, num_pool): + display_string = "Pool{} space at the End".format(val) + pool[val].display_pool_daos_space(display_string) + if data: + if pool_fillup > 0: + self.start_ior_load(storage='NVMe', operation='Auto_Read', percent=pool_fillup) + else: + self.run_ior_thread("Read", oclass, test_seq) + self.run_mdtest_thread(oclass) + self.container = self.pool_cont_dict[self.pool][0] + self.container.daos.env['UCX_LOG_LEVEL'] = 'error' + self.container.check() + + def test_osa_offline_drain(self): + """JIRA ID: DAOS-4750. + + Test Description: Validate Offline Drain + + :avocado: tags=all,pr,daily_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,checksum,ior + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain + """ + self.log.info("Offline Drain : Basic Drain") + self.run_offline_drain_test(1, True) + + def test_osa_offline_drain_without_checksum(self): + """Test ID: DAOS-7159. + + Test Description: Validate Offline Drain without enabling checksum in container properties. + + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_without_checksum + """ + self.test_with_checksum = self.params.get("test_with_checksum", "/run/checksum/*") + self.log.info("Offline Drain : Without Checksum") + self.run_offline_drain_test(1, data=True) + + def test_osa_offline_drain_during_aggregation(self): + """Test ID: DAOS-7159. 
+ + Test Description: Validate Offline Drain during aggregation + + :avocado: tags=all,daily_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,checksum + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_during_aggregation + """ + self.test_during_aggregation = self.params.get( + "test_with_aggregation", "/run/aggregation/*") + self.log.info("Offline Drain : During Aggregation") + self.run_offline_drain_test(1, data=True) + + def test_osa_offline_drain_oclass(self): + """Test ID: DAOS-7159. + + Test Description: Validate Offline Drain with different object class + + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_oclass + """ + self.test_with_checksum = self.params.get("test_with_checksum", "/run/checksum/*") + self.log.info("Offline Drain : Oclass") + for oclass in self.test_oclass: + self.run_offline_drain_test(1, data=True, oclass=oclass) + + def test_osa_offline_drain_multiple_pools(self): + """Test ID: DAOS-7159. + + Test Description: Validate Offline Drain with multiple pools + + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_multiple_pools + """ + self.log.info("Offline Drain : Multiple Pools") + self.run_offline_drain_test(2, data=True) + + def test_osa_offline_drain_during_rebuild(self): + """Test ID: DAOS-7159. + + Test Description: Validate Offline Drain during rebuild + + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,rebuild + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_during_rebuild + """ + self.test_during_rebuild = self.params.get("test_with_rebuild", "/run/rebuild/*") + self.log.info("Offline Drain : During Rebuild") + self.run_offline_drain_test(1, data=True) + + def test_osa_offline_drain_after_snapshot(self): + """Test ID: DAOS-8057. + + Test Description: Validate Offline Drain after taking snapshot. + + :avocado: tags=all,daily_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,checksum + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_after_snapshot + """ + self.test_with_snapshot = self.params.get("test_with_snapshot", "/run/snapshot/*") + self.log.info("Offline Drain : After taking snapshot") + self.run_offline_drain_test(1, data=True) + + def test_osa_offline_drain_with_less_pool_space(self): + """Test ID: DAOS-7160. + + Test Description: Drain rank after with less pool space. + + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,offline_drain_full + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_with_less_pool_space + """ + self.log.info("Offline Drain : Test with less pool space") + oclass = self.params.get("pool_test_oclass", '/run/pool_capacity/*') + pool_fillup = self.params.get("pool_fillup", '/run/pool_capacity/*') + self.run_offline_drain_test(1, data=True, oclass=oclass, pool_fillup=pool_fillup) + + def test_osa_offline_drain_with_multiple_ranks(self): + """Test ID: DAOS-4753. + + Test Description: Drain multiple ranks at the same time. 
+ + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=osa,osa_drain,offline_drain,offline_drain_full + :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_with_multiple_ranks + """ + self.log.info("Offline Drain : Test with mutiple ranks") + self.multiple_ranks = self.params.get("rank_list", '/run/multiple_ranks/*') + self.run_offline_drain_test(1, data=True, multiple_ranks=True) diff --git a/src/tests/ftest/osa/offline_drain.yaml b/src/tests/ftest/osa/offline_drain.yaml index 03784a29608..bb0b06a03a2 100644 --- a/src/tests/ftest/osa/offline_drain.yaml +++ b/src/tests/ftest/osa/offline_drain.yaml @@ -1,111 +1,109 @@ -hosts: - test_servers: 3 - test_clients: 1 -timeout: 2400 -setup: - start_servers_once: false -skip_add_log_msg: true -server_config: - name: daos_server - engines_per_host: 2 - engines: - 0: - pinned_numa_node: 0 - nr_xs_helpers: 1 - fabric_iface: ib0 - fabric_iface_port: 31416 - log_file: daos_server0.log - log_mask: INFO,MEM=ERR - env_vars: - - DD_MASK=mgmt,md - storage: auto - 1: - pinned_numa_node: 1 - nr_xs_helpers: 1 - fabric_iface: ib1 - fabric_iface_port: 31516 - log_file: daos_server1.log - log_mask: INFO,MEM=ERR - env_vars: - - DD_MASK=mgmt,md - storage: auto -pool: - scm_size: 12000000000 - nvme_size: 108000000000 - svcn: 4 - rebuild_timeout: 240 - properties: scrub:timed -container: - type: POSIX - control_method: daos - oclass: RP_3G6 - properties: cksum:crc64,cksum_size:16384,srv_cksum:on,rd_fac:2 -dkeys: - single: - no_of_dkeys: - - 50 -akeys: - single: - no_of_akeys: - - 10 -record: - 1KB: - length: - - 1024 -ior: - clientslots: - slots: 48 - test_file: /testFile - repetitions: 1 - dfs_destroy: false - iorflags: - write_flags: "-w -F -k -G 1" - read_flags: "-F -r -R -k -G 1" - api: DFS - dfs_oclass: RP_3G6 - dfs_dir_oclass: RP_3G6 - ior_test_sequence: - # - [scmsize, nvmesize, transfersize, blocksize] - # The values are set to be in the multiples of 10. - # Values are appx GB. 
- - [12000000000, 108000000000, 500000, 500000000] -mdtest: - api: DFS - client_processes: - np: 30 - num_of_files_dirs: 4067 # creating total of 120K files - test_dir: "/" - iteration: 1 - dfs_destroy: false - dfs_oclass: RP_3G6 - dfs_dir_oclass: RP_3G6 - manager: "MPICH" - flags: "-u" - wr_size: - 32K: - write_bytes: 32768 - read_bytes: 32768 - verbosity_value: 1 - depth: 0 -test_obj_class: - oclass: - - RP_2G8 - - RP_4G1 - - EC_2P1G1 -aggregation: - test_with_aggregation: true -rebuild: - test_with_rebuild: true -checksum: - test_with_checksum: false -snapshot: - test_with_snapshot: true -mutliple_ranks: - # For multiple ranks - test_with_multiple_ranks: true - rank_list: ["1, 2"] -test_ranks: - rank_list: ["2", "5"] -pool_capacity: - pool_fillup: 10 - pool_test_oclass: RP_2GX +hosts: + test_servers: 3 + test_clients: 1 +timeout: 2400 +setup: + start_servers_once: false +skip_add_log_msg: true +server_config: + name: daos_server + engines_per_host: 2 + engines: + 0: + pinned_numa_node: 0 + nr_xs_helpers: 1 + fabric_iface: ib0 + fabric_iface_port: 31416 + log_file: daos_server0.log + log_mask: INFO,MEM=ERR + env_vars: + - DD_MASK=mgmt,md + storage: auto + 1: + pinned_numa_node: 1 + nr_xs_helpers: 1 + fabric_iface: ib1 + fabric_iface_port: 31516 + log_file: daos_server1.log + log_mask: INFO,MEM=ERR + env_vars: + - DD_MASK=mgmt,md + storage: auto +pool: + scm_size: 12000000000 + nvme_size: 108000000000 + svcn: 4 + rebuild_timeout: 240 + properties: scrub:timed +container: + type: POSIX + control_method: daos + oclass: RP_3G6 + properties: cksum:crc64,cksum_size:16384,srv_cksum:on,rd_fac:2 +dkeys: + single: + no_of_dkeys: + - 50 +akeys: + single: + no_of_akeys: + - 10 +record: + 1KB: + length: + - 1024 +ior: + clientslots: + slots: 48 + test_file: /testFile + repetitions: 1 + dfs_destroy: false + iorflags: + write_flags: "-w -F -k -G 1" + read_flags: "-F -r -R -k -G 1" + api: DFS + dfs_oclass: RP_3G6 + dfs_dir_oclass: RP_3G6 + ior_test_sequence: + # - [scmsize, nvmesize, transfersize, blocksize] + # The values are set to be in the multiples of 10. + # Values are appx GB. + - [12000000000, 108000000000, 500000, 500000000] +mdtest: + api: DFS + client_processes: + np: 30 + num_of_files_dirs: 4067 # creating total of 120K files + test_dir: "/" + iteration: 1 + dfs_destroy: false + dfs_oclass: RP_3G6 + dfs_dir_oclass: RP_3G6 + manager: "MPICH" + flags: "-u" + wr_size: + 32K: + write_bytes: 32768 + read_bytes: 32768 + verbosity_value: 1 + depth: 0 +test_obj_class: + oclass: + - RP_2G8 + - RP_4G1 + - EC_2P1G1 +aggregation: + test_with_aggregation: true +rebuild: + test_with_rebuild: true +checksum: + test_with_checksum: false +snapshot: + test_with_snapshot: true +mutliple_ranks: + rank_list: ["1, 2"] +test_ranks: + rank_list: ["2", "5"] +pool_capacity: + pool_fillup: 10 + pool_test_oclass: RP_2GX diff --git a/src/tests/ftest/osa/online_drain.py b/src/tests/ftest/osa/online_drain.py index 1ee7a7e8a90..05286d71b9f 100644 --- a/src/tests/ftest/osa/online_drain.py +++ b/src/tests/ftest/osa/online_drain.py @@ -31,15 +31,15 @@ def setUp(self): self.hostfile_clients = write_host_file(self.hostlist_clients, self.workdir) self.dmg_command.exit_status_exception = True self.pool = None - self.multiple_ranks = None - def run_online_drain_test(self, num_pool, oclass=None, app_name="ior"): + def run_online_drain_test(self, num_pool, oclass=None, app_name="ior", multiple_ranks=False): """Run the Online drain without data. Args: num_pool (int) : total pools to create for testing purposes. 
oclass (str) : Object class type (RP_2G1, etc) app_name (str) : application to run on parallel (ior or mdtest). Defaults to ior. + multiple_ranks (bool): Perform multiple ranks testing (Default: False) """ # Create a pool pool = {} @@ -54,7 +54,7 @@ def run_online_drain_test(self, num_pool, oclass=None, app_name="ior"): # Drain one of the ranks (or server) rank = self.random.choice(list(self.server_managers[0].ranks.keys())) # For testing with multiple ranks as dmg parameters, use a list of ranks. - if self.test_with_multiple_ranks is True: + if multiple_ranks is True: rank = self.multiple_ranks for val in range(0, num_pool): @@ -203,7 +203,5 @@ def test_osa_online_drain_with_multiple_ranks(self): :avocado: tags=OSAOnlineDrain,test_osa_online_drain_with_multiple_ranks """ self.log.info("Online Drain : Test with mutiple ranks") - self.test_with_multiple_ranks = self.params.get("test_with_multiple_ranks", - '/run/multiple_ranks/*') self.multiple_ranks = self.params.get("rank_list", '/run/multiple_ranks/*') - self.run_online_drain_test(1, data=True) \ No newline at end of file + self.run_online_drain_test(1, data=True, multiple_ranks=True) \ No newline at end of file diff --git a/src/tests/ftest/osa/online_drain.yaml b/src/tests/ftest/osa/online_drain.yaml index ed2a9f2ee49..911d257c950 100644 --- a/src/tests/ftest/osa/online_drain.yaml +++ b/src/tests/ftest/osa/online_drain.yaml @@ -91,5 +91,4 @@ rebuild: checksum: test_with_checksum: false mutliple_ranks: - test_with_multiple_ranks: true rank_list: ["1, 2"] \ No newline at end of file diff --git a/src/tests/ftest/util/osa_utils.py b/src/tests/ftest/util/osa_utils.py index 54d526509fa..6db4c3ef39d 100644 --- a/src/tests/ftest/util/osa_utils.py +++ b/src/tests/ftest/util/osa_utils.py @@ -52,7 +52,6 @@ def setUp(self): self.test_with_rf = False self.test_with_blank_node = False self.test_with_snapshot = False - self.test_with_multiple_ranks = False @fail_on(CommandFailure) def assert_on_rebuild_failure(self): From 2165436f71399e78f0836de7b40276f2380d8aa0 Mon Sep 17 00:00:00 2001 From: rpadma2 Date: Fri, 24 Jan 2025 15:59:38 -0500 Subject: [PATCH 6/8] DAOS-4753 test: Update osa_utils.py format. Test-tag: test_osa_online_drain_with_multiple_ranks test_osa_offline_drain_with_multiple_ranks Skip-unit-tests: true Signed-off-by: rpadma2 --- src/tests/ftest/util/osa_utils.py | 890 +++++++++++++++--------------- 1 file changed, 445 insertions(+), 445 deletions(-) diff --git a/src/tests/ftest/util/osa_utils.py b/src/tests/ftest/util/osa_utils.py index 6db4c3ef39d..5f66c4ca6a3 100644 --- a/src/tests/ftest/util/osa_utils.py +++ b/src/tests/ftest/util/osa_utils.py @@ -1,445 +1,445 @@ -""" - (C) Copyright 2020-2024 Intel Corporation. - (C) Copyright 2025 Hewlett Packard Enterprise Development LP - - SPDX-License-Identifier: BSD-2-Clause-Patent -""" -import queue -import re -import threading -import time - -from avocado import fail_on -from exception_utils import CommandFailure -from general_utils import run_command -from ior_test_base import IorTestBase -from mdtest_test_base import MdtestBase - - -class OSAUtils(MdtestBase, IorTestBase): - """Test Class Description: This test runs daos_server offline drain test cases. 
- - :avocado: recursive - """ - - def setUp(self): - """Set up for test case.""" - super().setUp() - self.pool_cont_dict = {} - self.container = None - self.obj = None - self.ioreq = None - self.dmg_command = self.get_dmg_command() - self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*', - default=[0])[0] - self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*', - default=[0])[0] - self.record_length = self.params.get("length", '/run/record/*', - default=[0])[0] - self.ior_w_flags = self.params.get("write_flags", '/run/ior/iorflags/*', - default="") - self.ior_r_flags = self.params.get("read_flags", '/run/ior/iorflags/*') - self.server_count = len(self.hostlist_servers) - self.engine_count = self.server_managers[0].get_config_value( - "engines_per_host") - self.out_queue = queue.Queue() - self.dmg_command.exit_status_exception = False - self.test_during_aggregation = False - self.test_during_rebuild = False - self.test_with_checksum = True - # By default, test_with_rf is set to False. - # It is up to individual test to enable it. - self.test_with_rf = False - self.test_with_blank_node = False - self.test_with_snapshot = False - - @fail_on(CommandFailure) - def assert_on_rebuild_failure(self): - """If the rebuild is not successful, raise assert.""" - rebuild_status = self.pool.get_rebuild_status(True) - self.log.info("Rebuild Status: %s", rebuild_status) - if rebuild_status in ["failed", "scanning", "aborted", "busy"]: - self.fail("Rebuild failed") - - @fail_on(CommandFailure) - def print_and_assert_on_rebuild_failure(self, out, timeout=3): - """Print the out value (daos, dmg, etc) and check for rebuild completion. - - If rebuild does not complete, raise an assertion. - """ - self.log.info(out) - self.pool.wait_for_rebuild_to_start() - self.pool.wait_for_rebuild_to_end(timeout) - self.assert_on_rebuild_failure() - - @fail_on(CommandFailure) - def get_ipaddr_for_rank(self, rank=None): - """Obtain the IPAddress and port number for a particular server rank. - - Args: - rank (int): daos_engine rank. Defaults to None. - - Returns: - ip_addr (str) : IPAddress for the rank. - port_num (str) : Port number for the rank. - """ - output = self.dmg_command.system_query() - members_length = self.server_count * self.engine_count - for index in range(0, members_length): - if rank == int(output["response"]["members"][index]["rank"]): - temp = output["response"]["members"][index]["addr"] - ip_addr = temp.split(":") - temp = output["response"]["members"][index]["fabric_uri"] - port_num = temp.split(":") - return ip_addr[0], port_num[2] - return None, None - - @fail_on(CommandFailure) - def remove_pool_dir(self, ip_addr=None, port_num=None): - """Remove the /mnt/daos[x]//vos-* directory. - - Args: - ip_addr (str): IP address of the daos server. Defaults to None. - port_number (str) : Port number the daos server. - """ - # Create the expected port list - # expected_ports = [port0] - Single engine/server - # expected_ports = [port0, port1] - Two engine/server - expected_ports = [engine_param.get_value("fabric_iface_port") - for engine_param in self.server_managers[-1]. - manager.job.yaml.engine_params] - self.log.info("Expected ports : %s", expected_ports) - if ip_addr is None or port_num is None: - self.log.info("ip_addr : %s port_number: %s", ip_addr, port_num) - self.fail("No IP Address or Port number provided") - else: - if self.engine_count == 1: - self.log.info("Single Engine per Server") - cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \ - sudo rm -rf /mnt/daos/{}/vos-*". 
\ - format(ip_addr, self.pool.uuid) - elif self.engine_count == 2: - if port_num == str(expected_ports[0]): - port_val = 0 - elif port_num == str(expected_ports[1]): - port_val = 1 - else: - port_val = None # To appease pylint - self.log.info("port_number: %s", port_num) - self.fail("Invalid port number") - cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \ - sudo rm -rf /mnt/daos{}/{}/vos-*". \ - format(ip_addr, port_val, self.pool.uuid) - else: - cmd = None # To appease pylint - self.fail("Not supported engine per server configuration") - run_command(cmd) - - def set_container(self, container): - """Set the OSA utils container object. - - Args: - container (TestContainer): Container object to be used within OSA utils. - """ - self.container = container - - def simple_osa_reintegrate_loop(self, rank, action="exclude", loop_time=100): - """Exclude or drain and reintegrate a rank for a certain amount of time. - - Args: - rank (int): daos server rank. - action (str, optional): "exclude" or "drain". Defaults to "exclude" - loop_time (int, optional): Total time to perform drain/reintegrate operation in a loop. - Defaults to 100. - """ - start_time = 0 - finish_time = 0 - start_time = time.time() - while int(finish_time - start_time) < loop_time: - if action == "exclude": - output = self.pool.exclude(rank) - else: - output = self.pool.drain(rank) - self.print_and_assert_on_rebuild_failure(output) - output = self.pool.reintegrate(rank) - self.print_and_assert_on_rebuild_failure(output) - finish_time = time.time() - - def prepare_cont_ior_write_read(self, oclass, flags): - """Prepare the containers for IOR write and read invocations. - - To enable aggregation: - - Create two containers and read always from first container - Normal usage (use only a single container): - - Create a single container and use the same. - - Args: - oclass (str): IOR object class - flags (str): IOR flags - """ - self.log.info(self.pool_cont_dict) - # If pool is not in the dictionary, - # initialize its container list to None - # {pool : [None, None], [None, None]} - if self.pool not in self.pool_cont_dict: - self.pool_cont_dict[self.pool] = [None] * 4 - # Create container if the pool doesn't have one. - # Otherwise, use the existing container in the pool. 
- # pool_cont_dict {pool A: [containerA, Updated, - # containerB, Updated], - # pool B : containerA, Updated, - # containerB, None]} - if self.pool_cont_dict[self.pool][0] is None: - self.add_container(self.pool, create=False) - self.set_cont_class_properties(oclass) - if self.test_with_checksum is False: - tmp = self.get_object_replica_value(oclass) - rf_value = "rd_fac:{}".format(tmp - 1) - self.update_cont_properties(rf_value) - self.container.create() - self.pool_cont_dict[self.pool][0] = self.container - self.pool_cont_dict[self.pool][1] = "Updated" - else: - if ((self.test_during_aggregation is True) - and (self.pool_cont_dict[self.pool][1] == "Updated") - and (self.pool_cont_dict[self.pool][3] is None) - and ("-w" in flags)): - # Write to the second container - self.add_container(self.pool, create=False) - self.set_cont_class_properties(oclass) - if self.test_with_checksum is False: - tmp = self.get_object_replica_value(oclass) - rf_value = "rd_fac:{}".format(tmp - 1) - self.update_cont_properties(rf_value) - self.container.create() - self.pool_cont_dict[self.pool][2] = self.container - self.pool_cont_dict[self.pool][3] = "Updated" - else: - self.container = self.pool_cont_dict[self.pool][0] - - def delete_extra_container(self, pool): - """Delete the extra container in the pool. - - Refer prepare_cont_ior_write_read. This method should be called when OSA tests intend to - enable aggregation. - - Args: - pool (TestPool): pool object - """ - self.pool.set_property("reclaim", "time") - extra_container = self.pool_cont_dict[pool][2] - extra_container.destroy() - self.pool_cont_dict[pool][3] = None - - def get_object_replica_value(self, oclass): - """Get the object replica value for an object class. - - Args: - oclass (str): Object Class (eg: RP_2G1,etc) - - Returns: - int: Object replica value - - """ - value = 0 - if "_" in oclass: - replica_list = oclass.split("_") - value = replica_list[1][0] - else: - self.log.info("Wrong Object Class. Cannot split") - return int(value) - - def update_cont_properties(self, cont_prop): - """Update the existing container properties. - - Args: - cont_prop (str): Replace existing container properties with new value - """ - self.container.properties.value = cont_prop - - def set_cont_class_properties(self, oclass="S1"): - """Update the container class to match the IOR/Mdtest object class. - - Fix the rf factor based on object replica value. - Also, remove the redundancy factor for S type object class. - - Args: - oclass (str, optional): Container object class to be set. Defaults to "S1". - """ - self.container.oclass.value = oclass - # Set the container properties properly for S!, S2 class. - # rf should not be set to 1 for S type object class. - match = re.search("^S\\d$", oclass) - prop = self.container.properties.value - if match is not None: - prop = prop.replace("rd_fac:1", "rd_fac:0") - else: - tmp = self.get_object_replica_value(oclass) - rf_value = "rd_fac:{}".format(tmp - 1) - prop = prop.replace("rd_fac:1", rf_value) - self.container.properties.value = prop - # Over-write oclass settings if using redundancy factor - # and self.test_with_rf is True. - # This has to be done so that container created doesn't - # use the object class. 
- if self.test_with_rf is True and \ - "rf" in self.container.properties.value: - self.log.info( - "Detected container redundancy factor: %s", - self.container.properties.value) - self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass") - self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass") - self.container.oclass.update(None) - - def assert_on_exception(self, out_queue=None): - """Assert on exception while executing an application. - - Args: - out_queue (queue): Check whether the queue is empty. If empty, app (ior, mdtest) didn't - encounter error. - """ - if out_queue is None: - out_queue = self.out_queue - if out_queue.empty(): - pass - else: - exc = out_queue.get(block=False) - out_queue.put(exc) - raise CommandFailure(exc) - - def cleanup_queue(self, out_queue=None): - """Cleanup the existing thread queue. - - Args: - out_queue (queue): Queue to cleanup. - """ - if out_queue is None: - out_queue = self.out_queue - while not out_queue.empty(): - out_queue.get(block=True) - - def run_ior_thread(self, action, oclass, test, single_cont_read=True, fail_on_warning=True, - pool=None): - """Start the IOR thread for either writing or reading data to/from a container. - - Args: - action (str): Start the IOR thread with Read or Write - oclass (str): IOR object class - test (list): IOR test sequence - flags (str): IOR flags - single_cont_read (bool, optional): Always read from the 1st container. Defaults to True. - fail_on_warning (bool, optional): Test terminates for IOR warnings. Defaults to True. - pool (TestPool, optional): Pool to run ior on. Defaults to None. - - """ - # Intermediate (between correct and hack) implementation for allowing a - # pool to be passed in. Needs to be fixed by making the pool argument - # required. - if pool is None: - pool = self.pool - - self.cleanup_queue() - if action == "Write": - flags = self.ior_w_flags - else: - flags = self.ior_r_flags - - # Add a thread for these IOR arguments - process = threading.Thread(target=self.ior_thread, - kwargs={"pool": pool, - "oclass": oclass, - "test": test, - "flags": flags, - "single_cont_read": - single_cont_read, - "fail_on_warning": - fail_on_warning}) - # Launch the IOR thread - process.start() - # Wait for the thread to finish - process.join() - if fail_on_warning and not self.out_queue.empty(): - self.assert_on_exception() - - def ior_thread(self, pool, oclass, test, flags, single_cont_read=True, fail_on_warning=True): - """Start an IOR thread. - - Args: - pool (object): pool handle - oclass (str): IOR object class, container class. - test (list): IOR test sequence - flags (str): IOR flags - single_cont_read (bool, optional): Always read from the 1st container. Defaults to True. - fail_on_warning (bool, optional): Test terminates for IOR warnings. Defaults to True. - """ - self.cleanup_queue() - self.pool = pool - self.ior_cmd.get_params(self) - self.ior_cmd.set_daos_params(self.pool, None) - self.log.info("Redundancy Factor : %s", self.test_with_rf) - self.ior_cmd.dfs_oclass.update(oclass) - self.ior_cmd.dfs_dir_oclass.update(oclass) - if single_cont_read is True: - # Prepare the containers created and use in a specific - # way defined in prepare_cont_ior_write. - self.prepare_cont_ior_write_read(oclass, flags) - elif single_cont_read is False and self.container is not None: - # Here self.container is having actual value. Just use it. 
- self.log.info(self.container) - else: - self.fail("Not supported option on ior_thread") - try: - job_manager = self.get_ior_job_manager_command() - except CommandFailure as err_msg: - self.out_queue.put(err_msg) - self.assert_on_exception() - job_manager.job.dfs_cont.update(self.container.identifier) - self.ior_cmd.transfer_size.update(test[2]) - self.ior_cmd.block_size.update(test[3]) - self.ior_cmd.flags.update(flags) - # Update oclass settings if using redundancy factor - # and self.test_with_rf is True. - if self.test_with_rf is True and "rf" in self.container.properties.value: - self.log.info( - "Detected container redundancy factor: %s", self.container.properties.value) - self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass") - self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass") - # Run run_ior_with_pool without invoking the pool query method for - # displaying pool space information (display_space=False) - self.run_ior_with_pool(create_pool=False, create_cont=False, - fail_on_warning=fail_on_warning, - display_space=False, - out_queue=self.out_queue) - if fail_on_warning and not self.out_queue.empty(): - self.assert_on_exception() - - def run_mdtest_thread(self, oclass="RP_2G1"): - """Start mdtest thread and wait until thread completes. - - Args: - oclass (str): IOR object class, container class. - """ - # Create container only - self.mdtest_cmd.dfs_destroy.update(False) - create_container = 0 - if self.container is None: - self.add_container(self.pool, create=False) - create_container = 1 - self.mdtest_cmd.dfs_oclass.update(oclass) - self.set_cont_class_properties(oclass) - if self.test_with_checksum is False: - tmp = self.get_object_replica_value(oclass) - rf_value = "rd_fac:{}".format(tmp - 1) - self.update_cont_properties(rf_value) - if create_container == 1: - self.container.create() - job_manager = self.get_mdtest_job_manager_command(self.manager) - job_manager.job.dfs_cont.update(self.container.identifier) - # Add a thread for these IOR arguments - process = threading.Thread(target=self.execute_mdtest) - # Launch the MDtest thread - process.start() - # Wait for the thread to finish - process.join() - if not self.out_queue.empty(): - self.assert_on_exception() +""" + (C) Copyright 2020-2024 Intel Corporation. + (C) Copyright 2025 Hewlett Packard Enterprise Development LP + + SPDX-License-Identifier: BSD-2-Clause-Patent +""" +import queue +import re +import threading +import time + +from avocado import fail_on +from exception_utils import CommandFailure +from general_utils import run_command +from ior_test_base import IorTestBase +from mdtest_test_base import MdtestBase + + +class OSAUtils(MdtestBase, IorTestBase): + """Test Class Description: This test runs daos_server offline drain test cases. 
+
+    :avocado: recursive
+    """
+
+    def setUp(self):
+        """Set up for test case."""
+        super().setUp()
+        self.pool_cont_dict = {}
+        self.container = None
+        self.obj = None
+        self.ioreq = None
+        self.dmg_command = self.get_dmg_command()
+        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*',
+                                           default=[0])[0]
+        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*',
+                                           default=[0])[0]
+        self.record_length = self.params.get("length", '/run/record/*',
+                                             default=[0])[0]
+        self.ior_w_flags = self.params.get("write_flags", '/run/ior/iorflags/*',
+                                           default="")
+        self.ior_r_flags = self.params.get("read_flags", '/run/ior/iorflags/*')
+        self.server_count = len(self.hostlist_servers)
+        self.engine_count = self.server_managers[0].get_config_value(
+            "engines_per_host")
+        self.out_queue = queue.Queue()
+        self.dmg_command.exit_status_exception = False
+        self.test_during_aggregation = False
+        self.test_during_rebuild = False
+        self.test_with_checksum = True
+        # By default, test_with_rf is set to False.
+        # It is up to individual test to enable it.
+        self.test_with_rf = False
+        self.test_with_blank_node = False
+        self.test_with_snapshot = False
+
+    @fail_on(CommandFailure)
+    def assert_on_rebuild_failure(self):
+        """If the rebuild is not successful, raise assert."""
+        rebuild_status = self.pool.get_rebuild_status(True)
+        self.log.info("Rebuild Status: %s", rebuild_status)
+        if rebuild_status in ["failed", "scanning", "aborted", "busy"]:
+            self.fail("Rebuild failed")
+
+    @fail_on(CommandFailure)
+    def print_and_assert_on_rebuild_failure(self, out, timeout=3):
+        """Print the out value (daos, dmg, etc) and check for rebuild completion.
+
+        If rebuild does not complete, raise an assertion.
+        """
+        self.log.info(out)
+        self.pool.wait_for_rebuild_to_start()
+        self.pool.wait_for_rebuild_to_end(timeout)
+        self.assert_on_rebuild_failure()
+
+    @fail_on(CommandFailure)
+    def get_ipaddr_for_rank(self, rank=None):
+        """Obtain the IPAddress and port number for a particular server rank.
+
+        Args:
+            rank (int): daos_engine rank. Defaults to None.
+
+        Returns:
+            ip_addr (str) : IPAddress for the rank.
+            port_num (str) : Port number for the rank.
+        """
+        output = self.dmg_command.system_query()
+        members_length = self.server_count * self.engine_count
+        for index in range(0, members_length):
+            if rank == int(output["response"]["members"][index]["rank"]):
+                temp = output["response"]["members"][index]["addr"]
+                ip_addr = temp.split(":")
+                temp = output["response"]["members"][index]["fabric_uri"]
+                port_num = temp.split(":")
+                return ip_addr[0], port_num[2]
+        return None, None
+
+    @fail_on(CommandFailure)
+    def remove_pool_dir(self, ip_addr=None, port_num=None):
+        """Remove the /mnt/daos[x]/<pool_uuid>/vos-* directory.
+
+        Args:
+            ip_addr (str): IP address of the daos server. Defaults to None.
+            port_num (str) : Port number of the daos server.
+        """
+        # Create the expected port list
+        # expected_ports = [port0] - Single engine/server
+        # expected_ports = [port0, port1] - Two engine/server
+        expected_ports = [engine_param.get_value("fabric_iface_port")
+                          for engine_param in self.server_managers[-1].
+                          manager.job.yaml.engine_params]
+        self.log.info("Expected ports : %s", expected_ports)
+        if ip_addr is None or port_num is None:
+            self.log.info("ip_addr : %s port_number: %s", ip_addr, port_num)
+            self.fail("No IP Address or Port number provided")
+        else:
+            if self.engine_count == 1:
+                self.log.info("Single Engine per Server")
+                cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \
+                    sudo rm -rf /mnt/daos/{}/vos-*".
\ + format(ip_addr, self.pool.uuid) + elif self.engine_count == 2: + if port_num == str(expected_ports[0]): + port_val = 0 + elif port_num == str(expected_ports[1]): + port_val = 1 + else: + port_val = None # To appease pylint + self.log.info("port_number: %s", port_num) + self.fail("Invalid port number") + cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \ + sudo rm -rf /mnt/daos{}/{}/vos-*". \ + format(ip_addr, port_val, self.pool.uuid) + else: + cmd = None # To appease pylint + self.fail("Not supported engine per server configuration") + run_command(cmd) + + def set_container(self, container): + """Set the OSA utils container object. + + Args: + container (TestContainer): Container object to be used within OSA utils. + """ + self.container = container + + def simple_osa_reintegrate_loop(self, rank, action="exclude", loop_time=100): + """Exclude or drain and reintegrate a rank for a certain amount of time. + + Args: + rank (int): daos server rank. + action (str, optional): "exclude" or "drain". Defaults to "exclude" + loop_time (int, optional): Total time to perform drain/reintegrate operation in a loop. + Defaults to 100. + """ + start_time = 0 + finish_time = 0 + start_time = time.time() + while int(finish_time - start_time) < loop_time: + if action == "exclude": + output = self.pool.exclude(rank) + else: + output = self.pool.drain(rank) + self.print_and_assert_on_rebuild_failure(output) + output = self.pool.reintegrate(rank) + self.print_and_assert_on_rebuild_failure(output) + finish_time = time.time() + + def prepare_cont_ior_write_read(self, oclass, flags): + """Prepare the containers for IOR write and read invocations. + + To enable aggregation: + - Create two containers and read always from first container + Normal usage (use only a single container): + - Create a single container and use the same. + + Args: + oclass (str): IOR object class + flags (str): IOR flags + """ + self.log.info(self.pool_cont_dict) + # If pool is not in the dictionary, + # initialize its container list to None + # {pool : [None, None], [None, None]} + if self.pool not in self.pool_cont_dict: + self.pool_cont_dict[self.pool] = [None] * 4 + # Create container if the pool doesn't have one. + # Otherwise, use the existing container in the pool. 
+        # pool_cont_dict {pool A: [containerA, Updated,
+        #                          containerB, Updated],
+        #                 pool B: [containerA, Updated,
+        #                          containerB, None]}
+        if self.pool_cont_dict[self.pool][0] is None:
+            self.add_container(self.pool, create=False)
+            self.set_cont_class_properties(oclass)
+            if self.test_with_checksum is False:
+                tmp = self.get_object_replica_value(oclass)
+                rf_value = "rd_fac:{}".format(tmp - 1)
+                self.update_cont_properties(rf_value)
+            self.container.create()
+            self.pool_cont_dict[self.pool][0] = self.container
+            self.pool_cont_dict[self.pool][1] = "Updated"
+        else:
+            if ((self.test_during_aggregation is True)
+                    and (self.pool_cont_dict[self.pool][1] == "Updated")
+                    and (self.pool_cont_dict[self.pool][3] is None)
+                    and ("-w" in flags)):
+                # Write to the second container
+                self.add_container(self.pool, create=False)
+                self.set_cont_class_properties(oclass)
+                if self.test_with_checksum is False:
+                    tmp = self.get_object_replica_value(oclass)
+                    rf_value = "rd_fac:{}".format(tmp - 1)
+                    self.update_cont_properties(rf_value)
+                self.container.create()
+                self.pool_cont_dict[self.pool][2] = self.container
+                self.pool_cont_dict[self.pool][3] = "Updated"
+            else:
+                self.container = self.pool_cont_dict[self.pool][0]
+
+    def delete_extra_container(self, pool):
+        """Delete the extra container in the pool.
+
+        Refer to prepare_cont_ior_write_read. This method should be called when OSA tests intend
+        to enable aggregation.
+
+        Args:
+            pool (TestPool): pool object
+        """
+        self.pool.set_property("reclaim", "time")
+        extra_container = self.pool_cont_dict[pool][2]
+        extra_container.destroy()
+        self.pool_cont_dict[pool][3] = None
+
+    def get_object_replica_value(self, oclass):
+        """Get the object replica value for an object class.
+
+        Args:
+            oclass (str): Object Class (eg: RP_2G1, etc)
+
+        Returns:
+            int: Object replica value
+
+        """
+        value = 0
+        if "_" in oclass:
+            replica_list = oclass.split("_")
+            value = replica_list[1][0]
+        else:
+            self.log.info("Wrong Object Class. Cannot split")
+        return int(value)
+
+    def update_cont_properties(self, cont_prop):
+        """Update the existing container properties.
+
+        Args:
+            cont_prop (str): Replace existing container properties with new value
+        """
+        self.container.properties.value = cont_prop
+
+    def set_cont_class_properties(self, oclass="S1"):
+        """Update the container class to match the IOR/Mdtest object class.
+
+        Fix the rf factor based on object replica value.
+        Also, remove the redundancy factor for S type object class.
+
+        Args:
+            oclass (str, optional): Container object class to be set. Defaults to "S1".
+        """
+        self.container.oclass.value = oclass
+        # Set the container properties properly for S1, S2 class.
+        # rf should not be set to 1 for S type object class.
+        match = re.search("^S\\d$", oclass)
+        prop = self.container.properties.value
+        if match is not None:
+            prop = prop.replace("rd_fac:1", "rd_fac:0")
+        else:
+            tmp = self.get_object_replica_value(oclass)
+            rf_value = "rd_fac:{}".format(tmp - 1)
+            prop = prop.replace("rd_fac:1", rf_value)
+        self.container.properties.value = prop
+        # Over-write oclass settings if using redundancy factor
+        # and self.test_with_rf is True.
+        # This has to be done so that container created doesn't
+        # use the object class.
+ if self.test_with_rf is True and \ + "rf" in self.container.properties.value: + self.log.info( + "Detected container redundancy factor: %s", + self.container.properties.value) + self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass") + self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass") + self.container.oclass.update(None) + + def assert_on_exception(self, out_queue=None): + """Assert on exception while executing an application. + + Args: + out_queue (queue): Check whether the queue is empty. If empty, app (ior, mdtest) didn't + encounter error. + """ + if out_queue is None: + out_queue = self.out_queue + if out_queue.empty(): + pass + else: + exc = out_queue.get(block=False) + out_queue.put(exc) + raise CommandFailure(exc) + + def cleanup_queue(self, out_queue=None): + """Cleanup the existing thread queue. + + Args: + out_queue (queue): Queue to cleanup. + """ + if out_queue is None: + out_queue = self.out_queue + while not out_queue.empty(): + out_queue.get(block=True) + + def run_ior_thread(self, action, oclass, test, single_cont_read=True, fail_on_warning=True, + pool=None): + """Start the IOR thread for either writing or reading data to/from a container. + + Args: + action (str): Start the IOR thread with Read or Write + oclass (str): IOR object class + test (list): IOR test sequence + flags (str): IOR flags + single_cont_read (bool, optional): Always read from the 1st container. Defaults to True. + fail_on_warning (bool, optional): Test terminates for IOR warnings. Defaults to True. + pool (TestPool, optional): Pool to run ior on. Defaults to None. + + """ + # Intermediate (between correct and hack) implementation for allowing a + # pool to be passed in. Needs to be fixed by making the pool argument + # required. + if pool is None: + pool = self.pool + + self.cleanup_queue() + if action == "Write": + flags = self.ior_w_flags + else: + flags = self.ior_r_flags + + # Add a thread for these IOR arguments + process = threading.Thread(target=self.ior_thread, + kwargs={"pool": pool, + "oclass": oclass, + "test": test, + "flags": flags, + "single_cont_read": + single_cont_read, + "fail_on_warning": + fail_on_warning}) + # Launch the IOR thread + process.start() + # Wait for the thread to finish + process.join() + if fail_on_warning and not self.out_queue.empty(): + self.assert_on_exception() + + def ior_thread(self, pool, oclass, test, flags, single_cont_read=True, fail_on_warning=True): + """Start an IOR thread. + + Args: + pool (object): pool handle + oclass (str): IOR object class, container class. + test (list): IOR test sequence + flags (str): IOR flags + single_cont_read (bool, optional): Always read from the 1st container. Defaults to True. + fail_on_warning (bool, optional): Test terminates for IOR warnings. Defaults to True. + """ + self.cleanup_queue() + self.pool = pool + self.ior_cmd.get_params(self) + self.ior_cmd.set_daos_params(self.pool, None) + self.log.info("Redundancy Factor : %s", self.test_with_rf) + self.ior_cmd.dfs_oclass.update(oclass) + self.ior_cmd.dfs_dir_oclass.update(oclass) + if single_cont_read is True: + # Prepare the containers created and use in a specific + # way defined in prepare_cont_ior_write. + self.prepare_cont_ior_write_read(oclass, flags) + elif single_cont_read is False and self.container is not None: + # Here self.container is having actual value. Just use it. 
+ self.log.info(self.container) + else: + self.fail("Not supported option on ior_thread") + try: + job_manager = self.get_ior_job_manager_command() + except CommandFailure as err_msg: + self.out_queue.put(err_msg) + self.assert_on_exception() + job_manager.job.dfs_cont.update(self.container.identifier) + self.ior_cmd.transfer_size.update(test[2]) + self.ior_cmd.block_size.update(test[3]) + self.ior_cmd.flags.update(flags) + # Update oclass settings if using redundancy factor + # and self.test_with_rf is True. + if self.test_with_rf is True and "rf" in self.container.properties.value: + self.log.info( + "Detected container redundancy factor: %s", self.container.properties.value) + self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass") + self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass") + # Run run_ior_with_pool without invoking the pool query method for + # displaying pool space information (display_space=False) + self.run_ior_with_pool(create_pool=False, create_cont=False, + fail_on_warning=fail_on_warning, + display_space=False, + out_queue=self.out_queue) + if fail_on_warning and not self.out_queue.empty(): + self.assert_on_exception() + + def run_mdtest_thread(self, oclass="RP_2G1"): + """Start mdtest thread and wait until thread completes. + + Args: + oclass (str): IOR object class, container class. + """ + # Create container only + self.mdtest_cmd.dfs_destroy.update(False) + create_container = 0 + if self.container is None: + self.add_container(self.pool, create=False) + create_container = 1 + self.mdtest_cmd.dfs_oclass.update(oclass) + self.set_cont_class_properties(oclass) + if self.test_with_checksum is False: + tmp = self.get_object_replica_value(oclass) + rf_value = "rd_fac:{}".format(tmp - 1) + self.update_cont_properties(rf_value) + if create_container == 1: + self.container.create() + job_manager = self.get_mdtest_job_manager_command(self.manager) + job_manager.job.dfs_cont.update(self.container.identifier) + # Add a thread for these IOR arguments + process = threading.Thread(target=self.execute_mdtest) + # Launch the MDtest thread + process.start() + # Wait for the thread to finish + process.join() + if not self.out_queue.empty(): + self.assert_on_exception() From 9f6d40dd05b2c7e2a79e6411b7985f9b43dfec5d Mon Sep 17 00:00:00 2001 From: rpadma2 Date: Fri, 24 Jan 2025 17:19:29 -0500 Subject: [PATCH 7/8] DAOS-4753 test: Fix pylint issues and add reintegration tests. 
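
The new *_with_multiple_ranks tests drive the rank-list form of the dmg
drain/reintegrate commands introduced earlier in this series. For
illustration only (pool label and rank values are placeholders), the
tests reduce to commands of the form:

    dmg pool drain <pool> --ranks=1,2
    dmg pool reintegrate <pool> --ranks=1,2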
Test-tag: test_osa_online_drain_with_multiple_ranks test_osa_offline_drain_with_multiple_ranks test_osa_online_reintegration_with_multiple_ranks test_osa_offline_reintegrate_with_multiple_ranks Skip-unit-tests: true Signed-off-by: rpadma2 --- src/tests/ftest/osa/offline_drain.py | 9 +++---- src/tests/ftest/osa/offline_reintegration.py | 22 +++++++++++++++-- src/tests/ftest/osa/online_drain.py | 4 ++-- src/tests/ftest/osa/online_drain.yaml | 2 +- src/tests/ftest/osa/online_reintegration.py | 25 +++++++++++++++++--- utils/githooks/pre-commit.d/70-isort.sh | 10 ++++++++ 6 files changed, 60 insertions(+), 12 deletions(-) diff --git a/src/tests/ftest/osa/offline_drain.py b/src/tests/ftest/osa/offline_drain.py index 5094fe18b7a..680a0cc497b 100644 --- a/src/tests/ftest/osa/offline_drain.py +++ b/src/tests/ftest/osa/offline_drain.py @@ -32,7 +32,8 @@ def setUp(self): # Recreate the client hostfile without slots defined self.hostfile_clients = write_host_file(self.hostlist_clients, self.workdir) - def run_offline_drain_test(self, num_pool, data=False, oclass=None, pool_fillup=0, multiple_ranks=False): + def run_offline_drain_test(self, num_pool, data=False, oclass=None, pool_fillup=0, + multiple_ranks=False): """Run the offline drain without data. Args: @@ -49,11 +50,11 @@ def run_offline_drain_test(self, num_pool, data=False, oclass=None, pool_fillup= if oclass is None: oclass = self.ior_cmd.dfs_oclass.value - + # For testing multiple ranks as dmg parameters, use a list of ranks. if multiple_ranks is True: self.ranks = self.multiple_ranks - + # Exclude target : random two targets (target idx : 0-7) exc = random.randint(0, 6) # nosec target_list.append(exc) @@ -276,6 +277,6 @@ def test_osa_offline_drain_with_multiple_ranks(self): :avocado: tags=osa,osa_drain,offline_drain,offline_drain_full :avocado: tags=OSAOfflineDrain,test_osa_offline_drain_with_multiple_ranks """ - self.log.info("Offline Drain : Test with mutiple ranks") + self.log.info("Offline Drain : Test with multiple ranks") self.multiple_ranks = self.params.get("rank_list", '/run/multiple_ranks/*') self.run_offline_drain_test(1, data=True, multiple_ranks=True) diff --git a/src/tests/ftest/osa/offline_reintegration.py b/src/tests/ftest/osa/offline_reintegration.py index d88a2ee2f9c..184bcdae32d 100644 --- a/src/tests/ftest/osa/offline_reintegration.py +++ b/src/tests/ftest/osa/offline_reintegration.py @@ -1,5 +1,6 @@ """ (C) Copyright 2020-2023 Intel Corporation. + (C) Copyright 2025 Hewlett Packard Enterprise Development LP SPDX-License-Identifier: BSD-2-Clause-Patent """ @@ -34,7 +35,7 @@ def setUp(self): self.dmg_command.exit_status_exception = True def run_offline_reintegration_test(self, num_pool, data=False, server_boot=False, oclass=None, - pool_fillup=0): + pool_fillup=0, multiple_ranks=False): # pylint: disable=too-many-branches """Run the offline reintegration without data. @@ -46,6 +47,7 @@ def run_offline_reintegration_test(self, num_pool, data=False, server_boot=False oclass (str) : daos object class string (eg: "RP_2G8") pool_fillup (int) : Percentage of pool filled up with data before performing OSA operations. 
+            multiple_ranks (bool) : Perform multiple ranks testing (Default: False)
         """
         # Create 'num_pool' number of pools
         pools = []
@@ -82,7 +84,10 @@ def run_offline_reintegration_test(self, num_pool, data=False, server_boot=False
             self.run_ior_thread("Write", oclass, test_seq)
 
         # Exclude ranks 0 and 3 from a random pool
-        ranks = [0, 3]
+        if multiple_ranks is True:
+            ranks = ["0,3"]
+        else:
+            ranks = [0, 3]
         self.pool = random.choice(pools)  # nosec
         for loop in range(0, self.loop_test_cnt):
             self.log.info(
@@ -318,3 +323,16 @@ def test_osa_offline_reintegrate_with_less_pool_space(self):
         oclass = self.params.get("pool_test_oclass", '/run/pool_capacity/*')
         pool_fillup = self.params.get("pool_fillup", '/run/pool_capacity/*')
         self.run_offline_reintegration_test(1, data=True, oclass=oclass, pool_fillup=pool_fillup)
+
+    def test_osa_offline_reintegrate_with_multiple_ranks(self):
+        """Test ID: DAOS-4753.
+
+        Test Description: Exclude and Reintegrate multiple ranks.
+
+        :avocado: tags=all,daily_regression
+        :avocado: tags=hw,medium
+        :avocado: tags=osa,offline_reintegration
+        :avocado: tags=OSAOfflineReintegration,test_osa_offline_reintegrate_with_multiple_ranks
+        """
+        self.log.info("Offline Reintegration : Test with multiple ranks")
+        self.run_offline_reintegration_test(1, data=True, multiple_ranks=True)
diff --git a/src/tests/ftest/osa/online_drain.py b/src/tests/ftest/osa/online_drain.py
index 05286d71b9f..23c3fb6e6cb 100644
--- a/src/tests/ftest/osa/online_drain.py
+++ b/src/tests/ftest/osa/online_drain.py
@@ -202,6 +202,6 @@ def test_osa_online_drain_with_multiple_ranks(self):
         :avocado: tags=osa,osa_drain,online_drain
         :avocado: tags=OSAOnlineDrain,test_osa_online_drain_with_multiple_ranks
         """
-        self.log.info("Online Drain : Test with mutiple ranks")
+        self.log.info("Online Drain : Test with multiple ranks")
         self.multiple_ranks = self.params.get("rank_list", '/run/multiple_ranks/*')
-        self.run_online_drain_test(1, data=True, multiple_ranks=True) \ No newline at end of file
+        self.run_online_drain_test(1, multiple_ranks=True)
diff --git a/src/tests/ftest/osa/online_drain.yaml b/src/tests/ftest/osa/online_drain.yaml
index 911d257c950..c8509438bc5 100644
--- a/src/tests/ftest/osa/online_drain.yaml
+++ b/src/tests/ftest/osa/online_drain.yaml
@@ -91,4 +91,4 @@ rebuild:
 checksum:
   test_with_checksum: false
 mutliple_ranks:
-  rank_list: ["1, 2"] \ No newline at end of file
+  rank_list: ["1, 2"]
diff --git a/src/tests/ftest/osa/online_reintegration.py b/src/tests/ftest/osa/online_reintegration.py
index 3fcc74e7629..ba4c9e2531a 100644
--- a/src/tests/ftest/osa/online_reintegration.py
+++ b/src/tests/ftest/osa/online_reintegration.py
@@ -1,5 +1,6 @@
 """
   (C) Copyright 2020-2023 Intel Corporation.
+  (C) Copyright 2025 Hewlett Packard Enterprise Development LP
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """
@@ -43,7 +44,8 @@ def daos_racer_thread(self):
         self.daos_racer.get_params(self)
         self.daos_racer.run()
 
-    def run_online_reintegration_test(self, num_pool, racer=False, server_boot=False, oclass=None):
+    def run_online_reintegration_test(self, num_pool, racer=False, server_boot=False, oclass=None,
+                                      multiple_ranks=False):
         """Run the Online reintegration without data.
 
         Args:
@@ -52,6 +54,7 @@ def run_online_reintegration_test(self, num_pool, racer=False, server_boot=False
                 Defaults to False.
             server_boot (bool) : Perform system stop/start on a rank. Defaults to False.
             oclass (str) : daos object class string (eg: "RP_2G8"). Defaults to None.
+            multiple_ranks (bool) : Perform multiple ranks testing (Default: False)
         """
         if oclass is None:
             oclass = self.ior_cmd.dfs_oclass.value
@@ -60,8 +63,11 @@ def run_online_reintegration_test(self, num_pool, racer=False, server_boot=False
         pool = {}
         exclude_servers = (len(self.hostlist_servers) * 2) - 1
 
-        # Exclude one rank : other than rank 0.
-        rank = random.randint(1, exclude_servers)  # nosec
+        if multiple_ranks is True:
+            rank = random.sample(range(1, exclude_servers), 2)
+        else:
+            # Exclude one rank : other than rank 0.
+            rank = random.randint(1, exclude_servers)  # nosec
 
         # Start the daos_racer thread
         if racer is True:
@@ -220,3 +226,16 @@ def test_osa_online_reintegration_oclass(self):
         self.log.info("Online Reintegration : Object Class")
         for oclass in self.test_oclass:
             self.run_online_reintegration_test(1, oclass=oclass)
+
+    def test_osa_online_reintegration_with_multiple_ranks(self):
+        """Test ID: DAOS-4753.
+
+        Test Description: Validate online reintegration with multiple ranks.
+
+        :avocado: tags=all,daily_regression
+        :avocado: tags=hw,medium
+        :avocado: tags=osa,checksum,online_reintegration
+        :avocado: tags=OSAOnlineReintegration,test_osa_online_reintegration_with_multiple_ranks
+        """
+        self.log.info("Online Reintegration : Multiple ranks")
+        self.run_online_reintegration_test(1, multiple_ranks=True)
diff --git a/utils/githooks/pre-commit.d/70-isort.sh b/utils/githooks/pre-commit.d/70-isort.sh
index 986e4642fa9..eb511f61013 100755
--- a/utils/githooks/pre-commit.d/70-isort.sh
+++ b/utils/githooks/pre-commit.d/70-isort.sh
@@ -21,7 +21,17 @@ fi
 if ! command -v isort > /dev/null 2>&1; then
     echo "isort not installed. Install isort command to improve pre-commit checks:"
     echo "    python3 -m pip install -r ./utils/cq/requirements.txt"
+    . /etc/os-release
+    if [ "$ID" = "fedora" ]; then
+        echo "  or"
+        echo "    dnf install python3-isort"
+    fi
+
     exit 0
 fi
 
 echo "Checking if python imports are sorted"
+if ! echo "$py_files" | xargs -r isort --check-only --jobs 8; then
+    echo "  isort check failed, run 'isort --jobs 8 .' to fix."
+    exit 1
+fi

From 6e82a389b7053afe6096a9f8741a6be6cb3cf497 Mon Sep 17 00:00:00 2001
From: rpadma2
Date: Mon, 3 Feb 2025 17:57:51 -0500
Subject: [PATCH 8/8] DAOS-4753 test: Merge with latest changes.
Test-tag: test_osa_online_drain_with_multiple_ranks
 test_osa_offline_drain_with_multiple_ranks
 test_osa_offline_reintegrate_with_multiple_ranks
 test_osa_online_reintegration_with_multiple_ranks
Skip-unit-tests: true
Signed-off-by: rpadma2
---
 src/tests/ftest/util/osa_utils.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/tests/ftest/util/osa_utils.py b/src/tests/ftest/util/osa_utils.py
index 5f66c4ca6a3..410b6ce46a2 100644
--- a/src/tests/ftest/util/osa_utils.py
+++ b/src/tests/ftest/util/osa_utils.py
@@ -1,6 +1,5 @@
 """
   (C) Copyright 2020-2024 Intel Corporation.
-  (C) Copyright 2025 Hewlett Packard Enterprise Development LP
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 """