handlers.go
package main

import (
	"bytes"
	"context"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	btcec "github.com/btcsuite/btcd/btcec/v2"
	logrus "github.com/sirupsen/logrus"
	ecrpc "github.com/ziggie1984/Distributed-Mission-Control-for-LND/ecrpc"
	bbolt "go.etcd.io/bbolt"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

const (
	// PubKeyCompressedSize is the size of a single compressed sec pub key
	// in bytes.
	PubKeyCompressedSize = 33

	// PubKeyCompressedSizeDouble is the size of compressed sec pub keys
	// for both the source and destination nodes in the mission control
	// data pair.
	PubKeyCompressedSizeDouble = PubKeyCompressedSize * 2

	// mSatScale is a value that's used to scale satoshis to
	// milli-satoshis, and the other way around.
	mSatScale int64 = 1000
)
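
// For reference, the database key for a mission control pair is the 66-byte
// concatenation nodeFrom||nodeTo. A minimal sketch of how such a key is
// built and split apart again (mirroring RegisterMissionControl and
// QueryAggregatedMissionControl below):
//
//	key := [PubKeyCompressedSizeDouble]byte(
//		append(pair.NodeFrom, pair.NodeTo...),
//	)
//	nodeFrom := key[:PubKeyCompressedSize]
//	nodeTo := key[PubKeyCompressedSize:]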

// externalCoordinatorServer provides methods to register and query mission
// control data.
type externalCoordinatorServer struct {
	ecrpc.UnimplementedExternalCoordinatorServer

	config *Config
	db     *bbolt.DB
}

// NewExternalCoordinatorServer creates a new instance of
// externalCoordinatorServer.
func NewExternalCoordinatorServer(config *Config,
	db *bbolt.DB) *externalCoordinatorServer {

	return &externalCoordinatorServer{db: db, config: config}
}
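
// A minimal wiring sketch (assumptions: the bbolt database is opened and the
// DatabaseBucketName bucket is created elsewhere in this package, the config
// field names shown are illustrative, and the generated ecrpc registration
// helper follows standard gRPC naming):
//
//	db, err := bbolt.Open(cfg.Database.Path, 0600, nil)
//	if err != nil {
//		logrus.Fatalf("failed to open database: %v", err)
//	}
//	grpcServer := grpc.NewServer()
//	ecrpc.RegisterExternalCoordinatorServer(
//		grpcServer, NewExternalCoordinatorServer(cfg, db),
//	)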

// RegisterMissionControl registers mission control data. It processes a
// RegisterMissionControlRequest, aggregating user-provided pair data with
// existing data in the database, removing stale history pairs and storing
// the aggregated result. This method ensures data consistency and improves
// performance by using batch operations instead of individual updates.
func (s *externalCoordinatorServer) RegisterMissionControl(ctx context.Context,
	req *ecrpc.RegisterMissionControlRequest) (
	*ecrpc.RegisterMissionControlResponse, error) {

	// Validate the request data first.
	if err := s.validateRegisterMissionControlRequest(req); err != nil {
		return nil, err
	}

	// Log the incoming request along with its number of pairs.
	logrus.Infof("Received RegisterMissionControl request with %d pairs",
		len(req.Pairs))

	// Sanitize the request data by filtering out pairs with stale
	// history.
	stalePairsRemoved := s.sanitizeRegisterMissionControlRequest(req)

	// Log how many stale history pairs were removed from the request, if
	// any.
	if stalePairsRemoved != 0 {
		logrus.Infof("Removed %d stale history pairs",
			stalePairsRemoved)
	}

	// Initialize a map to aggregate mission control data.
	aggregatedData := make(
		map[[PubKeyCompressedSizeDouble]byte]*ecrpc.PairData,
	)

	// Use Batch over Update to reduce tx commit overhead and database
	// locking, enhancing performance and responsiveness under high write
	// loads.
	err := s.db.Batch(func(tx *bbolt.Tx) error {
		b := tx.Bucket([]byte(DatabaseBucketName))

		// Retrieve all data from the database in order to aggregate
		// it later with the user-registered data.
		err := b.ForEach(func(k, v []byte) error {
			// Unmarshal the pair history data.
			history := &ecrpc.PairData{}
			if err := json.Unmarshal(v, history); err != nil {
				msg := "failed to unmarshal history data: %v"
				logrus.Errorf(msg, err)
				return status.Errorf(codes.Internal, msg, err)
			}

			aggregatedData[[PubKeyCompressedSizeDouble]byte(k)] =
				history

			return nil
		})
		if err != nil {
			msg := "error while retrieving all data in the " +
				"bucket to aggregate them with user " +
				"registered data: %v"
			logrus.Errorf(msg, err)
			return status.Errorf(codes.Internal, msg, err)
		}

		// Aggregate all data in the database with the user-registered
		// data.
		for _, pair := range req.Pairs {
			// Aggregate the data based on the key.
			key := [PubKeyCompressedSizeDouble]byte(
				append(pair.NodeFrom, pair.NodeTo...),
			)
			if existingData, ok := aggregatedData[key]; ok {
				// If data for the key exists, merge it with
				// the current data.
				mergePairData(existingData, pair.History)
			} else {
				// If no data exists for the key, set it.
				aggregatedData[key] = pair.History
			}
		}

		// Store the aggregated data.
		for key, value := range aggregatedData {
			// Marshal the pair history data.
			data, err := json.Marshal(value)
			if err != nil {
				msg := "failed to marshal history data: %v"
				logrus.Errorf(msg, err)
				return status.Errorf(codes.Internal, msg, err)
			}

			// Store the aggregated data point in the database.
			if err := b.Put(key[:], data); err != nil {
				msg := "failed to store data in the bucket: %v"
				logrus.Errorf(msg, err)
				return status.Errorf(codes.Internal, msg, err)
			}
		}

		// Log how many pairs were processed and stored.
		logrus.Infof("%d pairs were processed and stored successfully",
			len(req.Pairs))

		return nil
	})
	if err != nil {
		msg := "batch operation failed: %v"
		logrus.Errorf(msg, err)
		return nil, status.Errorf(codes.Internal, msg, err)
	}

	// Construct the registration success message indicating the number of
	// pairs registered.
	successMessage := fmt.Sprintf("Successfully registered %d pairs",
		len(req.Pairs))

	// If stale pairs were removed, update the registration success
	// message to include the number of pairs removed.
	if stalePairsRemoved > 0 {
		successMessage = fmt.Sprintf("%s and removed %d stale pairs",
			successMessage, stalePairsRemoved)
	}

	// Construct the RegisterMissionControlResponse with the success
	// message.
	response := &ecrpc.RegisterMissionControlResponse{
		SuccessMessage: successMessage,
	}

	return response, nil
}
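
// A minimal client-side sketch of registering a single pair (illustrative;
// client is a generated ecrpc client, and the fields shown follow the
// validation rules enforced above):
//
//	_, err := client.RegisterMissionControl(ctx,
//		&ecrpc.RegisterMissionControlRequest{
//			Pairs: []*ecrpc.PairHistory{{
//				NodeFrom: nodeFromPubKey, // 33-byte compressed key.
//				NodeTo:   nodeToPubKey,   // 33-byte compressed key.
//				History: &ecrpc.PairData{
//					SuccessTime:    time.Now().Unix(),
//					SuccessAmtMsat: 100_000,
//				},
//			}},
//		},
//	)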

// QueryAggregatedMissionControl queries aggregated mission control data,
// streaming it back to the client in batches.
func (s *externalCoordinatorServer) QueryAggregatedMissionControl(
	req *ecrpc.QueryAggregatedMissionControlRequest,
	stream ecrpc.ExternalCoordinator_QueryAggregatedMissionControlServer) error {

	// Log the receipt of the query request.
	logrus.Info("Received QueryAggregatedMissionControl request")

	var pairs []*ecrpc.PairHistory
	err := s.db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket([]byte(DatabaseBucketName))

		// Pre-allocate memory for the pairs slice based on the
		// estimated number of key-value pairs in the bucket. This
		// ensures sufficient capacity to hold all key-value pairs
		// without resizing during iteration.
		//
		// NOTE: The number of estimated keys retrieved may be less or
		// greater than the actual number of keys in the db.
		pairs = make([]*ecrpc.PairHistory, 0, b.Stats().KeyN)

		err := b.ForEach(func(k, v []byte) error {
			history := &ecrpc.PairData{}
			if err := json.Unmarshal(v, history); err != nil {
				msg := "failed to unmarshal history data: %v"
				logrus.Errorf(msg, err)
				return status.Errorf(codes.Internal, msg, err)
			}

			nodeFrom := k[:PubKeyCompressedSize]
			nodeTo := k[PubKeyCompressedSize:]
			pair := &ecrpc.PairHistory{
				NodeFrom: nodeFrom,
				NodeTo:   nodeTo,
				History:  history,
			}
			pairs = append(pairs, pair)

			// If the batch size is reached, send the batch.
			batch := s.config.Server.QueryMissionControlBatchSize
			if len(pairs) == batch {
				response := &ecrpc.QueryAggregatedMissionControlResponse{
					Pairs: pairs,
				}
				if err := stream.Send(response); err != nil {
					return status.Errorf(codes.Internal,
						"failed to send batch: %v",
						err)
				}

				// Log the number of pairs retrieved.
				logrus.Infof("Retrieved %d pairs from the "+
					"database", len(pairs))

				// Clear the pairs slice for the next batch
				// while maintaining the same original
				// capacity.
				pairs = pairs[:0]
			}

			return nil
		})
		if err != nil {
			msg := "error while iterating through bucket: %v"
			logrus.Errorf(msg, err)
			return status.Errorf(codes.Internal, msg, err)
		}

		// Send any remaining pairs as the final batch.
		if len(pairs) > 0 {
			response := &ecrpc.QueryAggregatedMissionControlResponse{
				Pairs: pairs,
			}
			if err := stream.Send(response); err != nil {
				return status.Errorf(codes.Internal, "failed "+
					"to send final batch: %v", err)
			}

			// Log the number of pairs retrieved.
			logrus.Infof("Retrieved %d pairs from the database",
				len(pairs))
		}

		return nil
	})
	if err != nil {
		msg := "query failed: %v"
		logrus.Errorf(msg, err)
		return status.Errorf(codes.Internal, msg, err)
	}

	return nil
}
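
// A minimal client-side sketch for consuming the stream (illustrative;
// assumes the standard generated streaming client, with "io" imported on the
// client side and process being a hypothetical handler):
//
//	stream, err := client.QueryAggregatedMissionControl(
//		ctx, &ecrpc.QueryAggregatedMissionControlRequest{},
//	)
//	if err != nil {
//		return err
//	}
//	for {
//		resp, err := stream.Recv()
//		if errors.Is(err, io.EOF) {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		process(resp.Pairs)
//	}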

// RunCleanupRoutine runs a routine to clean up stale data from the database
// periodically, depending on the configured cleanup interval.
func (s *externalCoordinatorServer) RunCleanupRoutine(ctx context.Context,
	ticker *time.Ticker) {

	staleDataCleanupIntervalFormatted := formatDuration(
		s.config.Server.StaleDataCleanupInterval,
	)
	logrus.Infof("Cleanup routine started to remove stale mission "+
		"control data from the database on an interval of: %s",
		staleDataCleanupIntervalFormatted)

	// Run the cleanup routine immediately before starting the ticker.
	s.cleanupStaleData()

	// Start a goroutine to handle the cleanup routine.
	go func() {
		for {
			select {
			case <-ctx.Done():
				// Exit the goroutine if the context is
				// canceled.
				return

			case <-ticker.C:
				// Run the cleanup routine when the ticker
				// ticks.
				s.cleanupStaleData()
			}
		}
	}()
}
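
// A minimal wiring sketch for the cleanup routine (illustrative; the ticker
// interval is the same config value that is logged above):
//
//	ticker := time.NewTicker(cfg.Server.StaleDataCleanupInterval)
//	defer ticker.Stop()
//	server.RunCleanupRoutine(ctx, ticker)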

// cleanupStaleData cleans up stale mission control data from the database.
// It iterates through the database and removes stale data entries.
func (s *externalCoordinatorServer) cleanupStaleData() {
	logrus.Infof("Running cleanup routine to remove stale mission " +
		"control data from the database...")

	// Initialize a counter to track the number of stale pairs removed.
	stalePairsRemoved := 0

	// Start a read-write transaction on the database.
	err := s.db.Update(func(tx *bbolt.Tx) error {
		b := tx.Bucket([]byte(DatabaseBucketName))

		// Iterate through all key-value pairs in the bucket with a
		// cursor. A cursor is used rather than ForEach because bbolt
		// documents that the bucket must not be modified from within
		// ForEach, while Cursor.Delete is safe during iteration.
		c := b.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			history := &ecrpc.PairData{}
			if err := json.Unmarshal(v, history); err != nil {
				msg := "failed to unmarshal history data: %v"
				logrus.Errorf(msg, err)
				return status.Errorf(codes.Internal, msg, err)
			}

			isStale := isHistoryStale(
				history,
				s.config.Server.HistoryThresholdDuration,
			)
			if !isStale {
				continue
			}

			// The pair is stale, so delete it from the bucket. A
			// failed deletion is logged and skipped so that the
			// remaining entries are still processed.
			if err := c.Delete(); err != nil {
				logrus.Errorf("failed to delete stale "+
					"mission control data from the "+
					"bucket: %v", err)
				continue
			}

			logrus.Debugf("Stale data removed for key: %s",
				hex.EncodeToString(k))

			stalePairsRemoved++
		}

		return nil
	})
	if err != nil {
		logrus.Errorf("cleanup routine failed: %v", err)
		return
	}

	logrus.Infof("Cleanup routine completed successfully and %d pairs "+
		"were removed", stalePairsRemoved)
}

// validateRegisterMissionControlRequest checks the integrity and correctness
// of the RegisterMissionControlRequest.
func (s *externalCoordinatorServer) validateRegisterMissionControlRequest(
	req *ecrpc.RegisterMissionControlRequest) error {

	if req == nil {
		return status.Errorf(codes.InvalidArgument, "request cannot "+
			"be nil")
	}

	if len(req.Pairs) == 0 {
		return status.Errorf(codes.InvalidArgument, "request must "+
			"include at least one pair")
	}

	// Flag to track if all pairs are older than the configured threshold.
	allStale := true

	for _, pair := range req.Pairs {
		// Validate that NodeFrom is exactly 33 bytes, i.e. a
		// compressed sec pub key.
		if len(pair.NodeFrom) != PubKeyCompressedSize {
			return status.Errorf(codes.InvalidArgument, "NodeFrom "+
				"must be exactly %d bytes",
				PubKeyCompressedSize,
			)
		}

		// Validate that NodeTo is exactly 33 bytes, i.e. a compressed
		// sec pub key.
		if len(pair.NodeTo) != PubKeyCompressedSize {
			return status.Errorf(codes.InvalidArgument, "NodeTo "+
				"must be exactly %d bytes",
				PubKeyCompressedSize,
			)
		}

		// Validate the NodeFrom public key.
		_, err := btcec.ParsePubKey(pair.NodeFrom)
		if err != nil {
			return status.Errorf(codes.InvalidArgument, "invalid "+
				"NodeFrom public key: %v", err,
			)
		}

		// Validate the NodeTo public key.
		_, err = btcec.ParsePubKey(pair.NodeTo)
		if err != nil {
			return status.Errorf(codes.InvalidArgument, "invalid "+
				"NodeTo public key: %v", err,
			)
		}

		// Prettify the NodeFrom and NodeTo pair for error messages.
		pairPrefix := fmt.Sprintf("pair: %s -> %s",
			hex.EncodeToString(pair.NodeFrom),
			hex.EncodeToString(pair.NodeTo),
		)

		// Validate that NodeFrom and NodeTo are not equal.
		if bytes.Equal(pair.NodeFrom, pair.NodeTo) {
			return status.Errorf(codes.InvalidArgument, "%s: "+
				"source and destination node must differ",
				pairPrefix)
		}

		// Validate the history data.
		if pair.History == nil {
			return status.Errorf(codes.InvalidArgument, "%s: "+
				"History cannot be nil", pairPrefix)
		}

		// Validate that fail and success amounts are non-negative.
		if pair.History.FailAmtSat < 0 ||
			pair.History.SuccessAmtSat < 0 ||
			pair.History.FailAmtMsat < 0 ||
			pair.History.SuccessAmtMsat < 0 {

			return status.Errorf(
				codes.InvalidArgument, "%s: Fail and success "+
					"amounts must be non-negative",
				pairPrefix,
			)
		}

		// Check that the failure timestamp and amount are consistent
		// with each other.
		failMsat, failTime, err := validatePair(
			pair.History.FailAmtMsat, pair.History.FailAmtSat,
			pair.History.FailTime, true,
		)
		if err != nil {
			return status.Errorf(codes.InvalidArgument, "%s: "+
				"invalid failure: %v", pairPrefix, err)
		}

		// Check that the success timestamp and amount are consistent
		// with each other.
		successMsat, successTime, err := validatePair(
			pair.History.SuccessAmtMsat,
			pair.History.SuccessAmtSat,
			pair.History.SuccessTime, false,
		)
		if err != nil {
			return status.Errorf(codes.InvalidArgument, "%s: "+
				"invalid success: %v", pairPrefix, err)
		}

		// Return an error if neither a success nor a failure amount
		// is set.
		if successMsat == 0 && failMsat == 0 {
			return status.Errorf(codes.InvalidArgument, "%s: "+
				"either success or failure result required",
				pairPrefix)
		}

		// Update the fail pair based on the validated time and amount
		// values.
		pair.History.FailAmtMsat = failMsat
		pair.History.FailAmtSat = failMsat / mSatScale
		pair.History.FailTime = failTime

		// Update the success pair based on the validated time and
		// amount values.
		pair.History.SuccessAmtMsat = successMsat
		pair.History.SuccessAmtSat = successMsat / mSatScale
		pair.History.SuccessTime = successTime

		// Check whether the history data is stale according to the
		// configured threshold duration.
		isStale := isHistoryStale(
			pair.History, s.config.Server.HistoryThresholdDuration,
		)
		if !isStale {
			// At least one pair is within the threshold.
			allStale = false
		}
	}

	// If all history data pairs are older than the configured threshold,
	// return an error indicating that none of the pairs can be
	// registered.
	if allStale {
		historyThresholdDurationFormatted := formatDuration(
			s.config.Server.HistoryThresholdDuration,
		)
		return status.Errorf(codes.InvalidArgument, "All history data "+
			"pairs exceed the configured threshold of %s and "+
			"cannot be registered",
			historyThresholdDurationFormatted,
		)
	}

	return nil
}

// sanitizeRegisterMissionControlRequest sanitizes the RegisterMissionControl
// request by filtering out pairs with stale history and returns the number
// of stale pairs removed.
func (s *externalCoordinatorServer) sanitizeRegisterMissionControlRequest(
	req *ecrpc.RegisterMissionControlRequest) int {

	// Initialize a counter to track the number of stale pairs removed.
	stalePairsRemoved := 0

	// Iterate through the pairs in reverse order to safely remove
	// elements.
	for i := len(req.Pairs) - 1; i >= 0; i-- {
		pair := req.Pairs[i]
		isStale := isHistoryStale(
			pair.History, s.config.Server.HistoryThresholdDuration,
		)
		if isStale {
			// If the pair is stale, remove it from the slice.
			req.Pairs = append(req.Pairs[:i], req.Pairs[i+1:]...)

			// Increment the counter for stale pairs removed.
			stalePairsRemoved++
		}
	}

	// Return the number of stale pairs removed.
	return stalePairsRemoved
}
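
// An equivalent in-place filter sketch (a common Go idiom, shown only as an
// alternative to the reverse-loop-and-append approach above, which shifts
// the tail of the slice on every removal):
//
//	kept := req.Pairs[:0]
//	for _, pair := range req.Pairs {
//		if !isHistoryStale(pair.History, threshold) {
//			kept = append(kept, pair)
//		}
//	}
//	stalePairsRemoved := len(req.Pairs) - len(kept)
//	req.Pairs = kept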

// validatePair validates the values provided for a mission control result and
// returns the msat amount and timestamp for it. `isFailure` can be used to
// default values to 0 instead of returning an error.
func validatePair(amtMsat int64, amtSat int64, timestamp int64,
	isFailure bool) (int64, int64, error) {

	amtMsat, err := validateMsatPairValue(amtMsat, amtSat)
	if err != nil {
		return 0, 0, err
	}

	var (
		timeSet   = timestamp != 0
		amountSet = amtMsat != 0
	)

	switch {
	// If both a timestamp and an amount are provided, return those
	// values.
	case timeSet && amountSet:
		return amtMsat, timestamp, nil

	// Return an error if there is a timestamp without an amount and the
	// pair is not expected to be a failure.
	case !isFailure && timeSet && !amountSet:
		return 0, 0, errors.New("non-zero timestamp requires " +
			"non-zero amount for success pairs")

	// Return an error if there is an amount without a timestamp and the
	// pair is not expected to be a failure.
	case !isFailure && !timeSet && amountSet:
		return 0, 0, errors.New("non-zero amount for success pairs " +
			"requires non-zero timestamp")

	default:
		return 0, 0, nil
	}
}
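
// Worked examples, derived directly from the switch above:
//
//	validatePair(5_000, 5, 1700000000, false) // (5_000, 1700000000, nil)
//	validatePair(0, 0, 1700000000, false)     // error: timestamp without amount
//	validatePair(5_000, 5, 0, false)          // error: amount without timestamp
//	validatePair(5_000, 5, 0, true)           // (0, 0, nil): failures default to 0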

// validateMsatPairValue validates the msat and sat values set for a pair and
// ensures that the values provided are either the same, or only a single
// value is set.
func validateMsatPairValue(msatValue int64, satValue int64) (int64, error) {
	// If our msat value converted to sats equals our sat value, we just
	// return the msat value, since the values are the same.
	if msatValue/mSatScale == satValue {
		return msatValue, nil
	}

	// If we have no msat value, we can just return our sat value even if
	// it is zero, because it's impossible that we have mismatched values.
	if msatValue == 0 {
		return satValue * mSatScale, nil
	}

	// Likewise, we can just use the msat value if we have no sat value
	// set.
	if satValue == 0 {
		return msatValue, nil
	}

	// If our values are non-zero but not equal, we have invalid amounts
	// set, so we fail.
	return 0, status.Errorf(codes.InvalidArgument, "msat: %v and sat: %v "+
		"values not equal", msatValue, satValue)
}
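
// Worked examples, derived directly from the checks above:
//
//	validateMsatPairValue(5_000, 5) // 5_000: values agree.
//	validateMsatPairValue(0, 5)     // 5_000: only sats set, scale up.
//	validateMsatPairValue(5_000, 0) // 5_000: only msats set.
//	validateMsatPairValue(5_000, 4) // error: non-zero and not equal.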

// isHistoryStale checks if the history data pair is stale according to the
// configured threshold.
func isHistoryStale(history *ecrpc.PairData, threshold time.Duration) bool {
	// Obtain the most recent UNIX timestamp reflecting temporal locality
	// from the fail_time and success_time fields of the pair's history
	// data. This timestamp will be used to determine whether the pair's
	// history is stale or not.
	recentTimestamp := mostRecentUnixTimestamp(
		history.FailTime, history.SuccessTime,
	)

	// Check if the current history data pair is stale according to the
	// configured threshold duration.
	return time.Unix(recentTimestamp, 0).Before(time.Now().Add(-threshold))
}
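
// For example, with a 24h threshold a pair whose most recent result is 25
// hours old is stale, while one updated 23 hours ago is kept:
//
//	history := &ecrpc.PairData{
//		SuccessTime: time.Now().Add(-25 * time.Hour).Unix(),
//	}
//	isHistoryStale(history, 24*time.Hour) // true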

// mergePairData merges the pair data from two pairs based on the most recent
// timestamp. It does the following:
//   - It updates the success time and amounts if there are more recent
//     history pairs, ensuring that the maximum success amount of the history
//     pair is retained to prevent the success range from shrinking
//     unnecessarily.
//   - It prevents the failure from updating too soon based on the configured
//     MinFailureRelaxInterval value.
//   - It also adjusts the failure range if the success amount goes into the
//     failure range, and adjusts the success range if the failure amount
//     goes into the success range.
//
// Parameters:
//   - existingData: the existing pair data to merge into.
//   - newData: the new pair data to merge from.
func mergePairData(existingData, newData *ecrpc.PairData) {
	if newData.SuccessTime > existingData.SuccessTime {
		// Update success time and amounts if newer, retaining the max
		// success amount to avoid shrinking the success range
		// unnecessarily.
		existingData.SuccessTime = newData.SuccessTime
		if newData.SuccessAmtMsat > existingData.SuccessAmtMsat {
			existingData.SuccessAmtMsat = newData.SuccessAmtMsat
		}
	}

	if newData.FailTime > existingData.FailTime {
		// Drop the result if it would increase the failure amount too
		// soon after a previous failure. This can happen if htlc
		// results come in out of order. This check makes it easier
		// for payment processes to converge to a final state.
		newFailureTimestamp := time.Unix(newData.FailTime, 0)
		currentFailureTimestamp := time.Unix(existingData.FailTime, 0)
		failInterval := newFailureTimestamp.Sub(
			currentFailureTimestamp,
		)
		if newData.FailAmtMsat > existingData.FailAmtMsat &&
			failInterval < MinFailureRelaxInterval {

			logrus.Debugf("Ignoring higher amount failure within "+
				"min failure relaxation interval: "+
				"prev_fail_amt=%v, fail_amt=%v, interval=%v",
				existingData.FailAmtMsat, newData.FailAmtMsat,
				failInterval)

			return
		}

		existingData.FailTime = newData.FailTime
		existingData.FailAmtMsat = newData.FailAmtMsat

		switch {
		// The failure amount is set to zero when the failure is
		// amount-independent, meaning that the attempt would have
		// failed regardless of the amount. This should also reset the
		// success amount to zero.
		case newData.FailAmtMsat == 0:
			existingData.SuccessAmtMsat = 0

		// If the failure range goes into the success range, move the
		// success range down.
		case newData.FailAmtMsat <= existingData.SuccessAmtMsat:
			existingData.SuccessAmtMsat = newData.FailAmtMsat - 1
		}
	}

	// Move the failure range up if the success amount goes into the
	// failure range. We don't want to clear the failure completely
	// because we haven't learnt much for amounts above the current
	// success amount.
	if existingData.FailTime != 0 &&
		newData.SuccessAmtMsat >= existingData.FailAmtMsat {

		existingData.FailAmtMsat = newData.SuccessAmtMsat + 1
	}

	// Update the success and failure satoshi amounts from the
	// millisatoshi values, discarding fractions of a satoshi.
	existingData.SuccessAmtSat = existingData.SuccessAmtMsat / mSatScale
	existingData.FailAmtSat = existingData.FailAmtMsat / mSatScale
}
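
// A worked example of the merge rules above: an existing pair with
// SuccessAmtMsat=200_000 (SuccessTime=1_000) receives newer data with
// FailAmtMsat=150_000 (FailTime=2_000, outside MinFailureRelaxInterval).
// The failure is recorded and, because the failure amount falls inside the
// success range, SuccessAmtMsat is moved down to 149_999 so the two ranges
// stay disjoint; the satoshi fields are then rederived as 149 and 150.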