compression_verifier.go

package ghostferry

import (
	"crypto/md5"
	sqlorig "database/sql"
	"encoding/hex"
	"errors"
	"fmt"
	"strconv"
	"strings"

	sq "github.com/Masterminds/squirrel"
	sql "github.com/Shopify/ghostferry/sqlwrapper"
	"github.com/go-mysql-org/go-mysql/schema"
	"github.com/golang/snappy"
	"github.com/sirupsen/logrus"
)

const (
	// CompressionSnappy is used to identify Snappy
	// (https://google.github.io/snappy/) compressed column data.
	CompressionSnappy = "SNAPPY"
)

type (
	// TableColumnCompressionConfig represents the compression configuration for
	// a column in a table as table -> column -> compression-type,
	// e.g. books -> contents -> snappy.
	TableColumnCompressionConfig map[string]map[string]string
)
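
// As an illustration (the table and column names here are hypothetical,
// echoing the example above), a `books` table whose `contents` column is
// Snappy-compressed would be configured as:
//
//	config := TableColumnCompressionConfig{
//		"books": {
//			"contents": CompressionSnappy,
//		},
//	}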

// UnsupportedCompressionError is used to identify errors resulting
// from attempting to decompress unsupported algorithms.
type UnsupportedCompressionError struct {
	table     string
	column    string
	algorithm string
}

func (e UnsupportedCompressionError) Error() string {
	return fmt.Sprintf(
		"Compression algorithm: %s not supported on table: %s for column: %s",
		e.algorithm, e.table, e.column,
	)
}

// CompressionVerifier provides support for verifying the payload of compressed
// columns that may have different hashes for the same data, by decompressing
// the data before fingerprinting it.
type CompressionVerifier struct {
	logger *logrus.Entry

	supportedAlgorithms     map[string]struct{}
	tableColumnCompressions TableColumnCompressionConfig
}

// GetCompressedHashes fetches the rows identified by paginationKeys,
// decompresses the columns configured as compressed, and returns a
// fingerprint for each row, keyed by its pagination key.
//
// Hashing the decompressed payload means that two rows whose compressed bytes
// differ, but whose underlying data is identical, still produce the same
// fingerprint, so source and target data can be compared reliably.
func (c *CompressionVerifier) GetCompressedHashes(db *sql.DB, schema, table, paginationKeyColumn string, columns []schema.TableColumn, paginationKeys []uint64) (map[uint64][]byte, error) {
	c.logger.WithFields(logrus.Fields{
		"tag":   "compression_verifier",
		"table": table,
	}).Info("decompressing table data before verification")

	tableCompression := c.tableColumnCompressions[table]

	// Extract the raw rows using SQL to be decompressed
	rows, err := getRows(db, schema, table, paginationKeyColumn, columns, paginationKeys)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	// Decompress applicable columns and hash the resulting column values for comparison
	resultSet := make(map[uint64][]byte)
	for rows.Next() {
		rowData, err := ScanByteRow(rows, len(columns)+1)
		if err != nil {
			return nil, err
		}

		paginationKey, err := strconv.ParseUint(string(rowData[0]), 10, 64)
		if err != nil {
			return nil, err
		}

		// Decompress the applicable columns and then hash them together to
		// create a fingerprint. decompressedRowData holds the values of all
		// columns, decompressed where applicable, in column order.
		decompressedRowData := [][]byte{}
		for idx, column := range columns {
			if algorithm, ok := tableCompression[column.Name]; ok {
				// rowData holds the result of "SELECT paginationKey, col1, col2, ... FROM ...",
				// so the row's columns start at index 1
				decompressedColData, err := c.Decompress(table, column.Name, algorithm, rowData[idx+1])
				if err != nil {
					return nil, err
				}
				decompressedRowData = append(decompressedRowData, decompressedColData)
			} else {
				decompressedRowData = append(decompressedRowData, rowData[idx+1])
			}
		}

		// Hash the data of the row to be added to the result set
		decompressedRowHash, err := c.HashRow(decompressedRowData)
		if err != nil {
			return nil, err
		}
		resultSet[paginationKey] = decompressedRowHash
	}

	metrics.Gauge(
		"compression_verifier_decompress_rows",
		float64(len(resultSet)),
		[]MetricTag{{"table", table}},
		1.0,
	)

	c.logger.WithFields(logrus.Fields{
		"rows":  len(resultSet),
		"table": table,
	}).Debug("decompressed rows will be compared")

	return resultSet, nil
}
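
// A minimal sketch of how a caller might use the returned fingerprints; the
// names sourceHashes and targetHashes are assumptions, standing in for the
// results of two GetCompressedHashes calls (one per database) over the same
// pagination keys:
//
//	for paginationKey, sourceHash := range sourceHashes {
//		if !bytes.Equal(sourceHash, targetHashes[paginationKey]) {
//			// the row differs between source and target
//		}
//	}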

// Decompress applies the configured decompression algorithm to the given
// column data and returns the decompressed bytes.
func (c *CompressionVerifier) Decompress(table, column, algorithm string, compressed []byte) ([]byte, error) {
	switch strings.ToUpper(algorithm) {
	case CompressionSnappy:
		// A nil destination lets snappy allocate a buffer of the required size.
		return snappy.Decode(nil, compressed)
	default:
		return nil, UnsupportedCompressionError{
			table:     table,
			column:    column,
			algorithm: algorithm,
		}
	}
}
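
// A quick sanity sketch of the Snappy path (the data here is made up):
// compressing with snappy.Encode and feeding the result back through
// Decompress recovers the original bytes. Matching is case-insensitive, so
// "snappy" works as well as "SNAPPY":
//
//	compressed := snappy.Encode(nil, []byte("hello"))
//	plain, err := verifier.Decompress("books", "contents", "snappy", compressed)
//	// plain == []byte("hello"), err == nil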

// HashRow fingerprints the non-primary columns of the row to verify data equality.
func (c *CompressionVerifier) HashRow(decompressedRowData [][]byte) ([]byte, error) {
	if len(decompressedRowData) == 0 {
		return nil, errors.New("Row data to fingerprint must not be empty")
	}

	hash := md5.New()
	var rowFingerprint []byte
	for _, colData := range decompressedRowData {
		rowFingerprint = append(rowFingerprint, colData...)
	}

	_, err := hash.Write(rowFingerprint)
	if err != nil {
		return nil, err
	}

	return []byte(hex.EncodeToString(hash.Sum(nil))), nil
}
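
// For example (hypothetical input), HashRow([][]byte{[]byte("a"), []byte("b")})
// concatenates the column values into "ab" and returns the 32-character hex
// MD5 digest of that string.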

// IsCompressedTable identifies whether a table has compressed columns configured.
func (c *CompressionVerifier) IsCompressedTable(table string) bool {
	_, ok := c.tableColumnCompressions[table]
	return ok
}

func (c *CompressionVerifier) verifyConfiguredCompression(tableColumnCompressions TableColumnCompressionConfig) error {
	for table, columns := range tableColumnCompressions {
		for column, algorithm := range columns {
			if _, ok := c.supportedAlgorithms[strings.ToUpper(algorithm)]; !ok {
				return &UnsupportedCompressionError{
					table:     table,
					column:    column,
					algorithm: algorithm,
				}
			}
		}
	}

	return nil
}

// NewCompressionVerifier verifies that every configured compression algorithm
// is supported before returning an initialized CompressionVerifier.
func NewCompressionVerifier(tableColumnCompressions TableColumnCompressionConfig) (*CompressionVerifier, error) {
	supportedAlgorithms := make(map[string]struct{})
	supportedAlgorithms[CompressionSnappy] = struct{}{}

	compressionVerifier := &CompressionVerifier{
		logger:                  logrus.WithField("tag", "compression_verifier"),
		supportedAlgorithms:     supportedAlgorithms,
		tableColumnCompressions: tableColumnCompressions,
	}

	if err := compressionVerifier.verifyConfiguredCompression(tableColumnCompressions); err != nil {
		return nil, err
	}

	return compressionVerifier, nil
}
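
// A hedged usage sketch: constructing a verifier from a hypothetical
// configuration and checking a table. NewCompressionVerifier fails fast if a
// configured algorithm is not in the supported set:
//
//	verifier, err := NewCompressionVerifier(TableColumnCompressionConfig{
//		"books": {"contents": "snappy"},
//	})
//	if err != nil {
//		// an unsupported algorithm was configured
//	}
//	if verifier.IsCompressedTable("books") {
//		// hashes for this table must come from GetCompressedHashes
//	}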

func getRows(db *sql.DB, schema, table, paginationKeyColumn string, columns []schema.TableColumn, paginationKeys []uint64) (*sqlorig.Rows, error) {
	quotedPaginationKey := QuoteField(paginationKeyColumn)

	query, args, err := rowSelector(columns, paginationKeyColumn).
		From(QuotedTableNameFromString(schema, table)).
		Where(sq.Eq{quotedPaginationKey: paginationKeys}).
		OrderBy(quotedPaginationKey).
		ToSql()
	if err != nil {
		return nil, err
	}

	// This query must be a prepared query. If it is not, querying will use
	// MySQL's plain text interface, which will scan all values into []uint8
	// if we give it []interface{}.
	stmt, err := db.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	rows, err := stmt.Query(args...)
	if err != nil {
		return nil, err
	}

	return rows, nil
}

func rowSelector(columns []schema.TableColumn, paginationKeyColumn string) sq.SelectBuilder {
	columnStrs := make([]string, len(columns))
	for idx, column := range columns {
		// Quote each column name so reserved words or special characters
		// cannot break the generated SQL.
		columnStrs[idx] = QuoteField(column.Name)
	}

	return sq.Select(fmt.Sprintf("%s, %s", QuoteField(paginationKeyColumn), strings.Join(columnStrs, ",")))
}
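
// For a hypothetical table `store`.`books` with pagination key `id` and
// columns `title` and `contents`, the builder above yields a query of this
// shape (squirrel expands the IN clause to one placeholder per key):
//
//	SELECT `id`, `title`,`contents` FROM `store`.`books` WHERE `id` IN (?,?,?) ORDER BY `id`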