<?xml version="1.0" encoding="UTF-8" standalone="no"?><configuration>
<property><name>hbase.regionserver.catalog.timeout</name><value>600000</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rest.filter.classes</name><value>org.apache.hadoop.hbase.rest.filter.GzipFilter</value><source>programatically</source></property>
<property><name>io.bytes.per.checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hbase.status.publisher.class</name><value>org.apache.hadoop.hbase.master.ClusterStatusPublisher$MulticastPublisher</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.ipc.client.tcpnodelay</name><value>true</value><source>programatically</source></property>
<property><name>hbase.regions.slop</name><value>0.2</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rs.cacheblocksonwrite</name><value>false</value><source>programatically</source></property>
<property><name>hbase.zookeeper.leaderport</name><value>3888</value><source>hbase-default.xml</source></property>
<property><name>hbase.regionserver.info.port</name><value>60030</value><source>hbase-default.xml</source></property>
<property><name>fs.AbstractFileSystem.file.impl</name><value>org.apache.hadoop.fs.local.LocalFs</value><source>core-default.xml</source></property>
<property><name>fs.du.interval</name><value>600000</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.dns.nameserver</name><value>default</value><source>programatically</source></property>
<property><name>hbase.rs.cacheblocksonwrite</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hstore.checksum.algorithm</name><value>CRC32</value><source>programatically</source></property>
<property><name>hadoop.ssl.keystores.factory.class</name><value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value><source>core-default.xml</source></property>
<property><name>hadoop.http.authentication.kerberos.keytab</name><value>${user.home}/hadoop.keytab</value><source>core-default.xml</source></property>
<property><name>hbase.rpc.server.engine</name><value>org.apache.hadoop.hbase.ipc.ProtobufRpcServerEngine</value><source>hbase-default.xml</source></property>
<property><name>io.seqfile.sorter.recordlimit</name><value>1000000</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.keyvalue.maxsize</name><value>10485760</value><source>programatically</source></property>
<property><name>ipc.client.connect.retry.interval</name><value>1000</value><source>core-default.xml</source></property>
<property><name>s3.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hbase.data.umask.enable</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.master.dns.nameserver</name><value>default</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.metrics.exposeOperationTimes</name><value>true</value><source>programatically</source></property>
<property><name>hbase.lease.recovery.dfs.timeout</name><value>64000</value><source>hbase-default.xml</source></property>
<property><name>hbase.status.listener.class</name><value>org.apache.hadoop.hbase.client.ClusterStatusListener$MulticastListener</value><source>hbase-default.xml</source></property>
<property><name>io.map.index.interval</name><value>128</value><source>core-default.xml</source></property>
<property><name>hfile.format.version</name><value>2</value><source>hbase-default.xml</source></property>
<property><name>s3.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>hbase.master.catalog.timeout</name><value>600000</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hstore.compactionThreshold</name><value>3</value><source>programatically</source></property>
<property><name>hbase.hregion.majorcompaction.jitter</name><value>0.50</value><source>hbase-default.xml</source></property>
<property><name>hbase.auth.token.max.lifetime</name><value>604800000</value><source>hbase-default.xml</source></property>
<property><name>ha.zookeeper.session-timeout.ms</name><value>5000</value><source>core-default.xml</source></property>
<property><name>hbase.master.loadbalancer.class</name><value>org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer</value><source>hbase-default.xml</source></property>
<property><name>s3.replication</name><value>3</value><source>core-default.xml</source></property>
<property><name>hfile.block.bloom.cacheonwrite</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.peerport</name><value>2888</value><source>programatically</source></property>
<property><name>hadoop.ssl.enabled</name><value>false</value><source>core-default.xml</source></property>
<property><name>hbase.lease.recovery.timeout</name><value>900000</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rpc.server.engine</name><value>org.apache.hadoop.hbase.ipc.ProtobufRpcServerEngine</value><source>programatically</source></property>
<property><name>hadoop.security.groups.cache.warn.after.ms</name><value>5000</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hregion.memstore.mslab.enabled</name><value>true</value><source>programatically</source></property>
<property><name>ipc.client.connect.max.retries.on.timeouts</name><value>45</value><source>core-default.xml</source></property>
<property><name>hbase.hregion.memstore.flush.size</name><value>134217728</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hregion.preclose.flush.size</name><value>5242880</value><source>programatically</source></property>
<property><name>hbase.regionserver.logroll.period</name><value>3600000</value><source>hbase-default.xml</source></property>
<property><name>fs.trash.interval</name><value>0</value><source>core-default.xml</source></property>
<property><name>ha.health-monitor.check-interval.ms</name><value>1000</value><source>core-default.xml</source></property>
<property><name>hadoop.jetty.logs.serve.aliases</name><value>true</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.hlog.writer.impl</name><value>org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter</value><source>programatically</source></property>
<property><name>hadoop.http.authentication.kerberos.principal</name><value>HTTP/_HOST@LOCALHOST</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.auth.token.max.lifetime</name><value>604800000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.local.dir</name><value>${hbase.tmp.dir}/local/</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.defaults.for.version</name><value>0.98.1-cdh5.1.0</value><source>programatically</source></property>
<property><name>hbase.dynamic.jars.dir</name><value>${hbase.rootdir}/lib</value><source>hbase-default.xml</source></property>
<property><name>s3native.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hbase.regionserver.dns.interface</name><value>default</value><source>hbase-default.xml</source></property>
<property><name>ha.health-monitor.sleep-after-disconnect.ms</name><value>1000</value><source>core-default.xml</source></property>
<property><name>hbase.hstore.blockingStoreFiles</name><value>10</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.balancer.period</name><value>300000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.region.split.policy</name><value>org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.hfilecleaner.plugins</name><value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner</value><source>programatically</source></property>
<property><name>hbase.hstore.bytes.per.checksum</name><value>16384</value><source>hbase-default.xml</source></property>
<property><name>hbase.client.write.buffer</name><value>2097152</value><source>hbase-default.xml</source></property>
<property><name>hadoop.security.instrumentation.requires.admin</name><value>false</value><source>core-default.xml</source></property>
<property><name>hadoop.security.authorization</name><value>false</value><source>core-default.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.search.filter.group</name><value>(objectClass=group)</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.dns.nameserver</name><value>default</value><source>programatically</source></property>
<property><name>hbase.zookeeper.property.clientPort</name><value>2181</value><source>hbase-default.xml</source></property>
<property><name>hbase.regionserver.info.port.auto</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.logcleaner.ttl</name><value>600000</value><source>programatically</source></property>
<property><name>hadoop.security.group.mapping.ldap.search.attr.group.name</name><value>cn</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.property.syncLimit</name><value>5</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.thrift.htablepool.size.max</name><value>1000</value><source>programatically</source></property>
<property><name>hbase.master.logcleaner.ttl</name><value>600000</value><source>hbase-default.xml</source></property>
<property><name>fs.client.resolve.remote.symlinks</name><value>true</value><source>core-default.xml</source></property>
<property><name>s3native.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>tfile.fs.output.buffer.size</name><value>262144</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.server.thread.wakefrequency</name><value>10000</value><source>programatically</source></property>
<property><name>fs.AbstractFileSystem.hdfs.impl</name><value>org.apache.hadoop.fs.Hdfs</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hregion.majorcompaction</name><value>604800000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hstore.compaction.max</name><value>12</value><source>programatically</source></property>
<property><name>hadoop.security.uid.cache.secs</name><value>14400</value><source>core-default.xml</source></property>
<property><name>hbase.rootdir</name><value>${hbase.tmp.dir}/hbase</value><source>hbase-default.xml</source></property>
<property><name>hadoop.ssl.client.conf</name><value>ssl-client.xml</value><source>core-default.xml</source></property>
<property><name>hbase.zookeeper.dns.interface</name><value>default</value><source>hbase-default.xml</source></property>
<property><name>zookeeper.session.timeout</name><value>90000</value><source>hbase-default.xml</source></property>
<property><name>hbase.hstore.compaction.max</name><value>11</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.snapshot.restore.take.failsafe.snapshot</name><value>true</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.status.publisher.class</name><value>org.apache.hadoop.hbase.master.ClusterStatusPublisher$MulticastPublisher</value><source>programatically</source></property>
<property><name>fs.s3n.multipart.uploads.enabled</name><value>false</value><source>core-default.xml</source></property>
<property><name>hbase.master.info.bindAddress</name><value>0.0.0.0</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.clusters</name><value>fail1</value><source>programatically</source></property>
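<!--
  Hedged sketch, not part of the original dump: to register a second failover
  cluster, presumably this key would list it as well, e.g.
    <property><name>hbase.failover.clusters</name><value>fail1,fail2</value></property>
  with its settings supplied under the matching prefix, e.g.
    <property><name>hbase.failover.cluster.fail2.hbase.zookeeper.quorum</name><value>10.20.0.1</value></property>
  The cluster name "fail2" and the quorum address are hypothetical placeholders.
-->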
<property><name>io.native.lib.available</name><value>true</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rpc.shortoperation.timeout</name><value>10000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.info.port</name><value>60030</value><source>programatically</source></property>
<property><name>hbase.snapshot.restore.failsafe.name</name><value>hbase-failsafe-{snapshot.name}-{restore.timestamp}</value><source>hbase-default.xml</source></property>
<property><name>hbase.hstore.compactionThreshold</name><value>3</value><source>hbase-default.xml</source></property>
<property><name>io.storefile.bloom.block.size</name><value>131072</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.dns.interface</name><value>default</value><source>programatically</source></property>
<property><name>hadoop.user.group.static.mapping.overrides</name><value>dr.who=;</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.write.buffer</name><value>2097152</value><source>programatically</source></property>
<property><name>hbase.offheapcache.percentage</name><value>0</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regions.slop</name><value>0.2</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.auth.key.update.interval</name><value>86400000</value><source>programatically</source></property>
<property><name>hbase.thrift.maxQueuedRequests</name><value>1000</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hregion.memstore.flush.size</name><value>134217728</value><source>programatically</source></property>
<property><name>hbase.thrift.minWorkerThreads</name><value>16</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.security.exec.permission.checks</name><value>false</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.catalog.timeout</name><value>600000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.max.perserver.tasks</name><value>5</value><source>programatically</source></property>
<property><name>ipc.client.connection.maxidletime</name><value>10000</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.logroll.period</name><value>3600000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.info.bindAddress</name><value>0.0.0.0</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.property.initLimit</name><value>10</value><source>programatically</source></property>
<property><name>hbase.data.umask</name><value>000</value><source>hbase-default.xml</source></property>
<property><name>fs.s3.sleepTimeSeconds</name><value>10</value><source>core-default.xml</source></property>
<property><name>hadoop.ssl.server.conf</name><value>ssl-server.xml</value><source>core-default.xml</source></property>
<property><name>hbase.balancer.period</name><value>300000</value><source>hbase-default.xml</source></property>
<property><name>fs.s3n.multipart.uploads.block.size</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.lease.recovery.timeout</name><value>900000</value><source>programatically</source></property>
<property><name>ha.zookeeper.parent-znode</name><value>/hadoop-ha</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rpc.timeout</name><value>60000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.property.maxClientCnxns</name><value>300</value><source>programatically</source></property>
<property><name>io.seqfile.lazydecompress</name><value>true</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.offheapcache.percentage</name><value>0</value><source>programatically</source></property>
<property><name>hbase.zookeeper.property.maxClientCnxns</name><value>300</value><source>hbase-default.xml</source></property>
<property><name>fail.fast.expired.active.master</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.retries.number</name><value>35</value><source>programatically</source></property>
<property><name>ipc.client.tcpnodelay</name><value>false</value><source>core-default.xml</source></property>
<property><name>hbase.client.scanner.caching</name><value>100</value><source>hbase-default.xml</source></property>
<property><name>hbase.ipc.client.tcpnodelay</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.thrift.minWorkerThreads</name><value>16</value><source>programatically</source></property>
<property><name>s3.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>hbase.client.retries.number</name><value>35</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.handler.count</name><value>30</value><source>programatically</source></property>
<property><name>hbase.client.scanner.timeout.period</name><value>60000</value><source>hbase-default.xml</source></property>
<property><name>io.file.buffer.size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>hbase.zookeeper.useMulti</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.zookeeper.dns.nameserver</name><value>default</value><source>hbase-default.xml</source></property>
<property><name>hfile.index.block.max.size</name><value>131072</value><source>hbase-default.xml</source></property>
<property><name>hbase.regionserver.hlog.reader.impl</name><value>org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.info.port.auto</name><value>false</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.info.port</name><value>60010</value><source>programatically</source></property>
<property><name>hbase.zookeeper.property.dataDir</name><value>${hbase.tmp.dir}/zookeeper</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.logcleaner.plugins</name><value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value><source>programatically</source></property>
<property><name>hbase.hregion.memstore.mslab.enabled</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>hbase.regionserver.dns.nameserver</name><value>default</value><source>hbase-default.xml</source></property>
<property><name>hfile.block.index.cacheonwrite</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.info.bindAddress</name><value>0.0.0.0</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.thrift.maxQueuedRequests</name><value>1000</value><source>programatically</source></property>
<property><name>nfs.exports.allowed.hosts</name><value>* rw</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.data.umask</name><value>000</value><source>programatically</source></property>
<property><name>hfile.block.cache.size</name><value>0.4</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.dns.interface</name><value>default</value><source>programatically</source></property>
<property><name>hadoop.security.authentication</name><value>simple</value><source>core-default.xml</source></property>
<property><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.dynamic.jars.dir</name><value>${hbase.rootdir}/lib</value><source>programatically</source></property>
<property><name>hbase.hstore.checksum.algorithm</name><value>CRC32</value><source>hbase-default.xml</source></property>
<property><name>hbase.master.info.port</name><value>60010</value><source>hbase-default.xml</source></property>
<property><name>hbase.client.max.perregion.tasks</name><value>1</value><source>hbase-default.xml</source></property>
<property><name>rpc.metrics.quantile.enable</name><value>false</value><source>core-default.xml</source></property>
<property><name>hbase.rest.threads.min</name><value>2</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.coprocessor.abortonerror</name><value>true</value><source>programatically</source></property>
<property><name>tfile.fs.input.buffer.size</name><value>262144</value><source>core-default.xml</source></property>
<property><name>ftp.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.status.multicast.address.port</name><value>60100</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.storescanner.parallel.seek.threads</name><value>10</value><source>programatically</source></property>
<property><name>hbase.local.dir</name><value>${hbase.tmp.dir}/local/</value><source>hbase-default.xml</source></property>
<property><name>hadoop.policy.file</name><value>hbase-policy.xml</value><source>hbase-default.xml</source></property>
<property><name>ha.failover-controller.cli-check.rpc-timeout.ms</name><value>20000</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.optionalcacheflushinterval</name><value>3600000</value><source>programatically</source></property>
<property><name>hbase.regionserver.region.split.policy</name><value>org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.table.lock.enable</name><value>true</value><source>programatically</source></property>
<property><name>ipc.client.kill.max</name><value>10</value><source>core-default.xml</source></property>
<property><name>zookeeper.znode.acl.parent</name><value>acl</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.global.memstore.lowerLimit</name><value>0.38</value><source>programatically</source></property>
<property><name>hbase.auth.key.update.interval</name><value>86400000</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hstore.bytes.per.checksum</name><value>16384</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hregion.majorcompaction.jitter</name><value>0.50</value><source>programatically</source></property>
<property><name>hbase.client.max.perserver.tasks</name><value>5</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rest.port</name><value>8080</value><source>programatically</source></property>
<property><name>hbase.server.compactchecker.interval.multiplier</name><value>1000</value><source>hbase-default.xml</source></property>
<property><name>hadoop.http.filter.initializers</name><value>org.apache.hadoop.http.lib.StaticUserWebFilter</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.dfs.client.read.shortcircuit.buffer.size</name><value>131072</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.msginterval</name><value>3000</value><source>programatically</source></property>
<property><name>hadoop.http.authentication.type</name><value>simple</value><source>core-default.xml</source></property>
<property><name>hbase.regionserver.regionSplitLimit</name><value>2147483647</value><source>hbase-default.xml</source></property>
<property><name>hbase.rest.port</name><value>8080</value><source>hbase-default.xml</source></property>
<property><name>hbase.security.exec.permission.checks</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>ipc.server.listen.queue.size</name><value>128</value><source>core-default.xml</source></property>
<property><name>file.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.leaderport</name><value>3888</value><source>programatically</source></property>
<property><name>hbase.storescanner.parallel.seek.threads</name><value>10</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.catalog.timeout</name><value>600000</value><source>programatically</source></property>
<property><name>io.mapfile.bloom.size</name><value>1048576</value><source>core-default.xml</source></property>
<property><name>fs.swift.impl</name><value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value><source>core-default.xml</source></property>
<property><name>ftp.replication</name><value>3</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.snapshot.enabled</name><value>true</value><source>programatically</source></property>
<property><name>hbase.client.pause</name><value>100</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.logroll.errors.tolerated</name><value>2</value><source>programatically</source></property>
<property><name>hbase.metrics.exposeOperationTimes</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>hadoop.util.hash.type</name><value>murmur</value><source>core-default.xml</source></property>
<property><name>hbase.snapshot.enabled</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>hbase.rpc.timeout</name><value>60000</value><source>hbase-default.xml</source></property>
<property><name>ha.zookeeper.acl</name><value>world:anyone:rwcda</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.lease.recovery.dfs.timeout</name><value>64000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.scanner.caching</name><value>100</value><source>programatically</source></property>
<property><name>io.map.index.skip</name><value>0</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.dns.nameserver</name><value>default</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.defaults.for.version.skip</name><value>false</value><source>programatically</source></property>
<property><name>hbase.client.max.total.tasks</name><value>100</value><source>hbase-default.xml</source></property>
<property><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value><source>core-default.xml</source></property>
<property><name>fs.s3.maxRetries</name><value>4</value><source>core-default.xml</source></property>
<property><name>ha.failover-controller.new-active.rpc-timeout.ms</name><value>60000</value><source>core-default.xml</source></property>
<property><name>s3native.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>hadoop.http.staticuser.user</name><value>dr.who</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.port</name><value>60020</value><source>programatically</source></property>
<property><name>hbase.ipc.client.fallback-to-simple-auth-allowed</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.checksum.verify</name><value>true</value><source>programatically</source></property>
<property><name>hadoop.http.authentication.simple.anonymous.allowed</name><value>true</value><source>core-default.xml</source></property>
<property><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value><source>core-default.xml</source></property>
<property><name>hbase.hstore.blockingWaitTime</name><value>90000</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.property.clientPort</name><value>2181</value><source>programatically</source></property>
<property><name>fs.automatic.close</name><value>true</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hstore.compaction.kv.max</name><value>10</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.status.published</name><value>false</value><source>programatically</source></property>
<property><name>hbase.coprocessor.abortonerror</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rest.support.proxyuser</name><value>false</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.dns.interface</name><value>default</value><source>programatically</source></property>
<property><name>hbase.zookeeper.quorum</name><value>10.20.194.242</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.server.versionfile.writeattempts</name><value>3</value><source>programatically</source></property>
<property><name>hbase.hregion.max.filesize</name><value>10737418240</value><source>hbase-default.xml</source></property>
<property><name>hbase.defaults.for.version</name><value>0.98.1-cdh5.1.0</value><source>hbase-default.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.directory.search.timeout</name><value>10000</value><source>core-default.xml</source></property>
<property><name>hbase.regionserver.logroll.errors.tolerated</name><value>2</value><source>hbase-default.xml</source></property>
<property><name>hbase.status.published</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>zookeeper.znode.parent</name><value>/hbase</value><source>hbase-default.xml</source></property>
<property><name>ftp.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>ha.health-monitor.rpc-timeout.ms</name><value>45000</value><source>core-default.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.search.attr.member</name><value>member</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hstore.blockingWaitTime</name><value>90000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.metrics.showTableName</name><value>true</value><source>programatically</source></property>
<property><name>io.compression.codec.bzip2.library</name><value>system-native</value><source>core-default.xml</source></property>
<property><name>hadoop.http.authentication.token.validity</name><value>36000</value><source>core-default.xml</source></property>
<property><name>hbase.snapshot.restore.take.failsafe.snapshot</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>hbase.metrics.showTableName</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>s3native.replication</name><value>3</value><source>core-default.xml</source></property>
<property><name>hbase.regionserver.hlog.writer.impl</name><value>org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter</value><source>hbase-default.xml</source></property>
<property><name>hbase.zookeeper.property.syncLimit</name><value>5</value><source>hbase-default.xml</source></property>
<property><name>dfs.ha.fencing.ssh.connect-timeout</name><value>30000</value><source>core-default.xml</source></property>
<property><name>hbase.thrift.htablepool.size.max</name><value>1000</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rest.threads.min</name><value>2</value><source>programatically</source></property>
<property><name>net.topology.impl</name><value>org.apache.hadoop.net.NetworkTopology</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.config.read.zookeeper.config</name><value>false</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rootdir</name><value>${hbase.tmp.dir}/hbase</value><source>programatically</source></property>
<property><name>hbase.regionserver.msginterval</name><value>3000</value><source>hbase-default.xml</source></property>
<property><name>zookeeper.znode.rootserver</name><value>root-region-server</value><source>hbase-default.xml</source></property>
<property><name>hbase.rest.support.proxyuser</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>ha.failover-controller.graceful-fence.rpc-timeout.ms</name><value>5000</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.loadbalancer.class</name><value>org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer</value><source>programatically</source></property>
<property><name>hbase.table.lock.enable</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>hbase.rest.threads.max</name><value>100</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.localityCheck.threadPoolSize</name><value>2</value><source>programatically</source></property>
<property><name>hbase.master.logcleaner.plugins</name><value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value><source>hbase-default.xml</source></property>
<property><name>ipc.client.idlethreshold</name><value>4000</value><source>core-default.xml</source></property>
<property><name>ipc.server.tcpnodelay</name><value>false</value><source>core-default.xml</source></property>
<property><name>ftp.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.master.port</name><value>60000</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.status.listener.class</name><value>org.apache.hadoop.hbase.client.ClusterStatusListener$MulticastListener</value><source>programatically</source></property>
<property><name>s3.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.storescanner.parallel.seek.enable</name><value>false</value><source>programatically</source></property>
<property><name>hbase.rpc.shortoperation.timeout</name><value>10000</value><source>hbase-default.xml</source></property>
<property><name>hbase.storescanner.parallel.seek.enable</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.online.schema.update.enable</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>fs.s3.block.size</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hbase.status.multicast.address.ip</name><value>226.1.1.3</value><source>hbase-default.xml</source></property>
<property><name>hadoop.rpc.protection</name><value>authentication</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.global.memstore.upperLimit</name><value>0.4</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.useMulti</name><value>false</value><source>programatically</source></property>
<property><name>ftp.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hstore.blockingStoreFiles</name><value>10</value><source>programatically</source></property>
<property><name>hbase.regionserver.checksum.verify</name><value>true</value><source>hbase-default.xml</source></property>
<property><name>fs.defaultFS</name><value>file:///</value><source>core-default.xml</source></property>
<property><name>file.client-write-packet-size</name><value>65536</value><source>core-default.xml</source></property>
<property><name>hbase.tmp.dir</name><value>${java.io.tmpdir}/hbase-${user.name}</value><source>hbase-default.xml</source></property>
<property><name>hbase.zookeeper.property.initLimit</name><value>10</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.pause</name><value>100</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.data.umask.enable</name><value>false</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.scanner.timeout.period</name><value>60000</value><source>programatically</source></property>
<property><name>fs.trash.checkpoint.interval</name><value>0</value><source>core-default.xml</source></property>
<property><name>hbase.cluster.distributed</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hadoop.http.authentication.signature.secret.file</name><value>${user.home}/hadoop-http-auth-signature-secret</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.max.total.tasks</name><value>100</value><source>programatically</source></property>
<property><name>s3native.stream-buffer-size</name><value>4096</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.hlog.reader.impl</name><value>org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader</value><source>programatically</source></property>
<property><name>hbase.thrift.maxWorkerThreads</name><value>1000</value><source>hbase-default.xml</source></property>
<property><name>fs.permissions.umask-mode</name><value>022</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hregion.max.filesize</name><value>10737418240</value><source>programatically</source></property>
<property><name>hbase.hstore.compaction.kv.max</name><value>10</value><source>hbase-default.xml</source></property>
<property><name>hadoop.common.configuration.version</name><value>0.23.0</value><source>core-default.xml</source></property>
<property><name>hbase.bulkload.retries.number</name><value>0</value><source>hbase-default.xml</source></property>
<property><name>hadoop.security.group.mapping.ldap.ssl</name><value>false</value><source>core-default.xml</source></property>
<property><name>io.serializations</name><value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.server.compactchecker.interval.multiplier</name><value>1000</value><source>programatically</source></property>
<property><name>fs.df.interval</name><value>60000</value><source>core-default.xml</source></property>
<property><name>hbase.rest.readonly</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>io.seqfile.compress.blocksize</name><value>1000000</value><source>core-default.xml</source></property>
<property><name>ipc.client.connect.max.retries</name><value>10</value><source>core-default.xml</source></property>
<property><name>hadoop.security.groups.cache.secs</name><value>300</value><source>core-default.xml</source></property>
<property><name>hbase.client.localityCheck.threadPoolSize</name><value>2</value><source>hbase-default.xml</source></property>
<property><name>hbase.regionserver.port</name><value>60020</value><source>hbase-default.xml</source></property>
<property><name>hbase.regionserver.handler.count</name><value>30</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.thrift.maxWorkerThreads</name><value>1000</value><source>programatically</source></property>
<property><name>hadoop.security.group.mapping.ldap.search.filter.user</name><value>(&amp;(objectClass=user)(sAMAccountName={0}))</value><source>core-default.xml</source></property>
<property><name>hbase.hregion.majorcompaction</name><value>604800000</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.ipc.client.fallback-to-simple-auth-allowed</name><value>false</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rest.readonly</name><value>false</value><source>programatically</source></property>
<property><name>fs.s3n.multipart.copy.block.size</name><value>5368709120</value><source>core-default.xml</source></property>
<property><name>fs.s3n.block.size</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hbase.regionserver.optionalcacheflushinterval</name><value>3600000</value><source>hbase-default.xml</source></property>
<property><name>fs.ftp.host</name><value>0.0.0.0</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.bulkload.retries.number</name><value>0</value><source>programatically</source></property>
<property><name>hadoop.security.group.mapping</name><value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.property.dataDir</name><value>${hbase.tmp.dir}/zookeeper</value><source>programatically</source></property>
<property><name>file.replication</name><value>1</value><source>core-default.xml</source></property>
<property><name>hbase.config.read.zookeeper.config</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.rest.filter.classes</name><value>org.apache.hadoop.hbase.rest.filter.GzipFilter</value><source>hbase-default.xml</source></property>
<property><name>hbase.regionserver.global.memstore.upperLimit</name><value>0.4</value><source>hbase-default.xml</source></property>
<property><name>hadoop.work.around.non.threadsafe.getpwuid</name><value>false</value><source>core-default.xml</source></property>
<property><name>hadoop.tmp.dir</name><value>/tmp/hadoop-${user.name}</value><source>core-default.xml</source></property>
<property><name>hbase.regionserver.global.memstore.lowerLimit</name><value>0.38</value><source>hbase-default.xml</source></property>
<property><name>hbase.hregion.preclose.flush.size</name><value>5242880</value><source>hbase-default.xml</source></property>
<property><name>hadoop.kerberos.kinit.command</name><value>kinit</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.zookeeper.quorum</name><value>10.20.195.190</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.client.max.perregion.tasks</name><value>1</value><source>programatically</source></property>
<property><name>file.bytes-per-checksum</name><value>512</value><source>core-default.xml</source></property>
<property><name>hbase.master.dns.interface</name><value>default</value><source>hbase-default.xml</source></property>
<property><name>net.topology.script.number.args</name><value>100</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.cluster.distributed</name><value>false</value><source>programatically</source></property>
<property><name>hbase.dfs.client.read.shortcircuit.buffer.size</name><value>131072</value><source>hbase-default.xml</source></property>
<property><name>hbase.defaults.for.version.skip</name><value>false</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.regionserver.regionSplitLimit</name><value>2147483647</value><source>programatically</source></property>
<property><name>hadoop.ssl.hostname.verifier</name><value>DEFAULT</value><source>core-default.xml</source></property>
<property><name>ipc.client.connect.timeout</name><value>20000</value><source>core-default.xml</source></property>
<property><name>hbase.server.versionfile.writeattempts</name><value>3</value><source>hbase-default.xml</source></property>
<property><name>io.mapfile.bloom.error.rate</name><value>0.005</value><source>core-default.xml</source></property>
<property><name>hbase.regionserver.info.bindAddress</name><value>0.0.0.0</value><source>hbase-default.xml</source></property>
<property><name>hbase.master.hfilecleaner.plugins</name><value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.online.schema.update.enable</name><value>true</value><source>programatically</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.status.multicast.address.ip</name><value>226.1.1.3</value><source>programatically</source></property>
<property><name>ha.failover-controller.graceful-fence.connection.retries</name><value>1</value><source>core-default.xml</source></property>
<property><name>ha.health-monitor.connect-retry-interval.ms</name><value>1000</value><source>core-default.xml</source></property>
<property><name>io.seqfile.local.dir</name><value>${hadoop.tmp.dir}/io/local</value><source>core-default.xml</source></property>
<property><name>hbase.security.authentication</name><value>simple</value><source>hbase-default.xml</source></property>
<property><name>tfile.io.chunk.size</name><value>1048576</value><source>core-default.xml</source></property>
<property><name>file.blocksize</name><value>67108864</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.tmp.dir</name><value>${java.io.tmpdir}/hbase-${user.name}</value><source>programatically</source></property>
<property><name>io.skip.checksum.errors</name><value>false</value><source>core-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.snapshot.restore.failsafe.name</name><value>hbase-failsafe-{snapshot.name}-{restore.timestamp}</value><source>programatically</source></property>
<property><name>hbase.zookeeper.peerport</name><value>2888</value><source>hbase-default.xml</source></property>
<property><name>hbase.master.port</name><value>60000</value><source>hbase-default.xml</source></property>
<property><name>fs.ftp.host.port</name><value>21</value><source>core-default.xml</source></property>
<property><name>fs.AbstractFileSystem.viewfs.impl</name><value>org.apache.hadoop.fs.viewfs.ViewFs</value><source>core-default.xml</source></property>
<property><name>ipc.client.fallback-to-simple-auth-allowed</name><value>false</value><source>core-default.xml</source></property>
<property><name>hbase.status.multicast.address.port</name><value>60100</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.rest.threads.max</name><value>100</value><source>programatically</source></property>
<property><name>hbase.client.keyvalue.maxsize</name><value>10485760</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.hregion.memstore.block.multiplier</name><value>2</value><source>programatically</source></property>
<property><name>hadoop.ssl.require.client.cert</name><value>false</value><source>core-default.xml</source></property>
<property><name>hbase.hregion.memstore.block.multiplier</name><value>2</value><source>hbase-default.xml</source></property>
<property><name>hbase.server.thread.wakefrequency</name><value>10000</value><source>hbase-default.xml</source></property>
<property><name>hbase.failover.cluster.fail1.hbase.security.authentication</name><value>simple</value><source>programatically</source></property>
</configuration>