# Config for KataGo C++ Analysis engine, i.e. "./katago.exe analysis"
# Example config for C++ (non-python) analysis engine
# SEE NOTES ABOUT PERFORMANCE AND MEMORY USAGE IN gtp_example.cfg
# SEE NOTES ABOUT numSearchThreads AND OTHER IMPORTANT PARAMS BELOW!
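#
# For reference, a rough sketch of how this config is typically used (paths and the model file name below are
# placeholders, not part of this config):
#   ./katago.exe analysis -config analysis_example.cfg -model <path to neural net file>
# Queries are then sent to stdin as single-line JSON objects, for example (illustrative values only):
#   {"id":"example1","moves":[["B","Q16"],["W","D4"]],"rules":"tromp-taylor","komi":7.5,"boardXSize":19,"boardYSize":19,"maxVisits":200}
# See the analysis engine documentation (docs/Analysis_Engine.md in the KataGo repository) for the full query and response format.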
# Logs------------------------------------------------------------------------------------
# Where to output log?
logDir = analysis_logs # Each run of KataGo will log to a separate file in this dir
# logDirDated = analysis_logs # Use this instead of logDir to also write separate dated subdirs
# logFile = analysis.log # Use this instead of logDir to just specify a single file directly
# logToStderr = true # Echo everything output to log file to stderr as well
# logAllRequests = false # Log all input lines received to the analysis engine.
# logAllResponses = false # Log all lines output to stdout from the analysis engine.
# logErrorsAndWarnings = true # Log all lines output to stdout from the analysis engine that are errors and warnings
# logSearchInfo = false # Log debug info for every search performed
# Report a warning whenever a query contains a field that is unused. Helps to guard against
# typos when writing code that queries the analysis engine.
# warnUnusedFields = true
# Analysis------------------------------------------------------------------------------------
# Controls the number of moves after the first move in a variation.
# analysisPVLen = 15
# Report winrates for analysis as (BLACK|WHITE|SIDETOMOVE).
reportAnalysisWinratesAs = BLACK
# Larger values will make KataGo explore the top move(s) less deeply and accurately,
# but explore and give evaluations to a greater variety of moves.
# An extreme value like 1 will distribute many playouts across every move on the board, even very bad moves.
# NOTE: defaults to 0.04, under the presumption that the analysis engine will be used mainly for analysis.
# If you are intending to use the analysis engine to also play games and you want to maximize playing strength,
# set this to 0.0 either in this config or in the overrides.
# wideRootNoise = 0.04
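# For example (an illustrative sketch, assuming the per-query "overrideSettings" field described in the
# analysis engine documentation), a single request could include:
#   "overrideSettings": {"wideRootNoise": 0.0}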
# Try to limit the effect of possible bad or bogus move sequences in the
# history leading to this position from affecting KataGo's move predictions.
# ignorePreRootHistory = true
# Bot behavior---------------------------------------------------------------------------------------
# Handicap -------------
# Assume that if black makes many moves in a row right at the start of the game, then the game is a handicap game.
# This is necessary on some servers and for some GUIs and also when initializing from many SGF files, which may
# set up a handicap game using repeated GTP "play" commands for black rather than GTP "place_free_handicap" commands.
# However, it may also lead to incorrect understanding of komi if whiteHandicapBonus is used and a server does NOT
# have such a practice.
# Defaults to true! Uncomment and set to false to disable this behavior.
# assumeMultipleStartingBlackMovesAreHandicap = true
# Passing and cleanup -------------
# Make the bot never assume that its pass will end the game, even if passing would end and "win" under Tromp-Taylor rules.
# Usually this is a good idea when using the engine for analysis or when playing on servers where scoring may not be implemented exactly according to Tromp-Taylor rules.
# Defaults to true! Uncomment and set to false to disable this.
# conservativePass = true
# When using territory scoring, self-play games continue beyond two passes with special cleanup
# rules that may be confusing for human players. This option prevents the special cleanup phases from being
# reachable when using the bot for GTP play.
# Defaults to true! Uncomment and set to false if you want KataGo to be able to enter special cleanup.
# For example, if you are testing it against itself, or against another bot that has precisely implemented the rules
# documented at https://lightvector.github.io/KataGo/rules.html
# preventCleanupPhase = true
# Search limits-----------------------------------------------------------------------------------
# By default, if NOT specified in an individual request, limit the maximum number of root visits per search to this many
maxVisits = 500
# If provided, cap search time at this many seconds
# maxTime = 60
# Search threads, batching, GPUs--------------------------------------------------------------------------
# Try a configuration like this if you only expect the engine to be handling a few queries at a time and you want
# individual queries to return more quickly, and are okay with the results being a bit lower-quality and the overall
# peak throughput on queries to be lower.
numAnalysisThreads = 2
numSearchThreadsPerAnalysisThread = 16
# Try a configuration like this if you expect to be sending large numbers of queries at a time, and want to maximize
# total throughput and also the evaluation quality of all the queries and you never care about the response latency
# of the individual queries, only the throughput as a whole.
# numAnalysisThreads = 32
# numSearchThreadsPerAnalysisThread = 1
# You will want to increase one or both numbers if you have a powerful GPU, and possibly decrease one or both if you
# have a very weak GPU, and play with the balance between them depending on your use case.
# Read the explanation below to understand how to set these parameters:
# EXPLANATION:
# numAnalysisThreads: the number of POSITIONS to be able to search in parallel.
# numSearchThreadsPerAnalysisThread: the number of threads to use in the tree search for EACH position.
# (older analysis configs might just have 'numSearchThreads', this is an alias for 'numSearchThreadsPerAnalysisThread')
# Therefore, the total number of search threads that may be active at a given time could be as large as the product:
# numAnalysisThreads * numSearchThreadsPerAnalysisThread
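# For example, with the settings above (numAnalysisThreads = 2, numSearchThreadsPerAnalysisThread = 16),
# up to 2 * 16 = 32 search threads may be active at once.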
# Searching more positions in parallel is more efficient since the different threads aren't conflicting with each
# other on the same MCTS search tree. Using multiple threads on the same search will both make things slower
# and weaken the search (holding playouts fixed) due to out of date statistics on nodes and suboptimal exploration,
# although the cost is minor for only 2,4,8 threads.
# So unlike in GTP, which only ever searches one position at a time and where therefore you might as well make
# numSearchThreads as large as possible, in the analysis engine you often want to keep numSearchThreads small,
# and instead parallelize across positions, so you can reduce conflict between threads and improve the overall throughput
# and strength of the search.
# But obviously you only get the benefit of parallelization across positions when you actually have lots of positions
# that you are querying at once! For example, setting numAnalysisThreads = 8 is useless if you only ever send one or two
# queries at a time!
# Therefore:
# * If you plan to use the analysis engine only for batch processing large numbers of positions,
# it's preferable to set numSearchThreadsPerAnalysisThread to only a small number (e.g. 1,2,4) and use a higher numAnalysisThreads.
# * But if you sometimes plan to query the analysis engine for single positions, or otherwise in smaller quantities
# than numAnalysisThreads, or if you plan to be user-interactive such that the response time on some individual
# analysis requests is important to keep low, then set numSearchThreadsPerAnalysisThread to a larger number and use
# a lower numAnalysisThreads. That way, individual searches complete faster due to having more threads on each one.
# For 19x19 boards, weaker GPUs probably want a TOTAL number of threads (numAnalysisThreads * numSearchThreadsPerAnalysisThread)
# between 4 and 32. Mid-tier GPUs probably between 16 and 64. Strong GPUs probably between 32 and 256.
# But there's no substitute for experimenting and seeing what's best for your hardware and your usage case.
# Keep in mind that the number of threads you want does NOT necessarily have much to do with how many cores you have on your
# system. The optimal may easily exceed the number of cores! GPU batching is (usually) the dominant consideration.
# -------------
# nnMaxBatchSize is the max number of positions to send to a single GPU at once. Generally, it should be the case that:
# (number of GPUs you will use * nnMaxBatchSize) >= (numAnalysisThreads * numSearchThreadsPerAnalysisThread)
# That way, when each thread tries to request a GPU eval, your batch size summed across GPUs is large enough to handle them
# all at once. However, it can be sensible to set this a little smaller if you are limited on GPU memory;
# too large a number may fail if the GPU doesn't have enough memory.
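# For example, with 1 GPU and the settings above (numAnalysisThreads = 2, numSearchThreadsPerAnalysisThread = 16),
# you would want 1 * nnMaxBatchSize >= 2 * 16 = 32, so the value of 64 below is more than sufficient.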
nnMaxBatchSize = 64
# Uncomment and set these smaller if you are going to use the analysis engine EXCLUSIVELY for smaller boards (or plan to
# run multiple instances, with some instances only handling smaller boards). It should improve performance.
# It may also mean you can use more threads profitably.
# maxBoardXSizeForNNBuffer = 19
# maxBoardYSizeForNNBuffer = 19
# Uncomment and set this to true if you are going to use the analysis engine EXCLUSIVELY for exactly the board size
# specified by maxBoardXSizeForNNBuffer and maxBoardYSizeForNNBuffer. It may slightly improve performance on some GPUs.
# requireMaxBoardSize = true
# TO USE MULTIPLE GPUS:
# Uncomment and set this to the number of GPUs you have and/or would like to use...
# AND if it is more than 1, uncomment the appropriate CUDA or OpenCL section below.
# numNNServerThreadsPerModel = 1
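# For example, a hypothetical two-GPU setup would set:
#   numNNServerThreadsPerModel = 2
# and also uncomment the two per-thread device lines in whichever of the TensorRT, CUDA, or OpenCL sections
# below matches the KataGo version you are running.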
# Other General GPU Settings-------------------------------------------------------------------------------
# Cache up to 2 ** this many neural net evaluations in case of transpositions in the tree.
nnCacheSizePowerOfTwo = 23
# Size of mutex pool for nnCache is 2 ** this
nnMutexPoolSizePowerOfTwo = 17
# Randomize board orientation when running neural net evals?
nnRandomize = true
# TENSORRT GPU settings--------------------------------------
# These only apply when using the TENSORRT version of KataGo.
# IF USING ONE GPU: optionally uncomment and change this if the GPU you want to use turns out to be not device 0
# trtDeviceToUse = 0
# IF USING TWO GPUS: Uncomment these two lines (AND set numNNServerThreadsPerModel above):
# trtDeviceToUseThread0 = 0 # change this if the first GPU you want to use turns out to be not device 0
# trtDeviceToUseThread1 = 1 # change this if the second GPU you want to use turns out to be not device 1
# IF USING THREE GPUS: Uncomment these three lines (AND set numNNServerThreadsPerModel above):
# trtDeviceToUseThread0 = 0 # change this if the first GPU you want to use turns out to be not device 0
# trtDeviceToUseThread1 = 1 # change this if the second GPU you want to use turns out to be not device 1
# trtDeviceToUseThread2 = 2 # change this if the third GPU you want to use turns out to be not device 2
# You can probably guess the pattern if you have four, five, etc. GPUs.
# CUDA-specific GPU settings--------------------------------------
# These only apply when using the CUDA version of KataGo.
# IF USING ONE GPU: optionally uncomment and change this if the GPU you want to use turns out to be not device 0
# cudaDeviceToUse = 0
# IF USING TWO GPUS: Uncomment these two lines (AND set numNNServerThreadsPerModel above):
# cudaDeviceToUseThread0 = 0 # change this if the first GPU you want to use turns out to be not device 0
# cudaDeviceToUseThread1 = 1 # change this if the second GPU you want to use turns out to be not device 1
# IF USING THREE GPUS: Uncomment these three lines (AND set numNNServerThreadsPerModel above):
# cudaDeviceToUseThread0 = 0 # change this if the first GPU you want to use turns out to be not device 0
# cudaDeviceToUseThread1 = 1 # change this if the second GPU you want to use turns out to be not device 1
# cudaDeviceToUseThread2 = 2 # change this if the third GPU you want to use turns out to be not device 2
# You can probably guess the pattern if you have four, five, etc. GPUs.
# KataGo will automatically use FP16 or not based on the compute capability of your NVIDIA GPU. If you
# want to try to force a particular behavior though you can uncomment these lines and change them
# to "true" or "false". E.g. it's using FP16 but on your card that's giving an error, or it's not using
# FP16 but you think it should.
# cudaUseFP16 = auto
# cudaUseNHWC = auto
# OpenCL-specific GPU settings--------------------------------------
# These only apply when using the OpenCL version of KataGo.
# Uncomment to tune OpenCL for every board size separately, rather than only the largest possible size
# openclReTunePerBoardSize = true
# IF USING ONE GPU: optionally uncomment and change this if the best device to use is guessed incorrectly.
# The default behavior tries to guess the 'best' GPU or device on your system to use; usually it will be a good guess.
# openclDeviceToUse = 0
# IF USING TWO GPUS: Uncomment these two lines and replace X and Y with the device ids of the devices you want to use.
# It might NOT be 0 and 1, some computers will have many OpenCL devices. You can see what the devices are when
# KataGo starts up - it should print or log all the devices it finds.
# (AND also set numNNServerThreadsPerModel above)
# openclDeviceToUseThread0 = X
# openclDeviceToUseThread1 = Y
# IF USING THREE GPUS: Uncomment these three lines and replace X and Y and Z with the device ids of the devices you want to use.
# It might NOT be 0 and 1 and 2, some computers will have many OpenCL devices. You can see what the devices are when
# KataGo starts up - it should print or log all the devices it finds.
# (AND also set numNNServerThreadsPerModel above)
# openclDeviceToUseThread0 = X
# openclDeviceToUseThread1 = Y
# openclDeviceToUseThread2 = Z
# You can probably guess the pattern if you have four, five, etc. GPUs.
# KataGo will automatically use FP16 or not based on testing your GPU during tuning. If you
# want to try to force a particular behavior though you can uncomment this line and change it
# to "true" or "false". This is a fairly blunt setting - more detailed settings are testable
# by rerunning the tuner with various arguments.
# openclUseFP16 = auto
# Eigen-specific settings--------------------------------------
# These only apply when using the Eigen (pure CPU) version of KataGo.
# This is the number of CPU threads for evaluating the neural net on the Eigen backend.
# It defaults to min(numAnalysisThreads * numSearchThreadsPerAnalysisThread, numCPUCores).
# numEigenThreadsPerModel = X
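# For example (illustrative), with numAnalysisThreads = 2 and numSearchThreadsPerAnalysisThread = 16 on an
# 8-core CPU, the default would be min(2 * 16, 8) = 8.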
# Misc Behavior --------------------
# If the board is symmetric, search only one copy of each equivalent move. Attempts to also account for ko/superko, but will not be theoretically perfect for superko.
# Uncomment and set to false to disable this.
# rootSymmetryPruning = true
# Uncomment and set to true to make KataGo avoid a particular joseki that some KataGo nets misevaluate,
# and also to improve opening diversity versus some particular other bots that like to play it all the time.
# avoidMYTDaggerHack = false
# Have KataGo mildly prefer to avoid playing the same joseki in every corner of the board.
# Uncomment to set to a specific value. A small value like 0.005 should already produce a noticeable behavior change.
# avoidRepeatedPatternUtility = 0.0
# Enable some hacks that mitigate rare instances when passing messes up deeper searches.
# enablePassingHacks = true
# Root move selection and biases------------------------------------------------------------------------------
# Uncomment and edit any of the below values to change them from their default.
# Not all of these parameters are applicable to analysis, some are only used for actual play
# Temperature for the early game, randomize between chosen moves with this temperature
# chosenMoveTemperatureEarly = 0.5
# Decay temperature for the early game by 0.5 every this many moves, scaled with board size.
# chosenMoveTemperatureHalflife = 19
# At the end of search after the early game, randomize between chosen moves with this temperature
# chosenMoveTemperature = 0.10
# Subtract this many visits from each move prior to applying chosenMoveTemperature
# (unless all moves have too few visits) to downweight unlikely moves
# chosenMoveSubtract = 0
# The same as chosenMoveSubtract but only prunes moves that fall below the threshold, does not affect moves above
# chosenMovePrune = 1
# Number of symmetries to sample (WITHOUT replacement) and average at the root
# rootNumSymmetriesToSample = 1
# Using LCB for move selection?
# useLcbForSelection = true
# How many stdevs a move needs to be better than another for LCB selection
# lcbStdevs = 5.0
# Only use LCB override when a move has this proportion of visits as the top move
# minVisitPropForLCB = 0.15
# Internal params------------------------------------------------------------------------------
# Uncomment and edit any of the below values to change them from their default.
# Scales the utility of winning/losing
# winLossUtilityFactor = 1.0
# Scales the utility for trying to maximize score
# staticScoreUtilityFactor = 0.10
# dynamicScoreUtilityFactor = 0.30
# Adjust dynamic score center this proportion of the way towards zero, capped at a reasonable amount.
# dynamicScoreCenterZeroWeight = 0.20
# dynamicScoreCenterScale = 0.75
# The utility of getting a "no result" due to triple ko or other long cycle in non-superko rulesets (-1 to 1)
# noResultUtilityForWhite = 0.0
# The number of wins that a draw counts as, for white. (0 to 1)
# drawEquivalentWinsForWhite = 0.5
# Exploration constant for mcts
# cpuctExploration = 1.0
# cpuctExplorationLog = 0.45
# Parameters that control exploring more in volatile positions, exploring less in stable positions.
# cpuctUtilityStdevPrior = 0.40
# cpuctUtilityStdevPriorWeight = 2.0
# cpuctUtilityStdevScale = 0.85
# FPU reduction constant for mcts
# fpuReductionMax = 0.2
# rootFpuReductionMax = 0.1
# fpuParentWeightByVisitedPolicy = true
# Parameters that control weighting of evals based on the net's own self-reported uncertainty.
# useUncertainty = true
# uncertaintyExponent = 1.0
# uncertaintyCoeff = 0.25
# Explore using optimistic policy
# rootPolicyOptimism = 0.2
# policyOptimism = 1.0
# Amount to apply a downweighting of children with very bad values relative to good ones
# valueWeightExponent = 0.25
# Slight incentive for the bot to behave human-like with regard to passing at the end, filling the dame,
# not wasting time playing in its own territory, etc., and to avoid playing moves that are equivalent in terms of
# points but a bit more unfriendly to humans.
# rootEndingBonusPoints = 0.5
# Make the bot prune useless moves that are just prolonging the game to avoid losing yet
# rootPruneUselessMoves = true
# Apply bias correction based on local pattern keys
# subtreeValueBiasFactor = 0.45
# subtreeValueBiasWeightExponent = 0.85
# Use graph search rather than tree search - identify and share search for transpositions.
# useGraphSearch = true
# How much to shard the node table for search synchronization
# nodeTableShardsPowerOfTwo = 16
# How many virtual losses to add when a thread descends through a node
# numVirtualLossesPerThread = 1
# Improve the quality of evals under heavy multithreading
# useNoisePruning = true
# Avoid SGF Patterns ------------------------------------------------------------------------------
# The parameters in this section provide a powerful way to customize KataGo to avoid moves that follow specific patterns
# based on a set of provided SGF files loaded upon startup. Uncomment them to use this feature.
# Additionally, if the SGF file contains the string %SKIP% in a comment on a move, that move will be ignored for this purpose.
# Load sgf files from this directory when the engine is started (ONLY on startup, will not reload unless engine is restarted)
# avoidSgfPatternDirs = path/to/directory/with/sgfs/
# You can also surround the file path in double quotes if the file path contains trailing spaces or hash signs.
# Within double quotes, backslashes are escape characters.
# avoidSgfPatternDirs = "path/to/directory/with/sgfs/"
# Penalize this much utility per matching move.
# Set this negative if you want to make KataGo favor the SGF patterns instead of penalizing them!
# This number does not need to be large, even 0.001 will make a difference. Too-large values may lead to bad play.
# avoidSgfPatternUtility = 0.001
# Optional - load only the newest this many files
# avoidSgfPatternMaxFiles = 20
# Optional - Penalty is multiplied by this for each older SGF file, so that old sgf files matter less than newer ones.
# avoidSgfPatternLambda = 0.90
# Optional - pay attention only to moves that were made by players with this name.
# For example you can set it to the name that your bot's past games will show up as in the SGF, so that the bot will only avoid repeating
# moves that it itself made in past games, not the moves that its opponents made.
# avoidSgfPatternAllowedNames = my-ogs-bot-name1,my-ogs-bot-name2
# Optional - Ignore any moves in SGF files that occurred before this turn number.
# avoidSgfPatternMinTurnNumber = 0
# For more avoid patterns:
# You can also specify a second set of parameters, and a third, fourth, etc by numbering 2,3,4,...
# avoidSgf2PatternDirs = ...
# avoidSgf2PatternUtility = ...
# avoidSgf2PatternMaxFiles = ...
# avoidSgf2PatternLambda = ...
# avoidSgf2PatternAllowedNames = ...
# avoidSgf2PatternMinTurnNumber = ...
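# For example, a purely hypothetical second set (all values illustrative, not recommendations) might look like:
# avoidSgf2PatternDirs = path/to/second/sgf/dir/
# avoidSgf2PatternUtility = 0.002
# avoidSgf2PatternMaxFiles = 50
# avoidSgf2PatternLambda = 0.95
# avoidSgf2PatternAllowedNames = my-ogs-bot-name1
# avoidSgf2PatternMinTurnNumber = 0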