### A Pluto.jl notebook ###
# v0.17.4
using Markdown
using InteractiveUtils
# ╔═╡ 22c16f23-cb6e-4b15-922b-32c4e4b9baba
# This notebook was written with `[julia version 1.6.3]`.
begin
using Agents
using Graphs
using Distributions
using StatsPlots
using ColorSchemes
using StatsFuns
using GraphRecipes
using LaTeXStrings
using DrWatson: @dict
using Random
using DataFramesMeta
using StatsBase
using PlutoUI
begin # plot settings
DPI = 300 # 1200 for poster
FONT = "Arial"
COLORSCHEME = ColorSchemes.Greys
gr(dpi = DPI)
default(fontfamily = FONT)
end
TableOfContents()
end
# ╔═╡ ff785b01-f0f3-4feb-9fc0-f07a5b86faa5
let
using ShortCodes
[
DOI("10.1103/PhysRevE.74.036105"),
DOI("10.1103/PhysRevE.102.012309"),
DOI("10.1109/NSW.2011.6004642")
]
end
# ╔═╡ b0b47fa0-b9d1-4e46-be06-31233b3b6f03
BlockDiag("""blockdiag {
default_fontsize = 15;
"ABM\t𝑞(𝑥)" -> "𝐱" -> "𝐹";
"ABM\t𝑞(𝑥)"[color = "#D6E3F4"];
"𝐱"[color = "#F2F2F2"];
"𝐹"[color = "#DAEAD2"];
"ABM\t𝑞(𝑥)" -> "𝐱"[thick];
"𝐱" -> "𝐹"[thick];
}""")
# ╔═╡ de951afb-2596-4ef6-8c96-c8c03102eea9
BlockDiag("""blockdiag {
default_fontsize = 15;
"SCABM\t𝑝(𝑥)" <- "𝐱'" <- "𝐹'";
"SCABM\t𝑝(𝑥)"[color = "#DAEAD2"];
"𝐱'"[color = "#F2F2F2"];
"𝐹'"[color = "#D6E3F4"];
"SCABM\t𝑝(𝑥)" <- "𝐱'"[thick];
"𝐱'" <- "𝐹'"[thick];
}""")
# ╔═╡ 9942dd32-e601-4a01-97ef-e3709080fea8
begin # ABM parameters
N₀ = 30 # Expected number of agents
M = 10_000 # Number of samples in one ABM ensemble
T = 5 # Number of time steps the ABM is run
GRAPHTYPES = [:ErdosRenyi, :WattsStrogatz, :BarabasiAlbert]
SEED = 123
end;
# ╔═╡ 6f1ddf1f-262b-4a47-8da8-9bf5bb27622f
"""
Sample a `(N, E)` graph with approximately the same number of edges
`E` as an `erdos_renyi(N, 0.5)` graph, i.e., `E = 0.5*N(N-1)/2`.
"""
function sample_calibrated_graph(N, graphtype; seed = -1)
if graphtype == :ErdosRenyi
g = erdos_renyi(N, 0.5; seed = seed)
elseif graphtype == :BarabasiAlbert
k = round(Int, .5*(√N + N))
g = barabasi_albert(N, k; seed = seed)
elseif graphtype == :WattsStrogatz
k = round(Int, 0.5*(N-1))
β = 0.05 # Free parameter
g = watts_strogatz(N, k, β; seed = seed)
else
error("Unknown graphtype: $graphtype")
end
return g
end
# ╔═╡ f66bed21-ea37-44c1-a238-93bc22778ac3
let
function example(graphtype; N = 10, size = 500, seed = 1111)
g = sample_calibrated_graph(N, graphtype; seed = seed)
c = local_clustering_coefficient(g)
colors = get.(Ref(COLORSCHEME), c)
graphplot(g; title=graphtype, titlefontsize = 10, size = (size, size/2), nodecolor = colors, nodesize = .2)
end
plot(example.([:ErdosRenyi, :WattsStrogatz, :BarabasiAlbert])..., layout = (1,3))
end
# ╔═╡ 7c46a491-bef6-4db1-a0d3-2cc648ce6fe9
# Define the ABM according to Dall’Asta+ (2006)
begin
mutable struct Agent <: AbstractAgent
id::Int
lexicon::Vector{Int}
end
function initialize(N₀, graphtype)
# Sample the number of agents for this model
N = rand(Poisson(N₀))
# This maps each node to an Int in 1:N
graphseed = abs(rand(Int))
graph = sample_calibrated_graph(N, graphtype; seed = graphseed)
C = mean(local_clustering_coefficient(graph))
properties = @dict(
N,
graphtype,
graphseed,
graph,
C, # Clustering coefficient ∈ [0,1]
S = 0. # Success rate ∈ [0,1]
)
# This maps each agent to an Int in 1:N
model = ABM(Agent, nothing; properties)
# Each agent starts with a lexicon containing a single random word (an Int)
for _ in 1:N
add_agent!(model, [rand(model.rng, Int)])
end
return model
end
function agent_step!(speaker, model)::Int
# This relies on the fact that both agents and nodes are
# mapped to Ints on 1:N (no agents or nodes are removed)
neighbors = all_neighbors(model.graph, speaker.id)
isempty(neighbors) && return 0
listener = model[rand(model.rng, neighbors)]
message = rand(model.rng, speaker.lexicon)
if message in listener.lexicon
speaker.lexicon = [message]
listener.lexicon = [message]
return 1
else
push!(listener.lexicon, message)
return 0
end
end
function model_step!(model)
successes = 0
for speaker in allagents(model)
successes += agent_step!(speaker, model)
end
model.S = successes/nagents(model)
end
end;
# ╔═╡ 9b0abb0f-6081-469e-8c3d-69f506cf1caf
begin
function ensemblerun(N₀, M, T, graphtype)
models = [initialize(N₀, graphtype) for _ in 1:M]
adf, mdf, models = ensemblerun!(
models,
agent_step!,
model_step!,
T;
mdata = [:S]
)
rename!(mdf, :ensemble => :run) # Rename awkwardly named column
mdf[!, :graphtype] .= graphtype
ensemble = @chain mdf begin
@orderby :graphtype :run
@subset :step .== maximum(:step)
@select :graphtype :run :S
end
ensemble[!, :C] = [m.C for m in models]
ensemble[!, :N] = [m.N for m in models]
ensemble[!, :graphseed] = [m.graphseed for m in models]
return ensemble
end
Random.seed!(SEED)
ensemble = vcat(ensemblerun.(N₀, M, T, GRAPHTYPES)...)
ensemble = groupby(ensemble, :graphtype)
end
# ╔═╡ 9c70b5d1-a6dd-4687-a0d4-6fa2526286ee
begin
function weigh!(ensemble, λ)
@transform!(ensemble, :weight = softmax(+λ*(:C))) # Use `+λ` convention
return ensemble
end
function sample_efficiency(weights)
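# νᵐ is the largest number of equally weighted samples that staircase resampling (see appendix) can draw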
νᵐ = floor(Int, 1/maximum(weights))
e = νᵐ / length(weights)
end
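# Weighted moment helpers; passing 1. as the total to Weights is valid because the softmax weights sum to 1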
wmean(x, w) = mean(x, Weights(w, 1.))
wstd(x, w) = std(x, Weights(w, 1.))
wquantile(x, w, p) = quantile(x, Weights(w, 1.), p)
function project!(ensemble, λ)
weigh!(ensemble, λ)
@combine ensemble begin
:λ = λ
:ϵ = sample_efficiency(:weight)
:μᶜ = wmean(:C, :weight)
:σᶜ = wstd(:C, :weight)
:μˢ = wmean(:S, :weight)
:σˢ = wstd(:S, :weight)
:μⁿ = wmean(:N, :weight)
:σⁿ = wstd(:N, :weight)
end
end
project!(ensemble, 0.) # Get unconstrained moments
end
# ╔═╡ e4bc40ca-8958-4807-a240-64fcb1fedcb4
begin
function plotdiagram(ensemble)
@df DataFrame(ensemble) cornerplot(
[:C :S]; label = ["Clustering coefficient C", "Success rate S"],
group = :graphtype, compact = true, size=(400,300), labelfontsize=8
)
end
function lambdas(σᶜ; c = 10, n = 100)
lim = c/σᶜ # In general λσᶜ ∼ O(1), so this spans ±c standard units
λ = LinRange(-lim, lim, n)
end
function project_lambdas!(ensemble, λ)
s = vcat(project!.(Ref(ensemble), λ)...)
s = groupby(s, :graphtype)
end
function getrange!(ensemble, graphtype)
r = @chain project!(ensemble, 0.) begin
@subset :graphtype .== graphtype
@select :μᶜ :σᶜ
end
only(r.μᶜ), only(r.σᶜ)
end
function plotstates!(p, ensemble; c = 10, n = 100, z = 3)
σᶜ = project!(ensemble, 0.).σᶜ
λ = lambdas(minimum(σᶜ); c = c, n = n)
s = project_lambdas!(ensemble, λ)
for (k, sᵍ) in pairs(s)
μ, σ = getrange!(ensemble, k.graphtype)
keep = @. μ - z*σ < sᵍ.μᶜ < μ + z*σ
sᵍ = sᵍ[keep, :]
@df sᵍ plot!(p, :μᶜ, :μˢ; linewidth=3, linecolor = :black, ylim = (0,1))
annotate!(p, μ, 1., text(k.graphtype, FONT, 6))
###
#@df sᵍ plot!(p, :μᶜ, :μⁿ/N₀; linewidth=3, linecolor = :blue, ylim = (0,1))
end
s
end
end
# ╔═╡ 8eb2581b-e4de-4fc5-b716-13bf4ff3ffcd
begin
diagram = plotdiagram(ensemble)
states = plotstates!(diagram[2], ensemble) # Equation-of-state
diagram
end
# ╔═╡ c6dcddcf-0d12-4e9e-b7fa-f761f5f2cacf
let
using Interpolations
extractname(df) = string(first(df.graphtype))
atzero(λ, x) = LinearInterpolation(λ, x)(zero(eltype(λ)))
function yticks()
ϵ = DataFrame(states).:ϵ
scale = sort(unique(round.(Int, log10.(ϵ))))
ticks = (10.) .^ scale
labels = ["1:$(10^-s)" for s in scale]
ticks, labels
end
xlim = 3
p = plot(;
xticks = -10:10, xlim = (-xlim, xlim), yscale = :log,
yticks = yticks(), xlabel = "z", ylabel = "Sample efficiency"
)
for df in states
graphtype = extractname(df)
μ = @df df atzero(:λ, :μᶜ)
σ = @df df atzero(:λ, :σᶜ)
z = @. (df.μᶜ - μ)/σ
@df df plot!(p, z, :ϵ; label = graphtype, lw = 3)
end
vspan!(p, [-2, 2]; fillalpha = .1, fillcolor = :grey, label = false)
plot!(p; title = "Worst-case sampling efficiency")
end
# ╔═╡ fb63305f-09d7-4111-8684-4fbab8c192df
md"""
!!! warning ""
Notebook still under active construction.
"""
# ╔═╡ 7cd47b87-fceb-4995-970c-ae23e250fb46
md"""
# **SCABMs**: Softly Constrained Agent-Based Models
Marnix Van Soom & Bart de Boer `{marnix,bart}@ai.vub.ac.be` [[VUB AI lab]](https://ai.vub.ac.be/abacus/)
!!! note "Abstract"
In this notebook we look at the effect of constraining the network clustering coefficient $C$ on the success rate $S$ of a simple language game played by agents on networks, in order to gauge the influence of social network structure on emerging language conventions.
This is an example of a generic class of experiments on agent-based models (ABMs) made possible by a simple and flexible conceptual framework called softly constrained agent-based models (SCABMs), which we introduce and discuss here.
"""
# ╔═╡ 8a8a6115-a178-4b97-bc77-371a85288363
md"""
## Introduction and motivation
An **agent-based model (ABM)** can be thought of as a computer program that implicitly defines a probability distribution $q(x)$ over all of its possible outputs $x$.
It is essentially a high-dimensional and ingeniously crafted probability distribution from which it is easy to sample an $x \sim q(x)$, but for which the *value* of the probability density function $q(x)$ itself is unavailable.
Typically, we want to capture the behavior of the ABM by evaluating just a few well-chosen statistics that project the high-dimensional $x$ down to something lower-dimensional and simpler.
Luckily, we don't need to know the value of $q(x)$ to do this: we just estimate the expected value of the statistics by averaging them over independent runs of the ABM.
Formally, given a statistic of choice $f(x)$ and a set of $M$ samples ${\bf x} = \{x_m\}$, where each $x_m \sim q(x)$ is the output of an independent run of the ABM, the expected value $F$ is approximated by Monte Carlo integration:
````math
F := \langle f(x) \rangle_q \equiv \int dx\ q(x) f(x) \approx {1 \over M} \sum_{m=1}^M f(x_m)
````
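As a minimal sketch (with a hypothetical `run_abm()` standing in for one run of the ABM and `f` for the statistic of choice), this estimate is just a sample mean:
````julia
# Hypothetical stand-ins: run_abm() draws one x ~ q(x); f is the statistic of choice
x = [run_abm() for m in 1:M]  # M independent runs of the ABM
F = mean(f.(x))               # Monte Carlo estimate of ⟨f(x)⟩ under q
````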
To obtain $F$, therefore, the ABM is run in what we define as the "**forward**" direction, schematically represented as:
"""
# ╔═╡ b1d4c305-1f87-4a90-9d39-7519b1bb11ab
md"""
In words, the diagram reads: running an ABM $q(x)$ repeatedly produces a set of samples $\bf x$, from which the expected value of a statistic $F := \langle f(x) \rangle_q$ can be estimated. The object of interest $F$ is thus obtained from left to right.
"""
# ╔═╡ a598718d-b65f-41d3-829f-bbb82888002b
md"""
Now we introduce the idea of running the ABM "**backwards**":
"""
# ╔═╡ c9fda3c0-882c-4f48-a225-f3e67d107f13
md"""
The object of interest is now the set of samples ${\bf x'} = \{x_\ell'\}$, which implicitly represents a new probability distribution $p(x)$.
In the backward direction the expectation
````math
\langle f(x) \rangle_p \equiv \int dx\ p(x) f(x) := F'
````
is *constrained* to take a given value $F' \neq F$, and we now solve for the probability distribution $p(x)$ that satisfies this soft constraint while remaining as close as possible to the prior $q(x)$.
We show in the appendix that the optimal solution to this problem can be approximated for some range of $F'$ by a simple reweighting of the original samples $\bf x$, from which the $\bf x'$ can be obtained by standard resampling such that roughly each $x_\ell' \sim p(x)$.
We pay for the efficiency of this approximation by a corresponding cost in samples: we can get only $L = \epsilon M$ samples $\bf x'$ from $p(x)$ given $M$ samples $\bf x$ from $q(x)$, where $\epsilon \in (0,1]$ is the sampling efficiency (for example, with $M = 10000$ samples and $\epsilon = 0.01$ we get only $L = 100$ samples).
Its value depends strongly on $F'$.
Returning to the second diagram above, we may apply the same logic used in the first paragraph and conclude that the obtained $\bf x'$ represent the probability distribution $p(x)$ of a new computer program automatically derived from the original ABM, which we call a **softly constrained agent-based model (SCABM)**. "Softly constrained" means that we only constrain the *expectation* of $f(x)$, rather than demanding that $f(x)$ take on a certain "hard" value.
As the name suggests, a SCABM is itself just another ABM, except that we don't have access to its source code -- we can only study it indirectly via the $\bf x'$.
In particular, using the same rule as above we can estimate the expected value of any statistic
````math
G := \langle g(x) \rangle_p \equiv \int dx\ p(x) g(x) \approx {1 \over L} \sum_{\ell=1}^L g(x'_\ell)
````
This expectation $G$ is a function of the constraint $F'$, so $G \equiv G(F')$.
**This allows us to characterize the behavior of an ABM by studying the effect of constraining the expected value of one statistic on the expected value of another statistic.**
In this notebook, for example, we will look at the effect of constraining the average network clustering coefficient $C$ on the average success rate $S$ of a simple language game played by agents on random networks, in order to gauge certain aspects of the influence of social network structure on emerging language conventions.
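As a minimal sketch of the reweighting estimator used in this notebook (assuming samples `x`, statistics `f` and `g`, and a multiplier `λ` tuned to match the constraint $F'$, as `weigh!` below does with `softmax`):
````julia
using StatsFuns: softmax

# Reweight the original ensemble: weight of sample m ∝ exp(λ f(x_m)); softmax normalizes
w = softmax(λ .* f.(x))
G = sum(w .* g.(x))  # importance-weighted estimate of ⟨g(x)⟩ under p
````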
"""
# ╔═╡ 8d827b33-d299-48fb-b3e6-b31dc26b9fe6
md"""
!!! terminology "Tip"
Before we continue, here are some tips for using this [Pluto notebook](https://github.com/fonsp/Pluto.jl).
- Click the 📕 book icon on the right to see a table of contents.
- (Most) cells with source code are hidden to keep this notebook legible. To see the source, run this notebook (click *Edit or run this notebook*) in Pluto and click the 👁️ eye when hovering over a cell.
- When running this notebook for the first time, Pluto will take some time to install the relevant libraries and compile the code on the first pass. After this process, things should be (very) fast.
"""
# ╔═╡ b4d0347f-4074-46da-a3d0-3ae9b7045cf8
md"""
## Playing naming games with SCABMs
To show that SCABMs are useful and computationally feasible, we investigate the influence of softly constraining the clustering coefficient $C$ on the success rate $S$ of a simple language game called the **naming game** played by the agents on three different network types.
!!! note
See this excellent [Medium blog post](https://medium.com/@ramongarciaseuma/dynamics-for-language-conventions-naming-game-8198c8383197) for a clear introduction to the naming game of Dall’Asta+ (2006).
The code in this notebook is equivalent to the code in the blog post; only our implementation of the network types differs.
"""
# ╔═╡ 017327e7-7c7d-40b5-8573-ec780f42384b
md"""
### Description of the ABM
Given a network of $N$ agents, the naming game is a simple model of how a set of shared conventions can emerge without obvious high-level intervention.
Agents need to agree on a word for one global concept $X$.
Initially, each agent comes up with a random word for $X$.
Then, taking turns, each agent communicates with a random neighbor in the network and picks a random word from their lexicon to describe $X$.
The communication can be a success or a failure:
- Success: their neighbor has heard this word before (it is in their lexicon) and both agents agree that this is the "right" word for $X$; their lexicons collapse to just this one word.
- Failure: their neighbor has not heard this word before (it is not in their lexicon) and the word is simply appended to the neighbor's lexicon.
The game is played for $T$ timesteps (during one timestep, each agent communicates once) after which the **success rate $S$** is evaluated, which is the fraction of successful communications during the last timestep.
As mentioned before, we consider three different network models to capture different types of communication in social networks.
Each network has $N$ nodes (the number of agents) and is calibrated to the Erdős–Rényi model to have roughly $E \approx N(N-1)/4$ connections.
These networks are not sparse, since $E \sim O(N^2)$.
The models are:
1. [Erdős–Rényi](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model):
A network where (in this experiment) each possible connection between a pair of agents is independently present with probability $p = 0.5$.
This represents a random social network with little "intentional" structure.
2. [Watts-Strogatz](https://en.wikipedia.org/wiki/Watts%E2%80%93Strogatz_model):
A "small world" network where agents are mostly connected within clusters, but sometimes to agents in "distant" clusters.
3. [Barabási–Albert](https://en.wikipedia.org/wiki/Barab%C3%A1si%E2%80%93Albert_model):
A "scale-free" network where a few popular nodes have lots of connections, while the majority has relatively few.
We will characterize these networks by a single statistic: the **clustering coefficient $C$**.
More precisely, it is defined as the [average local clustering coefficient](https://en.wikipedia.org/wiki/Clustering_coefficient):
````math
C = {1 \over N} \sum_{n=1}^N C_n
````
where $C_n \in [0,1]$ is the local clustering coefficient of node (agent) $n$.
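In Graphs.jl terms this is `mean(local_clustering_coefficient(g))`, as computed in `initialize` below.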
Below we show some example networks for $N = 10$ agents where the nodes are colored according to $C_n$ (darker means higher).
"""
# ╔═╡ 49458fbd-e8e9-4e67-872f-84abd04c56f2
md"""
Finally, an important design choice in our ABM is that the number of agents $N$ is not constant but varies per run.
The $N_0$ parameter of the ABM specifies the *expected number* of agents, and for a given run we sample
````math
N \sim \text{Poisson}(N_0)
````
so roughly $N \simeq N_0 \pm \sqrt{N_0}$ (for $N_0 = 30$, that is $N \approx 30 \pm 5.5$).
"""
# ╔═╡ 4efd79a5-3e7a-4b3f-8c11-c61a3c9b0e0b
md"""
### Running the ABM
"""
# ╔═╡ a2a71645-aaa3-4462-b6f8-bf8d7d9d2d4a
md"""
!!! terminology "Tip"
Pluto notebooks are reactive, so you can change the ABM parameters below and execute the cell. The changes will propagate throughout the notebook.
"""
# ╔═╡ d91ed32c-2092-4aaa-ba50-13d93d0da12b
md"""
### Unleashing the SCABMs
We now constrain the expected value (average) of the clustering coefficient $C$ to see how this influences the success rate $S$ at time $T$.
"""
# ╔═╡ 4f4c249a-eaf4-46a9-a5f3-0887715a119c
states
# ╔═╡ 23826464-15bd-4b29-8540-32982e07b709
begin
function sample_graph(ensemble, graphtype)
e = ensemble[(graphtype,)]
i = sample(1:nrow(e), Weights(e.weight, 1.))
s = e[i,:]
g = sample_calibrated_graph(s.N, graphtype; seed = s.graphseed)
g, s
end
function mark_diagram!(p, C, S, markershape)
scatter!(p, [C], [S], markershape = markershape, markercolor = :black)
end
function visualize!(ensemble, graphtype, λ, diagram = nothing; ms = :star4, title = true)
weigh!(ensemble, λ)
g, s = sample_graph(ensemble, graphtype)
isnothing(diagram) || mark_diagram!(diagram[2], s.C, s.S, ms) # mark_diagram! wraps the scalars in vectors itself
C, S = round.([s.C, s.S], sigdigits=2)
c = local_clustering_coefficient(g)
colors = get.(Ref(COLORSCHEME), c)
name = "$graphtype ⋅ C = $C ⋅ S = $S"
p = graphplot(
g; title = title ? name : "", nodecolor = colors,
nodesize = .2, titlefontsize = 10
)
savefig(p, "$name.png")
p
end
end
# ╔═╡ 4b7f2306-a4c0-42c7-85c2-ea437bc61e50
[
visualize!(ensemble, :WattsStrogatz, -400, diagram; ms = :star4),
visualize!(ensemble, :WattsStrogatz, 0, diagram; ms = :star4),
visualize!(ensemble, :WattsStrogatz, +400, diagram; ms = :star4),
]
# ╔═╡ 08c63f0e-5a64-45e3-97be-c1fa958cc661
visualize!(ensemble, :ErdosRenyi, 0., diagram; ms = :star5)
# ╔═╡ c62e44d4-8501-45f6-bdb0-e357e4c363fb
visualize!(ensemble, :BarabasiAlbert, 0., diagram; ms = :star6)
# ╔═╡ dd40ac6e-d08f-4663-b6c5-9809493e9612
let
savefig(diagram, "diagram.png")
diagram
end
# ╔═╡ 6ab8e802-d879-4ec0-a86b-ea1797d34c2b
md"""
As $F' \rightarrow F$, $p(x) \rightarrow q(x)$, as required.
When $F'$ lies far outside the range natural to $q(x)$, the sample efficiency becomes very low, so we must be careful: the SCABM will turn into a SCAM unless we have very, very many samples.
By smoothly varying the soft constraint $F'$, $G(F')$ traces a path known in statistical physics as an *equation of state*.
The resulting $S(C)$ curves are shown below for Erdős–Rényi, Watts-Strogatz and Barabási–Albert networks.
"""
# ╔═╡ 51cee280-ee61-44af-8615-879750f823b4
let
p = plot(; xlabel = "Clustering coefficient C", ylabel = "Success rate S", legend = false, title = "Equations of state")
plotstates!(p, ensemble)
p
end
# ╔═╡ fda9c33b-8657-4d8b-b74e-d0a035dd67eb
md"""
## Discussion
!!! warning
Under construction.
A well-known difficulty in network science is that many standard network measures are strongly correlated (for example, network assortativity and the degree sequence (Peixoto 2020)).
One advantage of the SCABM formalism is that the choice and number of statistics -- whether of the "explanatory" $F$ type or "dependent" $G$ type -- is completely free, and multiple statistics and constraints are possible.
Another advantage is the extra control over the ABM: we do not need to know how to generate networks with a given clustering coefficient $C$, not even on average; a SCABM does this automatically.
!!! note
In effect this method gives us a new set of knobs that we can turn at will to see how the system (ABM) reacts, *without being able to control them explicitly*.
This widens the scope of ABMs considerably.
For example, (1) neural agents with emergent features: these features can now be constrained, and their effects on the system studied, as if they were ABM parameters under the experimenter's control.
And (2): generating networks with a given (average) clustering coefficient, while possible, is not trivial (e.g., Herrera & Zufiria (2011)); and what if we wanted to constrain another network measure, such as [betweenness centrality](https://en.wikipedia.org/wiki/Betweenness_centrality)?
!!! terminology "Advantages"
- A very simple conceptual framework that is computationally efficient; it scales in linear time
- Richer behavior than a straight line, with adaptive error bars (not shown) -- the error bars show the width of the distribution of the "effect statistic" ($G$), not some kind of regression error!
- You get weighted samples, so you can go beyond moments and calculate, e.g., weighted quantiles for any statistic, or answer probabilistic questions such as "what is the probability that $g/f < 3$?"
- You can resample from the new SCABM and inspect the samples manually to get some intuition
- Multiple constraints can be active at the same time (like temperature and volume for an ideal gas)
!!! warning "Disadvantages/caveats"
- Need to be able to sample the ABM many times
- Can realistically only constrain within one to at most three sigma $\sigma_F$
- **Everything depends on the expressivity of the original ABM!** A SCABM is only as expressive and powerful as the underlying original ABM. If there is no correlation between $F$ and $G$ and the signal is weak, the SCABM will turn into its evil twin SCAM and give you back smooth and likeable curves which are just noise. But this can easily be detected by generating another ensemble and checking whether the $G(F)$ curves are similar (for example, just change the `SEED` variable in this notebook and rerun). Increasing the number of runs `M` diminishes the variability.
To do:
- Investigate variance of different estimators and communicate theoretical results
- More interesting models
- Verify predictions from $p(x)$ on unseen data
- Beyond reweighting and resampling: reconstruct the data with nested sampling in a high-dimensional space ($O(100)$ max)
"""
# ╔═╡ a0aad24f-1f6b-4f2a-9f54-b61e0609eb76
md"## Appendices"
# ╔═╡ 15a95e15-905f-4996-8000-f4cf3d60aac0
md"""
### Derivations
Given the ABM $q(x)$, we want to find a new $p(x)$, the SCABM, under which the expected value (i.e., the average) of $f(x)$ is $F'$, while being as "close" as possible to the prior $q(x)$ as measured by the Kullback-Leibler divergence.
More precisely:
!!! note "Problem"
Given $q(x)$, $f(x)$ and $F'$, find
````math
p(x) = \text{argmin}_{p(x)} D_\text{KL}(p|q)
````
while satisfying
````math
\langle f(x) \rangle_p \equiv \int dx\ p(x) f(x) := F'
````
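A sketch of the standard solution: introducing a Lagrange multiplier $\lambda$ for the constraint (plus one for normalization), minimizing $D_\text{KL}(p|q)$ yields an exponential reweighting of the prior,
````math
p(x) = {q(x) \exp(\lambda f(x)) \over Z(\lambda)}, \qquad Z(\lambda) = \int dx\ q(x) \exp(\lambda f(x)),
````
where $\lambda$ is tuned such that $\langle f(x) \rangle_p = F'$. This is the form used in the sample efficiency appendix below, and it is what the `softmax` reweighting in `weigh!` implements on the ensemble.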
"""
# ╔═╡ 917b1c65-e48f-4e14-aff1-53cef88231a3
md"""
### Robustness and sensitivity
With the Poisson sampling for $N$ in place, the SCABM method gave consistent results throughout the following variations:
- Varying $N_0 \sim O(100)$
- Varying $T \sim O(10)$
- [Clustering statistic](https://en.wikipedia.org/wiki/Clustering_coefficient) used: average local clustering coefficient [used here] or global clustering coefficient
- Which importance sampling estimator to use: reweighting samples [used here] or adjusting the statistic by $\exp(\lambda C_i)/Z(\lambda)$
- Which estimator of $C$ to use: reweighting samples [used here] or Automatic Differentiation Nested Sampling, $\langle C \rangle = {d \over d\lambda} \log Z(\lambda)$ (Van Soom & de Boer 2022, in preparation)
"""
# ╔═╡ b604cefb-8dc6-46d9-b456-7f19af13f067
md"""
### Mediating influence of the number of agents
The two plots below show how $C$ influences $S$: through $N$, the number of agents.
Success rate is **negatively correlated** with $N$ because
- Although the success rate is "corrected for" $N$ (it is divided by $N$, and one model step is a sweep through all $N$ agents), it is still plausible that more agents simply need relatively more time to converge
- And more importantly: since $E = O(N^2)$, the number of agents strongly influences the average node degree, and the more connections an agent has, the slower the convergence (fewer connections mean a faster collapse of the lexicons)
"""
# ╔═╡ f6e4d332-c8b8-425e-a1fe-83378552836c
@df DataFrame(ensemble) cornerplot(
[:N :S]; label = ["Number of agents N", "Success rate S"], group = :graphtype, compact = true, size=(400,300), labelfontsize=8
)
# ╔═╡ 31c9068f-90f3-4185-aa4f-17c11563047b
md"""
The plot below is a good example of the power of SCABMs: an intricate dependence that cannot be expressed with a simple linear regression.
The number of agents $N$ is **correlated** with the clustering coefficient $C$:
- **Erdős–Rényi and Barabási–Albert**: A smaller number of agents $N$ allows for a larger variation in $C$ (because as $N$ increases, the clustering coefficient of network samples converges in the usual $O(N^{-1/2})$ way)
- **Watts-Strogatz**: the same effect as in the previous point applies, but there is also a real trend: Watts-Strogatz networks are designed to have high local clustering yet short average shortest path lengths, so the positive correlation with $N$ is essentially by design.
"""
# ╔═╡ e8c62155-902b-41f6-bc8d-6f8d74ed352a
let
p = @df DataFrame(ensemble) cornerplot(
[:C :N]; label = ["Clustering coefficient C", "Number of agents N"], group = :graphtype, compact = true, size=(400,300), labelfontsize=8
)
function plot_nagent_states!(p, ensemble; c = 10, n = 100, z = 3)
σᶜ = project!(ensemble, 0.).σᶜ
λ = lambdas(minimum(σᶜ); c = c, n = n)
s = project_lambdas!(ensemble, λ)
for (k, sᵍ) in pairs(s)
μ, σ = getrange!(ensemble, k.graphtype)
keep = @. μ - z*σ < sᵍ.μᶜ < μ + z*σ
sᵍ = sᵍ[keep, :]
Nmax = N₀ + 4*√N₀
annotate!(p, μ, Nmax, text(k.graphtype, FONT, 6))
@df sᵍ plot!(p, :μᶜ, :μⁿ; linewidth=3, linecolor = :blue)
end
s
end
plot_nagent_states!(p[2], ensemble)
#plot!(p[1]; title = "The influence of C on S is clearly mediated by N")
p
end
# ╔═╡ dcbfc502-4b53-40e4-b735-9a2aadc96538
md"### References"
# ╔═╡ 6a05d5cf-a51d-47ba-85e1-14c16b369a19
md"---"
# ╔═╡ 73f75064-a6d2-4ae8-bcd6-52ec610315ee
md"""
#### Resampling into a SCABM
"""
# ╔═╡ c2658679-5a20-446d-90d5-644936323b8f
"""
Sample `ν` equally weighted samples from an ensemble of weighted
(posterior) samples using the staircase sampling method from
Sivia & Skilling (2006, p. 197).
"""
function staircase_sampling(
ensemble, ν; weight = ensemble.weight, rng = Random.GLOBAL_RNG
)
νmax = 1/maximum(weight)
ν > νmax && error("Maximum number of samples is $(floor(νmax))")
S = rand(rng)
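# Random offset: sample i is then selected with probability ν*weight[i] (≤ 1, since ν ≤ νmax)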
current = 0
samples = zeros(Int, ν)
for i in 1:nrow(ensemble)
S += ν*weight[i]
if floor(S) > current
current += 1
samples[current] = i
end
end
return ensemble[samples,:]
end
# ╔═╡ 0957f4e8-2d3a-4c09-b0bc-453d9c2d005a
let
# Sanity check: the weighted ensemble mean of S should match the plain mean over the resampled SCABM
e = weigh!(ensemble, -10.)
e = e[(:BarabasiAlbert,)]
ν = 1000
SCABM = staircase_sampling(e, ν)
wmean(e.S, e.weight), mean(SCABM.S)
end
# ╔═╡ d5409b02-89a5-45e9-bc78-67e566112869
md"""
#### Sample efficiency
!!! warning
The sample efficiency below is very pessimistic, by one or two orders of magnitude.
The theoretical sample efficiency is much better, maintaining a 1:10 efficiency throughout two sigma (see below).
The real efficiency lies somewhere in between and needs more research.
An approximation of the theoretical sample efficiency is given below; it can also be calculated with Automatic Differentiation Nested Sampling (ADNS) (Van Soom & de Boer, in preparation).
"""
# ╔═╡ d6683d9e-d1c2-44c8-9422-7c9e09d0e558
md"""
We calculate the sample efficiency from the relative entropy, $\epsilon(\lambda) = e^{-D_\text{KL}(p|q)}$, read as "you need $e^{D_\text{KL}(p|q)}$ samples from $q$ to get one from $p$", where
````math
D_\text{KL}(p|q) = \int dx\ p(x) \log {p(x) \over q(x)}.
````
Substituting $p(x) = q(x) \exp(\lambda f(x)) / Z(\lambda)$ gives
````math
D_\text{KL}(p|q) = \int dx\ p(x) \left( \lambda f(x) - \log Z(\lambda) \right) = \lambda \langle f(x) \rangle_p - \log Z(\lambda),
````
where $\langle f(x) \rangle_p = F(\lambda)$.
Given that $p(f') = \int dx\ q(x) \delta(f' - f(x))$ is almost a normal PDF $N(f'; \mu,\sigma^2)$ for our hyperparameter settings, we have
````math
\log Z(\lambda) \simeq \log \int df\ \exp(\lambda f)\ N(f; \mu,\sigma^2) = \lambda \mu + \lambda^2 \sigma^2/2,
````
which is nearly linear when $\sigma^2$ is small (as is the case here); $N(f; \mu,\sigma^2)$ then acts like a delta function.
So to second order
````math
D_\text{KL}(p|q) = \lambda \langle f(x) \rangle_p - \log Z(\lambda) \approx \lambda (F(\lambda) - F(0)) - \lambda^2 \sigma^2/2,
````
where $\lambda \sigma \sim O(1)$, so we cannot expect this approximation to hold over a wide range of $\lambda$.
Indeed, this approximation breaks down fast, so it is better to estimate $D_\text{KL}(p|q)$ directly with nested sampling, with $\pi(f) = q(f)$ and $L(f) = \lambda f$.
"""
# ╔═╡ ee4c5bca-92e6-4ca5-b957-a11511fa1438
let
extractname(df) = string(first(df.graphtype))
atzero(λ, x) = LinearInterpolation(λ, x)(zero(eltype(λ)))
function yticks()
ϵ = DataFrame(states).:ϵ
scale = sort(unique(round.(Int, log10.(ϵ))))
ticks = (10.) .^ scale
labels = ["1:$(10^-s)" for s in scale]
ticks, labels
end
xlim = 3
p = plot(;
xticks = -10:10, xlim = (-xlim, xlim), yscale = :log,
yticks = yticks(), xlabel = "z", ylabel = "Sample efficiency", ylim = (1e-4, 1)
)
for df in states
graphtype = extractname(df)
μ = @df df atzero(:λ, :μᶜ)
σ = @df df atzero(:λ, :σᶜ)
z = @. (df.μᶜ - μ)/σ
D = @df df :λ.*(:μᶜ .- μ) - (:λ.*σ).^2/2
ϵ = exp.(-D)
@df df plot!(p, z, ϵ; label = graphtype, lw = 3)
end
vspan!(p, [-2, 2]; fillalpha = .1, fillcolor = :grey, label = false)
plot!(p; title = "Theoretical sampling efficiency: approximation")
end
# ╔═╡ ed907683-f46b-4c6b-9a8a-8263191544d1
md"""
The same curves calculated with automatic differentiation nested sampling (in `old.jl`) look good, with efficiencies around 1:10 for $|z| < 2$. So in general we have a very positive picture (at least for a single constraint).
Our calculation of $\epsilon$ from the staircase sampling weights depends on the number of posterior samples $M$ (because we are trying to resample $M$ weighted samples), so it is a bit sketchy.
The "true" sampling efficiency is somewhere between the theoretical upper bound (i.e., based on $-D_\text{KL}(p|q)$) and this more practical one (i.e., based on staircase resampling).
"""
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
Agents = "46ada45e-f475-11e8-01d0-f70cc89e6671"
ColorSchemes = "35d6a980-a343-548e-a6ea-1d62b119f2f4"
DataFramesMeta = "1313f7d8-7da2-5740-9ea0-a2ca25f37964"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
DrWatson = "634d3b9d-ee7a-5ddf-bec9-22491ea816e1"
GraphRecipes = "bd48cda9-67a9-57be-86fa-5b3c104eda73"
Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
Interpolations = "a98d9a8b-a2ab-59e6-89dd-64a1c18fca59"
LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
ShortCodes = "f62ebe17-55c5-4640-972f-b59c0dd11ccf"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
StatsFuns = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd"
[compat]
Agents = "~5.2.0"
ColorSchemes = "~3.18.0"
DataFramesMeta = "~0.11.0"
Distributions = "~0.25.59"
DrWatson = "~2.9.1"
GraphRecipes = "~0.5.9"
Graphs = "~1.7.0"
Interpolations = "~0.13.6"
LaTeXStrings = "~1.3.0"
PlutoUI = "~0.7.39"
ShortCodes = "~0.3.3"
StatsBase = "~0.33.16"
StatsFuns = "~0.9.18"
StatsPlots = "~0.14.34"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[AbstractFFTs]]
deps = ["ChainRulesCore", "LinearAlgebra"]
git-tree-sha1 = "6f1d9bc1c08f9f4a8fa92e3ea3cb50153a1b40d4"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.1.0"
[[AbstractPlutoDingetjes]]
deps = ["Pkg"]
git-tree-sha1 = "8eaf9f1b4921132a4cff3f36a1d9ba923b14a481"
uuid = "6e696c72-6542-2067-7265-42206c756150"
version = "1.1.4"
[[AbstractTrees]]
git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.3.4"
[[Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "af92965fb30777147966f58acb05da51c5616b5f"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.3"
[[Agents]]
deps = ["CSV", "DataFrames", "DataStructures", "Distributed", "Downloads", "Graphs", "JLD2", "LazyArtifacts", "LightOSM", "LinearAlgebra", "Pkg", "ProgressMeter", "Random", "Requires", "Scratch", "StatsBase"]
git-tree-sha1 = "4ffabc883a9021fb57686cee925a0cc501e3c15b"
uuid = "46ada45e-f475-11e8-01d0-f70cc89e6671"
version = "5.2.0"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[ArnoldiMethod]]
deps = ["LinearAlgebra", "Random", "StaticArrays"]
git-tree-sha1 = "62e51b39331de8911e4a7ff6f5aaf38a5f4cc0ae"
uuid = "ec485272-7323-5ecc-a04f-4719b315124d"
version = "0.2.0"
[[Arpack]]
deps = ["Arpack_jll", "Libdl", "LinearAlgebra", "Logging"]
git-tree-sha1 = "91ca22c4b8437da89b030f08d71db55a379ce958"
uuid = "7d9fca2a-8960-54d3-9f78-7d1dccf2cb97"
version = "0.5.3"
[[Arpack_jll]]
deps = ["Libdl", "OpenBLAS_jll", "Pkg"]
git-tree-sha1 = "e214a9b9bd1b4e1b4f15b22c0994862b66af7ff7"
uuid = "68821587-b530-5797-8361-c406ea357684"
version = "3.5.0+3"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[AxisAlgorithms]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "WoodburyMatrices"]
git-tree-sha1 = "66771c8d21c8ff5e3a93379480a2307ac36863f7"
uuid = "13072b0f-2c55-5437-9ae7-d433b7a33950"
version = "1.0.1"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[Bzip2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2"
uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0"
version = "1.0.8+0"
[[CSV]]
deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", "SentinelArrays", "Tables", "Unicode", "WeakRefStrings"]
git-tree-sha1 = "873fb188a4b9d76549b81465b1f75c82aaf59238"
uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
version = "0.10.4"
[[Cairo_jll]]
deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "4b859a208b2397a7a623a03449e4636bdb17bcf2"
uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a"
version = "1.16.1+1"
[[Chain]]
git-tree-sha1 = "339237319ef4712e6e5df7758d0bccddf5c237d9"
uuid = "8be319e6-bccf-4806-a6f7-6fae938471bc"
version = "0.4.10"
[[ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "9489214b993cd42d17f44c36e359bf6a7c919abf"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.15.0"
[[ChangesOfVariables]]
deps = ["ChainRulesCore", "LinearAlgebra", "Test"]
git-tree-sha1 = "1e315e3f4b0b7ce40feded39c73049692126cf53"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.3"
[[Clustering]]
deps = ["Distances", "LinearAlgebra", "NearestNeighbors", "Printf", "SparseArrays", "Statistics", "StatsBase"]
git-tree-sha1 = "75479b7df4167267d75294d14b58244695beb2ac"
uuid = "aaaa29a8-35af-508c-8bc3-b662a17a0fe5"
version = "0.14.2"
[[CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[ColorSchemes]]
deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "Random"]
git-tree-sha1 = "7297381ccb5df764549818d9a7d57e45f1057d30"
uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4"
version = "3.18.0"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "a985dc37e357a3b22b260a5def99f3530fb415d3"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.2"
[[ColorVectorSpace]]
deps = ["ColorTypes", "FixedPointNumbers", "LinearAlgebra", "SpecialFunctions", "Statistics", "TensorCore"]
git-tree-sha1 = "d08c20eef1f2cbc6e60fd3612ac4340b89fea322"
uuid = "c3611d14-8923-5661-9e6a-0046d554d3a4"
version = "0.9.9"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "b153278a25dd42c65abbf4e62344f9d22e59191b"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.43.0"
[[CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[Contour]]
deps = ["StaticArrays"]
git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7"
uuid = "d38c429a-6771-53c6-b99e-75d170b6e991"
version = "0.5.7"
[[Crayons]]
git-tree-sha1 = "249fe38abf76d48563e2f4556bebd215aa317e15"
uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
version = "4.1.1"